
Tutorial: C++ xfs_rw_ilock Function Code Examples

This tutorial on C++ xfs_rw_ilock code examples is meant to be practical; we hope it helps you.

This article collects typical usage examples of the xfs_rw_ilock function, drawn from the Linux kernel's XFS filesystem code. If you have been wondering what exactly xfs_rw_ilock does, how to call it, or what real uses of it look like, the curated examples below may help.

The following presents 24 code examples of the xfs_rw_ilock function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
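Before diving into the examples, note what xfs_rw_ilock actually is: these snippets come from fs/xfs/xfs_file.c in the Linux kernel (despite the C++ label, this is kernel C), from releases before the wrappers were removed (around 4.11) when XFS switched its IOLOCK to the VFS i_rwsem. As a hedged sketch for orientation only, the wrappers in 4.x-era kernels looked roughly like this; older 3.x kernels took &VFS_I(ip)->i_mutex with mutex_lock() instead of calling inode_lock():

static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	/* The exclusive IOLOCK also takes the VFS inode lock. */
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	/* Release in the opposite order of xfs_rw_ilock(). */
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	/* Downgrade exclusive to shared without fully unlocking. */
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

The recurring pattern in the examples below follows from this: callers take XFS_IOLOCK_SHARED for the common path, and drop and retake the lock as XFS_IOLOCK_EXCL only when the page cache must be invalidated or blocks beyond EOF must be zeroed.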

Example 1: xfs_file_buffered_aio_write

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	loff_t			pos = iocb->ki_pos;
	size_t			count = iov_iter_count(from);

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_truncate(from, count);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

Developer ID: AkyZero, project: wrapfs-latest, lines: 60


Example 2: xfs_file_splice_read

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

Developer ID: ARMWorks, project: FA_2451_Linux_Kernel, lines: 31


Example 3: xfs_file_aio_write_checks

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolocked held either shared and exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

Developer ID: ARMWorks, project: FA_2451_Linux_Kernel, lines: 61


Example 4: xfs_aio_write_newsize_update

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
	struct xfs_inode	*ip)
{
	if (ip->i_new_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

Developer ID: 119-org, project: hi3518-osdrv, lines: 18


Example 5: xfs_file_aio_write_checks

STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
	}

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	return file_remove_suid(file);
}

Developer ID: MiniBlu, project: cm11_kernel_htc_msm8974a3ul, lines: 38


Example 6: xfs_file_buffered_aio_read

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

Developer ID: acton393, project: linux, lines: 16


Example 7: xfs_file_aio_write_checks

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	new_size = *pos + *count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 */
	if (*pos > ip->i_size)
		error = -xfs_zero_eof(ip, *pos, ip->i_size);

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

Developer ID: rrowicki, project: Chrono_Kernel-1, lines: 52


Example 8: xfs_file_buffered_aio_write

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	size_t			count = ocount;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);

	/*
	 * if we just got an ENOSPC, flush the inode now we aren't holding any
	 * page locks and retry *once*
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (!ret)
			goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

Developer ID: ARMWorks, project: FA_2451_Linux_Kernel, lines: 46


Example 9: xfs_file_splice_read

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	/*
	 * DAX inodes cannot ues the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,
					       flags);
		goto out;
	}

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
out:
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
	return ret;
}

Developer ID: sjp38, project: linux.doc_trans_membarrier, lines: 42


Example 10: xfs_file_dax_read

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

Developer ID: acton393, project: linux, lines: 21


Example 11: xfs_aio_write_isize_update

STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
					*ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

Developer ID: 119-org, project: hi3518-osdrv, lines: 23


Example 12: xfs_file_dio_aio_write

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless there page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			goto out;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);
//......... portions of the code omitted here .........

Developer ID: ARMWorks, project: FA_2451_Linux_Kernel, lines: 101


Example 13: xfs_file_aio_write_checks

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	xfs_fsize_t		*new_sizep,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
	*new_sizep = 0;
restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write. There is no need to issue zeroing if another in-flght IO ends
	 * at or before this one If zeronig is needed and we are currently
	 * holding the iolock shared, we need to update it to exclusive which
	 * involves dropping all locks and relocking to maintain correct locking
	 * order. If we do this, restart the function to ensure all checks and
	 * values are still valid.
	 */
	if ((ip->i_new_size && *pos > ip->i_new_size) ||
	    (!ip->i_new_size && *pos > ip->i_size)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, ip->i_size);
	}

	/*
	 * If this IO extends beyond EOF, we may need to update ip->i_new_size.
	 * We have already zeroed space beyond EOF (if necessary).  Only update
	 * ip->i_new_size if this IO ends beyond any other in-flight writes.
	 */
	new_size = *pos + *count;
	if (new_size > ip->i_size) {
		if (new_size > ip->i_new_size)
			ip->i_new_size = new_size;
		*new_sizep = new_size;
	}

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

Developer ID: aaron856, project: linux-3.x, lines: 76


Example 14: xfs_file_dio_aio_write

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless there page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;
	iov_iter_truncate(from, count);

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						    pos, -1);
		if (ret)
			goto out;
		truncate_pagecache_range(VFS_I(ip), pos, -1);
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
//......... portions of the code omitted here .........

Developer ID: AkyZero, project: wrapfs-latest, lines: 101


Example 15: xfs_file_aio_read

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
	if (ret < 0)
		return ret;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = mp->m_super->s_maxbytes - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceeed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

Developer ID: ARMWorks, project: FA_2451_Linux_Kernel, lines: 87


Example 16: xfs_file_aio_write

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error, error2;

		xfs_rw_iunlock(ip, iolock);
		error = filemap_write_and_wait_range(mapping, pos, end);
		xfs_rw_ilock(ip, iolock);

		error2 = -xfs_file_fsync(file,
					 (file->f_flags & __O_SYNC) ? 0 : 1);
		if (error)
			ret = error;
		else if (error2)
			ret = error2;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

Developer ID: 119-org, project: hi3518-osdrv, lines: 65


Example 17: xfs_file_dio_aio_write

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	if (mapping->nrpages) {
		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

Developer ID: 119-org, project: hi3518-osdrv, lines: 89


Example 18: xfs_file_read_iter

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(mp, xs_read_calls);

	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceeed concurrently without
	 * serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

Developer ID: sjp38, project: linux.doc_trans_membarrier, lines: 98


Example 19: xfs_file_aio_read

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	} else
		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

Developer ID: 119-org, project: hi3518-osdrv, lines: 87


Example 20: xfs_file_dio_aio_write

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
					    FI_REMAPF_LOCKED);
		if (ret)
			goto out;
	}

	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
					&nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	ASSERT(ret < 0 || ret == count);
	return ret;
}

Developer ID: MiniBlu, project: cm11_kernel_htc_msm8974a3ul, lines: 92


Example 21: xfs_file_dio_aio_write

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if (!IS_DAX(inode) &&
	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless there page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	end = iocb->ki_pos + count - 1;

	/*
	 * See xfs_file_read_iter() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}
//......... portions of the code omitted here .........

Developer ID: sjp38, project: linux.doc_trans_membarrier, lines: 101


Example 22: xfs_file_aio_write_checks

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolocked held either shared and exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
//......... portions of the code omitted here .........

Developer ID: sjp38, project: linux.doc_trans_membarrier, lines: 101


Example 23: xfs_file_read_iter

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(xs_read_calls);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (unlikely(ioflags & XFS_IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceeed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, pos + size - 1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + size - 1) >> PAGE_CACHE_SHIFT);
			WARN_ON_ONCE(ret);
			ret = 0;
		}

Developer ID: Abioy, project: kasan, lines: 79


Example 24: xfs_file_dio_aio_read

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			isize = i_size_read(inode);
	size_t			count = iov_iter_count(to);
	struct iov_iter		data;
	struct xfs_buftarg	*target;
	ssize_t			ret = 0;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (XFS_IS_REALTIME_INODE(ip))
		target = ip->i_mount->m_rtdev_targp;
	else
		target = ip->i_mount->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
		if (iocb->ki_pos == isize)
			return 0;
		return -EINVAL;
	}

	file_accessed(iocb->ki_filp);

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceeed concurrently without
	 * serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (mapping->nrpages) {
			ret = filemap_write_and_wait(mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	data = *to;
	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
			xfs_get_blocks_direct, NULL, NULL, 0);
	if (ret >= 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(to, ret);
	}
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

Developer ID: acton393, project: linux, lines: 89



Note: The xfs_rw_ilock examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.

