
C++ xfs_ilock Function Code Examples


This article collects typical usage examples of the xfs_ilock function, drawn from real open-source C/C++ code (chiefly the Linux kernel's XFS filesystem and downstream kernel trees). If you are wondering what xfs_ilock does, how to call it, or what real call sites look like, the curated examples below should help.

A total of 29 code examples of xfs_ilock are shown below, ordered by popularity.
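All of the examples share one basic shape: xfs_ilock() takes an inode lock in a caller-chosen mode (XFS_ILOCK_EXCL for exclusive access, XFS_ILOCK_SHARED for read-only access, plus the separate XFS_IOLOCK_* and XFS_MMAPLOCK_* classes), and every path out of the critical section must call xfs_iunlock() with the same flags. Here is a minimal illustrative sketch of that pairing; xfs_isize_locked() is an invented name used only for illustration, while the lock calls and the i_d.di_size field are the real API seen in the examples:

/*
 * Minimal sketch of the lock/unlock pairing used throughout the
 * examples below. Hypothetical helper, not kernel code: take the
 * inode lock shared, read one field, release the same mode.
 */
static xfs_fsize_t
xfs_isize_locked(
	struct xfs_inode	*ip)
{
	xfs_fsize_t		size;

	xfs_ilock(ip, XFS_ILOCK_SHARED);	/* shared: readers only */
	size = ip->i_d.di_size;			/* read the on-disk size */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);	/* must match the lock flags */

	return size;
}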

Example 1: xfs_reflink_remap_extent

/*
 * Unmap a range of blocks from a file, then map other blocks into the hole.
 * The range to unmap is (destoff : destoff + srcioff + irec->br_blockcount).
 * The extent irec is mapped into dest at irec->br_startoff.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		destoff,
	xfs_off_t		new_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			real_extent = xfs_bmap_is_real_extent(irec);
	struct xfs_trans	*tp;
	xfs_fsblock_t		firstfsb;
	unsigned int		resblks;
	struct xfs_defer_ops	dfops;
	struct xfs_bmbt_irec	uirec;
	xfs_filblks_t		rlen;
	xfs_filblks_t		unmap_len;
	xfs_off_t		newlen;
	int			error;

	unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
	trace_xfs_reflink_punch_range(ip, destoff, unmap_len);

	/* No reflinking if we're low on space */
	if (real_extent) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, irec->br_startblock));
		if (error)
			goto out;
	}

	/* Start a rolling transaction to switch the mappings */
	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If we're not just clearing space, then do we have enough quota? */
	if (real_extent) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_cancel;
	}

	trace_xfs_reflink_remap(ip, irec->br_startoff,
				irec->br_blockcount, irec->br_startblock);

	/* Unmap the old blocks in the data fork. */
	rlen = unmap_len;
	while (rlen) {
		xfs_defer_init(&dfops, &firstfsb);
		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/*
		 * Trim the extent to whatever got unmapped.
		 * Remember, bunmapi works backwards.
		 */
		uirec.br_startblock = irec->br_startblock + rlen;
		uirec.br_startoff = irec->br_startoff + rlen;
		uirec.br_blockcount = unmap_len - rlen;
		unmap_len = rlen;

		/* If this isn't a real mapping, we're done. */
		if (!real_extent || uirec.br_blockcount == 0)
			goto next_extent;

		trace_xfs_reflink_remap(ip, uirec.br_startoff,
				uirec.br_blockcount, uirec.br_startblock);

		/* Update the refcount tree */
		error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
		if (error)
			goto out_defer;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
		if (error)
			goto out_defer;

		/* Update quota accounting. */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				uirec.br_blockcount);

		/* Update dest isize if needed. */
		newlen = XFS_FSB_TO_B(mp,
				uirec.br_startoff + uirec.br_blockcount);
		newlen = min_t(xfs_off_t, newlen, new_isize);
		if (newlen > i_size_read(VFS_I(ip))) {
			trace_xfs_reflink_update_inode_size(ip, newlen);
//......... (remaining code omitted) .........
Developer: oscardagrach | Project: linux | Lines: 101
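A pattern worth noting in example 1, repeated in many of the examples below: the transaction is allocated first (which reserves log space and may sleep), xfs_ilock() is called only afterwards, and the locked inode is then joined to the transaction with xfs_trans_ijoin(). A condensed, hypothetical sketch of just that skeleton follows; xfs_locked_update_sketch() is an invented name, the modification step is intentionally elided, and only the ordering is taken from the example:

/*
 * Condensed sketch of the reserve -> lock -> join ordering from
 * example 1. Not kernel code; the actual update is left out.
 */
static int
xfs_locked_update_sketch(
	struct xfs_inode	*ip,
	unsigned int		resblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/* Reserve log/block space first: this can sleep, so no locks yet. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);		/* then take the inode lock */
	xfs_trans_ijoin(tp, ip, 0);		/* join; 0 = caller keeps the lock */

	/* ... modify the inode or its mappings here ... */

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);	/* ijoin(..., 0): we unlock ourselves */
	return error;
}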


Example 2: xfs_qm_dqalloc

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);
	xfs_dqtrace_entry(dqp, "DQALLOC");

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	XFS_BMAP_INIT(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock
	 */
	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	/*
	 * xfs_trans_commit normally decrements the vnode ref count
	 * when it unlocks the inode. Since we want to keep the quota
	 * inode around, we bump the vnode ref count now.
	 */
	VN_HOLD(XFS_ITOV(quotip));

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	if ((error = xfs_bmapi(tp, quotip,
			      offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
			      XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
			      &firstblock,
			      XFS_QM_DQALLOC_SPACE_RES(mp),
			      &map, &nmaps, &flist))) {
		goto error0;
	}
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       XFS_QI_DQCHUNKLEN(mp),
			       0);
	if (!bp || (error = XFS_BUF_GETERROR(bp)))
		goto error1;
	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, firstblock, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
//......... (remaining code omitted) .........
Developer: jameshilliard | Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5 | Lines: 101


Example 3: xfs_qm_dqget

//......... (preceding code omitted) .........
		XFS_DQ_HASH_UNLOCK(h);
		xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
		return (0);	/* success */
	}
	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read
	 */
	version = h->qh_version;
	XFS_DQ_HASH_UNLOCK(h);

	/*
	 * Allocate the dquot on the kernel heap, and read the ondisk
	 * portion off the disk. Also, do all the necessary initialization
	 * This can return ENOENT if dquot didn't exist on disk and we didn't
	 * ask it to allocate; ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_idtodq(mp, id, type,
				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
					   XFS_QMOPT_DOWARN),
				  &dqp))) {
		if (ip)
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		return (error);
	}

	/*
	 * See if this is mount code calling to look at the overall quota limits
	 * which are stored in the id == 0 user or group's dquot.
	 * Since we may not have done a quotacheck by this point, just return
	 * the dquot without attaching it to any hashtables, lists, etc, or even
	 * taking a reference.
	 * The caller must dqdestroy this once done.
	 */
	if (flags & XFS_QMOPT_DQSUSER) {
		ASSERT(id == 0);
		ASSERT(! ip);
		goto dqret;
	}

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (! XFS_IS_DQTYPE_ON(mp, type)) {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
//......... (remaining code omitted) .........
Developer: jameshilliard | Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5 | Lines: 67


Example 4: xfs_file_splice_write

STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsize_t		isize, new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
Developer: A2109devs | Project: lenovo_a2109a_kernel | Lines: 71


Example 5: xfs_file_aio_write

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0, error = 0;
	int			ioflags = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	if (count == 0)
		return 0;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
				      pos, count, dmflags, &iolock);
		if (error) {
			goto out_unlock_internal;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != ip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
//......... (remaining code omitted) .........
Developer: A2109devs | Project: lenovo_a2109a_kernel | Lines: 101


Example 6: xfs_read

ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->pbr_smask) ||
		    (size & target->pbr_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	if (unlikely(ioflags & IO_ISDIRECT))
		down(&inode->i_sem);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					FILP_DELAY_FLAG(file), &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_isem;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

unlock_isem:
	if (unlikely(ioflags & IO_ISDIRECT))
		up(&inode->i_sem);
	return ret;
}
Developer: GodFox | Project: magx_kernel_xpixl | Lines: 99


Example 7: xfs_file_fsync

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_ioend_wait(ip);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache the device used for file data
		 * first.  This is to ensure newly written file data make
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
//......... (remaining code omitted) .........
Developer: CSCLOG | Project: beaglebone | Lines: 101


Example 8: xfs_ioctl_setattr

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa,
	int			mask)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	unsigned int		lock_flags = 0;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;

	trace_xfs_ioctl_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Disallow 32bit project ids when projid32bit feature is not enabled.
	 */
	if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
			!xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return XFS_ERROR(EINVAL);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, &gdqp);
		if (code)
			return code;
	}

	/*
	 * For the other attributes, we acquire the inode lock and
	 * first do an error checking pass.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (code)
		goto error_return;

	lock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal
	 * to the file owner ID, except in cases where the
	 * CAP_FSETID capability is applicable.
	 */
	if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
		code = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Do a quota reservation only if projid is actually going to change.
	 */
	if (mask & FSX_PROJID) {
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    XFS_IS_PQUOTA_ON(mp) &&
		    xfs_get_projid(ip) != fa->fsx_projid) {
			ASSERT(tp);
			code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (code)	/* out of quota */
				goto error_return;
		}
	}

	if (mask & FSX_EXTSIZE) {
		/*
		 * Can't change extent size if any extents are allocated.
		 */
		if (ip->i_d.di_nextents &&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
		     fa->fsx_extsize)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}

		/*
		 * Extent size must be a multiple of the appropriate block
		 * size, if set at all. It must also be smaller than the
		 * maximum extent size supported by the filesystem.
		 *
//......... (remaining code omitted) .........
Developer: garyvan | Project: openwrt-1.6 | Lines: 101


Example 9: xfs_reflink_end_cow

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	xfs_off_t			count)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_trans		*tp;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error;
	unsigned int			resblks;
	xfs_filblks_t			rlen;
	xfs_extnum_t			idx;

	trace_xfs_reflink_end_cow(ip, offset, count);

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0)
		return 0;

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Start a rolling transaction to switch the mappings.  We're
	 * unlikely ever to have to remap 16T worth of single-block
	 * extents, so just cap the worst case extent count to 2^32-1.
	 * Stick a warning in just in case, and avoid 64-bit division.
	 */
	BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
	if (end_fsb - offset_fsb > UINT_MAX) {
		error = -EFSCORRUPTED;
		xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(0);
		goto out;
	}
	resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
			(unsigned int)(end_fsb - offset_fsb),
			XFS_DATA_FORK);
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If there is a hole at end_fsb - 1 go to the previous extent */
	if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
	    got.br_startoff > end_fsb) {
		/*
		 * In case of racing, overlapping AIO writes no COW extents
		 * might be left by the time I/O completes for the loser of
		 * the race.  In that case we are done.
		 */
		if (idx <= 0)
			goto out_cancel;
		xfs_iext_get_extent(ifp, --idx, &got);
	}

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped idx forward */
		if (!del.br_blockcount) {
			idx--;
			goto next_extent;
		}

		ASSERT(!isnullstartblock(got.br_startblock));

		/*
		 * Don't remap unwritten extents; these are
		 * speculatively preallocated CoW extents that have been
		 * allocated but have not yet been involved in a write.
		 */
		if (got.br_state == XFS_EXT_UNWRITTEN) {
			idx--;
			goto next_extent;
		}

		/* Unmap the old blocks in the data fork. */
		xfs_defer_init(&dfops, &firstfsb);
		rlen = del.br_blockcount;
		error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/* Trim the extent to whatever got unmapped. */
		if (rlen) {
//......... (remaining code omitted) .........
Developer: oscardagrach | Project: linux | Lines: 101


Example 10: xfs_filestream_new_ag

int
xfs_filestream_new_ag(
	xfs_bmalloca_t	*ap,
	xfs_agnumber_t	*agp)
{
	int		flags, err;
	xfs_inode_t	*ip, *pip = NULL;
	xfs_mount_t	*mp;
	xfs_mru_cache_t	*cache;
	xfs_extlen_t	minlen;
	fstrm_item_t	*dir, *file;
	xfs_agnumber_t	ag = NULLAGNUMBER;

	ip = ap->ip;
	mp = ip->i_mount;
	cache = mp->m_filestream;
	minlen = ap->alen;
	*agp = NULLAGNUMBER;

	/*
	 * Look for the file in the cache, removing it if it's found.  Doing
	 * this allows it to be held across the dir lookup that follows.
	 */
	file = xfs_mru_cache_remove(cache, ip->i_ino);
	if (file) {
		ASSERT(ip == file->ip);

		/* Save the file's parent inode and old AG number for later. */
		pip = file->pip;
		ag = file->ag;

		/* Look for the file's directory in the cache. */
		dir = xfs_mru_cache_lookup(cache, pip->i_ino);
		if (dir) {
			ASSERT(pip == dir->ip);

			/*
			 * If the directory has already moved on to a new AG,
			 * use that AG as the new AG for the file. Don't
			 * forget to twiddle the AG refcounts to match the
			 * movement.
			 */
			if (dir->ag != file->ag) {
				xfs_filestream_put_ag(mp, file->ag);
				xfs_filestream_get_ag(mp, dir->ag);
				*agp = file->ag = dir->ag;
			}

			xfs_mru_cache_done(cache);
		}

		/*
		 * Put the file back in the cache.  If this fails, the free
		 * function needs to be called to tidy up in the same way as if
		 * the item had simply expired from the cache.
		 */
		err = xfs_mru_cache_insert(cache, ip->i_ino, file);
		if (err) {
			xfs_fstrm_free_func(ip->i_ino, file);
			return err;
		}

		/*
		 * If the file's AG was moved to the directory's new AG, there's
		 * nothing more to be done.
		 */
		if (*agp != NULLAGNUMBER) {
			TRACE_MOVEAG(mp, ip, pip,
					ag, xfs_filestream_peek_ag(mp, ag),
					*agp, xfs_filestream_peek_ag(mp, *agp));
			return 0;
		}
	}

	/*
	 * If the file's parent directory is known, take its iolock in exclusive
	 * mode to prevent two sibling files from racing each other to migrate
	 * themselves and their parent to different AGs.
	 */
	if (pip)
		xfs_ilock(pip, XFS_IOLOCK_EXCL);

	/*
	 * A new AG needs to be found for the file.  If the file's parent
	 * directory is also known, it will be moved to the new AG as well to
	 * ensure that files created inside it in future use the new AG.
	 */
	ag = (ag == NULLAGNUMBER) ? 0 : (ag + 1) % mp->m_sb.sb_agcount;
	flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
	        (ap->low ? XFS_PICK_LOWSPACE : 0);

	err = _xfs_filestream_pick_ag(mp, ag, agp, flags, minlen);
	if (err || *agp == NULLAGNUMBER)
		goto exit;

	/*
	 * If the file wasn't found in the file cache, then its parent directory
	 * inode isn't known.  For this to have happened, the file must either
	 * be pre-existing, or it was created long enough ago that its cache
	 * entry has expired.  This isn't the sort of usage that the filestreams
//......... (remaining code omitted) .........
Developer: flwh | Project: Alcatel_OT_985_kernel | Lines: 101


Example 11: xfs_reflink_allocate_cow

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_bmbt_irec	got;
	struct xfs_defer_ops	dfops;
	struct xfs_trans	*tp = NULL;
	xfs_fsblock_t		first_block;
	int			nimaps, error = 0;
	bool			trimmed;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;
	xfs_extnum_t		idx;

retry:
	ASSERT(xfs_is_reflink_inode(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));

	/*
	 * Even if the extent is not shared we might have a preallocation for
	 * it in the COW fork.  If so use it.
	 */
	if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &idx, &got) &&
	    got.br_startoff <= offset_fsb) {
		*shared = true;

		/* If we have a real allocation in the COW fork we're done. */
		if (!isnullstartblock(got.br_startblock)) {
			xfs_trim_extent(&got, offset_fsb, count_fsb);
			*imap = got;
			goto convert;
		}

		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
	} else {
		error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
		if (error || !*shared)
			goto out;
	}

	if (!tp) {
		resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

		xfs_iunlock(ip, *lockmode);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
		*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);

		if (error)
			return error;

		error = xfs_qm_dqattach_locked(ip, 0);
		if (error)
			goto out;
		goto retry;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &first_block);
	nimaps = 1;

	/* Allocate the entire reservation as unwritten blocks. */
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
			resblks, imap, &nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/* Finish up. */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		return error;

convert:
	return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
			&dfops);

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out:
	if (tp)
//......... (remaining code omitted) .........
Developer: oscardagrach | Project: linux | Lines: 101


Example 12: xfs_reflink_dirty_extents

/*
 * The user wants to preemptively CoW all shared blocks in this file,
 * which enables us to turn off the reflink flag.  Iterate all
 * extents which are not prealloc/delalloc to see which ranges are
 * mentioned in the refcount tree, then read those blocks into the
 * pagecache, dirty them, fsync them back out, and then we can update
 * the inode flag.  What happens if we run out of memory? :)
 */
STATIC int
xfs_reflink_dirty_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		fbno,
	xfs_filblks_t		end,
	xfs_off_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	xfs_off_t		fpos;
	xfs_off_t		flen;
	struct xfs_bmbt_irec	map[2];
	int			nmaps;
	int			error = 0;

	while (end - fbno > 0) {
		nmaps = 1;
		/*
		 * Look for extents in the file.  Skip holes, delalloc, or
		 * unwritten extents; they can't be reflinked.
		 */
		error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
		if (error)
			goto out;
		if (nmaps == 0)
			break;
		if (!xfs_bmap_is_real_extent(&map[0]))
			goto next;

		map[1] = map[0];
		while (map[1].br_blockcount) {
			agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
			agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
			aglen = map[1].br_blockcount;

			error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
					aglen, &rbno, &rlen, true);
			if (error)
				goto out;
			if (rbno == NULLAGBLOCK)
				break;

			/* Dirty the pages */
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
					(rbno - agbno));
			flen = XFS_FSB_TO_B(mp, rlen);
			if (fpos + flen > isize)
				flen = isize - fpos;
			error = iomap_file_dirty(VFS_I(ip), fpos, flen,
					&xfs_iomap_ops);
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			if (error)
				goto out;

			map[1].br_blockcount -= (rbno - agbno + rlen);
			map[1].br_startoff += (rbno - agbno + rlen);
			map[1].br_startblock += (rbno - agbno + rlen);
		}

next:
		fbno = map[0].br_startoff + map[0].br_blockcount;
	}
out:
	return error;
}
Developer: oscardagrach | Project: linux | Lines: 78


Example 13: xfs_reflink_remap_range

/*
 * Link a range of blocks from one file to another.
 */
int
xfs_reflink_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	u64			len,
	bool			is_dedupe)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	bool			same_inode = (inode_in == inode_out);
	xfs_fileoff_t		sfsbno, dfsbno;
	xfs_filblks_t		fsblen;
	xfs_extlen_t		cowextsize;
	ssize_t			ret;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Lock both files against IO */
	lock_two_nondirectories(inode_in, inode_out);
	if (same_inode)
		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
			&len, is_dedupe);
	if (ret <= 0)
		goto out_unlock;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	dfsbno = XFS_B_TO_FSBT(mp, pos_out);
	sfsbno = XFS_B_TO_FSBT(mp, pos_in);
	fsblen = XFS_B_TO_FSB(mp, len);
	ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
			pos_out + len);
	if (ret)
		goto out_unlock;

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data, pos_out,
				   PAGE_ALIGN(pos_out + len) - 1);

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			is_dedupe);

out_unlock:
	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
	unlock_two_nondirectories(inode_in, inode_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return ret;
}
Developer: oscardagrach | Project: linux | Lines: 94
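Example 13 shows the two-inode variant: when the source and destination inodes both need the same lock class, XFS goes through xfs_lock_two_inodes() so every caller acquires the pair in one well-defined order, and a same-inode guard avoids taking the lock twice. A hedged sketch of just that locking shape follows, with the eligibility checks and the remap itself elided; xfs_remap_lock_sketch() is an invented name:

/*
 * Sketch of the two-inode locking shape from example 13. The
 * same-inode guard matters: locking one inode twice would deadlock.
 */
static int
xfs_remap_lock_sketch(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	bool			same_inode = (src == dest);
	int			error = 0;

	if (same_inode)
		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);

	/* ... check eligibility and remap blocks here ... */

	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
	return error;
}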


Example 14: xfs_symlink

int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return XFS_ERROR(ENAMETOOLONG);

	udqp = gdqp = NULL;
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
	if (error == ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
//......... (remaining code omitted) .........
Developer: spacex | Project: kernel-centos7 | Lines: 101


Example 15: xfs_reclaim_inode

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
//......... (remaining code omitted) .........
Developer: BWhitten | Project: linux-stable | Lines: 101


Example 16: xfs_inactive_symlink_rmt

/*
 * Free a symlink that has blocks associated with it.
 */
STATIC int
xfs_inactive_symlink_rmt(
	struct xfs_inode *ip)
{
	xfs_buf_t	*bp;
	int		committed;
	int		done;
	int		error;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	int		i;
	xfs_mount_t	*mp;
	xfs_bmbt_irec_t	mval[XFS_SYMLINK_MAPS];
	int		nmaps;
	int		size;
	xfs_trans_t	*tp;

	mp = ip->i_mount;
	ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
	/*
	 * We're freeing a symlink that has some
	 * blocks allocated to it.  Free the
	 * blocks here.  We know that we've got
	 * either 1 or 2 extents and that we can
	 * free them all in one bunmapi call.
	 */
	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Lock the inode, fix the size, and join it to the transaction.
	 * Hold it so in the normal path, we still have it locked for
	 * the second transaction.  In the error paths we need it
	 * held so the cancel won't rele it, see below.
	 */
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	xfs_bmap_init(&free_list, &first_block);
	nmaps = ARRAY_SIZE(mval);
	error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
				mval, &nmaps, 0);
	if (error)
		goto error_trans_cancel;
	/*
	 * Invalidate the block(s). No validation is done.
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		if (!bp) {
			error = ENOMEM;
			goto error_bmap_cancel;
		}
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			    &first_block, &free_list, &done);
	if (error)
		goto error_bmap_cancel;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto error_bmap_cancel;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
//......... (remaining code omitted) .........
Developer: spacex | Project: kernel-centos7 | Lines: 101


Example 17: xfs_setattr_nonsize

int
xfs_setattr_nonsize(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	kuid_t			uid = GLOBAL_ROOT_UID, iuid = GLOBAL_ROOT_UID;
	kgid_t			gid = GLOBAL_ROOT_GID, igid = GLOBAL_ROOT_GID;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;

	trace_xfs_setattr(ip);

	/* If acls are being inherited, we already have this checked */
	if (!(flags & XFS_ATTR_NOACL)) {
		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (XFS_FORCED_SHUTDOWN(mp))
			return -EIO;

		error = inode_change_ok(inode, iattr);
		if (error)
			return error;
	}

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = iattr->ia_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = inode->i_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = iattr->ia_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		}  else {
			gid = inode->i_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
					   xfs_kgid_to_gid(gid),
					   xfs_get_projid(ip),
					   qflags, &udqp, &gdqp, NULL);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_dqrele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, inode's dquot(s)
		 * would have changed also.
		 */
		iuid = inode->i_uid;
		igid = inode->i_gid;
		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

		/*
		 * Do a quota reservation only if uid/gid is actually
		 * going to change.
		 */
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    ((XFS_IS_UQUOTA_ON(mp) && !uid_eq(iuid, uid)) ||
//......... (remaining code omitted) .........
Developer: gxt | Project: linux | Lines: 101


Example 18: xfs_sendfile

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

	return ret;
}
Developer: GodFox | Project: magx_kernel_xpixl | Lines: 61


Example 19: xfs_setattr_size

//......... (preceding code omitted) .........
	 */
	if (did_zeroing ||
	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						      ip->i_d.di_size, newsize);
		if (error)
			return error;
	}

	/*
	 * We've already locked out new page faults, so now we can safely remove
	 * pages from the page cache knowing they won't get refaulted until we
	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
	 * complete. The truncate_setsize() call also cleans partial EOF page
	 * PTEs on extending truncates and hence ensures sub-page block size
	 * filesystems are correctly handled, too.
	 *
	 * We have to do all the page cache truncate work outside the
	 * transaction context as the "lock" order is page lock->log space
	 * reservation as defined by extent allocation in the writeback path.
	 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
	 * having already truncated the in-memory version of the file (i.e. made
	 * user visible changes). There's not much we can do about this, except
	 * to hope that the caller sees ENOMEM and retries the truncate
	 * operation.
	 */
	truncate_setsize(inode, newsize);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	lock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize &&
	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_fs_time(inode->i_sb);
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
	}

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	ip->i_d.di_size = newsize;
//......... (remaining code omitted) .........
Developer: gxt | Project: linux | Lines: 67


Example 20: xfs_file_aio_read

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
		}
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
//......... (remaining code omitted) .........
Developer: A2109devs | Project: lenovo_a2109a_kernel | Lines: 101


Example 21: xfs_reclaim_inode

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;

		/*
		 * If we only have a single dirty inode in a cluster there is
		 * a fair chance that the AIL push may have pushed it into
		 * the buffer, but xfsbufd won't touch it until 30 seconds
		 * from now, and thus we will lock up here.
		 *
		 * Promote the inode buffer to the front of the delwri list
		 * and wake up xfsbufd now.
		 */
		xfs_promote_inode(ip);
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
//......... (remaining code omitted) .........
Developer: 1yankeedt | Project: D710BST_FL24_Kernel | Lines: 101


Example 22: xfs_zero_eof

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}
//......... (remaining code omitted) .........
Developer ID: A2109devs, project: lenovo_a2109a_kernel, lines of code: 101
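The core of xfs_zero_eof() is the arithmetic that converts the old EOF and the new write offset into a range of whole filesystem blocks to zero. A standalone sketch of that arithmetic, assuming a 4096-byte block size (XFS_B_TO_FSB rounds up, XFS_B_TO_FSBT truncates; the helper names below are invented):

#include <stdio.h>
#include <stdint.h>

#define BLOCKSIZE 4096ULL	/* assumed; XFS reads this from the superblock */

/* Round a byte offset up to a block number (like XFS_B_TO_FSB). */
static uint64_t b_to_fsb(uint64_t bytes)  { return (bytes + BLOCKSIZE - 1) / BLOCKSIZE; }
/* Truncate a byte offset down to a block number (like XFS_B_TO_FSBT). */
static uint64_t b_to_fsbt(uint64_t bytes) { return bytes / BLOCKSIZE; }

int main(void)
{
	uint64_t isize = 10000;		/* old EOF */
	uint64_t offset = 20000;	/* new write offset past EOF */

	/* Same computation as xfs_zero_eof(): the whole blocks between
	 * the old EOF and the new write offset may need zeroing. */
	uint64_t last_fsb = isize ? b_to_fsbt(isize - 1) : UINT64_MAX;
	uint64_t start_zero_fsb = b_to_fsb(isize);
	uint64_t end_zero_fsb = b_to_fsbt(offset - 1);

	if (last_fsb == end_zero_fsb)
		printf("EOF grew only within its last block, nothing more to do\n");
	else
		printf("zero blocks %llu..%llu\n",
		       (unsigned long long)start_zero_fsb,
		       (unsigned long long)end_zero_fsb);
	return 0;
}

With these inputs the sketch prints "zero blocks 3..4", matching the partial-last-block case being handled separately by xfs_zero_last_block().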


Example 23: xfs_ioctl_setattr

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa,
	int			mask)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	unsigned int		lock_flags = 0;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;

	xfs_itrace_entry(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
		code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, &gdqp);
		if (code)
			return code;
	}

	/*
	 * For the other attributes, we acquire the inode lock and
	 * first do an error checking pass.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (code)
		goto error_return;

	lock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal
	 * to the file owner ID, except in cases where the
	 * CAP_FSETID capability is applicable.
	 */
	if (current->fsuid != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
		code = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Do a quota reservation only if projid is actually going to change.
	 */
	if (mask & FSX_PROJID) {
		if (XFS_IS_PQUOTA_ON(mp) &&
		    ip->i_d.di_projid != fa->fsx_projid) {
			ASSERT(tp);
			code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (code)	/* out of quota */
				goto error_return;
		}
	}

	if (mask & FSX_EXTSIZE) {
		/*
		 * Can't change extent size if any extents are allocated.
		 */
		if (ip->i_d.di_nextents &&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
		     fa->fsx_extsize)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}

		/*
		 * Extent size must be a multiple of the appropriate block
		 * size, if set at all.
		 */
		if (fa->fsx_extsize != 0) {
			xfs_extlen_t	size;

			if (XFS_IS_REALTIME_INODE(ip) ||
			    ((mask & FSX_XFLAGS) &&
			    (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
				size = mp->m_sb.sb_rextsize <<
				       mp->m_sb.sb_blocklog;
			} else {
				size = mp->m_sb.sb_blocksize;
//......... part of the code omitted here .........
Developer ID: Mr-Aloof, project: wl500g, lines of code: 101
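The extent-size validation at the end of this excerpt is easy to test in isolation. Below is a user-space sketch of the same rule, under assumed superblock geometry (the real values come from mp->m_sb, and the excerpt is truncated right where the modulo check would follow, so the intent is partly inferred): a non-zero hint must be a multiple of the block size, or of the realtime extent size for realtime files.

#include <stdio.h>
#include <stdint.h>

/* Assumed geometry; in XFS these come from the superblock (mp->m_sb). */
#define SB_BLOCKSIZE	4096u
#define SB_REXTSIZE	16u	/* realtime extent size, in blocks */

/* Mirror of the extsize check: a non-zero hint must be aligned to the
 * block size, or to the realtime extent size for realtime inodes. */
static int extsize_valid(uint32_t extsize, int realtime)
{
	uint32_t unit = realtime ? SB_REXTSIZE * SB_BLOCKSIZE : SB_BLOCKSIZE;

	if (extsize == 0)
		return 1;	/* "not set" is always acceptable */
	return extsize % unit == 0;
}

int main(void)
{
	printf("%d\n", extsize_valid(8192, 0));		/* 1: two blocks    */
	printf("%d\n", extsize_valid(6000, 0));		/* 0: not aligned   */
	printf("%d\n", extsize_valid(65536, 1));	/* 1: one rt extent */
	return 0;
}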


Example 24: xfs_file_fsync

STATIC int
xfs_file_fsync(
	struct file		*file,
	struct dentry		*dentry,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	xfs_itrace_entry(ip);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((dentry->d_inode->i_state & I_DIRTY_DATASYNC) ||
	    ((dentry->d_inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).	Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.	 So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(ip->i_mount,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.
		 */
		if (!log_flushed)
			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

		/*
//......... part of the code omitted here .........
Developer ID: A2109devs, project: lenovo_a2109a_kernel, lines of code: 101
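From user space, the datasync distinction this function implements is simply the difference between fsync(2) and fdatasync(2): the latter is allowed to skip metadata-only updates such as timestamps, which is exactly the I_DIRTY_SYNC versus I_DIRTY_DATASYNC split above. A minimal usage sketch (the path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/fsync-demo";	/* illustrative path */
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6)
		perror("write");

	/* Data and size changes must reach disk in both cases; only
	 * fdatasync() may skip a log force for pure timestamp updates. */
	if (fdatasync(fd) != 0)
		perror("fdatasync");
	if (fsync(fd) != 0)
		perror("fsync");

	close(fd);
	return 0;
}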


Example 25: xfs_attr_set

int
xfs_attr_set(
	struct xfs_inode	*dp,
	const unsigned char	*name,
	unsigned char		*value,
	int			valuelen,
	int			flags)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_args	args;
	struct xfs_bmap_free	flist;
	struct xfs_trans_res	tres;
	xfs_fsblock_t		firstblock;
	int			rsvd = (flags & ATTR_ROOT) != 0;
	int			error, err2, committed, local;

	XFS_STATS_INC(xs_attr_set);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_attr_args_init(&args, dp, name, flags);
	if (error)
		return error;

	args.value = value;
	args.valuelen = valuelen;
	args.firstblock = &firstblock;
	args.flist = &flist;
	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
	args.total = xfs_attr_calc_size(&args, &local);

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		return error;

	/*
	 * If the inode doesn't have an attribute fork, add one.
	 * (inode must not be locked when we call this routine)
	 */
	if (XFS_IFORK_Q(dp) == 0) {
		int sf_size = sizeof(xfs_attr_sf_hdr_t) +
			XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);

		error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
		if (error)
			return error;
	}

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call.  All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);

	/*
	 * Root fork attributes can use reserved data blocks for this
	 * operation if necessary
	 */
	if (rsvd)
		args.trans->t_flags |= XFS_TRANS_RESERVE;

	tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
			 M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
	if (error) {
		xfs_trans_cancel(args.trans);
		return error;
	}
	xfs_ilock(dp, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
				       XFS_QMOPT_RES_REGBLKS);
	if (error) {
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
		xfs_trans_cancel(args.trans);
		return error;
	}

	xfs_trans_ijoin(args.trans, dp, 0);

	/*
	 * If the attribute list is non-existent or a shortform list,
	 * upgrade it to a single-leaf-block attribute list.
	 */
	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
	     dp->i_d.di_anextents == 0)) {

		/*
//......... part of the code omitted here .........
Developer ID: kenhys, project: partclone, lines of code: 101
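On Linux, xfs_attr_set() is reached from user space through the extended-attribute system calls. A minimal sketch using setxattr(2); the path is illustrative, must already exist, and has to live on an XFS filesystem for the call to land in this code path:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/tmp/xattr-demo";	/* illustrative; must exist */
	const char *value = "blue";

	/* "user.*" attributes are stored in the inode's attribute fork
	 * on XFS; the VFS routes this call down to xfs_attr_set(). */
	if (setxattr(path, "user.color", value, strlen(value), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	puts("attribute set");
	return 0;
}

A flags value of 0 means create-or-replace; XATTR_CREATE and XATTR_REPLACE make the call fail if the name does or does not already exist, respectively.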


Example 26: xfs_qm_dqtobp

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	xfs_bmbt_irec_t map;
	int		nmaps, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip;
	xfs_mount_t	*mp;
	xfs_disk_dquot_t *ddq;
	xfs_dqid_t	id;
	boolean_t	newdquot;
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	mp = dqp->q_mount;
	id = be32_to_cpu(dqp->q_core.d_id);
	nmaps = 1;
	newdquot = B_FALSE;

	/*
	 * If we don't know where the dquot lives, find out.
	 */
	if (dqp->q_blkno == (xfs_daddr_t) 0) {
		/* We use the id as an index */
		dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
		nmaps = 1;
		quotip = XFS_DQ_TO_QIP(dqp);
		xfs_ilock(quotip, XFS_ILOCK_SHARED);
		/*
		 * Return if this type of quotas is turned off while we didn't
		 * have an inode lock
		 */
		if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
			xfs_iunlock(quotip, XFS_ILOCK_SHARED);
			return (ESRCH);
		}
		/*
		 * Find the block map; no allocations yet
		 */
		error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
				  XFS_DQUOT_CLUSTER_SIZE_FSB,
				  XFS_BMAPI_METADATA,
				  NULL, 0, &map, &nmaps, NULL);

		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		if (error)
			return (error);
		ASSERT(nmaps == 1);
		ASSERT(map.br_blockcount == 1);

		/*
		 * offset of dquot in the (fixed sized) dquot chunk.
		 */
		dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
			sizeof(xfs_dqblk_t);
		if (map.br_startblock == HOLESTARTBLOCK) {
			/*
			 * We don't allocate unless we're asked to
			 */
			if (!(flags & XFS_QMOPT_DQALLOC))
				return (ENOENT);

			ASSERT(tp);
			if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
						dqp->q_fileoffset, &bp)))
				return (error);
			tp = *tpp;
			newdquot = B_TRUE;
		} else {
			/*
			 * store the blkno etc so that we don't have to do the
			 * mapping all the time
			 */
			dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		}
	}
	ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
	ASSERT(dqp->q_blkno != HOLESTARTBLOCK);

	/*
	 * Read in the buffer, unless we've just done the allocation
	 * (in which case we already have the buf).
	 */
	if (! newdquot) {
		xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
		if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					       dqp->q_blkno,
					       XFS_QI_DQCHUNKLEN(mp),
					       0, &bp))) {
			return (error);
		}
//......... part of the code omitted here .........
Developer ID: jameshilliard, project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5, lines of code: 101
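The indexing scheme at the top of xfs_qm_dqtobp() is simple: the quota id divided by dquots-per-block gives the block of the quota inode to map, and the remainder gives the byte offset inside that block. The sizes in the sketch below are assumptions for illustration (XFS derives the real values from the superblock, and the on-disk dquot size varies by format version):

#include <stdio.h>
#include <stdint.h>

/* Assumed sizes for illustration only. */
#define DQBLK_SIZE	136u	/* assumed on-disk dquot record size */
#define BLOCKSIZE	4096u
#define DQPERBLK	(BLOCKSIZE / DQBLK_SIZE)

int main(void)
{
	uint32_t id = 1007;	/* quota id (uid, gid, or project id) */

	/* Same indexing as xfs_qm_dqtobp(): which block of the quota
	 * inode holds this id, and where inside that block it sits. */
	uint64_t fileoffset = id / DQPERBLK;			/* in blocks */
	uint32_t bufoffset = (id % DQPERBLK) * DQBLK_SIZE;	/* in bytes  */

	printf("dquot %u: block %llu, byte offset %u\n",
	       id, (unsigned long long)fileoffset, bufoffset);
	return 0;
}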


Example 27: xfs_attr_remove

/*
 * Generic handler routine to remove a name from an attribute list.
 * Transitions attribute list from Btree to shortform as necessary.
 */
int
xfs_attr_remove(
	struct xfs_inode	*dp,
	const unsigned char	*name,
	int			flags)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_args	args;
	struct xfs_bmap_free	flist;
	xfs_fsblock_t		firstblock;
	int			error;

	XFS_STATS_INC(xs_attr_remove);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	if (!xfs_inode_hasattr(dp))
		return -ENOATTR;

	error = xfs_attr_args_init(&args, dp, name, flags);
	if (error)
		return error;

	args.firstblock = &firstblock;
	args.flist = &flist;

	/*
	 * we have no control over the attribute names that userspace passes us
	 * to remove, so we have to allow the name lookup prior to attribute
	 * removal to fail.
	 */
	args.op_flags = XFS_DA_OP_OKNOENT;

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		return error;

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call.  All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);

	/*
	 * Root fork attributes can use reserved data blocks for this
	 * operation if necessary
	 */
	if (flags & ATTR_ROOT)
		args.trans->t_flags |= XFS_TRANS_RESERVE;

	error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
				  XFS_ATTRRM_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(args.trans);
		return error;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL);
	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks not allocate in the common case.
	 */
	xfs_trans_ijoin(args.trans, dp, 0);

	if (!xfs_inode_hasattr(dp)) {
		error = -ENOATTR;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
		error = xfs_attr_shortform_remove(&args);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_removename(&args);
	} else {
		error = xfs_attr_node_removename(&args);
	}

	if (error)
		goto out;

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(args.trans);

	if ((flags & ATTR_KERNOTIME) == 0)
		xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
//......... part of the code omitted here .........
Developer ID: kenhys, project: partclone, lines of code: 101
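The user-space counterpart is removexattr(2); on Linux, ENOATTR is defined as ENODATA, which is how the -ENOATTR returns above surface to the caller. A minimal sketch (illustrative path):

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/tmp/xattr-demo";	/* illustrative; must exist */

	/* The VFS routes this down to xfs_attr_remove() on XFS. */
	if (removexattr(path, "user.color") != 0) {
		perror("removexattr");
		return 1;
	}
	puts("attribute removed");
	return 0;
}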


Example 28: xfs_map_blocks

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
Developer ID: 404992361, project: mi1_kernel, lines of code: 62
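The locking pattern here (try the lock first, report EAGAIN in nonblocking contexts, otherwise fall back to a blocking acquisition) is a generally useful idiom. A user-space analogue using a pthread mutex; this is a sketch of the pattern, not of the kernel's lock implementation:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the xfs_ilock_nowait()/xfs_ilock() sequence above:
 * trylock, bail out with a retry hint when nonblocking, else block. */
static int do_mapping(int nonblocking)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		if (nonblocking)
			return -1;	/* caller requeues and retries (EAGAIN) */
		pthread_mutex_lock(&lock);
	}

	/* ... look up the block mapping while holding the lock ... */

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("blocking path: %d\n", do_mapping(0));
	printf("nonblocking path: %d\n", do_mapping(1));
	return 0;
}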


Example 29: xfs_symlink

int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	bool                    unlock_dp_on_error = false;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return -ENAMETOOLONG;

	udqp = gdqp = NULL;
	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/*
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
	if (error == -ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = -EPERM;
		goto out_trans_cancel;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
//......... part of the code omitted here .........
Developer ID: AshishNamdev, project: linux, lines of code: 101
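From user space this path is reached via symlink(2). A minimal sketch that creates a link and reads it back (paths are illustrative); whether the target string is stored inline in the inode's data fork or in real blocks is decided by the pathlen check shown above:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *target = "/tmp/symlink-target";	/* illustrative */
	const char *link = "/tmp/symlink-demo";

	/* On XFS this ends up in xfs_symlink(); short targets are stored
	 * inline in the inode, longer ones get allocated blocks. */
	if (symlink(target, link) != 0) {
		perror("symlink");
		return 1;
	}

	char buf[256];
	ssize_t n = readlink(link, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	buf[n] = '\0';
	printf("%s -> %s\n", link, buf);
	return 0;
}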



Note: the xfs_ilock function examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Please do not reproduce without permission.

