
Self-study tutorial: C++ xfs_trans_reserve function code examples

51自学网 2021-06-03 10:20:59

This article collects typical usage examples of the xfs_trans_reserve function in C++ (the examples are Linux kernel C code). If you have been wondering exactly how xfs_trans_reserve is used, or what real calls to it look like, the selected code examples below should help.

A total of 28 code examples of the xfs_trans_reserve function are shown below, sorted by popularity by default.
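Before the individual examples, it may help to see the skeleton they all share: allocate a transaction, call xfs_trans_reserve() for log (and optionally block) space, then either commit or cancel. The following is only a schematic sketch assembled from the setattr-style examples below, using the older six-argument xfs_trans_reserve() signature; the function name is hypothetical, and the transaction type and reservation macro are placeholders borrowed from those examples.

/* Hypothetical helper showing the common pattern, not taken verbatim from any example */
STATIC int
example_trans_pattern(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip)
{
	xfs_trans_t	*tp;
	int		error;

	/* 1. allocate a transaction of the appropriate type */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);

	/* 2. reserve log space (and, if needed, disk blocks) before taking locks */
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		/* nothing is locked or joined yet, so a plain cancel is enough */
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* 3. lock the inode and join it to the transaction */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* ... modify the inode and log the changes here ... */

	/* 4. commit (or cancel on error) and drop the lock */
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

The exact join/unlock conventions and the flags passed to cancel and commit vary between kernel versions, as the examples below show.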

Example 1: xfs_rename

//.........这里部分代码省略.........		/*		 * We have nothing locked, no inode references, and		 * no transaction, so just get out.		 */		goto std_return;	}	ASSERT(src_ip != NULL);	if ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {		/*		 * Check for link count overflow on target_dp		 */		if (target_ip == NULL && (src_dp != target_dp) &&		    target_dp->i_d.di_nlink >= XFS_MAXLINK) {			error = XFS_ERROR(EMLINK);			xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);			goto rele_return;		}	}	new_parent = (src_dp != target_dp);	src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);	/*	 * Drop the locks on our inodes so that we can start the transaction.	 */	xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);	XFS_BMAP_INIT(&free_list, &first_block);	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;	spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);	if (error == ENOSPC) {		spaceres = 0;		error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,				XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);	}	if (error) {		xfs_trans_cancel(tp, 0);		goto rele_return;	}	/*	 * Attach the dquots to the inodes	 */	if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {		xfs_trans_cancel(tp, cancel_flags);		goto rele_return;	}	/*	 * Reacquire the inode locks we dropped above.	 */	xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL);	/*	 * Join all the inodes to the transaction. From this point on,	 * we can rely on either trans_commit or trans_cancel to unlock	 * them.  Note that we need to add a vnode reference to the	 * directories since trans_commit & trans_cancel will decrement	 * them when they unlock the inodes.  Also, we need to be careful	 * not to add an inode to the transaction more than once.	 */
Author: kzlin129, Project: tt-gpl, Lines: 67
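One detail of Example 1 worth calling out: if the initial reservation fails with ENOSPC, the code retries xfs_trans_reserve() with a zero block reservation, since the rename may still be possible without allocating new blocks. Condensed from the listing above:

	spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
	if (error == ENOSPC) {
		/* retry without reserving disk blocks */
		spaceres = 0;
		error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
	}
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto rele_return;
	}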


Example 2: xfs_attr_set

intxfs_attr_set(	struct xfs_inode	*dp,	const unsigned char	*name,	unsigned char		*value,	int			valuelen,	int			flags){	struct xfs_mount	*mp = dp->i_mount;	struct xfs_da_args	args;	struct xfs_bmap_free	flist;	struct xfs_trans_res	tres;	xfs_fsblock_t		firstblock;	int			rsvd = (flags & ATTR_ROOT) != 0;	int			error, err2, committed, local;	XFS_STATS_INC(xs_attr_set);	if (XFS_FORCED_SHUTDOWN(dp->i_mount))		return -EIO;	error = xfs_attr_args_init(&args, dp, name, flags);	if (error)		return error;	args.value = value;	args.valuelen = valuelen;	args.firstblock = &firstblock;	args.flist = &flist;	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;	args.total = xfs_attr_calc_size(&args, &local);	error = xfs_qm_dqattach(dp, 0);	if (error)		return error;	/*	 * If the inode doesn't have an attribute fork, add one.	 * (inode must not be locked when we call this routine)	 */	if (XFS_IFORK_Q(dp) == 0) {		int sf_size = sizeof(xfs_attr_sf_hdr_t) +			XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);		error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);		if (error)			return error;	}	/*	 * Start our first transaction of the day.	 *	 * All future transactions during this code must be "chained" off	 * this one via the trans_dup() call.  All transactions will contain	 * the inode, and the inode will always be marked with trans_ihold().	 * Since the inode will be locked in all transactions, we must log	 * the inode in every transaction to let it float upward through	 * the log.	 */	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);	/*	 * Root fork attributes can use reserved data blocks for this	 * operation if necessary	 */	if (rsvd)		args.trans->t_flags |= XFS_TRANS_RESERVE;	tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +			 M_RES(mp)->tr_attrsetrt.tr_logres * args.total;	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;	error = xfs_trans_reserve(args.trans, &tres, args.total, 0);	if (error) {		xfs_trans_cancel(args.trans, 0);		return error;	}	xfs_ilock(dp, XFS_ILOCK_EXCL);	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :				       XFS_QMOPT_RES_REGBLKS);	if (error) {		xfs_iunlock(dp, XFS_ILOCK_EXCL);		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);		return error;	}	xfs_trans_ijoin(args.trans, dp, 0);	/*	 * If the attribute list is non-existent or a shortform list,	 * upgrade it to a single-leaf-block attribute list.	 */	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&	     dp->i_d.di_anextents == 0)) {		/*//.........这里部分代码省略.........
Author: 3null, Project: linux, Lines: 101
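The examples in this collection come from different kernel versions, so two xfs_trans_reserve() calling conventions appear. Example 1 passes block count, log reservation, rt extents, flags and log count as separate arguments; Example 2 (like Examples 14, 17 and 19 later) packs the log parameters into a struct xfs_trans_res. A rough side-by-side sketch, with the struct fields copied from Example 2 and the argument names in the older form chosen only for illustration:

	/* older interface: every reservation parameter is a separate argument */
	error = xfs_trans_reserve(tp, blocks, logres, rtextents, flags, logcount);

	/* newer interface: the log parameters travel in a struct xfs_trans_res */
	struct xfs_trans_res	tres;

	tres.tr_logres	 = M_RES(mp)->tr_attrsetm.tr_logres +
			   M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(args.trans, &tres, args.total, 0);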


Example 3: xfs_truncate_file

/* * Try to truncate the given file to 0 length.  Currently called * only out of xfs_remove when it has to truncate a file to free * up space for the remove to proceed. */intxfs_truncate_file(	xfs_mount_t	*mp,	xfs_inode_t	*ip){	xfs_trans_t	*tp;	int		error;#ifdef QUOTADEBUG	/*	 * This is called to truncate the quotainodes too.	 */	if (XFS_IS_UQUOTA_ON(mp)) {		if (ip->i_ino != mp->m_sb.sb_uquotino)			ASSERT(ip->i_udquot);	}	if (XFS_IS_OQUOTA_ON(mp)) {		if (ip->i_ino != mp->m_sb.sb_gquotino)			ASSERT(ip->i_gdquot);	}#endif	/*	 * Make the call to xfs_itruncate_start before starting the	 * transaction, because we cannot make the call while we're	 * in a transaction.	 */	xfs_ilock(ip, XFS_IOLOCK_EXCL);	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0);	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,				      XFS_TRANS_PERM_LOG_RES,				      XFS_ITRUNCATE_LOG_COUNT))) {		xfs_trans_cancel(tp, 0);		xfs_iunlock(ip, XFS_IOLOCK_EXCL);		return error;	}	/*	 * Follow the normal truncate locking protocol.  Since we	 * hold the inode in the transaction, we know that it's number	 * of references will stay constant.	 */	xfs_ilock(ip, XFS_ILOCK_EXCL);	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);	xfs_trans_ihold(tp, ip);	/*	 * Signal a sync xaction.  The only case where that isn't	 * the case is if we're truncating an already unlinked file	 * on a wsync fs.  In that case, we know the blocks can't	 * reappear in the file because the links to file are	 * permanently toast.  Currently, we're always going to	 * want a sync transaction because this code is being	 * called from places where nlink is guaranteed to be 1	 * but I'm leaving the tests in to protect against future	 * changes -- rcc.	 */	error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,				     XFS_DATA_FORK,				     ((ip->i_d.di_nlink != 0 ||				       !(mp->m_flags & XFS_MOUNT_WSYNC))				      ? 1 : 0));	if (error) {		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |				 XFS_TRANS_ABORT);	} else {		xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,					 NULL);	}	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);	return error;}
Author: jameshilliard, Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5, Lines: 79


Example 4: xfs_file_fsync

STATIC intxfs_file_fsync(	struct file		*file,	int			datasync){	struct inode		*inode = file->f_mapping->host;	struct xfs_inode	*ip = XFS_I(inode);	struct xfs_mount	*mp = ip->i_mount;	struct xfs_trans	*tp;	int			error = 0;	int			log_flushed = 0;	trace_xfs_file_fsync(ip);	if (XFS_FORCED_SHUTDOWN(mp))		return -XFS_ERROR(EIO);	xfs_iflags_clear(ip, XFS_ITRUNCATED);	xfs_ioend_wait(ip);	if (mp->m_flags & XFS_MOUNT_BARRIER) {		/*		 * If we have an RT and/or log subvolume we need to make sure		 * to flush the write cache the device used for file data		 * first.  This is to ensure newly written file data make		 * it to disk before logging the new inode size in case of		 * an extending write.		 */		if (XFS_IS_REALTIME_INODE(ip))			xfs_blkdev_issue_flush(mp->m_rtdev_targp);		else if (mp->m_logdev_targp != mp->m_ddev_targp)			xfs_blkdev_issue_flush(mp->m_ddev_targp);	}	/*	 * We always need to make sure that the required inode state is safe on	 * disk.  The inode might be clean but we still might need to force the	 * log because of committed transactions that haven't hit the disk yet.	 * Likewise, there could be unflushed non-transactional changes to the	 * inode core that have to go to disk and this requires us to issue	 * a synchronous transaction to capture these changes correctly.	 *	 * This code relies on the assumption that if the i_update_core field	 * of the inode is clear and the inode is unpinned then it is clean	 * and no action is required.	 */	xfs_ilock(ip, XFS_ILOCK_SHARED);	/*	 * First check if the VFS inode is marked dirty.  All the dirtying	 * of non-transactional updates no goes through mark_inode_dirty*,	 * which allows us to distinguish beteeen pure timestamp updates	 * and i_size updates which need to be caught for fdatasync.	 * After that also theck for the dirty state in the XFS inode, which	 * might gets cleared when the inode gets written out via the AIL	 * or xfs_iflush_cluster.	 */	if (((inode->i_state & I_DIRTY_DATASYNC) ||	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&	    ip->i_update_core) {		/*		 * Kick off a transaction to log the inode core to get the		 * updates.  The sync transaction will also force the log.		 */		xfs_iunlock(ip, XFS_ILOCK_SHARED);		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);		error = xfs_trans_reserve(tp, 0,				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);		if (error) {			xfs_trans_cancel(tp, 0);			return -error;		}		xfs_ilock(ip, XFS_ILOCK_EXCL);		/*		 * Note - it's possible that we might have pushed ourselves out		 * of the way during trans_reserve which would flush the inode.		 * But there's no guarantee that the inode buffer has actually		 * gone out yet (it's delwri).	Plus the buffer could be pinned		 * anyway if it's part of an inode in another recent		 * transaction.	 So we play it safe and fire off the		 * transaction anyway.		 */		xfs_trans_ijoin(tp, ip);		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);		xfs_trans_set_sync(tp);		error = _xfs_trans_commit(tp, 0, &log_flushed);		xfs_iunlock(ip, XFS_ILOCK_EXCL);	} else {		/*		 * Timestamps/size haven't changed since last inode flush or		 * inode transaction commit.  That means either nothing got		 * written or a transaction committed which caught the updates.		 * If the latter happened and the transaction hasn't hit the		 * disk yet, the inode will be still be pinned.  If it is,		 * force the log.		 */		if (xfs_ipincount(ip)) {//.........这里部分代码省略.........
Author: rrowicki, Project: Chrono_Kernel-1, Lines: 101


Example 5: xfs_setattr_nonsize

intxfs_setattr_nonsize(	struct xfs_inode	*ip,	struct iattr		*iattr,	int			flags){	xfs_mount_t		*mp = ip->i_mount;	struct inode		*inode = VFS_I(ip);	int			mask = iattr->ia_valid;	xfs_trans_t		*tp;	int			error;	uid_t			uid = 0, iuid = 0;	gid_t			gid = 0, igid = 0;	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;	trace_xfs_setattr(ip);	if (mp->m_flags & XFS_MOUNT_RDONLY)		return XFS_ERROR(EROFS);	if (XFS_FORCED_SHUTDOWN(mp))		return XFS_ERROR(EIO);	error = -inode_change_ok(inode, iattr);	if (error)		return XFS_ERROR(error);	ASSERT((mask & ATTR_SIZE) == 0);	/*	 * If disk quotas is on, we make sure that the dquots do exist on disk,	 * before we start any other transactions. Trying to do this later	 * is messy. We don't care to take a readlock to look at the ids	 * in inode here, because we can't hold it across the trans_reserve.	 * If the IDs do change before we take the ilock, we're covered	 * because the i_*dquot fields will get updated anyway.	 */	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {		uint	qflags = 0;		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {			uid = iattr->ia_uid;			qflags |= XFS_QMOPT_UQUOTA;		} else {			uid = ip->i_d.di_uid;		}		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {			gid = iattr->ia_gid;			qflags |= XFS_QMOPT_GQUOTA;		}  else {			gid = ip->i_d.di_gid;		}		/*		 * We take a reference when we initialize udqp and gdqp,		 * so it is important that we never blindly double trip on		 * the same variable. See xfs_create() for an example.		 */		ASSERT(udqp == NULL);		ASSERT(gdqp == NULL);		error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),					 qflags, &udqp, &gdqp);		if (error)			return error;	}	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);	if (error)		goto out_dqrele;	xfs_ilock(ip, XFS_ILOCK_EXCL);	/*	 * Change file ownership.  Must be the owner or privileged.	 */	if (mask & (ATTR_UID|ATTR_GID)) {		/*		 * These IDs could have changed since we last looked at them.		 * But, we're assured that if the ownership did change		 * while we didn't have the inode locked, inode's dquot(s)		 * would have changed also.		 */		iuid = ip->i_d.di_uid;		igid = ip->i_d.di_gid;		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;		/*		 * Do a quota reservation only if uid/gid is actually		 * going to change.		 */		if (XFS_IS_QUOTA_RUNNING(mp) &&		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {			ASSERT(tp);			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,						capable(CAP_FOWNER) ?						XFS_QMOPT_FORCE_RES : 0);//.........这里部分代码省略.........
Author: AndroidDeveloperAlliance, Project: ZenKernel_Grouper, Lines: 101


Example 6: xfs_swapext

//.........这里部分代码省略.........	if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) ||	    (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) ||	    (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) ||	    (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) {		error = XFS_ERROR(EBUSY);		goto error0;	}	/* We need to fail if the file is memory mapped.  Once we have tossed	 * all existing pages, the page fault will have no option	 * but to go to the filesystem for pages. By making the page fault call	 * VOP_READ (or write in the case of autogrow) they block on the iolock	 * until we have switched the extents.	 */	if (VN_MAPPED(vp)) {		error = XFS_ERROR(EBUSY);		goto error0;	}	xfs_iunlock(ip, XFS_ILOCK_EXCL);	xfs_iunlock(tip, XFS_ILOCK_EXCL);	/*	 * There is a race condition here since we gave up the	 * ilock.  However, the data fork will not change since	 * we have the iolock (locked for truncation too) so we	 * are safe.  We don't really care if non-io related	 * fields change.	 */	VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);	if ((error = xfs_trans_reserve(tp, 0,				     XFS_ICHANGE_LOG_RES(mp), 0,				     0, 0))) {		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);		xfs_iunlock(tip, XFS_IOLOCK_EXCL);		xfs_trans_cancel(tp, 0);		return error;	}	xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);	/*	 * Count the number of extended attribute blocks	 */	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);		if (error) {			xfs_iunlock(ip,  lock_flags);			xfs_iunlock(tip, lock_flags);			xfs_trans_cancel(tp, 0);			return error;		}	}	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,			&taforkblks);		if (error) {			xfs_iunlock(ip,  lock_flags);			xfs_iunlock(tip, lock_flags);			xfs_trans_cancel(tp, 0);			return error;		}
Author: OpenHMR, Project: Open-HMR600, Lines: 67


Example 7: xfs_iomap_write_direct

intxfs_iomap_write_direct(	xfs_inode_t	*ip,	xfs_off_t	offset,	size_t		count,	int		flags,	xfs_bmbt_irec_t *ret_imap,	int		*nmaps,	int		found){	xfs_mount_t	*mp = ip->i_mount;	xfs_fileoff_t	offset_fsb;	xfs_fileoff_t	last_fsb;	xfs_filblks_t	count_fsb, resaligned;	xfs_fsblock_t	firstfsb;	xfs_extlen_t	extsz, temp;	int		nimaps;	int		bmapi_flag;	int		quota_flag;	int		rt;	xfs_trans_t	*tp;	xfs_bmbt_irec_t imap;	xfs_bmap_free_t free_list;	uint		qblocks, resblks, resrtextents;	int		committed;	int		error;	/*	 * Make sure that the dquots are there. This doesn't hold	 * the ilock across a disk read.	 */	error = xfs_qm_dqattach_locked(ip, 0);	if (error)		return XFS_ERROR(error);	rt = XFS_IS_REALTIME_INODE(ip);	extsz = xfs_get_extsz_hint(ip);	offset_fsb = XFS_B_TO_FSBT(mp, offset);	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));	if ((offset + count) > ip->i_size) {		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);		if (error)			goto error_out;	} else {		if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))			last_fsb = MIN(last_fsb, (xfs_fileoff_t)					ret_imap->br_blockcount +					ret_imap->br_startoff);	}	count_fsb = last_fsb - offset_fsb;	ASSERT(count_fsb > 0);	resaligned = count_fsb;	if (unlikely(extsz)) {		if ((temp = do_mod(offset_fsb, extsz)))			resaligned += temp;		if ((temp = do_mod(resaligned, extsz)))			resaligned += extsz - temp;	}	if (unlikely(rt)) {		resrtextents = qblocks = resaligned;		resrtextents /= mp->m_sb.sb_rextsize;		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);		quota_flag = XFS_QMOPT_RES_RTBLKS;	} else {		resrtextents = 0;		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);		quota_flag = XFS_QMOPT_RES_REGBLKS;	}	/*	 * Allocate and setup the transaction	 */	xfs_iunlock(ip, XFS_ILOCK_EXCL);	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);	error = xfs_trans_reserve(tp, resblks,			XFS_WRITE_LOG_RES(mp), resrtextents,			XFS_TRANS_PERM_LOG_RES,			XFS_WRITE_LOG_COUNT);	/*	 * Check for running out of space, note: need lock to return	 */	if (error)		xfs_trans_cancel(tp, 0);	xfs_ilock(ip, XFS_ILOCK_EXCL);	if (error)		goto error_out;	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);	if (error)		goto error1;	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	xfs_trans_ihold(tp, ip);	bmapi_flag = XFS_BMAPI_WRITE;	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))		bmapi_flag |= XFS_BMAPI_PREALLOC;//.........这里部分代码省略.........
Author: ArthySundaram, Project: firstrepo, Lines: 101


Example 8: xfs_iomap_write_allocate

/* * Pass in a delayed allocate extent, convert it to real extents; * return to the caller the extent we create which maps on top of * the originating callers request. * * Called without a lock on the inode. * * We no longer bother to look at the incoming map - all we have to * guarantee is that whatever we allocate fills the required range. */intxfs_iomap_write_allocate(	xfs_inode_t	*ip,	xfs_off_t	offset,	size_t		count,	xfs_bmbt_irec_t *map,	int		*retmap){	xfs_mount_t	*mp = ip->i_mount;	xfs_fileoff_t	offset_fsb, last_block;	xfs_fileoff_t	end_fsb, map_start_fsb;	xfs_fsblock_t	first_block;	xfs_bmap_free_t	free_list;	xfs_filblks_t	count_fsb;	xfs_bmbt_irec_t	imap;	xfs_trans_t	*tp;	int		nimaps, committed;	int		error = 0;	int		nres;	*retmap = 0;	/*	 * Make sure that the dquots are there.	 */	error = xfs_qm_dqattach(ip, 0);	if (error)		return XFS_ERROR(error);	offset_fsb = XFS_B_TO_FSBT(mp, offset);	count_fsb = map->br_blockcount;	map_start_fsb = map->br_startoff;	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));	while (count_fsb != 0) {		/*		 * Set up a transaction with which to allocate the		 * backing store for the file.  Do allocations in a		 * loop until we get some space in the range we are		 * interested in.  The other space that might be allocated		 * is in the delayed allocation extent on which we sit		 * but before our buffer starts.		 */		nimaps = 0;		while (nimaps == 0) {			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);			tp->t_flags |= XFS_TRANS_RESERVE;			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);			error = xfs_trans_reserve(tp, nres,					XFS_WRITE_LOG_RES(mp),					0, XFS_TRANS_PERM_LOG_RES,					XFS_WRITE_LOG_COUNT);			if (error) {				xfs_trans_cancel(tp, 0);				return XFS_ERROR(error);			}			xfs_ilock(ip, XFS_ILOCK_EXCL);			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);			xfs_trans_ihold(tp, ip);			xfs_bmap_init(&free_list, &first_block);			/*			 * it is possible that the extents have changed since			 * we did the read call as we dropped the ilock for a			 * while. We have to be careful about truncates or hole			 * punchs here - we are not allowed to allocate			 * non-delalloc blocks here.			 *			 * The only protection against truncation is the pages			 * for the range we are being asked to convert are			 * locked and hence a truncate will block on them			 * first.			 *			 * As a result, if we go beyond the range we really			 * need and hit an delalloc extent boundary followed by			 * a hole while we have excess blocks in the map, we			 * will fill the hole incorrectly and overrun the			 * transaction reservation.			 *			 * Using a single map prevents this as we are forced to			 * check each map we look for overlap with the desired			 * range and abort as soon as we find it. Also, given			 * that we only return a single map, having one beyond			 * what we can return is probably a bit silly.			 *			 * We also need to check that we don't go beyond EOF;			 * this is a truncate optimisation as a truncate sets//.........这里部分代码省略.........
Author: ArthySundaram, Project: firstrepo, Lines: 101


Example 9: xfs_qm_idtodq

/* * allocate an incore dquot from the kernel heap, * and fill its core with quota information kept on disk. * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk * if it wasn't already allocated. */STATIC intxfs_qm_idtodq(	xfs_mount_t	*mp,	xfs_dqid_t	id,	 /* gid or uid, depending on type */	uint		type,	 /* UDQUOT or GDQUOT */	uint		flags,	 /* DQALLOC, DQREPAIR */	xfs_dquot_t	**O_dqpp)/* OUT : incore dquot, not locked */{	xfs_dquot_t	*dqp;	int		error;	xfs_trans_t	*tp;	int		cancelflags=0;	dqp = xfs_qm_dqinit(mp, id, type);	tp = NULL;	if (flags & XFS_QMOPT_DQALLOC) {		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);		if ((error = xfs_trans_reserve(tp,				       XFS_QM_DQALLOC_SPACE_RES(mp),				       XFS_WRITE_LOG_RES(mp) +					      BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +					      128,				       0,				       XFS_TRANS_PERM_LOG_RES,				       XFS_WRITE_LOG_COUNT))) {			cancelflags = 0;			goto error0;		}		cancelflags = XFS_TRANS_RELEASE_LOG_RES;	}	/*	 * Read it from disk; xfs_dqread() takes care of	 * all the necessary initialization of dquot's fields (locks, etc)	 */	if ((error = xfs_qm_dqread(tp, id, dqp, flags))) {		/*		 * This can happen if quotas got turned off (ESRCH),		 * or if the dquot didn't exist on disk and we ask to		 * allocate (ENOENT).		 */		xfs_dqtrace_entry(dqp, "DQREAD FAIL");		cancelflags |= XFS_TRANS_ABORT;		goto error0;	}	if (tp) {		if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,					     NULL)))			goto error1;	}	*O_dqpp = dqp;	return (0); error0:	ASSERT(error);	if (tp)		xfs_trans_cancel(tp, cancelflags); error1:	xfs_qm_dqdestroy(dqp);	*O_dqpp = NULL;	return (error);}
Author: Antonio-Zhou, Project: Linux-2.6.11, Lines: 69


Example 10: xfs_qm_scall_setqlim

/* * Adjust quota limits, and start/stop timers accordingly. */intxfs_qm_scall_setqlim(	xfs_mount_t		*mp,	xfs_dqid_t		id,	uint			type,	fs_disk_quota_t		*newlim){	struct xfs_quotainfo	*q = mp->m_quotainfo;	xfs_disk_dquot_t	*ddq;	xfs_dquot_t		*dqp;	xfs_trans_t		*tp;	int			error;	xfs_qcnt_t		hard, soft;	if (newlim->d_fieldmask & ~XFS_DQ_MASK)		return EINVAL;	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)		return 0;	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,				      0, 0, XFS_DEFAULT_LOG_COUNT))) {		xfs_trans_cancel(tp, 0);		return (error);	}	/*	 * We don't want to race with a quotaoff so take the quotaoff lock.	 * (We don't hold an inode lock, so there's nothing else to stop	 * a quotaoff from happening). (XXXThis doesn't currently happen	 * because we take the vfslock before calling xfs_qm_sysent).	 */	mutex_lock(&q->qi_quotaofflock);	/*	 * Get the dquot (locked), and join it to the transaction.	 * Allocate the dquot if this doesn't exist.	 */	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {		xfs_trans_cancel(tp, XFS_TRANS_ABORT);		ASSERT(error != ENOENT);		goto out_unlock;	}	xfs_trans_dqjoin(tp, dqp);	ddq = &dqp->q_core;	/*	 * Make sure that hardlimits are >= soft limits before changing.	 */	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :			be64_to_cpu(ddq->d_blk_hardlimit);	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :			be64_to_cpu(ddq->d_blk_softlimit);	if (hard == 0 || hard >= soft) {		ddq->d_blk_hardlimit = cpu_to_be64(hard);		ddq->d_blk_softlimit = cpu_to_be64(soft);		if (id == 0) {			q->qi_bhardlimit = hard;			q->qi_bsoftlimit = soft;		}	} else {		xfs_debug(mp, "blkhard %Ld < blksoft %Ld/n", hard, soft);	}	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :			be64_to_cpu(ddq->d_rtb_hardlimit);	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :			be64_to_cpu(ddq->d_rtb_softlimit);	if (hard == 0 || hard >= soft) {		ddq->d_rtb_hardlimit = cpu_to_be64(hard);		ddq->d_rtb_softlimit = cpu_to_be64(soft);		if (id == 0) {			q->qi_rtbhardlimit = hard;			q->qi_rtbsoftlimit = soft;		}	} else {		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld/n", hard, soft);	}	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?		(xfs_qcnt_t) newlim->d_ino_hardlimit :			be64_to_cpu(ddq->d_ino_hardlimit);	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?		(xfs_qcnt_t) newlim->d_ino_softlimit :			be64_to_cpu(ddq->d_ino_softlimit);	if (hard == 0 || hard >= soft) {		ddq->d_ino_hardlimit = cpu_to_be64(hard);		ddq->d_ino_softlimit = cpu_to_be64(soft);		if (id == 0) {			q->qi_ihardlimit = hard;			q->qi_isoftlimit = soft;		}	} else {		xfs_debug(mp, "ihard %Ld < isoft %Ld/n", hard, soft);//.........这里部分代码省略.........
Author: AdrianHuang, Project: linux-3.8.13, Lines: 101


Example 11: xfs_setattr_size

//.........这里部分代码省略.........			return error;	}	/* Now wait for all direct I/O to complete. */	inode_dio_wait(inode);	/*	 * We've already locked out new page faults, so now we can safely remove	 * pages from the page cache knowing they won't get refaulted until we	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are	 * complete. The truncate_setsize() call also cleans partial EOF page	 * PTEs on extending truncates and hence ensures sub-page block size	 * filesystems are correctly handled, too.	 *	 * We have to do all the page cache truncate work outside the	 * transaction context as the "lock" order is page lock->log space	 * reservation as defined by extent allocation in the writeback path.	 * Hence a truncate can fail with ENOMEM from xfs_trans_reserve(), but	 * having already truncated the in-memory version of the file (i.e. made	 * user visible changes). There's not much we can do about this, except	 * to hope that the caller sees ENOMEM and retries the truncate	 * operation.	 */	if (IS_DAX(inode))		error = dax_truncate_page(inode, newsize, xfs_get_blocks_direct);	else		error = block_truncate_page(inode->i_mapping, newsize,					    xfs_get_blocks);	if (error)		return error;	truncate_setsize(inode, newsize);	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);	if (error)		goto out_trans_cancel;	lock_flags |= XFS_ILOCK_EXCL;	xfs_ilock(ip, XFS_ILOCK_EXCL);	xfs_trans_ijoin(tp, ip, 0);	/*	 * Only change the c/mtime if we are changing the size or we are	 * explicitly asked to change it.  This handles the semantic difference	 * between truncate() and ftruncate() as implemented in the VFS.	 *	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a	 * special case where we need to update the times despite not having	 * these flags set.  For all other operations the VFS set these flags	 * explicitly if it wants a timestamp update.	 */	if (newsize != oldsize &&	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {		iattr->ia_ctime = iattr->ia_mtime =			current_fs_time(inode->i_sb);		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;	}	/*	 * The first thing we do is set the size to new_size permanently on	 * disk.  This way we don't have to worry about anyone ever being able	 * to look at the data being freed even in the face of a crash.	 * What we're getting around here is the case where we free a block, it	 * is allocated to another file, it is written to, and then we crash.	 * If the new data gets written to the file but the log buffers	 * containing the free and reallocation don't, then we'd end up with
Author: Chong-Li, Project: cse522, Lines: 67


Example 12: xfs_iomap_write_unwritten

intxfs_iomap_write_unwritten(	xfs_inode_t	*ip,	xfs_off_t	offset,	size_t		count){	xfs_mount_t	*mp = ip->i_mount;	xfs_iocore_t    *io = &ip->i_iocore;	xfs_fileoff_t	offset_fsb;	xfs_filblks_t	count_fsb;	xfs_filblks_t	numblks_fsb;	xfs_fsblock_t	firstfsb;	int		nimaps;	xfs_trans_t	*tp;	xfs_bmbt_irec_t imap;	xfs_bmap_free_t free_list;	uint		resblks;	int		committed;	int		error;	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,				&ip->i_iocore, offset, count);	offset_fsb = XFS_B_TO_FSBT(mp, offset);	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;	do {		/*		 * set up a transaction to convert the range of extents		 * from unwritten to real. Do allocations in a loop until		 * we have covered the range passed in.		 */		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);		error = xfs_trans_reserve(tp, resblks,				XFS_WRITE_LOG_RES(mp), 0,				XFS_TRANS_PERM_LOG_RES,				XFS_WRITE_LOG_COUNT);		if (error) {			xfs_trans_cancel(tp, 0);			return XFS_ERROR(error);		}		xfs_ilock(ip, XFS_ILOCK_EXCL);		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);		xfs_trans_ihold(tp, ip);		/*		 * Modify the unwritten extent state of the buffer.		 */		XFS_BMAP_INIT(&free_list, &firstfsb);		nimaps = 1;		error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,				  XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,				  1, &imap, &nimaps, &free_list, NULL);		if (error)			goto error_on_bmapi_transaction;		error = xfs_bmap_finish(&(tp), &(free_list),				firstfsb, &committed);		if (error)			goto error_on_bmapi_transaction;		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);		xfs_iunlock(ip, XFS_ILOCK_EXCL);		if (error)			return XFS_ERROR(error);		if (unlikely(!imap.br_startblock &&			     !(io->io_flags & XFS_IOCORE_RT)))			return xfs_cmn_err_fsblock_zero(ip, &imap);		if ((numblks_fsb = imap.br_blockcount) == 0) {			/*			 * The numblks_fsb value should always get			 * smaller, otherwise the loop is stuck.			 */			ASSERT(imap.br_blockcount);			break;		}		offset_fsb += numblks_fsb;		count_fsb -= numblks_fsb;	} while (count_fsb > 0);	return 0;error_on_bmapi_transaction:	xfs_bmap_cancel(&free_list);	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));	xfs_iunlock(ip, XFS_ILOCK_EXCL);	return XFS_ERROR(error);}
Author: xiandaicxsj, Project: copyKvm, Lines: 95


Example 13: xfs_iomap_write_allocate

/* * Pass in a delayed allocate extent, convert it to real extents; * return to the caller the extent we create which maps on top of * the originating callers request. * * Called without a lock on the inode. */intxfs_iomap_write_allocate(	xfs_inode_t	*ip,	xfs_off_t	offset,	size_t		count,	xfs_bmbt_irec_t *map,	int		*retmap){	xfs_mount_t	*mp = ip->i_mount;	xfs_iocore_t    *io = &ip->i_iocore;	xfs_fileoff_t	offset_fsb, last_block;	xfs_fileoff_t	end_fsb, map_start_fsb;	xfs_fsblock_t	first_block;	xfs_bmap_free_t	free_list;	xfs_filblks_t	count_fsb;	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];	xfs_trans_t	*tp;	int		i, nimaps, committed;	int		error = 0;	int		nres;	*retmap = 0;	/*	 * Make sure that the dquots are there.	 */	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))		return XFS_ERROR(error);	offset_fsb = XFS_B_TO_FSBT(mp, offset);	count_fsb = map->br_blockcount;	map_start_fsb = map->br_startoff;	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));	while (count_fsb != 0) {		/*		 * Set up a transaction with which to allocate the		 * backing store for the file.  Do allocations in a		 * loop until we get some space in the range we are		 * interested in.  The other space that might be allocated		 * is in the delayed allocation extent on which we sit		 * but before our buffer starts.		 */		nimaps = 0;		while (nimaps == 0) {			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);			error = xfs_trans_reserve(tp, nres,					XFS_WRITE_LOG_RES(mp),					0, XFS_TRANS_PERM_LOG_RES,					XFS_WRITE_LOG_COUNT);			if (error == ENOSPC) {				error = xfs_trans_reserve(tp, 0,						XFS_WRITE_LOG_RES(mp),						0,						XFS_TRANS_PERM_LOG_RES,						XFS_WRITE_LOG_COUNT);			}			if (error) {				xfs_trans_cancel(tp, 0);				return XFS_ERROR(error);			}			xfs_ilock(ip, XFS_ILOCK_EXCL);			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);			xfs_trans_ihold(tp, ip);			XFS_BMAP_INIT(&free_list, &first_block);			nimaps = XFS_STRAT_WRITE_IMAPS;			/*			 * Ensure we don't go beyond eof - it is possible			 * the extents changed since we did the read call,			 * we dropped the ilock in the interim.			 */			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);			xfs_bmap_last_offset(NULL, ip, &last_block,				XFS_DATA_FORK);			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);			if ((map_start_fsb + count_fsb) > last_block) {				count_fsb = last_block - map_start_fsb;				if (count_fsb == 0) {					error = EAGAIN;					goto trans_cancel;				}			}			/* Go get the actual blocks */			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,					XFS_BMAPI_WRITE, &first_block, 1,					imap, &nimaps, &free_list, NULL);//.........这里部分代码省略.........
Author: xiandaicxsj, Project: copyKvm, Lines: 101


Example 14: xfs_qm_dqread

/* * Read in the ondisk dquot using dqtobp() then copy it to an incore version, * and release the buffer immediately. * * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed. */intxfs_qm_dqread(	struct xfs_mount	*mp,	xfs_dqid_t		id,	uint			type,	uint			flags,	struct xfs_dquot	**O_dqpp){	struct xfs_dquot	*dqp;	struct xfs_disk_dquot	*ddqp;	struct xfs_buf		*bp;	struct xfs_trans	*tp = NULL;	int			error;	int			cancelflags = 0;	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);	dqp->dq_flags = type;	dqp->q_core.d_id = cpu_to_be32(id);	dqp->q_mount = mp;	INIT_LIST_HEAD(&dqp->q_lru);	mutex_init(&dqp->q_qlock);	init_waitqueue_head(&dqp->q_pinwait);	/*	 * Because we want to use a counting completion, complete	 * the flush completion once to allow a single access to	 * the flush completion without blocking.	 */	init_completion(&dqp->q_flush);	complete(&dqp->q_flush);	/*	 * Make sure group quotas have a different lock class than user	 * quotas.	 */	switch (type) {	case XFS_DQ_USER:		/* uses the default lock class */		break;	case XFS_DQ_GROUP:		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);		break;	case XFS_DQ_PROJ:		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);		break;	default:		ASSERT(0);		break;	}	XFS_STATS_INC(xs_qm_dquot);	trace_xfs_dqread(dqp);	if (flags & XFS_QMOPT_DQALLOC) {		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);		if (error)			goto error1;		cancelflags = XFS_TRANS_RELEASE_LOG_RES;	}	/*	 * get a pointer to the on-disk dquot and the buffer containing it	 * dqp already knows its own type (GROUP/USER).	 */	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);	if (error) {		/*		 * This can happen if quotas got turned off (ESRCH),		 * or if the dquot didn't exist on disk and we ask to		 * allocate (ENOENT).		 */		trace_xfs_dqread_fail(dqp);		cancelflags |= XFS_TRANS_ABORT;		goto error1;	}	/* copy everything from disk dquot to the incore dquot */	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));	xfs_qm_dquot_logitem_init(dqp);	/*	 * Reservation counters are defined as reservation plus current usage	 * to avoid having to add every time.	 */	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);	/* initialize the dquot speculative prealloc thresholds *///.........这里部分代码省略.........
Author: MaxChina, Project: linux, Lines: 101


Example 15: xfs_iomap_write_unwritten

intxfs_iomap_write_unwritten(	xfs_inode_t	*ip,	xfs_off_t	offset,	size_t		count){	xfs_mount_t	*mp = ip->i_mount;	xfs_fileoff_t	offset_fsb;	xfs_filblks_t	count_fsb;	xfs_filblks_t	numblks_fsb;	xfs_fsblock_t	firstfsb;	int		nimaps;	xfs_trans_t	*tp;	xfs_bmbt_irec_t imap;	xfs_bmap_free_t free_list;	uint		resblks;	int		committed;	int		error;	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count);	offset_fsb = XFS_B_TO_FSBT(mp, offset);	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);	/*	 * Reserve enough blocks in this transaction for two complete extent	 * btree splits.  We may be converting the middle part of an unwritten	 * extent and in this case we will insert two new extents in the btree	 * each of which could cause a full split.	 *	 * This reservation amount will be used in the first call to	 * xfs_bmbt_split() to select an AG with enough space to satisfy the	 * rest of the operation.	 */	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;	do {		/*		 * set up a transaction to convert the range of extents		 * from unwritten to real. Do allocations in a loop until		 * we have covered the range passed in.		 *		 * Note that we open code the transaction allocation here		 * to pass KM_NOFS--we can't risk to recursing back into		 * the filesystem here as we might be asked to write out		 * the same inode that we complete here and might deadlock		 * on the iolock.		 */		xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);		tp->t_flags |= XFS_TRANS_RESERVE;		error = xfs_trans_reserve(tp, resblks,				XFS_WRITE_LOG_RES(mp), 0,				XFS_TRANS_PERM_LOG_RES,				XFS_WRITE_LOG_COUNT);		if (error) {			xfs_trans_cancel(tp, 0);			return XFS_ERROR(error);		}		xfs_ilock(ip, XFS_ILOCK_EXCL);		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);		xfs_trans_ihold(tp, ip);		/*		 * Modify the unwritten extent state of the buffer.		 */		xfs_bmap_init(&free_list, &firstfsb);		nimaps = 1;		error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,				  XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,				  1, &imap, &nimaps, &free_list, NULL);		if (error)			goto error_on_bmapi_transaction;		error = xfs_bmap_finish(&(tp), &(free_list), &committed);		if (error)			goto error_on_bmapi_transaction;		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);		xfs_iunlock(ip, XFS_ILOCK_EXCL);		if (error)			return XFS_ERROR(error);		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))			return xfs_cmn_err_fsblock_zero(ip, &imap);		if ((numblks_fsb = imap.br_blockcount) == 0) {			/*			 * The numblks_fsb value should always get			 * smaller, otherwise the loop is stuck.			 */			ASSERT(imap.br_blockcount);			break;		}		offset_fsb += numblks_fsb;		count_fsb -= numblks_fsb;	} while (count_fsb > 0);//.........这里部分代码省略.........
Author: ArthySundaram, Project: firstrepo, Lines: 101


Example 16: xfs_rename

/* * xfs_rename */intxfs_rename(	xfs_inode_t	*src_dp,	struct xfs_name	*src_name,	xfs_inode_t	*src_ip,	xfs_inode_t	*target_dp,	struct xfs_name	*target_name,	xfs_inode_t	*target_ip){	xfs_trans_t	*tp = NULL;	xfs_mount_t	*mp = src_dp->i_mount;	int		new_parent;		/* moving to a new dir */	int		src_is_directory;	/* src_name is a directory */	int		error;	xfs_bmap_free_t free_list;	xfs_fsblock_t   first_block;	int		cancel_flags;	int		committed;	xfs_inode_t	*inodes[4];	int		spaceres;	int		num_inodes;	xfs_itrace_entry(src_dp);	xfs_itrace_entry(target_dp);	if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) ||	    DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) {		error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,					src_dp, DM_RIGHT_NULL,					target_dp, DM_RIGHT_NULL,					src_name->name, target_name->name,					0, 0, 0);		if (error)			return error;	}	/* Return through std_return after this point. */	new_parent = (src_dp != target_dp);	src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);	if (src_is_directory) {		/*		 * Check for link count overflow on target_dp		 */		if (target_ip == NULL && new_parent &&		    target_dp->i_d.di_nlink >= XFS_MAXLINK) {			error = XFS_ERROR(EMLINK);			goto std_return;		}	}	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,				inodes, &num_inodes);	xfs_bmap_init(&free_list, &first_block);	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);	if (error == ENOSPC) {		spaceres = 0;		error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,				XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);	}	if (error) {		xfs_trans_cancel(tp, 0);		goto std_return;	}	/*	 * Attach the dquots to the inodes	 */	if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {		xfs_trans_cancel(tp, cancel_flags);		goto std_return;	}	/*	 * Lock all the participating inodes. Depending upon whether	 * the target_name exists in the target directory, and	 * whether the target directory is the same as the source	 * directory, we can lock from 2 to 4 inodes.	 */	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);	/*	 * Join all the inodes to the transaction. From this point on,	 * we can rely on either trans_commit or trans_cancel to unlock	 * them.  Note that we need to add a vnode reference to the	 * directories since trans_commit & trans_cancel will decrement	 * them when they unlock the inodes.  Also, we need to be careful	 * not to add an inode to the transaction more than once.	 */	IHOLD(src_dp);	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);//.........这里部分代码省略.........
Author: johnny, Project: CobraDroidBeta, Lines: 101


Example 17: xfs_qm_qino_alloc

/* * Create an inode and return with a reference already taken, but unlocked * This is how we create quota inodes */STATIC intxfs_qm_qino_alloc(	xfs_mount_t	*mp,	xfs_inode_t	**ip,	uint		flags){	xfs_trans_t	*tp;	int		error;	int		committed;	bool		need_alloc = true;	*ip = NULL;	/*	 * With superblock that doesn't have separate pquotino, we	 * share an inode between gquota and pquota. If the on-disk	 * superblock has GQUOTA and the filesystem is now mounted	 * with PQUOTA, just use sb_gquotino for sb_pquotino and	 * vice-versa.	 */	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {		xfs_ino_t ino = NULLFSINO;		if ((flags & XFS_QMOPT_PQUOTA) &&			     (mp->m_sb.sb_gquotino != NULLFSINO)) {			ino = mp->m_sb.sb_gquotino;			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);		} else if ((flags & XFS_QMOPT_GQUOTA) &&			     (mp->m_sb.sb_pquotino != NULLFSINO)) {			ino = mp->m_sb.sb_pquotino;			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);		}		if (ino != NULLFSINO) {			error = xfs_iget(mp, NULL, ino, 0, 0, ip);			if (error)				return error;			mp->m_sb.sb_gquotino = NULLFSINO;			mp->m_sb.sb_pquotino = NULLFSINO;			need_alloc = false;		}	}	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);	if (error) {		xfs_trans_cancel(tp, 0);		return error;	}	if (need_alloc) {		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,								&committed);		if (error) {			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |					 XFS_TRANS_ABORT);			return error;		}	}	/*	 * Make the changes in the superblock, and log those too.	 * sbfields arg may contain fields other than *QUOTINO;	 * VERSIONNUM for example.	 */	spin_lock(&mp->m_sb_lock);	if (flags & XFS_QMOPT_SBVERSION) {		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));		xfs_sb_version_addquota(&mp->m_sb);		mp->m_sb.sb_uquotino = NULLFSINO;		mp->m_sb.sb_gquotino = NULLFSINO;		mp->m_sb.sb_pquotino = NULLFSINO;		/* qflags will get updated fully _after_ quotacheck */		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;	}	if (flags & XFS_QMOPT_UQUOTA)		mp->m_sb.sb_uquotino = (*ip)->i_ino;	else if (flags & XFS_QMOPT_GQUOTA)		mp->m_sb.sb_gquotino = (*ip)->i_ino;	else		mp->m_sb.sb_pquotino = (*ip)->i_ino;	spin_unlock(&mp->m_sb_lock);	xfs_log_sb(tp);	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);	if (error) {		ASSERT(XFS_FORCED_SHUTDOWN(mp));		xfs_alert(mp, "%s failed (error %d)!", __func__, error);	}	if (need_alloc)		xfs_finish_inode_setup(*ip);	return error;}
Author: hejin, Project: kernel-3.10.0-327.13.1.el7.x86_64-fs, Lines: 99


Example 18: xfs_setattr_size

//.........这里部分代码省略.........	xfs_iunlock(ip, XFS_ILOCK_EXCL);	lock_flags &= ~XFS_ILOCK_EXCL;	/*	 * We are going to log the inode size change in this transaction so	 * any previous writes that are beyond the on disk EOF and the new	 * EOF that have not been written out need to be written here.  If we	 * do not write the data out, we expose ourselves to the null files	 * problem.	 *	 * Only flush from the on disk size to the smaller of the in memory	 * file size or the new size as that's the range we really care about	 * here and prevents waiting for other data not within the range we	 * care about here.	 */	if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {		error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size,					XBF_ASYNC, FI_NONE);		if (error)			goto out_unlock;	}	/*	 * Wait for all I/O to complete.	 */	xfs_ioend_wait(ip);	error = -block_truncate_page(inode->i_mapping, iattr->ia_size,				     xfs_get_blocks);	if (error)		goto out_unlock;	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,				 XFS_TRANS_PERM_LOG_RES,				 XFS_ITRUNCATE_LOG_COUNT);	if (error)		goto out_trans_cancel;	truncate_setsize(inode, iattr->ia_size);	commit_flags = XFS_TRANS_RELEASE_LOG_RES;	lock_flags |= XFS_ILOCK_EXCL;	xfs_ilock(ip, XFS_ILOCK_EXCL);	xfs_trans_ijoin(tp, ip);	/*	 * Only change the c/mtime if we are changing the size or we are	 * explicitly asked to change it.  This handles the semantic difference	 * between truncate() and ftruncate() as implemented in the VFS.	 *	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a	 * special case where we need to update the times despite not having	 * these flags set.  For all other operations the VFS set these flags	 * explicitly if it wants a timestamp update.	 */	if (iattr->ia_size != ip->i_size &&	    (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {		iattr->ia_ctime = iattr->ia_mtime =			current_fs_time(inode->i_sb);		mask |= ATTR_CTIME | ATTR_MTIME;	}	if (iattr->ia_size > ip->i_size) {
Author: AndroidDeveloperAlliance, Project: ZenKernel_Grouper, Lines: 67


Example 19: xfs_attr_inactive

int
xfs_attr_inactive(xfs_inode_t *dp)
{
	xfs_trans_t *trans;
	xfs_mount_t *mp;
	int error;

	mp = dp->i_mount;
	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));

	xfs_ilock(dp, XFS_ILOCK_SHARED);
	if (!xfs_inode_hasattr(dp) ||
	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		xfs_iunlock(dp, XFS_ILOCK_SHARED);
		return 0;
	}
	xfs_iunlock(dp, XFS_ILOCK_SHARED);

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call.  All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
	error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
	if (error) {
		xfs_trans_cancel(trans, 0);
		return error;
	}
	xfs_ilock(dp, XFS_ILOCK_EXCL);

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
	xfs_trans_ijoin(trans, dp, 0);

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp) ||
	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = 0;
		goto out;
	}

	error = xfs_attr3_root_inactive(&trans, dp);
	if (error)
		goto out;

	error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
	if (error)
		goto out;

	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);

	return error;

out:
	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
Author: 19Dan01, Project: linux, Lines: 68


Example 20: xfs_swap_extents

//.........这里部分代码省略.........	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {		error = XFS_ERROR(EBUSY);		goto out_unlock;	}	/* We need to fail if the file is memory mapped.  Once we have tossed	 * all existing pages, the page fault will have no option	 * but to go to the filesystem for pages. By making the page fault call	 * vop_read (or write in the case of autogrow) they block on the iolock	 * until we have switched the extents.	 */	if (VN_MAPPED(VFS_I(ip))) {		error = XFS_ERROR(EBUSY);		goto out_unlock;	}	xfs_iunlock(ip, XFS_ILOCK_EXCL);	xfs_iunlock(tip, XFS_ILOCK_EXCL);	/*	 * There is a race condition here since we gave up the	 * ilock.  However, the data fork will not change since	 * we have the iolock (locked for truncation too) so we	 * are safe.  We don't really care if non-io related	 * fields change.	 */	xfs_tosspages(ip, 0, -1, FI_REMAPF);	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);	if ((error = xfs_trans_reserve(tp, 0,				     XFS_ICHANGE_LOG_RES(mp), 0,				     0, 0))) {		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);		xfs_iunlock(tip, XFS_IOLOCK_EXCL);		xfs_trans_cancel(tp, 0);		goto out;	}	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);	/*	 * Count the number of extended attribute blocks	 */	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);		if (error)			goto out_trans_cancel;	}	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,			&taforkblks);		if (error)			goto out_trans_cancel;	}	/*	 * Swap the data forks of the inodes	 */	ifp = &ip->i_df;	tifp = &tip->i_df;	*tempifp = *ifp;	/* struct copy */
Author: dkati, Project: Hulk-Kernel-V2, Lines: 67


Example 21: xfs_setattr_nonsize

intxfs_setattr_nonsize(	struct xfs_inode	*ip,	struct iattr		*iattr,	int			flags){	xfs_mount_t		*mp = ip->i_mount;	struct inode		*inode = VFS_I(ip);	int			mask = iattr->ia_valid;	xfs_trans_t		*tp;	int			error;	uid_t			uid = 0, iuid = 0;	gid_t			gid = 0, igid = 0;	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;	trace_xfs_setattr(ip);	if (mp->m_flags & XFS_MOUNT_RDONLY)		return XFS_ERROR(EROFS);	if (XFS_FORCED_SHUTDOWN(mp))		return XFS_ERROR(EIO);	error = -inode_change_ok(inode, iattr);	if (error)		return XFS_ERROR(error);	ASSERT((mask & ATTR_SIZE) == 0);	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {		uint	qflags = 0;		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {			uid = iattr->ia_uid;			qflags |= XFS_QMOPT_UQUOTA;		} else {			uid = ip->i_d.di_uid;		}		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {			gid = iattr->ia_gid;			qflags |= XFS_QMOPT_GQUOTA;		}  else {			gid = ip->i_d.di_gid;		}		ASSERT(udqp == NULL);		ASSERT(gdqp == NULL);		error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),					 qflags, &udqp, &gdqp);		if (error)			return error;	}	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);	if (error)		goto out_dqrele;	xfs_ilock(ip, XFS_ILOCK_EXCL);	if (mask & (ATTR_UID|ATTR_GID)) {		iuid = ip->i_d.di_uid;		igid = ip->i_d.di_gid;		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;		if (XFS_IS_QUOTA_RUNNING(mp) &&		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {			ASSERT(tp);			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,						capable(CAP_FOWNER) ?						XFS_QMOPT_FORCE_RES : 0);			if (error)					goto out_trans_cancel;		}	}	xfs_trans_ijoin(tp, ip, 0);	if (mask & (ATTR_UID|ATTR_GID)) {		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&		    !capable(CAP_FSETID))			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);		if (iuid != uid) {			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {				ASSERT(mask & ATTR_UID);				ASSERT(udqp);				olddquot1 = xfs_qm_vop_chown(tp, ip,							&ip->i_udquot, udqp);			}			ip->i_d.di_uid = uid;			inode->i_uid = uid;		}		if (igid != gid) {			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {				ASSERT(!XFS_IS_PQUOTA_ON(mp));				ASSERT(mask & ATTR_GID);//.........这里部分代码省略.........
Author: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 101


Example 22: xfs_qm_qino_alloc

/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
Author: AD5GB, Project: kernel_n5_3.10-experimental, Lines: 65


Example 23: xfs_setattr_size

intxfs_setattr_size(	struct xfs_inode	*ip,	struct iattr		*iattr,	int			flags){	struct xfs_mount	*mp = ip->i_mount;	struct inode		*inode = VFS_I(ip);	int			mask = iattr->ia_valid;	xfs_off_t		oldsize, newsize;	struct xfs_trans	*tp;	int			error;	uint			lock_flags;	uint			commit_flags = 0;	trace_xfs_setattr(ip);	if (mp->m_flags & XFS_MOUNT_RDONLY)		return XFS_ERROR(EROFS);	if (XFS_FORCED_SHUTDOWN(mp))		return XFS_ERROR(EIO);	error = -inode_change_ok(inode, iattr);	if (error)		return XFS_ERROR(error);	ASSERT(S_ISREG(ip->i_d.di_mode));	ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|			ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|			ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);	lock_flags = XFS_ILOCK_EXCL;	if (!(flags & XFS_ATTR_NOLOCK))		lock_flags |= XFS_IOLOCK_EXCL;	xfs_ilock(ip, lock_flags);	oldsize = inode->i_size;	newsize = iattr->ia_size;	if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {		if (!(mask & (ATTR_CTIME|ATTR_MTIME)))			goto out_unlock;		xfs_iunlock(ip, lock_flags);		iattr->ia_valid &= ~ATTR_SIZE;		return xfs_setattr_nonsize(ip, iattr, 0);	}	error = xfs_qm_dqattach_locked(ip, 0);	if (error)		goto out_unlock;	if (newsize > oldsize) {		error = xfs_zero_eof(ip, newsize, oldsize);		if (error)			goto out_unlock;	}	xfs_iunlock(ip, XFS_ILOCK_EXCL);	lock_flags &= ~XFS_ILOCK_EXCL;	/*	 * We are going to log the inode size change in this transaction so	 * any previous writes that are beyond the on disk EOF and the new	 * EOF that have not been written out need to be written here.  If we	 * do not write the data out, we expose ourselves to the null files	 * problem.	 *	 * Only flush from the on disk size to the smaller of the in memory	 * file size or the new size as that's the range we really care about	 * here and prevents waiting for other data not within the range we	 * care about here.	 */	if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {		error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0,					FI_NONE);		if (error)			goto out_unlock;	}	inode_dio_wait(inode);	error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);	if (error)		goto out_unlock;	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,				 XFS_TRANS_PERM_LOG_RES,				 XFS_ITRUNCATE_LOG_COUNT);	if (error)		goto out_trans_cancel;	truncate_setsize(inode, newsize);	commit_flags = XFS_TRANS_RELEASE_LOG_RES;	lock_flags |= XFS_ILOCK_EXCL;	xfs_ilock(ip, XFS_ILOCK_EXCL);//.........这里部分代码省略.........
Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines of code: 101


Example 24: xfs_qm_scall_setqlim

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
STATIC int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0)
		return (0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
		ASSERT(error != ENOENT);
		return (error);
	}
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_bhardlimit = hard;
			mp->m_quotainfo->qi_bsoftlimit = soft;
		}
	} else {
		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_rtbhardlimit = hard;
			mp->m_quotainfo->qi_rtbsoftlimit = soft;
		}
	} else {
		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_ihardlimit = hard;
			mp->m_quotainfo->qi_isoftlimit = soft;
		}
//......... part of the code omitted here .........
Developer ID: Antonio-Zhou, Project: Linux-2.6.11, Lines of code: 101
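Example 24 illustrates a reservation that needs no data blocks at all: only a small, fixed amount of log space is reserved (the on-disk dquot plus some slack), with no permanent log reservation and the default log count. Below is a minimal sketch of that configuration under the same assumptions as before; example_reserve_log_only() is a hypothetical helper, and the sizes simply mirror the example above.

/*
 * Sketch (hypothetical helper) of a log-only, non-permanent reservation
 * as in example 24: no block reservation, a small fixed amount of log
 * space, default log count.
 */
STATIC int
example_reserve_log_only(
	xfs_mount_t	*mp)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp,
				  0,					/* no block reservation */
				  sizeof(xfs_disk_dquot_t) + 128,	/* log space in bytes */
				  0,					/* no realtime extents */
				  0,					/* no permanent log reservation */
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* ... join the dquot, adjust limits, log the change ... */

	return xfs_trans_commit(tp, 0);
}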


Example 25: xfs_attr_remove

/*
 * Generic handler routine to remove a name from an attribute list.
 * Transitions attribute list from Btree to shortform as necessary.
 */
int
xfs_attr_remove(
	struct xfs_inode	*dp,
	const unsigned char	*name,
	int			flags)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_args	args;
	struct xfs_bmap_free	flist;
	xfs_fsblock_t		firstblock;
	int			error;

	XFS_STATS_INC(xs_attr_remove);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	if (!xfs_inode_hasattr(dp))
		return -ENOATTR;

	error = xfs_attr_args_init(&args, dp, name, flags);
	if (error)
		return error;

	args.firstblock = &firstblock;
	args.flist = &flist;

	/*
	 * we have no control over the attribute names that userspace passes us
	 * to remove, so we have to allow the name lookup prior to attribute
	 * removal to fail.
	 */
	args.op_flags = XFS_DA_OP_OKNOENT;

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		return error;

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call.  All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);

	/*
	 * Root fork attributes can use reserved data blocks for this
	 * operation if necessary
	 */
	if (flags & ATTR_ROOT)
		args.trans->t_flags |= XFS_TRANS_RESERVE;

	error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
				  XFS_ATTRRM_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(args.trans, 0);
		return error;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL);

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks not allocate in the common case.
	 */
	xfs_trans_ijoin(args.trans, dp, 0);

	if (!xfs_inode_hasattr(dp)) {
		error = -ENOATTR;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
		error = xfs_attr_shortform_remove(&args);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_removename(&args);
	} else {
		error = xfs_attr_node_removename(&args);
	}

	if (error)
		goto out;

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(args.trans);

	if ((flags & ATTR_KERNOTIME) == 0)
		xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
//......... part of the code omitted here .........
Developer ID: 3null, Project: linux, Lines of code: 101
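Examples 25, 26 and 28 use a newer variant of xfs_trans_reserve with four arguments: a pointer to a struct xfs_trans_res taken from M_RES(mp) describes the log reservation, followed by the block count and realtime extent count. A minimal hedged sketch of that calling convention follows; example_remove_attr_res() is a hypothetical helper, and the commit flag assumes a permanent log reservation as in example 22 above.

/*
 * Sketch (hypothetical helper) of the struct xfs_trans_res based call
 * used in examples 25, 26 and 28. The tr_attrrm reservation and the
 * XFS_ATTRRM_SPACE_RES macro are the ones used in example 25.
 */
STATIC int
example_remove_attr_res(
	struct xfs_inode	*dp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrrm,	/* log reservation */
				  XFS_ATTRRM_SPACE_RES(mp),	/* blocks */
				  0);				/* realtime extents */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* ... lock and join the inode, perform the removal, log it ... */

	/* release the permanent log reservation at commit time */
	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
}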


Example 26: xfs_ioc_space

//......... part of the code omitted here .........
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = XFS_ERROR(EINVAL);
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > mp->m_super->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		if (!error)
			setprealloc = true;
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
						XFS_BMAPI_PREALLOC);
		if (!error)
			setprealloc = true;
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_setattr_size(ip, &iattr);
		if (!error)
			clrprealloc = true;
		break;
	default:
		ASSERT(0);
		error = XFS_ERROR(EINVAL);
	}

	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_unlock;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(ioflags & IO_INVIS)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (setprealloc)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	else if (clrprealloc)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (filp->f_flags & O_DSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	mnt_drop_write_file(filp);
	return -error;
}
Developer ID: 7799, Project: linux, Lines of code: 101


Example 27: xfs_ioctl_setattr

STATIC int
xfs_ioctl_setattr(
    xfs_inode_t		*ip,
    struct fsxattr		*fa,
    int			mask)
{
    struct xfs_mount	*mp = ip->i_mount;
    struct xfs_trans	*tp;
    unsigned int		lock_flags = 0;
    struct xfs_dquot	*udqp = NULL;
    struct xfs_dquot	*gdqp = NULL;
    struct xfs_dquot	*olddquot = NULL;
    int			code;

    trace_xfs_ioctl_setattr(ip);

    if (mp->m_flags & XFS_MOUNT_RDONLY)
        return XFS_ERROR(EROFS);
    if (XFS_FORCED_SHUTDOWN(mp))
        return XFS_ERROR(EIO);

    /*
     * Disallow 32bit project ids when projid32bit feature is not enabled.
     */
    if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
            !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
        return XFS_ERROR(EINVAL);

    /*
     * If disk quotas is on, we make sure that the dquots do exist on disk,
     * before we start any other transactions. Trying to do this later
     * is messy. We don't care to take a readlock to look at the ids
     * in inode here, because we can't hold it across the trans_reserve.
     * If the IDs do change before we take the ilock, we're covered
     * because the i_*dquot fields will get updated anyway.
     */
    if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
        code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
                                  ip->i_d.di_gid, fa->fsx_projid,
                                  XFS_QMOPT_PQUOTA, &udqp, &gdqp);
        if (code)
            return code;
    }

    /*
     * For the other attributes, we acquire the inode lock and
     * first do an error checking pass.
     */
    tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
    code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
    if (code)
        goto error_return;

    lock_flags = XFS_ILOCK_EXCL;
    xfs_ilock(ip, lock_flags);

    /*
     * CAP_FOWNER overrides the following restrictions:
     *
     * The user ID of the calling process must be equal
     * to the file owner ID, except in cases where the
     * CAP_FSETID capability is applicable.
     */
    if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
        code = XFS_ERROR(EPERM);
        goto error_return;
    }

    /*
     * Do a quota reservation only if projid is actually going to change.
     */
    if (mask & FSX_PROJID) {
        if (XFS_IS_QUOTA_RUNNING(mp) &&
                XFS_IS_PQUOTA_ON(mp) &&
                xfs_get_projid(ip) != fa->fsx_projid) {
            ASSERT(tp);
            code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
                                            capable(CAP_FOWNER) ?
                                            XFS_QMOPT_FORCE_RES : 0);
            if (code)	/* out of quota */
                goto error_return;
        }
    }

    if (mask & FSX_EXTSIZE) {
        /*
         * Can't change extent size if any extents are allocated.
         */
        if (ip->i_d.di_nextents &&
                ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
                 fa->fsx_extsize)) {
            code = XFS_ERROR(EINVAL);	/* EFBIG? */
            goto error_return;
        }

        /*
         * Extent size must be a multiple of the appropriate block
         * size, if set at all. It must also be smaller than the
         * maximum extent size supported by the filesystem.
         *
//......... part of the code omitted here .........
Developer ID: sandrico555, Project: android_kernel_jena_msm7x27a, Lines of code: 101


Example 28: xfs_inactive_symlink_rmt

//......... part of the code omitted here .........
	 * held so the cancel won't rele it, see below.
	 */
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	xfs_bmap_init(&free_list, &first_block);
	nmaps = ARRAY_SIZE(mval);
	error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
				mval, &nmaps, 0);
	if (error)
		goto error0;
	/*
	 * Invalidate the block(s). No validation is done.
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		if (!bp) {
			error = ENOMEM;
			goto error1;
		}
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			&first_block, &free_list, &done)))
		goto error1;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
		goto error1;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Get a new, empty transaction to return to our caller.
	 */
	ntp = xfs_trans_dup(tp);
	/*
	 * Commit the transaction containing extent freeing and EFDs.
	 * If we get an error on the commit here or on the reserve below,
	 * we need to unlock the inode since the new transaction doesn't
	 * have the inode attached.
	 */
	error = xfs_trans_commit(tp, 0);
	tp = ntp;
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(tp->t_ticket);
	/*
	 * Remove the memory for extent descriptions (just bookkeeping).
	 */
	if (ip->i_df.if_bytes)
		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
	ASSERT(ip->i_df.if_bytes == 0);
	/*
	 * Put an itruncate log reservation in the new transaction
	 * for our caller.
	 */
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	xfs_trans_ijoin(tp, ip, 0);
	*tpp = tp;
	return 0;

 error1:
	xfs_bmap_cancel(&free_list);
 error0:
	return error;
}
Developer ID: aplnosun, Project: linux, Lines of code: 101
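Example 28 also demonstrates the transaction-chaining idiom: before committing, the running transaction is duplicated with xfs_trans_dup(), the original is committed, and a fresh reservation is placed on the duplicate so the caller can continue under the same permanent log reservation. The sketch below distills only that rolling step; example_roll_transaction() is a hypothetical helper, and it omits details the real code handles, such as re-joining the inode and dropping the extra log ticket reference gained from xfs_trans_dup().

/*
 * Sketch (hypothetical helper) of the transaction rolling step shown in
 * example 28. Error handling is reduced to the bare minimum.
 */
STATIC int
example_roll_transaction(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	int			error;

	/* the duplicate inherits the permanent log reservation */
	ntp = xfs_trans_dup(tp);

	/* commit what has been logged so far */
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	/* put a new log reservation on the continuation transaction */
	error = xfs_trans_reserve(ntp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error)
		return error;

	*tpp = ntp;
	return 0;
}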



Note: The xfs_trans_reserve function examples in this article were compiled from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.

