
Self-study tutorial: C++ xfs_dqunlock function code examples

51自学网 2021-06-03 10:19:29
  C++
This tutorial on C++ xfs_dqunlock function code examples is quite practical; we hope it helps you.

This article collects typical usage examples of the xfs_dqunlock function in C++. If you have been struggling with questions such as: how exactly is xfs_dqunlock used? what does a real call to xfs_dqunlock look like? then the hand-picked code examples below may help.

A total of 25 code examples of xfs_dqunlock are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.
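Before the examples, here is a minimal sketch of the calling pattern they all share: xfs_dqlock() takes the in-core dquot's lock, the caller manipulates the dquot, and xfs_dqunlock() releases it. The helper name example_adjust_dquot is an illustrative assumption and not a kernel function; only xfs_dqlock/xfs_dqunlock, struct xfs_dquot, and the q_nrefs field are taken from the examples below.

/*
 * Minimal illustrative sketch (not from the kernel tree): lock a dquot,
 * modify it while the lock is held, then drop the lock with xfs_dqunlock().
 * The q_nrefs manipulation mirrors what xfs_qm_dqget-style code in the
 * examples below does; treat this as an assumed pattern, not a real API.
 */
static void
example_adjust_dquot(struct xfs_dquot *dqp)
{
	xfs_dqlock(dqp);	/* serialize access to the in-core dquot */
	dqp->q_nrefs++;		/* e.g. take a reference while locked */
	xfs_dqunlock(dqp);	/* every locked path ends in xfs_dqunlock() */
}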

Example 1: xfs_qm_dquot_logitem_pushbuf

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines of code: 41


Example 2: xfs_qm_dqget_cache_lookup

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
Developer ID: avagin, Project: linux, Lines of code: 38


Example 3: xfs_qm_dquot_logitem_push

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, 0);
	if (error)
		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
			error, dqp);
	xfs_dqunlock(dqp);
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines of code: 32


Example 4: xfs_qm_flush_one

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 26


Example 5: xfs_qm_dqput_final

STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
Developer ID: openube, Project: android_kernel_sony_c2305, Lines of code: 35


Example 6: xfs_qm_dquot_logitem_push

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	xfs_dq_logitem_t	*logitem)
{
	xfs_dquot_t	*dqp;
	int		error;

	dqp = logitem->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
	if (error)
		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
			error, dqp);
	xfs_dqunlock(dqp);
}
Developer ID: maraz, Project: linux-2.6, Lines of code: 34


Example 7: xfs_trans_unreserve_and_mod_dquots

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
Developer ID: 12rafael, Project: jellytimekernel, Lines of code: 66


Example 8: xfs_qm_dqput

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}
Developer ID: Astralix, Project: mainline-dss11, Lines of code: 20


Example 9: xfs_qm_dqattach_one

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
Developer ID: MaxChina, Project: linux, Lines of code: 46


Example 10: xfs_qm_dqput

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
Developer ID: MaxChina, Project: linux, Lines of code: 24


Example 11: xfs_qm_dquot_logitem_unlock

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flags is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction.  Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines of code: 27


Example 12: xfs_qm_dqput_final

STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
Developer ID: Astralix, Project: mainline-dss11, Lines of code: 39


Example 13: xfs_qm_dqattach_grouphint

/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 26


Example 14: xfs_qm_dqput

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		mutex_lock(&qi->qi_lru_lock);
		if (list_empty(&dqp->q_lru)) {
			list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
			qi->qi_lru_count++;
			XFS_STATS_INC(xs_qm_dquot_unused);
		}
		mutex_unlock(&qi->qi_lru_lock);
	}
	xfs_dqunlock(dqp);
}
Developer ID: hejin, Project: kernel-3.10.0-327.13.1.el7.x86_64-fs, Lines of code: 30


Example 15: xfs_trans_dqresv

//......... part of the code omitted .........
	}

	error = 0;
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			     (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}

			if (softlimit > 0ULL &&
			     (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			count = be64_to_cpu(dqp->q_core.d_icount);
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;

			if (hardlimit > 0ULL && count >= hardlimit) {
				error = EDQUOT;
				goto error_return;
			} else if (softlimit > 0ULL && count >= softlimit) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				     (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

error_return:
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}
Developer ID: gnensis, Project: linux-2.6.15, Lines of code: 101


Example 16: xfs_qm_dqattach_one

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 83


Example 17: xfs_qm_vop_dqalloc

/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
//......... part of the code omitted .........
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 101


Example 18: xfs_qm_dqreclaim_one

STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_move_tail;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_unlock_move_tail;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_move_tail;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_unlock_move_tail;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
out_unlock_move_tail:
	xfs_dqunlock(dqp);
out_move_tail:
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 83


Example 19: xfs_qm_dqpurge

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 84


Example 20: xfs_qm_dqget

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
//......... part of the code omitted .........
Developer ID: MaxChina, Project: linux, Lines of code: 101


Example 21: xfs_qm_scall_setqlim

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
    struct xfs_mount	*mp,
    xfs_dqid_t		id,
    uint			type,
    struct qc_dqblk		*newlim)
{
    struct xfs_quotainfo	*q = mp->m_quotainfo;
    struct xfs_disk_dquot	*ddq;
    struct xfs_dquot	*dqp;
    struct xfs_trans	*tp;
    int			error;
    xfs_qcnt_t		hard, soft;

    if (newlim->d_fieldmask & ~XFS_QC_MASK)
        return EINVAL;
    if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
        return 0;

    /*
     * We don't want to race with a quotaoff so take the quotaoff lock.
     * We don't hold an inode lock, so there's nothing else to stop
     * a quotaoff from happening.
     */
    mutex_lock(&q->qi_quotaofflock);

    /*
     * Get the dquot (locked) before we start, as we need to do a
     * transaction to allocate it if it doesn't exist. Once we have the
     * dquot, unlock it so we can start the next transaction safely. We hold
     * a reference to the dquot, so it's safe to do this unlock/lock without
     * it being reclaimed in the mean time.
     */
    error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
    if (error) {
        ASSERT(error != ENOENT);
        goto out_unlock;
    }
    xfs_dqunlock(dqp);

    tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
    error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
    if (error) {
        xfs_trans_cancel(tp, 0);
        goto out_rele;
    }

    xfs_dqlock(dqp);
    xfs_trans_dqjoin(tp, dqp);
    ddq = &dqp->q_core;

    /*
     * Make sure that hardlimits are >= soft limits before changing.
     */
    hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
           be64_to_cpu(ddq->d_blk_hardlimit);
    soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
           be64_to_cpu(ddq->d_blk_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_blk_hardlimit = cpu_to_be64(hard);
        ddq->d_blk_softlimit = cpu_to_be64(soft);
        xfs_dquot_set_prealloc_limits(dqp);
        if (id == 0) {
            q->qi_bhardlimit = hard;
            q->qi_bsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
    }
    hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
           be64_to_cpu(ddq->d_rtb_hardlimit);
    soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
           be64_to_cpu(ddq->d_rtb_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_rtb_hardlimit = cpu_to_be64(hard);
        ddq->d_rtb_softlimit = cpu_to_be64(soft);
        if (id == 0) {
            q->qi_rtbhardlimit = hard;
            q->qi_rtbsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
    }

    hard = (newlim->d_fieldmask & QC_INO_HARD) ?
           (xfs_qcnt_t) newlim->d_ino_hardlimit :
           be64_to_cpu(ddq->d_ino_hardlimit);
    soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
           (xfs_qcnt_t) newlim->d_ino_softlimit :
           be64_to_cpu(ddq->d_ino_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_ino_hardlimit = cpu_to_be64(hard);
        ddq->d_ino_softlimit = cpu_to_be64(soft);
//......... part of the code omitted .........
Developer ID: daltenty, Project: kernel-ubuntu.trusty-vgt, Lines of code: 101


Example 22: xfs_qm_dqlookup

/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	xfs_dqhash_t		*qh,
	xfs_dquot_t		**O_dqpp)
{
	xfs_dquot_t		*dqp;
	uint			flist_locked;
	xfs_dquot_t		*d;

	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(dqp->MPL_PREVP != NULL);

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");

					/*
					 * We may have raced with dqreclaim_one()
					 * (and lost). So, flag that we don't
					 * want the dquot to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					xfs_qm_freelist_lock(xfs_Gqm);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					xfs_qm_freelist_unlock(xfs_Gqm);
					flist_locked = B_FALSE;
				} else {
					/*
					 * take it off the freelist
					 */
					xfs_dqtrace_entry(dqp,
							"DQLOOKUP: TAKEOFF FL");
					XQM_FREELIST_REMOVE(dqp);
					/* xfs_qm_freelist_print(&(xfs_Gqm->
							qm_dqfreelist),
							"after removal"); */
				}
			}

			/*
			 * grab a reference
			 */
			XFS_DQHOLD(dqp);

			if (flist_locked)
				xfs_qm_freelist_unlock(xfs_Gqm);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			if (dqp->HL_PREVP != &qh->qh_next) {
				xfs_dqtrace_entry(dqp,
						  "DQLOOKUP: HASH MOVETOFRONT");
				if ((d = dqp->HL_NEXT))
					d->HL_PREVP = dqp->HL_PREVP;
				*(dqp->HL_PREVP) = d;
				d = qh->qh_next;
				d->HL_PREVP = &dqp->HL_NEXT;
//......... part of the code omitted .........
Developer ID: jameshilliard, Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5, Lines of code: 101


Example 23: xfs_qm_dqpurge

/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;
	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		xfs_dqflock(dqp);
	}
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}
Developer ID: jameshilliard, Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5, Lines of code: 88


Example 24: xfs_qm_dqput

/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQPUT");

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
		xfs_dqunlock(dqp);
		xfs_qm_freelist_lock(xfs_Gqm);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
			/*
			 * insert at end of the freelist.
			 */
			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}

			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
			   "@@@@@++ Free list (after append) @@@@@+");
			   */
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
}
Developer ID: jameshilliard, Project: actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5, Lines of code: 74


Example 25: xfs_qm_dquot_logitem_pushbuf

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t    *qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == current_pid());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if (!issemalocked(&(dqp->q_flock))  ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
		    XFS_QI_DQCHUNKLEN(mp),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  issemalocked(&(dqp->q_flock)));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
							error, qip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}
Developer ID: maraz, Project: linux-2.6, Lines of code: 81



Note: the xfs_dqunlock function examples in this article were compiled from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's License. Please do not republish without permission.

