您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ xfs_dqlock函数代码示例

51自学网 2021-06-03 10:19:29
  C++
这篇教程C++ xfs_dqlock函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中xfs_dqlock函数的典型用法代码示例。如果您正苦于以下问题:C++ xfs_dqlock函数的具体用法?C++ xfs_dqlock怎么用?C++ xfs_dqlock使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了xfs_dqlock函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: xfs_trans_unreserve_and_mod_dquots

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;

	/* Nothing to undo unless this transaction actually dirtied dquots. */
	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	/* Two passes: user dquots first (j == 0), then group dquots. */
	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 *
			 * The dquot is locked lazily: only if at least one of
			 * the three reservation counters is nonzero, and at
			 * most once per dquot.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
开发者ID:12rafael,项目名称:jellytimekernel,代码行数:66,


示例2: xfs_qm_flush_one

STATIC intxfs_qm_flush_one(	struct xfs_dquot	*dqp,	void			*data){	struct list_head	*buffer_list = data;	struct xfs_buf		*bp = NULL;	int			error = 0;	xfs_dqlock(dqp);	if (dqp->dq_flags & XFS_DQ_FREEING)		goto out_unlock;	if (!XFS_DQ_IS_DIRTY(dqp))		goto out_unlock;	xfs_dqflock(dqp);	error = xfs_qm_dqflush(dqp, &bp);	if (error)		goto out_unlock;	xfs_buf_delwri_queue(bp, buffer_list);	xfs_buf_relse(bp);out_unlock:	xfs_dqunlock(dqp);	return error;}
开发者ID:AdrianHuang,项目名称:linux-3.8.13,代码行数:26,


示例3: xfs_qm_dqget_cache_insert

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			ret;

	mutex_lock(&qi->qi_tree_lock);
	ret = radix_tree_insert(tree, id, dqp);
	if (likely(!ret)) {
		/*
		 * Success: hand the dquot back locked, with one reference,
		 * and account it in the per-mount dquot count.
		 */
		xfs_dqlock(dqp);
		dqp->q_nrefs = 1;
		qi->qi_dquots++;
		mutex_unlock(&qi->qi_tree_lock);
		return 0;
	}

	/* Somebody else inserted this id first; caller must retry. */
	WARN_ON(ret != -EEXIST);
	mutex_unlock(&qi->qi_tree_lock);
	trace_xfs_dqget_dup(dqp);
	return ret;
}
开发者ID:avagin,项目名称:linux,代码行数:35,


示例4: xfs_qm_dqget_cache_lookup

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 *
 * Returns NULL on a cache miss.  On a hit the dquot's reference count is
 * bumped under qi_tree_lock, so the entry cannot be reclaimed out from
 * under the caller.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		/*
		 * Found a dquot that is mid-teardown: back out of both
		 * locks, give the freeing side a moment to finish, and
		 * retry the lookup from scratch.
		 */
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
开发者ID:avagin,项目名称:linux,代码行数:38,


示例5: xfs_qm_dqput_final

/*
 * Drop the last reference to a dquot: park it on the quotainfo LRU list
 * (making it reclaimable) and release any group-quota hint it holds.
 * Called with the dquot locked; unlocks it before returning.
 */
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	/* Add to the LRU only if not already on it. */
	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 *
	 * Detach the hint while still holding dqp's lock, but defer the
	 * actual put until after dqp is unlocked to respect lock ordering.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
开发者ID:openube,项目名称:android_kernel_sony_c2305,代码行数:35,


示例6: xfs_qm_dqput_final

/*
 * Drop the last reference to a dquot: place it on the list_lru (making it
 * reclaimable) and release any group/project quota hints it holds.
 * Called with the dquot locked; unlocks it before returning.
 */
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	/* list_lru_add returns true only when the item was newly added. */
	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 *
	 * Detach the hints while dqp is still locked; the puts themselves
	 * are deferred until after dqp is unlocked.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
开发者ID:Astralix,项目名称:mainline-dss11,代码行数:39,


示例7: xfs_qm_dqrele

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp);
	xfs_dqtrace_entry(dqp, "DQRELE");

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	/* xfs_qm_dqput() drops the reference and unlocks the dquot. */
	xfs_qm_dqput(dqp);
}
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:20,


示例8: xfs_qm_dqattach_grouphint

/* * Given a udquot and gdquot, attach a ptr to the group dquot in the * udquot as a hint for future lookups. */STATIC voidxfs_qm_dqattach_grouphint(	xfs_dquot_t	*udq,	xfs_dquot_t	*gdq){	xfs_dquot_t	*tmp;	xfs_dqlock(udq);	tmp = udq->q_gdquot;	if (tmp) {		if (tmp == gdq)			goto done;		udq->q_gdquot = NULL;		xfs_qm_dqrele(tmp);	}	udq->q_gdquot = xfs_qm_dqhold(gdq);done:	xfs_dqunlock(udq);}
开发者ID:AdrianHuang,项目名称:linux-3.8.13,代码行数:26,


示例9: xfs_dqlock2

/*
 * Lock up to two dquots.  When both are present they are always taken in
 * ascending on-disk id order so concurrent callers cannot deadlock.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	/* Degenerate case: at most one dquot — just lock whichever exists. */
	if (!d1 || !d2) {
		if (d1) {
			xfs_dqlock(d1);
		} else if (d2) {
			xfs_dqlock(d2);
		}
		return;
	}

	ASSERT(d1 != d2);
	/* Lower id first; ties fall through to the d1-then-d2 order. */
	if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) >
	    INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
		xfs_dqlock(d2);
		xfs_dqlock(d1);
	} else {
		xfs_dqlock(d1);
		xfs_dqlock(d2);
	}
}
开发者ID:Antonio-Zhou,项目名称:Linux-2.6.11,代码行数:22,


示例10: xfs_dqlock2

/*
 * Lock up to two dquots.  When both are present they are always taken in
 * ascending on-disk id order, which prevents ABBA deadlocks between
 * concurrent callers locking the same pair.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	xfs_dquot_t	*first;
	xfs_dquot_t	*second;

	/* Degenerate case: at most one dquot supplied. */
	if (d1 == NULL || d2 == NULL) {
		if (d1 != NULL)
			xfs_dqlock(d1);
		else if (d2 != NULL)
			xfs_dqlock(d2);
		return;
	}

	ASSERT(d1 != d2);

	/* Order by id; equal ids keep the d1-then-d2 order. */
	if (be32_to_cpu(d1->q_core.d_id) > be32_to_cpu(d2->q_core.d_id)) {
		first = d2;
		second = d1;
	} else {
		first = d1;
		second = d2;
	}
	xfs_dqlock(first);
	xfs_dqlock(second);
}
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:23,


示例11: xfs_trans_dqresv

/* * This reserves disk blocks and inodes against a dquot. * Flags indicate if the dquot is to be locked here and also * if the blk reservation is for RT or regular blocks. * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. * Returns EDQUOT if quota is exceeded. */STATIC intxfs_trans_dqresv(	xfs_trans_t	*tp,	xfs_mount_t	*mp,	xfs_dquot_t	*dqp,	long		nblks,	long		ninos,	uint		flags){	int		error;	xfs_qcnt_t	hardlimit;	xfs_qcnt_t	softlimit;	time_t		timer;	xfs_qwarncnt_t	warns;	xfs_qwarncnt_t	warnlimit;	xfs_qcnt_t	count;	xfs_qcnt_t	*resbcountp;	xfs_quotainfo_t	*q = mp->m_quotainfo;	if (! (flags & XFS_QMOPT_DQLOCK)) {		xfs_dqlock(dqp);	}	ASSERT(XFS_DQ_IS_LOCKED(dqp));	if (flags & XFS_TRANS_DQ_RES_BLKS) {		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);		if (!hardlimit)			hardlimit = q->qi_bhardlimit;		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);		if (!softlimit)			softlimit = q->qi_bsoftlimit;		timer = be32_to_cpu(dqp->q_core.d_btimer);		warns = be16_to_cpu(dqp->q_core.d_bwarns);		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);		resbcountp = &dqp->q_res_bcount;	} else {		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);		if (!hardlimit)			hardlimit = q->qi_rtbhardlimit;		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);		if (!softlimit)			softlimit = q->qi_rtbsoftlimit;		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);		resbcountp = &dqp->q_res_rtbcount;	}	error = 0;	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&	    dqp->q_core.d_id &&	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {#ifdef QUOTADEBUG		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);#endif		if (nblks > 0) {			/*			 * dquot is locked already. See if we'd go over the			 * hardlimit or exceed the timelimit if we allocate			 * nblks.			 
*/			if (hardlimit > 0ULL &&			     (hardlimit <= nblks + *resbcountp)) {				error = EDQUOT;				goto error_return;			}			if (softlimit > 0ULL &&			     (softlimit <= nblks + *resbcountp)) {				/*				 * If timer or warnings has expired,				 * return EDQUOT				 */				if ((timer != 0 && get_seconds() > timer) ||				    (warns != 0 && warns >= warnlimit)) {					error = EDQUOT;					goto error_return;				}			}		}		if (ninos > 0) {			count = be64_to_cpu(dqp->q_core.d_icount);			timer = be32_to_cpu(dqp->q_core.d_itimer);			warns = be16_to_cpu(dqp->q_core.d_iwarns);			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);			if (!hardlimit)				hardlimit = q->qi_ihardlimit;			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);			if (!softlimit)				softlimit = q->qi_isoftlimit;			if (hardlimit > 0ULL && count >= hardlimit) {//.........这里部分代码省略.........
开发者ID:gnensis,项目名称:linux-2.6.15,代码行数:101,


示例12: xfs_qm_dqattach_one

/*
 * Attach one dquot of the given @type to inode @ip, caching it in the slot
 * pointed to by @IO_idqpp (&i_udquot or &i_gdquot).  Uses @udqhint's cached
 * group dquot when possible to avoid a full xfs_qm_dqget() lookup.
 * Returns 0 on success or the error from xfs_qm_dqget().
 */
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			/* Hint matches: take a reference and we're done. */
			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
开发者ID:AdrianHuang,项目名称:linux-3.8.13,代码行数:83,


示例13: xfs_qm_dqpurge

/*
 * Purge a dquot from all tracking data structures and free it.
 *
 * Returns EAGAIN if the dquot is still referenced or already being freed;
 * the caller is expected to retry.  On success the dquot is removed from
 * the radix tree and LRU, any group hint is released, and the dquot is
 * destroyed.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	/* Mark the dquot so concurrent lookups back off and retry. */
	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
开发者ID:AdrianHuang,项目名称:linux-3.8.13,代码行数:84,


示例14: xfs_qm_dqget_inode

/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * have already had an inode attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	uint			type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	/* Fast path: already in the in-core cache, returned locked. */
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			/* Someone beat us to it; use theirs, drop ours. */
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
开发者ID:avagin,项目名称:linux,代码行数:85,


示例15: xfs_qm_dqget

/* * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a * a locked dquot, doing an allocation (if requested) as needed. * When both an inode and an id are given, the inode's id takes precedence. * That is, if the id changes while we don't hold the ilock inside this * function, the new dquot is returned, not necessarily the one requested * in the id argument. */intxfs_qm_dqget(	xfs_mount_t	*mp,	xfs_inode_t	*ip,	  /* locked inode (optional) */	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */{	struct xfs_quotainfo	*qi = mp->m_quotainfo;	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);	struct xfs_dquot	*dqp;	int			error;	ASSERT(XFS_IS_QUOTA_RUNNING(mp));	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {		return (ESRCH);	}#ifdef DEBUG	if (xfs_do_dqerror) {		if ((xfs_dqerror_target == mp->m_ddev_targp) &&		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {			xfs_debug(mp, "Returning error in dqget");			return (EIO);		}	}	ASSERT(type == XFS_DQ_USER ||	       type == XFS_DQ_PROJ ||	       type == XFS_DQ_GROUP);	if (ip) {		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));		ASSERT(xfs_inode_dquot(ip, type) == NULL);	}#endifrestart:	mutex_lock(&qi->qi_tree_lock);	dqp = radix_tree_lookup(tree, id);	if (dqp) {		xfs_dqlock(dqp);		if (dqp->dq_flags & XFS_DQ_FREEING) {			xfs_dqunlock(dqp);			mutex_unlock(&qi->qi_tree_lock);			trace_xfs_dqget_freeing(dqp);			delay(1);			goto restart;		}		dqp->q_nrefs++;		mutex_unlock(&qi->qi_tree_lock);		trace_xfs_dqget_hit(dqp);		XFS_STATS_INC(xs_qm_dqcachehits);		*O_dqpp = dqp;		return 0;	}	mutex_unlock(&qi->qi_tree_lock);	XFS_STATS_INC(xs_qm_dqcachemisses);	/*	 * Dquot cache miss. 
We don't want to keep the inode lock across	 * a (potential) disk read. Also we don't want to deal with the lock	 * ordering between quotainode and this inode. OTOH, dropping the inode	 * lock here means dealing with a chown that can happen before	 * we re-acquire the lock.	 */	if (ip)		xfs_iunlock(ip, XFS_ILOCK_EXCL);	error = xfs_qm_dqread(mp, id, type, flags, &dqp);	if (ip)		xfs_ilock(ip, XFS_ILOCK_EXCL);	if (error)		return error;	if (ip) {		/*		 * A dquot could be attached to this inode by now, since		 * we had dropped the ilock.		 */		if (xfs_this_quota_on(mp, type)) {			struct xfs_dquot	*dqp1;			dqp1 = xfs_inode_dquot(ip, type);			if (dqp1) {				xfs_qm_dqdestroy(dqp);//.........这里部分代码省略.........
开发者ID:MaxChina,项目名称:linux,代码行数:101,


示例16: xfs_qm_dqget

//.........这里部分代码省略.........	/*	 * See if this is mount code calling to look at the overall quota limits	 * which are stored in the id == 0 user or group's dquot.	 * Since we may not have done a quotacheck by this point, just return	 * the dquot without attaching it to any hashtables, lists, etc, or even	 * taking a reference.	 * The caller must dqdestroy this once done.	 */	if (flags & XFS_QMOPT_DQSUSER) {		ASSERT(id == 0);		ASSERT(! ip);		goto dqret;	}	/*	 * Dquot lock comes after hashlock in the lock ordering	 */	if (ip) {		xfs_ilock(ip, XFS_ILOCK_EXCL);		if (! XFS_IS_DQTYPE_ON(mp, type)) {			/* inode stays locked on return */			xfs_qm_dqdestroy(dqp);			return XFS_ERROR(ESRCH);		}		/*		 * A dquot could be attached to this inode by now, since		 * we had dropped the ilock.		 */		if (type == XFS_DQ_USER) {			if (ip->i_udquot) {				xfs_qm_dqdestroy(dqp);				dqp = ip->i_udquot;				xfs_dqlock(dqp);				goto dqret;			}		} else {			if (ip->i_gdquot) {				xfs_qm_dqdestroy(dqp);				dqp = ip->i_gdquot;				xfs_dqlock(dqp);				goto dqret;			}		}	}	/*	 * Hashlock comes after ilock in lock order	 */	XFS_DQ_HASH_LOCK(h);	if (version != h->qh_version) {		xfs_dquot_t *tmpdqp;		/*		 * Now, see if somebody else put the dquot in the		 * hashtable before us. This can happen because we didn't		 * keep the hashchain lock. We don't have to worry about		 * lock order between the two dquots here since dqp isn't		 * on any findable lists yet.		 */		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {			/*			 * Duplicate found. Just throw away the new dquot			 * and start over.			 */			xfs_qm_dqput(tmpdqp);			XFS_DQ_HASH_UNLOCK(h);
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:67,


示例17: xfs_qm_dqlookup

/* * Lookup a dquot in the incore dquot hashtable. We keep two separate * hashtables for user and group dquots; and, these are global tables * inside the XQM, not per-filesystem tables. * The hash chain must be locked by caller, and it is left locked * on return. Returning dquot is locked. */STATIC intxfs_qm_dqlookup(	xfs_mount_t		*mp,	xfs_dqid_t		id,	xfs_dqhash_t		*qh,	xfs_dquot_t		**O_dqpp){	xfs_dquot_t		*dqp;	uint			flist_locked;	xfs_dquot_t		*d;	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));	flist_locked = B_FALSE;	/*	 * Traverse the hashchain looking for a match	 */	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {		/*		 * We already have the hashlock. We don't need the		 * dqlock to look at the id field of the dquot, since the		 * id can't be modified without the hashlock anyway.		 */		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");			/*			 * All in core dquots must be on the dqlist of mp			 */			ASSERT(dqp->MPL_PREVP != NULL);			xfs_dqlock(dqp);			if (dqp->q_nrefs == 0) {				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");					/*					 * We may have raced with dqreclaim_one()					 * (and lost). So, flag that we don't					 * want the dquot to be reclaimed.					 
*/					dqp->dq_flags |= XFS_DQ_WANT;					xfs_dqunlock(dqp);					xfs_qm_freelist_lock(xfs_Gqm);					xfs_dqlock(dqp);					dqp->dq_flags &= ~(XFS_DQ_WANT);				}				flist_locked = B_TRUE;			}			/*			 * id couldn't have changed; we had the hashlock all			 * along			 */			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);			if (flist_locked) {				if (dqp->q_nrefs != 0) {					xfs_qm_freelist_unlock(xfs_Gqm);					flist_locked = B_FALSE;				} else {					/*					 * take it off the freelist					 */					xfs_dqtrace_entry(dqp,							"DQLOOKUP: TAKEOFF FL");					XQM_FREELIST_REMOVE(dqp);					/* xfs_qm_freelist_print(&(xfs_Gqm->							qm_dqfreelist),							"after removal"); */				}			}			/*			 * grab a reference			 */			XFS_DQHOLD(dqp);			if (flist_locked)				xfs_qm_freelist_unlock(xfs_Gqm);			/*			 * move the dquot to the front of the hashchain			 */			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));			if (dqp->HL_PREVP != &qh->qh_next) {				xfs_dqtrace_entry(dqp,						  "DQLOOKUP: HASH MOVETOFRONT");				if ((d = dqp->HL_NEXT))					d->HL_PREVP = dqp->HL_PREVP;				*(dqp->HL_PREVP) = d;				d = qh->qh_next;				d->HL_PREVP = &dqp->HL_NEXT;//.........这里部分代码省略.........
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:101,


示例18: xfs_qm_dqpurge

/*
 * Purge a dquot from the hashlist and mount dquot list, flushing it to
 * disk first if dirty, then reset it to an inactive state.
 *
 * Caller must hold the mplist lock and the dquot's hash lock; the hash
 * lock is dropped before returning.  Returns 1 if the dquot is still
 * referenced and cannot be purged, 0 on success.
 */
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;

	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		xfs_dqflock(dqp);
	}
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	/* Unhook from the hash chain and the per-mount dquot list. */
	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/* Scrub the dquot clean; it stays on the freelist for reuse. */
	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:88,


示例19: xfs_qm_dqput

/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 *
 * Must be called with the dquot locked; the dquot is unlocked on return.
 * The loop walks udquot -> gdquot so that dropping the last user-dquot
 * reference also drops the group hint's reference without recursing.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQPUT");

	/* Fast path: not the last reference, no freelist work needed. */
	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
		xfs_dqunlock(dqp);
		xfs_qm_freelist_lock(xfs_Gqm);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
			/*
			 * insert at end of the freelist.
			 */
			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}

			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
			   "@@@@@++ Free list (after append) @@@@@+");
			   */
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
}
开发者ID:jameshilliard,项目名称:actiontec_opensrc_mi424wr-rev-e-f_fw-20-10-7-5,代码行数:74,


示例20: xfs_qm_scall_setqlim

/* * Adjust quota limits, and start/stop timers accordingly. */intxfs_qm_scall_setqlim(    struct xfs_mount	*mp,    xfs_dqid_t		id,    uint			type,    struct qc_dqblk		*newlim){    struct xfs_quotainfo	*q = mp->m_quotainfo;    struct xfs_disk_dquot	*ddq;    struct xfs_dquot	*dqp;    struct xfs_trans	*tp;    int			error;    xfs_qcnt_t		hard, soft;    if (newlim->d_fieldmask & ~XFS_QC_MASK)        return EINVAL;    if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)        return 0;    /*     * We don't want to race with a quotaoff so take the quotaoff lock.     * We don't hold an inode lock, so there's nothing else to stop     * a quotaoff from happening.     */    mutex_lock(&q->qi_quotaofflock);    /*     * Get the dquot (locked) before we start, as we need to do a     * transaction to allocate it if it doesn't exist. Once we have the     * dquot, unlock it so we can start the next transaction safely. We hold     * a reference to the dquot, so it's safe to do this unlock/lock without     * it being reclaimed in the mean time.     */    error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);    if (error) {        ASSERT(error != ENOENT);        goto out_unlock;    }    xfs_dqunlock(dqp);    tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);    error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);    if (error) {        xfs_trans_cancel(tp, 0);        goto out_rele;    }    xfs_dqlock(dqp);    xfs_trans_dqjoin(tp, dqp);    ddq = &dqp->q_core;    /*     * Make sure that hardlimits are >= soft limits before changing.     */    hard = (newlim->d_fieldmask & QC_SPC_HARD) ?           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :           be64_to_cpu(ddq->d_blk_hardlimit);    soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?           
(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :           be64_to_cpu(ddq->d_blk_softlimit);    if (hard == 0 || hard >= soft) {        ddq->d_blk_hardlimit = cpu_to_be64(hard);        ddq->d_blk_softlimit = cpu_to_be64(soft);        xfs_dquot_set_prealloc_limits(dqp);        if (id == 0) {            q->qi_bhardlimit = hard;            q->qi_bsoftlimit = soft;        }    } else {        xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);    }    hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :           be64_to_cpu(ddq->d_rtb_hardlimit);    soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :           be64_to_cpu(ddq->d_rtb_softlimit);    if (hard == 0 || hard >= soft) {        ddq->d_rtb_hardlimit = cpu_to_be64(hard);        ddq->d_rtb_softlimit = cpu_to_be64(soft);        if (id == 0) {            q->qi_rtbhardlimit = hard;            q->qi_rtbsoftlimit = soft;        }    } else {        xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);    }    hard = (newlim->d_fieldmask & QC_INO_HARD) ?           (xfs_qcnt_t) newlim->d_ino_hardlimit :           be64_to_cpu(ddq->d_ino_hardlimit);    soft = (newlim->d_fieldmask & QC_INO_SOFT) ?           (xfs_qcnt_t) newlim->d_ino_softlimit :           be64_to_cpu(ddq->d_ino_softlimit);    if (hard == 0 || hard >= soft) {        ddq->d_ino_hardlimit = cpu_to_be64(hard);        ddq->d_ino_softlimit = cpu_to_be64(soft);//.........这里部分代码省略.........
开发者ID:daltenty,项目名称:kernel-ubuntu.trusty-vgt,代码行数:101,



注:本文中的xfs_dqlock函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ xfs_dqunlock函数代码示例
C++ xfs_dir2_block_tail_p函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。