Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bf72de31 authored by Christoph Hellwig, committed by Ben Myers
Browse files

xfs: nest qm_dqfrlist_lock inside the dquot qlock



Allow xfs_qm_dqput to work without trylock loops by nesting the freelist lock
inside the dquot qlock.  In turn that requires trylocks in the reclaim path
instead, but given that it's a classic tradeoff between fast and slow path
we follow the model of the inode and dentry caches.

Document our new lock order now that it has settled.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent 92678554
Loading
Loading
Loading
Loading
+38 −59
Original line number Diff line number Diff line
@@ -39,19 +39,18 @@
#include "xfs_qm.h"
#include "xfs_trace.h"


/*
   LOCK ORDER

   inode lock		    (ilock)
   dquot hash-chain lock    (hashlock)
   xqm dquot freelist lock  (freelistlock
   mount's dquot list lock  (mplistlock)
   user dquot lock - lock ordering among dquots is based on the uid or gid
   group dquot lock - similar to udquots. Between the two dquots, the udquot
		      has to be locked first.
   pin lock - the dquot lock must be held to take this lock.
   flush lock - ditto.
 * Lock order:
 *
 * ip->i_lock
 *   qh->qh_lock
 *     qi->qi_dqlist_lock
 *       dquot->q_qlock (xfs_dqlock() and friends)
 *         dquot->q_flush (xfs_dqflock() and friends)
 *         xfs_Gqm->qm_dqfrlist_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

#ifdef DEBUG
@@ -984,69 +983,49 @@ xfs_qm_dqget(
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
	struct xfs_dquot	*dqp)
{
	xfs_dquot_t	*gdqp;
	struct xfs_dquot	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
recurse:
	if (--dqp->q_nrefs > 0) {
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
		trace_xfs_dqput_wait(dqp);
		xfs_dqunlock(dqp);
		mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
	trace_xfs_dqput_free(dqp);

	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
	if (list_empty(&dqp->q_freelist)) {
		list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
		xfs_Gqm->qm_dqfrlist_cnt++;
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
		}
	xfs_dqunlock(dqp);

	/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
	 * If we had a group quota hint, release it now.
	 */
		if (! gdqp)
			break;
	if (gdqp) {
		dqp = gdqp;
		goto recurse;
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}

/*
+3 −1
Original line number Diff line number Diff line
@@ -1668,7 +1668,9 @@ xfs_qm_dqreclaim_one(void)
restart:
	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
		struct xfs_mount *mp = dqp->q_mount;
		xfs_dqlock(dqp);

		if (!xfs_dqlock_nowait(dqp))
			continue;

		/*
		 * This dquot has already been grabbed by dqlookup.