
Commit d9ef7522 authored by Wengang Wang, committed by Joel Becker

ocfs2:dlm: avoid dlm->ast_lock lockres->spinlock dependency break



Currently we process a dirty lockres while holding lockres->spinlock, and
during that processing we may need to take dlm->ast_lock. This inverts the
established locking order, in which dlm->ast_lock is taken first and
lockres->spinlock second.

This patch fixes the problem.
Since we can't drop lockres->spinlock in the middle of processing, we instead
take dlm->ast_lock just before taking lockres->spinlock and release it after
lockres->spinlock is released, and we call __dlm_queue_bast()/__dlm_queue_ast(),
the unlocked variants, from dlm_shuffle_lists(). A lockres never has very many
locks queued on it, so holding dlm->ast_lock for the extra span does no real
performance harm.

Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>
parent d5a7df06
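
For reference, the locking pattern this patch establishes in the dlm_thread()
loop, condensed from the dlmthread.c hunks below (dirty-list handling and the
in-progress retry path are omitted):

	/* dlm->ast_lock first, lockres->spinlock second */
	spin_lock(&dlm->ast_lock);
	spin_lock(&res->spinlock);

	/* may queue asts/basts via the unlocked __dlm_queue_ast()/
	 * __dlm_queue_bast() variants, which now assert that
	 * dlm->ast_lock is already held */
	dlm_shuffle_lists(dlm, res);
	res->state &= ~DLM_LOCK_RES_DIRTY;

	/* release in reverse order of acquisition */
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->ast_lock);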
fs/ocfs2/dlm/dlmast.c +2 −2
@@ -89,7 +89,7 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 	return 0;
 }
 
-static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -146,7 +146,7 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 }
 
 
-static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
fs/ocfs2/dlm/dlmcommon.h +2 −0
@@ -904,6 +904,8 @@ void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_do_local_ast(struct dlm_ctxt *dlm,
 		      struct dlm_lock_resource *res,
 		      struct dlm_lock *lock);
fs/ocfs2/dlm/dlmthread.c +10 −6
@@ -310,6 +310,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
 	 * basts right before queueing them all throughout */
+	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
 			      DLM_LOCK_RES_RECOVERING|
@@ -338,7 +339,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 			/* queue the BAST if not already */
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			/* update the highest_blocked if needed */
 			if (lock->ml.highest_blocked < target->ml.convert_type)
@@ -356,7 +357,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.convert_type)
 				lock->ml.highest_blocked =
@@ -384,7 +385,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -403,7 +404,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.type)
 				lock->ml.highest_blocked = target->ml.type;
@@ -419,7 +420,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.type)
 				lock->ml.highest_blocked = target->ml.type;
@@ -445,7 +446,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -675,6 +676,7 @@ static int dlm_thread(void *data)
 			/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
 
+			spin_lock(&dlm->ast_lock);
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
@@ -695,6 +697,7 @@ static int dlm_thread(void *data)
 				/* move it to the tail and keep going */
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
+				spin_unlock(&dlm->ast_lock);
 				mlog(0, "delaying list shuffling for in-"
 				     "progress lockres %.*s, state=%d\n",
 				     res->lockname.len, res->lockname.name,
@@ -716,6 +719,7 @@ static int dlm_thread(void *data)
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
+			spin_unlock(&dlm->ast_lock);
 
 			dlm_lockres_calc_usage(dlm, res);