Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fa1cc966 authored by Jinshan Xiong, committed by Greg Kroah-Hartman
Browse files

staging: lustre: osc: allow to call brw_commit() multiple times



Sometimes the rq_commit_cb of a BRW RPC can be called twice if that RPC
has already been committed at reply time. This causes inaccuracy in the
unstable page accounting and can then trigger an assertion failure.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3274
Reviewed-on: http://review.whamcloud.com/8215


Reviewed-by: Prakash Surya <surya1@llnl.gov>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 66bff4ad
Loading
Loading
Loading
Loading
+4 −15
Original line number Original line Diff line number Diff line
@@ -1875,11 +1875,6 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
	atomic_sub(page_count, &obd_unstable_pages);
	atomic_sub(page_count, &obd_unstable_pages);
	LASSERT(atomic_read(&obd_unstable_pages) >= 0);
	LASSERT(atomic_read(&obd_unstable_pages) >= 0);


	spin_lock(&req->rq_lock);
	req->rq_committed = 1;
	req->rq_unstable  = 0;
	spin_unlock(&req->rq_lock);

	wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
	wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
}
}


@@ -1909,28 +1904,22 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
	LASSERT(atomic_read(&obd_unstable_pages) >= 0);
	LASSERT(atomic_read(&obd_unstable_pages) >= 0);
	atomic_add(page_count, &obd_unstable_pages);
	atomic_add(page_count, &obd_unstable_pages);


	spin_lock(&req->rq_lock);

	/*
	/*
	 * If the request has already been committed (i.e. brw_commit
	 * If the request has already been committed (i.e. brw_commit
	 * called via rq_commit_cb), we need to undo the unstable page
	 * called via rq_commit_cb), we need to undo the unstable page
	 * increments we just performed because rq_commit_cb wont be
	 * increments we just performed because rq_commit_cb wont be
	 * called again. Otherwise, just set the commit callback so the
	 * called again.
	 * unstable page accounting is properly updated when the request
	 * is committed
	 */
	 */
	if (req->rq_committed) {
	spin_lock(&req->rq_lock);
	if (unlikely(req->rq_committed)) {
		/* Drop lock before calling osc_dec_unstable_pages */
		/* Drop lock before calling osc_dec_unstable_pages */
		spin_unlock(&req->rq_lock);
		spin_unlock(&req->rq_lock);
		osc_dec_unstable_pages(req);
		osc_dec_unstable_pages(req);
		spin_lock(&req->rq_lock);
	} else {
	} else {
		req->rq_unstable = 1;
		req->rq_unstable = 1;
		req->rq_commit_cb = osc_dec_unstable_pages;
	}

		spin_unlock(&req->rq_lock);
		spin_unlock(&req->rq_lock);
	}
	}
}


/* this must be called holding the loi list lock to give coverage to exit_cache,
/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request
 * async_flag maintenance, and oap_request
+4 −4
Original line number Original line Diff line number Diff line
@@ -1847,22 +1847,22 @@ static int brw_interpret(const struct lu_env *env,


static void brw_commit(struct ptlrpc_request *req)
static void brw_commit(struct ptlrpc_request *req)
{
{
	spin_lock(&req->rq_lock);
	/*
	/*
	 * If osc_inc_unstable_pages (via osc_extent_finish) races with
	 * If osc_inc_unstable_pages (via osc_extent_finish) races with
	 * this called via the rq_commit_cb, I need to ensure
	 * this called via the rq_commit_cb, I need to ensure
	 * osc_dec_unstable_pages is still called. Otherwise unstable
	 * osc_dec_unstable_pages is still called. Otherwise unstable
	 * pages may be leaked.
	 * pages may be leaked.
	 */
	 */
	if (req->rq_unstable) {
	spin_lock(&req->rq_lock);
	if (unlikely(req->rq_unstable)) {
		req->rq_unstable = 0;
		spin_unlock(&req->rq_lock);
		spin_unlock(&req->rq_lock);
		osc_dec_unstable_pages(req);
		osc_dec_unstable_pages(req);
		spin_lock(&req->rq_lock);
	} else {
	} else {
		req->rq_committed = 1;
		req->rq_committed = 1;
	}
		spin_unlock(&req->rq_lock);
		spin_unlock(&req->rq_lock);
	}
	}
}


/**
/**
 * Build an RPC by the list of extent @ext_list. The caller must ensure
 * Build an RPC by the list of extent @ext_list. The caller must ensure