
Commit ea9e0fb8 authored by Nicolai Hähnle, committed by Ingo Molnar

locking/ww_mutex: Set use_ww_ctx even when locking without a context



We will add a new field to struct mutex_waiter.  This field must be
initialized for all waiters if any waiter uses the use_ww_ctx path.

So there is a trade-off: Keep ww_mutex locking without a context on
the faster non-use_ww_ctx path, at the cost of adding the
initialization to all mutex locks (including non-ww_mutexes), or avoid
the additional cost for non-ww_mutex locks, at the cost of adding
additional checks to the use_ww_ctx path.

We take the latter choice.  It may be worth eliminating the users of
ww_mutex_lock(lock, NULL), but there are a lot of them.
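
For illustration (not part of the original message): the two caller styles
the trade-off weighs are plain-mutex-style locking with a NULL context and
full wound/wait locking with an acquire context. A minimal sketch, with
hypothetical names (example_ww_class, example_lock, example_demo):

/* Sketch: the two ww_mutex locking styles. Illustrative only. */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);
static struct ww_mutex example_lock;

static void example_demo(void)
{
	struct ww_acquire_ctx ctx;

	ww_mutex_init(&example_lock, &example_ww_class);

	/* Contextless use: behaves like a plain mutex. After this
	 * patch it reaches __ww_mutex_lock() with ctx == NULL instead
	 * of short-circuiting to mutex_lock(&lock->base). */
	ww_mutex_lock(&example_lock, NULL);
	ww_mutex_unlock(&example_lock);

	/* Wound/wait use: the context enables deadlock avoidance. */
	ww_acquire_init(&ctx, &example_ww_class);
	ww_mutex_lock(&example_lock, &ctx);
	ww_acquire_done(&ctx);
	ww_mutex_unlock(&example_lock);
	ww_acquire_fini(&ctx);
}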

Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-5-git-send-email-nhaehnle@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3822da3e
include/linux/ww_mutex.h +2 −9
@@ -222,11 +222,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
  */
 static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock(lock, ctx);
-
-	mutex_lock(&lock->base);
-	return 0;
+	return __ww_mutex_lock(lock, ctx);
 }
 
 /**
@@ -262,10 +258,7 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
 static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
 							   struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock_interruptible(lock, ctx);
-	else
-		return mutex_lock_interruptible(&lock->base);
+	return __ww_mutex_lock_interruptible(lock, ctx);
 }
 
 /**
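
Net effect of the two hunks above (reconstructed for readability): both
inline wrappers become unconditional forwarders, and the NULL-context case
moves into the common slowpath:

static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(lock, ctx);
}

static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
							   struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock_interruptible(lock, ctx);
}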
kernel/locking/mutex.c +17 −12
@@ -469,7 +469,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	for (;;) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -629,8 +629,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
-	if (use_ww_ctx) {
-		ww = container_of(lock, struct ww_mutex, base);
+	ww = container_of(lock, struct ww_mutex, base);
+
+	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -642,7 +643,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx)
+		if (use_ww_ctx && ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -688,7 +689,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -728,7 +729,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx)
+	if (use_ww_ctx && ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
@@ -816,8 +817,9 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
-	if (!ret && ctx->acquired > 1)
+				   0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				   ctx, 1);
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -831,9 +833,10 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
+				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				  ctx, 1);
 
-	if (!ret && ctx->acquired > 1)
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -1021,6 +1024,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
@@ -1035,6 +1039,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
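
A note on the repeated use_ww_ctx && ww_ctx test: use_ww_ctx is a const bool
argument to the __always_inline __mutex_lock_common(), so the compiler
constant-folds it and plain mutex callers carry no ww code at all, while the
new ww_ctx check is the runtime guard for ww_mutex_lock(lock, NULL). A
standalone userspace sketch of that pattern (illustrative names, not kernel
code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_ctx { int acquired; };

/* With use_ww_ctx a compile-time constant at each inlined call site,
 * the false case drops the branch entirely; the ctx != NULL test
 * still guards contextless callers when it is true. */
static inline void demo_lock_common(struct demo_ctx *ctx, const bool use_ww_ctx)
{
	if (use_ww_ctx && ctx && ctx->acquired > 0)
		printf("wound/wait checks run (acquired=%d)\n", ctx->acquired);
	else
		printf("plain mutex path\n");
}

int main(void)
{
	struct demo_ctx ctx = { .acquired = 1 };

	demo_lock_common(&ctx, true);	/* ww lock with context */
	demo_lock_common(NULL, true);	/* the ww_mutex_lock(lock, NULL) case */
	demo_lock_common(NULL, false);	/* plain mutex; branch folds away */
	return 0;
}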