Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eaff0e70 authored by Waiman Long, committed by Ingo Molnar
Browse files

locking/pvqspinlock: Move lock stealing count tracking code into pv_queued_spin_steal_lock()



This patch moves the lock stealing count tracking code into
pv_queued_spin_steal_lock() instead of via a jacket function, simplifying
the code.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1449778666-13593-3-git-send-email-Waiman.Long@hpe.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 920c720a
Loading
Loading
Loading
Loading
+9 −7
Original line number Diff line number Diff line
@@ -54,6 +54,11 @@ struct pv_node {
	u8			state;
};

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enter the PV slowpath before
@@ -65,9 +70,11 @@ struct pv_node {
static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
	int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
		   (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);

	qstat_inc(qstat_pv_lock_stealing, ret);
	return ret;
}

/*
@@ -137,11 +144,6 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
+0 −13
Original line number Diff line number Diff line
@@ -279,19 +279,6 @@ static inline void __pv_wait(u8 *ptr, u8 val)
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

/*
 * PV unfair trylock count tracking function
 */
static inline int qstat_spin_steal_lock(struct qspinlock *lock)
{
	/* Delegate the actual trylock/steal attempt to the PV slowpath helper. */
	int ret = pv_queued_spin_steal_lock(lock);

	/* Count only successful steals (ret non-zero). */
	qstat_inc(qstat_pv_lock_stealing, ret);
	return ret;
}
#undef  queued_spin_trylock
#define queued_spin_trylock(l)	qstat_spin_steal_lock(l)

#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond)	{ }