
Commit bae8f567 authored by Martin Schwidefsky

s390/spinlock,rwlock: always do a load-and-test first



In case a lock is contended it is better to do a load-and-test first
before trying to get the lock with compare-and-swap. This helps to avoid
unnecessary cache invalidations of the lock's cacheline while the CPU
has to wait for the lock. For an uncontended lock, doing the
compare-and-swap directly is a bit better: if the CPU does not have the
cacheline in its cache yet, the compare-and-swap will get it read-write
immediately, while a load-and-test would get it read-only first.

Always do the load-and-test first, since avoiding the cacheline
invalidations in the contended case outweighs the potential read-only to
read-write cacheline upgrade in the uncontended case.
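
As a rough sketch of the pattern (a generic test-and-test-and-set spin using
C11 atomics, not the s390 primitives changed below; the function name is made
up for illustration):

	#include <stdatomic.h>

	/*
	 * Spin with a plain load first and only attempt the compare-and-swap
	 * once the lock word looks free.  The load keeps the cacheline in a
	 * shared state while waiting; only the CAS forces it exclusive.
	 */
	static void example_spin_lock(atomic_uint *lock)
	{
		unsigned int expected;

		for (;;) {
			/* load-and-test: cheap read, no cross-CPU invalidation */
			if (atomic_load_explicit(lock, memory_order_relaxed) != 0)
				continue;
			expected = 0;	/* looks free, try to grab it */
			if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
								  memory_order_acquire,
								  memory_order_relaxed))
				return;
		}
	}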

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 2bf29df7
arch/s390/include/asm/spinlock.h +30 −20
@@ -59,7 +59,9 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
-	return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
+	barrier();
+	return likely(arch_spin_value_unlocked(*lp) &&
+		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
@@ -69,20 +71,20 @@ static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
-	if (unlikely(!arch_spin_trylock_once(lp)))
+	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
-	if (unlikely(!arch_spin_trylock_once(lp)))
+	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
-	if (unlikely(!arch_spin_trylock_once(lp)))
+	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}
@@ -128,19 +130,29 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

+static inline int arch_read_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely((int) old >= 0 &&
+		      _raw_compare_and_swap(&rw->lock, old, old + 1));
+}
+
+static inline int arch_write_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely(old == 0 &&
+		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+}
+
static inline void arch_read_lock(arch_rwlock_t *rw)
{
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
+	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
+	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait_flags(rw, flags);
}

@@ -155,13 +167,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
-	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
+	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
-	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
+	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait_flags(rw, flags);
}

@@ -172,18 +184,16 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
-		return 1;
+	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
+	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
-		return 1;
+	if (!arch_write_trylock_once(rw))
		return _raw_write_trylock_retry(rw);
+	return 1;
}

#define arch_read_relax(lock)	cpu_relax()
arch/s390/lib/spinlock.c +16 −13
@@ -100,12 +100,9 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

-	for (count = spin_retry; count > 0; count--) {
-		if (arch_spin_is_locked(lp))
-			continue;
+	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
-	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
@@ -120,9 +117,9 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
			smp_yield();
			count = spin_retry;
		}
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
			continue;
-		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
@@ -140,9 +137,9 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
			smp_yield();
			count = spin_retry;
		}
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
			continue;
-		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
@@ -156,9 +153,9 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
	int count = spin_retry;

	while (count-- > 0) {
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
			continue;
-		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
@@ -168,6 +165,7 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
+	unsigned int old;
	int count = spin_retry;

	while (1) {
@@ -175,7 +173,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
			smp_yield();
			count = spin_retry;
		}
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
@@ -185,6 +184,7 @@ EXPORT_SYMBOL(_raw_write_lock_wait);

void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
+	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
@@ -193,7 +193,8 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
			smp_yield();
			count = spin_retry;
		}
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -204,10 +205,12 @@ EXPORT_SYMBOL(_raw_write_lock_wait_flags);

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
+	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;