
Commit 81533803 authored by Martin Schwidefsky

s390/spinlock: use the cpu number +1 as spinlock value



The queued spinlock code will come out simpler if the encoding of
the CPU that holds the spinlock is (cpu+1) instead of (~cpu).

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 1887aa07
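
The change is mechanical, but the two encodings are worth contrasting directly. Below is a minimal standalone sketch, not kernel code: it pairs each encoding with its inverse and checks that both round-trip and that both keep 0 free to mean "unlocked". The remark about the high-order bits is one plausible reading of why the queued code prefers cpu + 1; the commit message itself only says the code comes out simpler.

/*
 * Sketch: (~cpu) sets all the high-order bits of the lock word for any
 * small cpu number, while (cpu + 1) leaves them clear, so the latter
 * is friendlier to code that wants to pack extra state into the word.
 */
#include <assert.h>

static unsigned int lockval_old(int cpu) { return ~cpu; }	/* pre-patch */
static unsigned int lockval_new(int cpu) { return cpu + 1; }	/* post-patch */

static int owner_old(unsigned int lockval) { return ~lockval; }
static int owner_new(unsigned int lockval) { return lockval - 1; }

int main(void)
{
	for (int cpu = 0; cpu < 256; cpu++) {
		assert(owner_old(lockval_old(cpu)) == cpu);
		assert(owner_new(lockval_new(cpu)) == cpu);
		/* neither encoding ever produces 0, the unlocked value */
		assert(lockval_old(cpu) != 0 && lockval_new(cpu) != 0);
	}
	return 0;
}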
arch/s390/include/asm/spinlock.h +1 −1
@@ -48,7 +48,7 @@ static inline void arch_spin_relax(arch_spinlock_t *lock)
 
 static inline u32 arch_spin_lockval(int cpu)
 {
-	return ~cpu;
+	return cpu + 1;
 }
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
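
The trailing context line is the reason the hunk can stop here: arch_spin_value_unlocked() needs no change, since 0 stays the unlocked value under both encodings. A sketch under the assumption that the helper, whose body the hunk does not show, simply tests the lock word against zero:

/* Stand-in type and helper; assumes the real helper is a plain zero
 * test, which works because neither ~cpu nor cpu + 1 can be zero for
 * a valid (non-negative) cpu number. */
typedef struct { unsigned int lock; } sketch_spinlock_t;

static inline int sketch_spin_value_unlocked(sketch_spinlock_t lock)
{
	return lock.lock == 0;
}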
arch/s390/lib/spinlock.c +16 −16
@@ -67,8 +67,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 
 	/* Pass the virtual CPU to the lock holder if it is not running */
 	owner = arch_load_niai4(&lp->lock);
-	if (owner && arch_vcpu_is_preempted(~owner))
-		smp_yield_cpu(~owner);
+	if (owner && arch_vcpu_is_preempted(owner - 1))
+		smp_yield_cpu(owner - 1);
 
 	count = spin_retry;
 	while (1) {
@@ -87,8 +87,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
-			smp_yield_cpu(~owner);
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+			smp_yield_cpu(owner - 1);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -102,8 +102,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	/* Pass the virtual CPU to the lock holder if it is not running */
 	owner = arch_load_niai4(&lp->lock);
-	if (owner && arch_vcpu_is_preempted(~owner))
-		smp_yield_cpu(~owner);
+	if (owner && arch_vcpu_is_preempted(owner - 1))
+		smp_yield_cpu(owner - 1);
 
 	count = spin_retry;
 	while (1) {
@@ -124,8 +124,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
-			smp_yield_cpu(~owner);
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+			smp_yield_cpu(owner - 1);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
@@ -158,8 +158,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(~owner))
-				smp_yield_cpu(~owner);
+			if (owner && arch_vcpu_is_preempted(owner - 1))
+				smp_yield_cpu(owner - 1);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
@@ -198,8 +198,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(~owner))
-				smp_yield_cpu(~owner);
+			if (owner && arch_vcpu_is_preempted(owner - 1))
+				smp_yield_cpu(owner - 1);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
@@ -226,8 +226,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(~owner))
-				smp_yield_cpu(~owner);
+			if (owner && arch_vcpu_is_preempted(owner - 1))
+				smp_yield_cpu(owner - 1);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
@@ -265,8 +265,8 @@ void arch_lock_relax(int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
+	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
 		return;
-	smp_yield_cpu(~cpu);
+	smp_yield_cpu(cpu - 1);
 }
 EXPORT_SYMBOL(arch_lock_relax);
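
Every hunk in this file rewrites the same waiter-side pattern: load the lock word, and if it is non-zero, decode the holder and yield to it when its virtual CPU has been preempted by the hypervisor. A hedged sketch of that pattern with the new decoding; vcpu_is_preempted() and yield_to() are hypothetical stubs standing in for arch_vcpu_is_preempted() and smp_yield_cpu():

#include <stdbool.h>

/* Hypothetical stubs; the real helpers ask the hypervisor whether the
 * vcpu is currently running and yield the cpu, respectively. */
static bool vcpu_is_preempted(int cpu) { (void)cpu; return false; }
static void yield_to(int cpu) { (void)cpu; }

/* One pass of the waiter pattern: 0 means unlocked, anything else is
 * the holder's cpu number encoded as cpu + 1. */
static void spin_wait_once(const unsigned int *lock)
{
	unsigned int owner = *lock;

	if (owner && vcpu_is_preempted((int)owner - 1))
		yield_to((int)owner - 1);
}

int main(void)
{
	unsigned int lock = 5 + 1;	/* held by cpu 5 under the new scheme */

	spin_wait_once(&lock);
	return 0;
}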