Commit d59b93da authored by Martin Schwidefsky

s390/rwlock: use directed yield for write-locked rwlocks



Add an owner field to arch_rwlock_t so that, when the rwlock is
write-locked, the timeslice of a virtual CPU can be passed to the lock
owner with diagnose 0x9c. The undirected yield that was done while
trying to acquire the rwlock for writing when the lock is read-locked
is removed.
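
The change applies one pattern to all four rwlock wait loops: spin a
bounded number of times, remember the lock owner that was last
observed, and once the retry budget is exhausted donate the timeslice
to that specific virtual CPU instead of yielding it blindly. Below is
a compilable user-space sketch of the idea; the struct layout,
SPIN_RETRY, yield_to_cpu() and write_lock_directed() are illustrative
inventions, not kernel interfaces (the kernel uses smp_yield_cpu(),
which is backed by diagnose 0x9c).

#include <sched.h>
#include <stdatomic.h>

#define SPIN_RETRY 1000

struct rwlock {
	atomic_uint lock;	/* 0x80000000 = write-locked, else reader count */
	atomic_uint owner;	/* lock value of the writer, 0 = no writer */
};

/* Hypothetical stand-in for diagnose 0x9c; user space has no directed
 * yield, so fall back to an ordinary one. */
static void yield_to_cpu(unsigned int cpu)
{
	(void) cpu;
	sched_yield();
}

static void write_lock_directed(struct rwlock *rw, unsigned int self)
{
	unsigned int owner = 0;
	int count = SPIN_RETRY;

	for (;;) {
		unsigned int expected = 0;

		if (count-- <= 0) {
			if (owner)
				yield_to_cpu(owner);	/* directed, not blind */
			count = SPIN_RETRY;
		}
		owner = atomic_load(&rw->owner);
		if (atomic_compare_exchange_weak(&rw->lock, &expected,
						 0x80000000u))
			break;
	}
	atomic_store(&rw->owner, self);
}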

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 46b05c7b

arch/s390/include/asm/smp.h  +0 −2

@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
 static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */

arch/s390/include/asm/spinlock.h  +23 −5

@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * (the type definitions are in asm/spinlock_types.h)
  */
 
+void arch_lock_relax(unsigned int cpu);
+
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+	arch_lock_relax(lock->lock);
+}
+
 static inline u32 arch_spin_lockval(int cpu)
 {
 	return ~cpu;
@@ -170,17 +176,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait(rw);
+	rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait_flags(rw, flags);
+	rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	typecheck(unsigned int, rw->lock);
+
+	rw->owner = 0;
 	asm volatile(
 		__ASM_BARRIER
 		"st	%1,%0\n"
@@ -198,12 +208,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (!arch_write_trylock_once(rw))
-		return _raw_write_trylock_retry(rw);
+	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+		return 0;
+	rw->owner = SPINLOCK_LOCKVAL;
 	return 1;
 }
 
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+	arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+	arch_lock_relax(rw->owner);
+}
 
 #endif /* __ASM_SPINLOCK_H */
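
For reference when reading the hunks above: arch_write_trylock_once()
is not touched by this patch. In the same header at the time it was,
if memory serves, a single load followed by one compare-and-swap
attempt, and SPINLOCK_LOCKVAL expands to the calling CPU's lock value,
i.e. the bitwise complement of its CPU number.

/* Not part of this patch; quoted from memory for context. */
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);

	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}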

arch/s390/include/asm/spinlock_types.h  +1 −0

@@ -13,6 +13,7 @@ typedef struct {
 
 typedef struct {
 	unsigned int lock;
+	unsigned int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
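
Note that the unchanged __ARCH_RW_LOCK_UNLOCKED initializer remains
correct: a partial aggregate initializer zeroes all remaining members
in C, so the new owner field starts out as 0, meaning "no writer":

	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;	/* lock == 0, owner == 0 */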

arch/s390/kernel/smp.c  +0 −6

@@ -333,12 +333,6 @@ int smp_vcpu_scheduled(int cpu)
 	return pcpu_running(pcpu_devices + cpu);
 }
 
-void smp_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
 void smp_yield_cpu(int cpu)
 {
 	if (MACHINE_HAS_DIAG9C)
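
The hunk ends inside smp_yield_cpu(), which this patch keeps. Its body
at the time, quoted from memory rather than from this diff, issues the
directed yield:

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		/* diagnose 0x9c: donate the timeslice of this virtual
		 * CPU to the CPU with the given address */
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
}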

arch/s390/lib/spinlock.c  +30 −19

@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-	unsigned int cpu = lp->lock;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+	if (!cpu)
+		return;
+	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+		return;
+	smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
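
One detail worth spelling out: rw->owner stores the writer's lock
value, which is the bitwise complement of its CPU number (see
arch_spin_lockval() in the spinlock.h hunk above). The complement is
non-zero even for CPU 0, so owner == 0 can safely mean "no writer",
and arch_lock_relax() undoes the encoding with ~cpu before calling
smp_vcpu_scheduled() and smp_yield_cpu(). A tiny self-contained check
of the encoding:

#include <assert.h>

int main(void)
{
	unsigned int cpu = 0;
	unsigned int lockval = ~cpu;	/* what arch_spin_lockval() returns */

	assert(lockval != 0);		/* non-zero even for CPU 0 */
	assert(~lockval == cpu);	/* ~owner recovers the CPU number */
	return 0;
}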