
Commit 8b232816 authored by John David Anglin, committed by James Bottomley

[PARISC] futex: Use same lock set as lws calls



In debugging the failure of the glibc tst-cond18 test on parisc, I realized
that futexes need to use the same set of locks as the lws calls.  This fixes
all the pthread 'cond' tests.  Sadly, there are still problems with thread
cancellation.

[jejb: checkpatch fixes]
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 500dd237
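
For illustration only (not part of the commit): the helpers added in the diff below pick a lock by hashing bits 4-7 of the futex's user address into the kernel's sixteen-entry array of four-word locks, lws_lock_start[], which is the same array the LWS path in syscall.S uses.  The stand-alone C sketch below mimics that index computation in user space; lws_lock_index() and the local lws_lock_start[] stand-in are hypothetical names used only for this example, not kernel code.

/*
 * Illustrative user-space sketch (not part of the patch).  The futex
 * helpers in the diff, like the LWS code in syscall.S, select a
 * spinlock by hashing bits 4-7 of the user address into a
 * sixteen-entry array of four-word locks.
 */
#include <stdio.h>

/* Stand-in for the kernel's lws_lock_start[]: sixteen locks,
 * each occupying four 32-bit words. */
static unsigned int lws_lock_start[16 * 4];

/* Same index computation as the patch: ((long)uaddr & 0xf0) >> 2
 * turns bits 4-7 of the address into the word offset of one of the
 * sixteen four-word lock slots. */
static long lws_lock_index(const void *uaddr)
{
	return ((long)uaddr & 0xf0) >> 2;
}

int main(void)
{
	/* Example futex addresses: the first two share a 16-byte block
	 * and therefore hash to the same lock. */
	unsigned long addrs[] = { 0x4000a010UL, 0x4000a014UL, 0x4000b0f4UL };

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		long idx = lws_lock_index((void *)addrs[i]);

		printf("uaddr %#lx -> &lws_lock_start[%ld] (%p)\n",
		       addrs[i], idx, (void *)&lws_lock_start[idx]);
	}
	return 0;
}

Because the index is a word offset into a u32 array, consecutive lock slots sit four words apart, matching the "sixteen four-word locks" comment in the patch; any futex word inside the same 16-byte block contends on the same lock.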
+27 −4
@@ -8,6 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/errno.h>
 
+/* The following has to match the LWS code in syscall.S.  We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	local_irq_save(*flags);
+	arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	arch_spin_unlock(s);
+	local_irq_restore(*flags);
+}
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
 	pagefault_disable();
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	pagefault_enable();
 
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	 * address. This should scale to a couple of CPUs.
 	 */
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	ret = get_user(val, uaddr);
 
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 	*uval = val;
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	return ret;
 }