
Commit 14e256c1 authored by Matthew Wilcox, committed by Kyle McMartin

[PARISC] Update spinlocks from parisc tree

Neaten up the CONFIG_PA20 ifdefs

More merge fixes, this time for SMP

Signed-off-by: Matthew Wilcox <willy@parisc-linux.org>

Prettify the CONFIG_DEBUG_SPINLOCK __SPIN_LOCK_UNLOCKED initializers.

Clean up some warnings with CONFIG_DEBUG_SPINLOCK enabled.

Fix build with spinlock debugging turned on. Patch is cleaner like this,
too.

Remove mandatory 16-byte alignment requirement on PA2.0 processors by
using the ldcw,CO completer. Provides a nice insn savings.

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent 04d472dc
include/asm-parisc/spinlock.h  +0 −5
@@ -5,11 +5,6 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
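The deleted comment still describes the invariant this file relies on: ldcw is a load-and-clear, so a free lock holds a nonzero word and a held lock holds zero. As a hedged sketch of how the acquire and release paths in this header use that invariant (reconstructed from the same-era parisc code for illustration; these functions are not part of this hunk):

static inline void __raw_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);		/* pick the (aligned) lock word */
	while (__ldcw(a) == 0)		/* atomic load-and-clear; 0 => held */
		while (*a == 0)		/* spin read-only until it looks free */
			cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;				/* a plain store of 1 releases the lock */
	mb();
}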
include/asm-parisc/spinlock_types.h  +6 −2
@@ -6,10 +6,14 @@
 #endif
 
 typedef struct {
+#ifdef CONFIG_PA20
+	volatile unsigned int slock;
+# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#else
 	volatile unsigned int lock[4];
-} raw_spinlock_t;
-
 # define __RAW_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+#endif
+} raw_spinlock_t;
 
 typedef struct {
 	raw_spinlock_t lock;
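Why the pre-PA2.0 variant carries four words, and why the unlocked initializer sets all of them to 1: the allocator only guarantees 8-byte alignment, so the 16-byte array is sized so that some 16-byte-aligned word always falls inside it, and every word starts out unlocked so it does not matter which one __ldcw_align selects. A standalone userspace illustration (hypothetical stand-in types, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the pre-PA2.0 layout above: 16 bytes, all words "unlocked". */
typedef struct { volatile unsigned int lock[4]; } pa1x_lock_t;
#define PA1X_UNLOCKED { { 1, 1, 1, 1 } }

/* The same round-up that __ldcw_align performs. */
static volatile unsigned int *align16(pa1x_lock_t *l)
{
	uintptr_t p = (uintptr_t)&l->lock[0];
	return (volatile unsigned int *)((p + 15) & ~(uintptr_t)15);
}

int main(void)
{
	static pa1x_lock_t l = PA1X_UNLOCKED;
	volatile unsigned int *w = align16(&l);

	/* The chosen word is 16-byte aligned, lies inside the array, and is
	 * already in the unlocked (nonzero) state. */
	assert((uintptr_t)w % 16 == 0);
	assert(w >= &l.lock[0] && w <= &l.lock[3]);
	assert(*w == 1);
	printf("lock word at byte offset %td\n", (char *)w - (char *)&l);
	return 0;
}

On PA2.0 the same type shrinks to a single word, which is why the matching initializer is just { 1 }.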
include/asm-parisc/system.h  +24 −7
@@ -138,13 +138,7 @@ static inline void set_eiem(unsigned long val)
 #define set_wmb(var, value)		do { var = value; wmb(); } while (0)
 
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
-
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,12 +146,35 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore.  */
+
 #define __PA_LDCW_ALIGNMENT 16
 #define __ldcw_align(a) ({ \
   unsigned long __ret = (unsigned long) &(a)->lock[0];        		\
   __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
   (volatile unsigned int *) __ret;                                      \
 })
+#define LDCW	"ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
+
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW	"ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
 
 #ifdef CONFIG_SMP
 # define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
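A worked example of the pre-PA2.0 round-up above, using a made-up address in plain C (runnable anywhere, not kernel code). On PA1.x every lock operation pays an add and an and to locate the aligned word; on PA2.0, __ldcw_align degenerates to a cast and the ,co completer does the rest, which is the "insn savings" the commit message mentions.

#include <stdio.h>

#define PA1X_LDCW_ALIGNMENT 16	/* stands in for __PA_LDCW_ALIGNMENT above */

int main(void)
{
	/* Suppose kmalloc handed back a lock[4] array at an 8-byte-aligned
	 * address that is not 16-byte aligned. */
	unsigned long addr = 0x1008;
	unsigned long aligned = (addr + PA1X_LDCW_ALIGNMENT - 1) &
				~(unsigned long)(PA1X_LDCW_ALIGNMENT - 1);

	/* 0x1008 rounds up to 0x1010, which still lies inside the 16-byte
	 * array [0x1008, 0x1018) -- the reason the array holds four words. */
	printf("%#lx -> %#lx\n", addr, aligned);
	return 0;
}

With CONFIG_PA20 this add/and pair disappears from every __ldcw call site, since ldcw,co only needs the natural 4-byte alignment of the lock word itself.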