
Commit 0983f050 authored by Linus Torvalds

Merge master.kernel.org:/home/rmk/linux-2.6-arm-smp

parents db776a14 186efd52
+3 −0
@@ -425,6 +425,9 @@ static void __init build_mem_type_table(void)
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
	}

	cp = &cache_policies[cachepolicy];
+2 −2
@@ -21,8 +21,8 @@

#include <asm/system.h>

#define smp_mb__before_clear_bit()	do { } while (0)
#define smp_mb__after_clear_bit()	do { } while (0)
#define smp_mb__before_clear_bit()	mb()
#define smp_mb__after_clear_bit()	mb()

/*
 * These functions are the basis of our bit ops.
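
This hunk replaces the no-op definitions of smp_mb__before_clear_bit() and smp_mb__after_clear_bit() with real mb() barriers: clear_bit() itself is not a barrier, and on SMP ARMv6 the surrounding accesses can otherwise be reordered around it. A minimal user-space sketch of the pattern those macros exist for, written with C11 atomics rather than the kernel API (shared_data, busy and finish_work are illustrative names, not from this commit):

	#include <stdatomic.h>

	static int shared_data;           /* payload published under the flag    */
	static atomic_uint busy = 1;      /* bit-like flag: 1 = work in progress */

	static void finish_work(void)
	{
		shared_data = 42;
		/* ~ smp_mb__before_clear_bit(): make the store above visible ... */
		atomic_thread_fence(memory_order_release);
		/* ... before the flag is cleared, which other CPUs poll (~ clear_bit()) */
		atomic_store_explicit(&busy, 0, memory_order_relaxed);
	}
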
+24 −12
@@ -28,7 +28,8 @@
"	blmi	" #fail				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __down_op_ret(ptr,fail)			\
@@ -48,12 +49,14 @@
"	mov	%0, ip"				\
	: "=&r" (ret)				\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	ret;					\
	})

#define __up_op(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op\n"				\
"1:	ldrex	lr, [%0]\n"			\
@@ -66,7 +69,7 @@
"	blle	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	})

/*
@@ -92,11 +95,13 @@
"	blne	" #fail				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __up_op_write(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_read\n"			\
"1:	ldrex	lr, [%0]\n"			\
@@ -108,7 +113,7 @@
"	blcs	" #wake				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	})

#define __down_op_read(ptr,fail)		\
@@ -116,6 +121,7 @@

#define __up_op_read(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_read\n"			\
"1:	ldrex	lr, [%0]\n"			\
@@ -128,7 +134,7 @@
"	bleq	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	})

#else
@@ -148,7 +154,8 @@
"	blmi	" #fail				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __down_op_ret(ptr,fail)			\
@@ -169,12 +176,14 @@
"	mov	%0, ip"				\
	: "=&r" (ret)				\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	ret;					\
	})

#define __up_op(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op\n"				\
"	mrs	ip, cpsr\n"			\
@@ -188,7 +197,7 @@
"	blle	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	})

/*
@@ -215,7 +224,8 @@
"	blne	" #fail				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __up_op_write(ptr,wake)			\
@@ -233,7 +243,8 @@
"	blcs	" #wake				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __down_op_read(ptr,fail)		\
@@ -241,6 +252,7 @@

#define __up_op_read(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_read\n"			\
"	mrs	ip, cpsr\n"			\
@@ -254,7 +266,7 @@
"	bleq	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc", "memory");		\
	: "ip", "lr", "cc");			\
	})

#endif
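
Every hunk in this file follows the same pattern: the "memory" clobber comes off the individual asm statements, and an explicit smp_mb() is added after each successful down-style operation and before each up-style operation, so loads and stores inside the critical section cannot drift past the lock on a weakly ordered ARMv6 core. A rough stand-alone illustration of that barrier placement, using C11 atomics instead of the kernel's semaphore asm (lock_word, protected_count and down_work_up are made-up names for the sketch):

	#include <stdatomic.h>

	static atomic_int lock_word = 1;      /* > 0 means free, like the semaphore count */
	static int protected_count;

	static void down_work_up(void)
	{
		/* down: decrement; if the count was not positive, undo and spin */
		while (atomic_fetch_sub_explicit(&lock_word, 1, memory_order_relaxed) <= 0)
			atomic_fetch_add_explicit(&lock_word, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb() after down */

		protected_count++;                              /* the critical section  */

		atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb() before up  */
		atomic_fetch_add_explicit(&lock_word, 1, memory_order_relaxed);
	}
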
+38 −15
@@ -8,9 +8,10 @@
/*
 * ARMv6 Spin-locking.
 *
 * We (exclusively) read the old value, and decrement it.  If it
 * hits zero, we may have won the lock, so we try (exclusively)
 * storing it.
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");
	: "cc");

	smp_mb();
}

static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");
	: "cc");

	return tmp == 0;
	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc", "memory");
	: "cc");
}

/*
@@ -98,7 +108,9 @@ static inline void _raw_write_lock(rwlock_t *rw)
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");
	: "cc");

	smp_mb();
}

static inline int _raw_write_trylock(rwlock_t *rw)
@@ -111,18 +123,25 @@ static inline int _raw_write_trylock(rwlock_t *rw)
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");
	: "cc");

	return tmp == 0;
	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc", "memory");
	: "cc");
}

/*
@@ -149,13 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
	: "cc");

	smp_mb();
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
@@ -164,7 +187,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
	: "cc");
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
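
As the rewritten comment at the top of this file says, a barrier is needed after taking a lock and before releasing it, because V6 CPUs are assumed to have weakly ordered memory; _raw_spin_trylock and _raw_write_trylock additionally issue the barrier only when the lock was actually obtained. A small user-space analogue of that trylock rule, with C11 atomics standing in for ldrex/strexeq (try_enter and locked are illustrative names, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint locked;            /* 0 = unlocked, 1 = held */

	static bool try_enter(void)
	{
		unsigned int expected = 0;

		/* like ldrex + strexeq: succeed only if the word was still 0 */
		if (atomic_compare_exchange_strong_explicit(&locked, &expected, 1,
							    memory_order_relaxed,
							    memory_order_relaxed)) {
			atomic_thread_fence(memory_order_seq_cst);  /* ~ smp_mb(), success path only */
			return true;
		}
		return false;                 /* nothing acquired, no barrier needed */
	}
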
+63 −29
@@ -139,7 +139,12 @@ extern unsigned int user_debug;
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                                   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
@@ -323,12 +328,8 @@ do { \
 * NOTE that this solution won't work on an SMP system, so explcitly
 * forbid it here.
 */
#ifdef CONFIG_SMP
#error SMP is not supported on SA1100/SA110
#else
#define swp_is_buggy
#endif
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
@@ -337,9 +338,36 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#ifdef swp_is_buggy
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
@@ -354,18 +382,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
		local_irq_restore(flags);
		break;
#else
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
		default: __bad_xchg(ptr, size), ret = 0;
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
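
Two things change in this file: mb() becomes a real CP15 data memory barrier operation on ARMv6 instead of a plain compiler barrier, and __xchg() gains an ARMv6 path built on a ldrex/strex retry loop (load-exclusive the old value, store-exclusive the new one, and retry if another CPU touched the location in between) instead of swp. The same retry structure in portable C, using the GCC/Clang __atomic builtins purely as an illustration (xchg_u32 is a made-up helper, not the kernel's xchg()):

	#include <stdint.h>

	static inline uint32_t xchg_u32(volatile uint32_t *ptr, uint32_t newval)
	{
		uint32_t old = *ptr;

		/* ldrex/strex in C: on failure the builtin reloads 'old' for us,
		 * so we simply retry until the exclusive store succeeds */
		while (!__atomic_compare_exchange_n(ptr, &old, newval, 1 /* weak */,
						    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			;
		return old;
	}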