
Commit 67dadcb3 authored by David Vrabel

Merge commit 'e26a9e00' into stable/for-linus-3.15

parents eb47f712 e26a9e00
arch/arm/common/Makefile +1 −0
@@ -13,6 +13,7 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
 obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o	= -pg
 AFLAGS_mcpm_head.o		:= -march=armv7-a
 AFLAGS_vlock.o			:= -march=armv7-a
+obj-$(CONFIG_TI_PRIV_EDMA)	+= edma.o
arch/arm/include/asm/assembler.h +4 −4
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull            lsr
-#define push            lsl
+#define lspull          lsr
+#define lspush          lsl
 #define get_byte_0      lsl #0
 #define get_byte_1	lsr #8
 #define get_byte_2	lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2	lsl #16
 #define put_byte_3	lsl #24
 #else
-#define pull            lsl
-#define push            lsr
+#define lspull          lsl
+#define lspush          lsr
 #define get_byte_0	lsr #24
 #define get_byte_1	lsr #16
 #define get_byte_2	lsr #8
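
The assembler.h hunk renames the logical-shift helper macros pull/push to lspull/lspush; the old name push collides with the push instruction mnemonic in unified assembler syntax, so .S files using the real instruction could not include this header. The shift directions simply mirror between little- and big-endian builds. For orientation, a C sketch of what the get_byte_N shifts compute (illustrative only, not kernel code; the helper name is made up):

#include <stdint.h>

/* Extract byte n of a 32-bit word the way the get_byte_N macros do:
 * a single shift whose amount mirrors between endiannesses. */
static inline uint8_t get_byte_n(uint32_t word, unsigned int n)
{
#ifdef __ARMEB__	/* big-endian: byte 0 is the MSB */
	return (uint8_t)(word >> (24 - 8 * n));
#else			/* little-endian: byte 0 is the LSB */
	return (uint8_t)(word >> (8 * n));
#endif
}
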
arch/arm/include/asm/atomic.h +40 −4
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
arch/arm/include/asm/cmpxchg.h +6 −0
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
 
 	smp_mb();
+	prefetchw((const void *)ptr);
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	unsigned long oldval, res;
 
+	prefetchw((const void *)ptr);
+
 	switch (size) {
 #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	unsigned long long oldval;
 	unsigned long res;
 
+	prefetchw(ptr);
+
 	__asm__ __volatile__(
 "1:	ldrexd		%1, %H1, [%3]\n"
 "	teq		%1, %4\n"
arch/arm/include/asm/cputype.h +1 −0
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5		0xC050
 #define ARM_CPU_PART_CORTEX_A15		0xC0F0
 #define ARM_CPU_PART_CORTEX_A7		0xC070
+#define ARM_CPU_PART_CORTEX_A12		0xC0D0
 
 #define ARM_CPU_XSCALE_ARCH_MASK	0xe000
 #define ARM_CPU_XSCALE_ARCH_V1		0x2000
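
Finally, cputype.h learns the Cortex-A12 part number (primary part 0xC0D, occupying bits [15:4] of the MIDR). A sketch of how such a constant is typically consumed, assuming this era's read_cpuid_part_number() helper, which masks the part field out of the MIDR; the wrapper function name is hypothetical:

#include <asm/cputype.h>

/* Illustrative check, not from this commit: compare the MIDR's
 * primary part number against the newly added constant. */
static inline int cpu_is_cortex_a12(void)
{
	return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A12;
}
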