
Commit 0004a9df authored by Ralf Baechle

[MIPS] Cleanup memory barriers for weakly ordered systems.



Also, the R4000 / R4600 LL/SC instructions imply a sync, so no explicit sync is needed.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 08f57f7f
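In concrete terms, the patch moves ordering out of the LL/SC asm and into explicit smp_mb() calls around it. A rough standalone sketch of the resulting shape (MIPS-only; smp_mb_sketch() and add_return_sketch() are illustrative stand-ins, not the kernel's functions):

/* Post-patch shape of an atomic_add_return()-style primitive:
 * full barriers outside the LL/SC loop instead of a SYNC embedded
 * in the asm.  Compile for MIPS32 or later. */
static inline void smp_mb_sketch(void)
{
	__asm__ __volatile__("sync" : : : "memory");
}

static inline int add_return_sketch(int i, int *counter)
{
	int old, tmp;

	smp_mb_sketch();	/* order earlier accesses before the RMW */
	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"	/* old = *counter (load-linked) */
	"	addu	%1, %0, %3	\n"	/* tmp = old + i                */
	"	sc	%1, %2		\n"	/* try to store tmp             */
	"	beqz	%1, 1b		\n"	/* retry if the store failed    */
	: "=&r" (old), "=&r" (tmp), "+m" (*counter)
	: "Ir" (i));
	smp_mb_sketch();	/* order the RMW before later accesses */

	return old + i;
}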
arch/mips/Kconfig +4 −0
@@ -1277,6 +1277,7 @@ config CPU_RM9000
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 config CPU_SB1
 	bool "SB1"
@@ -1285,6 +1286,7 @@ config CPU_SB1
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 endchoice
 
@@ -1345,6 +1347,8 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+config WEAK_ORDERING
+	bool
 endmenu
 
 #
arch/mips/kernel/smp.c +3 −3
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 
 	spin_lock(&smp_call_lock);
 	call_data = &data;
-	mb();
+	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
 	for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
 	 * Notify initiating CPU that I've grabbed the data and am
 	 * about to execute the function.
 	 */
-	mb();
+	smp_mb();
 	atomic_inc(&call_data->started);
 
 	/*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
 	irq_exit();
 
 	if (wait) {
-		mb();
+		smp_mb();
 		atomic_inc(&call_data->finished);
 	}
 }
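The three mb() → smp_mb() conversions above implement a publish/acknowledge handshake. A hedged userspace model of that protocol in C11 (struct and function names are stand-ins for the kernel's, and a plain fence stands in for smp_mb()):

#include <stdatomic.h>

struct call_data {			/* stand-in for the kernel struct */
	void (*func)(void *);
	void *info;
	atomic_int started;
};

static struct call_data *_Atomic call_data;

/* Initiator side (cf. smp_call_function): publish the data, then
 * make sure the stores are visible before the "IPI". */
void initiate(struct call_data *data)
{
	atomic_store_explicit(&call_data, data, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	/* ... send the IPI here ... */
}

/* Responder side (cf. smp_call_function_interrupt): grab the data,
 * fence, then acknowledge via started++. */
void respond(void)
{
	struct call_data *d =
		atomic_load_explicit(&call_data, memory_order_relaxed);

	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	atomic_fetch_add(&d->started, 1);
	d->func(d->info);
}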
include/asm-mips/atomic.h +25 −12
@@ -15,6 +15,7 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	 subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 1b					\n"
 		"	 subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	 dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 1b					\n"
 		"	 dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
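A caller-side view of why these return-value primitives must be full barriers: anything lock-like built on them relies on the ordering. A toy ticket lock in portable C11, with atomic_fetch_add() standing in for atomic_add_return() (illustrative names, not kernel API):

#include <stdatomic.h>

static atomic_uint next_ticket, now_serving;

void ticket_lock(void)
{
	/* Fully ordered RMW: accesses after ticket_lock() stay after it. */
	unsigned int me = atomic_fetch_add(&next_ticket, 1);

	while (atomic_load_explicit(&now_serving, memory_order_acquire) != me)
		;				/* spin for our turn */
}

void ticket_unlock(void)
{
	/* Release store: the critical section cannot leak past it. */
	unsigned int cur = atomic_load_explicit(&now_serving,
						memory_order_relaxed);
	atomic_store_explicit(&now_serving, cur + 1, memory_order_release);
}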


include/asm-mips/barrier.h +132 −0 (new file)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define __WEAK_ORDERING_MB	"       sync	\n"
#else
#define __WEAK_ORDERING_MB	"		\n"
#endif

#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)

#endif /* __ASM_BARRIER_H */
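A hedged usage sketch for the smp_wmb()/smp_rmb() pair defined above, modelled in portable C11 so it compiles anywhere; the fences play the role SYNC plays when CONFIG_WEAK_ORDERING and CONFIG_SMP are both set:

#include <stdatomic.h>

static int payload;
static atomic_int ready;

void producer(void)
{
	payload = 42;					/* plain data store  */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb()       */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

int consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb()       */
	return payload;					/* guaranteed to be 42 */
}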
include/asm-mips/bitops.h +8 −19
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  */
 #ifndef _ASM_BITOPS_H
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>		/* sigh ... */
 #include <asm/cpu-features.h>
@@ -204,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -226,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -250,6 +245,8 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -275,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	" __SC 	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -298,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	" __SC 	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -322,6 +313,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -346,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -368,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	" __SC	"\t%2, %1				\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -391,6 +378,8 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 #include <asm-generic/bitops/non-atomic.h>
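Since test_and_set_bit() and friends now provide full-barrier semantics on their own, a minimal bit lock built on them needs no extra fences. A userspace sketch with C11 atomics as stand-ins for the kernel ops:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong word;

static bool test_and_set_bit_sketch(unsigned int nr, atomic_ulong *w)
{
	unsigned long mask = 1UL << nr;

	/* seq_cst RMW: fully ordered, like the patched kernel op. */
	return atomic_fetch_or(w, mask) & mask;
}

void bit_lock(unsigned int nr)
{
	while (test_and_set_bit_sketch(nr, &word))
		;			/* spin until the bit was clear */
}

void bit_unlock(unsigned int nr)
{
	/* The clearing RMW is likewise a full barrier in this model. */
	atomic_fetch_and(&word, ~(1UL << nr));
}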