
Commit ef26b169 authored by Linus Torvalds
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  include/linux/compiler-gcc4.h: Fix build bug - gcc-4.0.2 doesn't understand __builtin_object_size
  x86/alternatives: No need for alternatives-asm.h to re-invent stuff already in asm.h
  x86/alternatives: Check replacementlen <= instrlen at build time
  x86, 64-bit: Set data segments to null after switching to 64-bit mode
  x86: Clean up the loadsegment() macro
  x86: Optimize loadsegment()
  x86: Add missing might_fault() checks to copy_{to,from}_user()
  x86-64: __copy_from_user_inatomic() adjustments
  x86: Remove unused thread_return label from switch_to()
  x86, 64-bit: Fix bstep_iret jump
  x86: Don't use the strict copy checks when branch profiling is in use
  x86, 64-bit: Move K8 B step iret fixup to fault entry asm
  x86: Generate cmpxchg build failures
  x86: Add a Kconfig option to turn the copy_from_user warnings into errors
  x86: Turn the copy_from_user check into an (optional) compile time warning
  x86: Use __builtin_memset and __builtin_memcpy for memset/memcpy
  x86: Use __builtin_object_size() to validate the buffer size for copy_from_user()
parents a77d2e08 7cff7ce9
+14 −0
@@ -296,4 +296,18 @@ config OPTIMIZE_INLINING
 
 	  If unsure, say N.
 
+config DEBUG_STRICT_USER_COPY_CHECKS
+	bool "Strict copy size checks"
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	---help---
+	  Enabling this option turns a certain set of sanity checks for user
+	  copy operations into compile time failures.
+
+	  The copy_from_user() etc checks are there to help test if there
+	  are sufficient security checks on the length argument of
+	  the copy operation, by having gcc prove that the argument is
+	  within bounds.
+
+	  If unsure, or if you run an older (pre 4.4) gcc, say N.
+
 endmenu
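Note: for reference, here is a minimal user-space sketch of the kind of check this option turns into a build failure. The names (checked_copy(), copy_overflow_detected()) are hypothetical and this is not the kernel implementation, which lives in the x86 uaccess headers; the point is that gcc's __builtin_object_size() resolves the destination size at compile time, and a call to a function carrying the error/warning attribute only survives into the object file when the bad branch cannot be optimized away:

/* sketch.c - build with: gcc -O2 -c sketch.c (needs gcc >= 4.3 for the attribute) */
#include <stddef.h>
#include <string.h>

/* Deliberately never defined; any call that survives optimization
 * turns into a compile-time diagnostic because of the attribute. */
extern void copy_overflow_detected(void)
	__attribute__((error("copy length exceeds destination size")));

static inline void checked_copy(void *dst, const void *src, size_t len)
{
	size_t dst_size = __builtin_object_size(dst, 0);

	/* If both the length and the destination size are compile-time
	 * constants and the copy is provably too big, leave a call to the
	 * error-attributed function behind; for correct callers the branch
	 * is folded away at -O2 and no reference is emitted. */
	if (__builtin_constant_p(len) && dst_size != (size_t)-1 && len > dst_size)
		copy_overflow_detected();

	memcpy(dst, src, len);
}

void demo(const void *src)
{
	char buf[16];

	checked_copy(buf, src, 8);	/* ok: 8 <= sizeof(buf) */
	/* checked_copy(buf, src, 64); -- would abort the build at -O2 */
}

Whether an overflow like this is reported as a warning or as a hard failure is exactly the knob the DEBUG_STRICT_USER_COPY_CHECKS option above flips.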
+3 −7
 #ifdef __ASSEMBLY__
 
-#ifdef CONFIG_X86_32
-# define X86_ALIGN .long
-#else
-# define X86_ALIGN .quad
-#endif
+#include <asm/asm.h>
 
 #ifdef CONFIG_SMP
 	.macro LOCK_PREFIX
 1:	lock
 	.section .smp_locks,"a"
-	.align 4
-	X86_ALIGN 1b
+	_ASM_ALIGN
+	_ASM_PTR 1b
 	.previous
 	.endm
 #else
+1 −0
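Note: the one-byte addition in the hunk below ("rlen <= slen") is a pure build-time assertion. As a standalone illustration (assumed file name altcheck.c, not kernel code), the same trick can be reproduced with a file-scope asm statement: the assembler must fold the .byte expression into a single byte, so assembly fails as soon as the "replacement" region grows larger than the "original" one:

/* altcheck.c - build with: gcc -c altcheck.c */
asm(".pushsection .discard,\"\",@progbits\n"	/* any throwaway section works */
    "661:	.skip 3\n"			/* stands in for the original, 3 bytes    */
    "662:\n"
    "663:	.skip 2\n"			/* stands in for the replacement, 2 bytes */
    "664:\n"
    /* 0xff + replacementlen - sourcelen only fits in one byte while
     * replacementlen <= sourcelen; otherwise gas rejects the .byte
     * value and the build stops right here. */
    ".byte 0xff + (664b-663b) - (662b-661b)\n"
    ".popsection\n");

Changing the second .skip to anything larger than the first makes the object fail to assemble, which is the behaviour the alternatives macro relies on.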
@@ -84,6 +84,7 @@ static inline void alternatives_smp_switch(int smp) {}
      "	 .byte " __stringify(feature) "\n"	/* feature bit     */	\
      "	 .byte 662b-661b\n"			/* sourcelen       */	\
      "	 .byte 664f-663f\n"			/* replacementlen  */	\
+     "	 .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement     */	\
+86 −132
@@ -8,14 +8,50 @@
  *       you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr, v)							\
-	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+extern void __xchg_wrong_size(void);
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
 
 struct __xchg_dummy {
 	unsigned long a[100];
 };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
+#define __xchg(x, ptr, size)						\
+({									\
+	__typeof(*(ptr)) __x = (x);					\
+	switch (size) {							\
+	case 1:								\
+		asm volatile("xchgb %b0,%1"				\
+			     : "=q" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("xchgw %w0,%1"				\
+			     : "=r" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("xchgl %0,%1"				\
+			     : "=r" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	default:							\
+		__xchg_wrong_size();					\
+	}								\
+	__x;								\
+})
+
+#define xchg(ptr, v)							\
+	__xchg((v), (ptr), sizeof(*ptr))
+
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
 		       (unsigned int)((value) >> 32))			\
 	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	return x;
-}
+extern void __cmpxchg_wrong_size(void);
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  */
+#define __raw_cmpxchg(ptr, old, new, size, lock)			\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) {							\
+	case 1:								\
+		asm volatile(lock "cmpxchgb %b1,%2"			\
+			     : "=a"(__ret)				\
+			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile(lock "cmpxchgw %w1,%2"			\
+			     : "=a"(__ret)				\
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile(lock "cmpxchgl %1,%2"			\
+			     : "=a"(__ret)				\
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	__ret;								\
+})
+
+#define __cmpxchg(ptr, old, new, size)					\
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
-				       (unsigned long)(n),		\
-				       sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n)						\
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
-					    (unsigned long)(n),		\
-					    sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-					     (unsigned long)(n),	\
-					     sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new)						\
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new)					\
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new)					\
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 					       (unsigned long long)(n)))
 #endif
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					   unsigned long old,
-					   unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
 					     unsigned long long old,
 					     unsigned long long new)
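Note: the "Generate cmpxchg build failures" change above works by declaring __xchg_wrong_size()/__cmpxchg_wrong_size() but never defining them, and referencing them only from the default: branch of the size switch. A minimal stand-alone sketch of the same idea (hypothetical names, x86-64 user space assumed, built with -O2 so dead branches are discarded, as the kernel build always does):

/* swap_sketch.c - gcc -O2 -o swap_sketch swap_sketch.c */
extern void __bad_swap_size(void);	/* declared on purpose, defined nowhere */

#define swap_in_place(ptr, val)						\
({									\
	__typeof__(*(ptr)) __new = (val);				\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
		asm volatile("xchgl %k0,%1"				\
			     : "+r" (__new), "+m" (*(ptr))		\
			     : : "memory");				\
		break;							\
	case 8:								\
		asm volatile("xchgq %q0,%1"				\
			     : "+r" (__new), "+m" (*(ptr))		\
			     : : "memory");				\
		break;							\
	default:							\
		__bad_swap_size();	/* survives only for bad sizes */ \
	}								\
	__new;								\
})

int main(void)
{
	long v = 1;
	long old = swap_in_place(&v, 2L);	/* ok: sizeof(long) == 8 */

	/* short s = 0; swap_in_place(&s, (short)1);
	 *   -- compiles, but linking fails with an undefined reference
	 *      to __bad_swap_size(), flagging the unsupported size.   */
	return (int)old;
}

With a supported operand size the default: branch is proven dead and no reference to the undefined symbol is emitted; with an unsupported size the reference survives and the link fails, instead of the old code silently falling through.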
+91 −143
@@ -3,9 +3,6 @@
 
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
-						 (ptr), sizeof(*(ptr))))
-
 #define __xg(x) ((volatile long *)(x))
 
 static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
@@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 
 #define _set_64bit set_64bit
 
+extern void __xchg_wrong_size(void);
+extern void __cmpxchg_wrong_size(void);
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %k0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 8:
-		asm volatile("xchgq %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	return x;
-}
+#define __xchg(x, ptr, size)						\
+({									\
+	__typeof(*(ptr)) __x = (x);					\
+	switch (size) {							\
+	case 1:								\
+		asm volatile("xchgb %b0,%1"				\
+			     : "=q" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("xchgw %w0,%1"				\
+			     : "=r" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("xchgl %k0,%1"				\
+			     : "=r" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	case 8:								\
+		asm volatile("xchgq %0,%1"				\
+			     : "=r" (__x)				\
+			     : "m" (*__xg(ptr)), "0" (__x)		\
+			     : "memory");				\
+		break;							\
+	default:							\
+		__xchg_wrong_size();					\
+	}								\
+	__x;								\
+})
+
+#define xchg(ptr, v)							\
+	__xchg((v), (ptr), sizeof(*ptr))
+
+#define __HAVE_ARCH_CMPXCHG 1
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  */
+#define __raw_cmpxchg(ptr, old, new, size, lock)			\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) {							\
+	case 1:								\
+		asm volatile(lock "cmpxchgb %b1,%2"			\
+			     : "=a"(__ret)				\
+			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile(lock "cmpxchgw %w1,%2"			\
+			     : "=a"(__ret)				\
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile(lock "cmpxchgl %k1,%2"			\
+			     : "=a"(__ret)				\
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	case 8:								\
+		asm volatile(lock "cmpxchgq %1,%2"			\
+			     : "=a"(__ret)				\
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
+			     : "memory");				\
+		break;							\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	__ret;								\
+})
 
-#define __HAVE_ARCH_CMPXCHG 1
+#define __cmpxchg(ptr, old, new, size)					\
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define __sync_cmpxchg(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
 
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					   unsigned long old,
-					   unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define __cmpxchg_local(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile("cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define cmpxchg(ptr, old, new)						\
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new)					\
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new)					\
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 
-#define cmpxchg(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
-				       (unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64(ptr, o, n)						\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg((ptr), (o), (n));					\
 })
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-					     (unsigned long)(n),	\
-					     sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n)						\
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
-					    (unsigned long)(n),		\
-					    sizeof(*(ptr))))
+
 #define cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
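Note: the semantics spelled out in the header comment above ("Compare OLD with MEM, if identical, store NEW in MEM. Return the initial value in MEM.") can be exercised from user space with the equivalent gcc builtin rather than the kernel macro; a tiny self-contained check:

/* cas_demo.c - gcc -O2 -o cas_demo cas_demo.c */
#include <assert.h>

int main(void)
{
	unsigned long mem = 5;

	/* mem equals the expected old value: 7 is stored, 5 is returned */
	assert(__sync_val_compare_and_swap(&mem, 5UL, 7UL) == 5);
	assert(mem == 7);

	/* mem no longer equals 5: nothing is stored, current value 7 is returned */
	assert(__sync_val_compare_and_swap(&mem, 5UL, 9UL) == 7);
	assert(mem == 7);

	return 0;
}

Success is detected by comparing the return value with the expected old value, exactly as the comment describes.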