
Commit 2d9ce177 authored by Avi Kivity, committed by Linus Torvalds

i386: Allow KVM on i386 nonpae



Currently, CONFIG_X86_CMPXCHG64 both enables boot-time checking of
the cmpxchg8b feature and enables compilation of the set_64bit() family.
Since the option is dependent on PAE, and since KVM depends on set_64bit(),
this effectively disables KVM on i386 nopae.

Simplify by removing the config option altogether: the boot check is made
dependent on CONFIG_X86_PAE directly, and the set_64bit() family is exposed
without constraints.  It is up to users to check for the feature flag (KVM
does not, as virtualization extensions imply its existence).
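
For illustration only (an editor's sketch, not part of the commit): one way a caller could perform that feature-flag check on i386, assuming the 2.6.22-era headers; the helper name store_u64_atomic is made up.

#include <linux/errno.h>
#include <asm/processor.h>	/* boot_cpu_data */
#include <asm/cpufeature.h>	/* boot_cpu_has(), X86_FEATURE_CX8 */
#include <asm/system.h>		/* pulls in set_64bit() on i386 */

/* Store a 64-bit value atomically, but only on CPUs that implement
 * cmpxchg8b; without the feature the caller must fall back to a
 * locked code path. */
static int store_u64_atomic(unsigned long long *p, unsigned long long val)
{
	if (!boot_cpu_has(X86_FEATURE_CX8))
		return -ENODEV;

	set_64bit(p, val);	/* atomic 64-bit store built on cmpxchg8b */
	return 0;
}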

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3e1f900b
+0 −5
@@ -297,11 +297,6 @@ config X86_POPAD_OK
 	depends on !M386
 	default y
 
-config X86_CMPXCHG64
-	bool
-	depends on X86_PAE
-	default y
-
 config X86_ALIGNMENT_16
 	bool
 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+0 −1
@@ -166,7 +166,6 @@ CONFIG_X86_WP_WORKS_OK=y
 CONFIG_X86_INVLPG=y
 CONFIG_X86_BSWAP=y
 CONFIG_X86_POPAD_OK=y
-CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_GOOD_APIC=y
 CONFIG_X86_INTEL_USERCOPY=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y
+0 −1
@@ -52,7 +52,6 @@ CONFIG_X86_WP_WORKS_OK=y
 CONFIG_X86_INVLPG=y
 CONFIG_X86_BSWAP=y
 CONFIG_X86_POPAD_OK=y
-CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_GOOD_APIC=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y
 CONFIG_X86_TSC=y
+0 −1
@@ -11,7 +11,6 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on X86 && EXPERIMENTAL
-	depends on X86_CMPXCHG64 || 64BIT
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
+5 −9
@@ -3,14 +3,16 @@

 #include <linux/bitops.h> /* for LOCK_PREFIX */
 
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ *       you need to test for the feature in boot_cpu_data.
+ */
+
 #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-
-#ifdef CONFIG_X86_CMPXCHG64
-
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
  __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
  __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
-#endif
-
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
 				      unsigned long long new)
 {
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
 	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
 					(unsigned long long)(n)))
 #endif
-
-#endif
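
As the header comment added above notes, callers of set_64bit(), __cmpxchg64() and their variants are now expected to test for the feature in boot_cpu_data themselves. A usage sketch (an editor's illustration, not part of the commit; counter64_add is a made-up name) of the classic compare-and-swap retry loop built on __cmpxchg64():

#include <asm/system.h>		/* __cmpxchg64() on i386 */

/* Add to a 64-bit counter without a lock: re-read, compute, and retry
 * until no other CPU raced in between.  Valid only on CPUs that have
 * cmpxchg8b, which the caller is assumed to have verified, e.g. via
 * boot_cpu_has(X86_FEATURE_CX8). */
static unsigned long long counter64_add(unsigned long long *ctr,
					unsigned long long delta)
{
	unsigned long long old, new;

	do {
		old = *ctr;
		new = old + delta;
	} while (__cmpxchg64(ctr, old, new) != old);

	return new;
}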