Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9298b815 authored by Dave Hansen, committed by H. Peter Anvin
Browse files

x86: Add more disabled features



The original motivation for these patches was for an Intel CPU
feature called MPX.  The patch to add a disabled feature for it
will go in with the other parts of the support.

But, in the meantime, there are a few features other than MPX
about which we can make assumptions at compile time based on
compile options.  Add them to disabled-features.h and check them
with cpu_feature_enabled().

Note that this gets rid of the last things that needed an #ifdef
CONFIG_X86_64 in cpufeature.h.  Yay!

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140911211524.C0EC332A@viggo.jf.intel.com


Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 381aa07a
Loading
Loading
Loading
Loading
+0 −20
Original line number Original line Diff line number Diff line
@@ -324,7 +324,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
} while (0)
} while (0)


#define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_vme		boot_cpu_has(X86_FEATURE_VME)
#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
@@ -343,9 +342,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_k6_mtrr		boot_cpu_has(X86_FEATURE_K6_MTRR)
#define cpu_has_cyrix_arr	boot_cpu_has(X86_FEATURE_CYRIX_ARR)
#define cpu_has_centaur_mcr	boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
@@ -380,22 +376,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)


#ifdef CONFIG_X86_64

#undef  cpu_has_vme
#define cpu_has_vme		0

#undef  cpu_has_k6_mtrr
#define cpu_has_k6_mtrr		0

#undef  cpu_has_cyrix_arr
#define cpu_has_cyrix_arr	0

#undef  cpu_has_centaur_mcr
#define cpu_has_centaur_mcr	0

#endif /* CONFIG_X86_64 */

#if __GNUC__ >= 4
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
extern bool __static_cpu_has_safe(u16 bit);
+14 −2
Original line number Original line Diff line number Diff line
@@ -10,13 +10,25 @@
 * cpu_feature_enabled().
 * cpu_feature_enabled().
 */
 */


#ifdef CONFIG_X86_64
# define DISABLE_VME		(1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
#else
# define DISABLE_VME		0
# define DISABLE_K6_MTRR	0
# define DISABLE_CYRIX_ARR	0
# define DISABLE_CENTAUR_MCR	0
#endif /* CONFIG_X86_64 */

/*
/*
 * Make sure to add features to the correct mask
 * Make sure to add features to the correct mask
 */
 */
#define DISABLED_MASK0	0
#define DISABLED_MASK0	(DISABLE_VME)
#define DISABLED_MASK1	0
#define DISABLED_MASK1	0
#define DISABLED_MASK2	0
#define DISABLED_MASK2	0
#define DISABLED_MASK3	0
#define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
#define DISABLED_MASK4	0
#define DISABLED_MASK4	0
#define DISABLED_MASK5	0
#define DISABLED_MASK5	0
#define DISABLED_MASK6	0
#define DISABLED_MASK6	0
+1 −1
Original line number Original line Diff line number Diff line
@@ -1391,7 +1391,7 @@ void cpu_init(void)


	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);


	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);


	load_current_idt();
	load_current_idt();
+3 −3
Original line number Original line Diff line number Diff line
@@ -707,7 +707,7 @@ void __init mtrr_bp_init(void)
	} else {
	} else {
		switch (boot_cpu_data.x86_vendor) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_or_mask = SIZE_OR_MASK_BITS(32);
@@ -715,14 +715,14 @@ void __init mtrr_bp_init(void)
			}
			}
			break;
			break;
		case X86_VENDOR_CENTAUR:
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
				size_and_mask = 0;
			}
			}
			break;
			break;
		case X86_VENDOR_CYRIX:
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
				size_and_mask = 0;