
Commit 19e3a2be authored by Konrad Rzeszutek Wilk, committed by Greg Kroah-Hartman

x86/bugs/intel: Set proper CPU features and setup RDS



commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream

Intel CPUs expose methods to:

 - Detect whether the RDS capability is available via CPUID.7.0.EDX[31],
 - The SPEC_CTRL MSR (0x48), where bit 2 is set to enable RDS,
 - MSR_IA32_ARCH_CAPABILITIES, where bit 4 means there is no need to enable RDS.

With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set the
SPEC_CTRL MSR at boot time to enable RDS if the platform requires it.
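
For illustration only, and not part of the commit: a minimal userspace sketch of the first detection step, checking CPUID.(EAX=7,ECX=0):EDX[31]. The file name rds_check.c and the build command are assumptions; the SPEC_CTRL and ARCH_CAPABILITIES MSR checks require ring 0 and are therefore left to the kernel code in the diff below.

/*
 * rds_check.c - hypothetical example, not from this patch.
 * Prints whether the CPU advertises the RDS control bit,
 * CPUID.(EAX=7,ECX=0):EDX[31].
 * Build (assumed): gcc -O2 -o rds_check rds_check.c
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid_count() returns 0 if CPUID leaf 7 is not supported. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 7 not available");
		return 1;
	}

	printf("RDS capability (CPUID.7.0.EDX[31]): %s\n",
	       (edx & (1u << 31)) ? "present" : "absent");
	return 0;
}

The corresponding MSR bits (SPEC_CTRL bit 2 and ARCH_CAPABILITIES bit 4) can only be read in ring 0, for example via the msr-tools rdmsr utility or by the kernel itself, as the diff below does with rdmsrl(). spec_store_bypass_disable= itself is a kernel command-line parameter, e.g. appended to the boot line as spec_store_bypass_disable=on.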

Note that this does not fix the KVM case where the SPEC_CTRL is exposed to
guests, which can muck with it; see the patch titled:
 KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.

And for the firmware side (where IBRS needs to be set), see the patch titled:
 x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits

[ tglx: Disentangled it from the Intel implementation and kept the call order ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6f70a553
arch/x86/include/asm/msr-index.h: +6 −0
@@ -40,6 +40,7 @@
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_RDS			(1 << 2)   /* Reduced Data Speculation */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -61,6 +62,11 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_RDS_NO			(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Reduced Data Speculation control
+						    * required.
+						    */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
arch/x86/kernel/cpu/bugs.c: +28 −2
@@ -116,7 +116,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
 
 void x86_spec_ctrl_set(u64 val)
 {
-	if (val & ~SPEC_CTRL_IBRS)
+	if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
 		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
 	else
 		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
@@ -443,8 +443,28 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 		break;
 	}
 
-	if (mode != SPEC_STORE_BYPASS_NONE)
+	/*
+	 * We have three CPU feature flags that are in play here:
+	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+	 *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+	 */
+	if (mode != SPEC_STORE_BYPASS_NONE) {
 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+		/*
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+		 * a completely different MSR and bit dependent on family.
+		 */
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+			x86_spec_ctrl_set(SPEC_CTRL_RDS);
+			break;
+		case X86_VENDOR_AMD:
+			break;
+		}
+	}
 
 	return mode;
 }
@@ -458,6 +478,12 @@ static void ssb_select_mitigation()
 
 #undef pr_fmt
 
+void x86_spec_ctrl_setup_ap(void)
+{
+	if (boot_cpu_has(X86_FEATURE_IBRS))
+		x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
+}
+
 #ifdef CONFIG_SYSFS
 
 ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
arch/x86/kernel/cpu/common.c: +6 −4
@@ -903,7 +903,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (!x86_match_cpu(cpu_no_spec_store_bypass))
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+	   !(ia32_cap & ARCH_CAP_RDS_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (x86_match_cpu(cpu_no_speculation))
@@ -915,9 +919,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (x86_match_cpu(cpu_no_meltdown))
 		return;
 
-	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
 	/* Rogue Data Cache Load? No! */
 	if (ia32_cap & ARCH_CAP_RDCL_NO)
 		return;
@@ -1339,6 +1340,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #endif
 	mtrr_ap_init();
 	validate_apic_and_package_id(c);
+	x86_spec_ctrl_setup_ap();
 }
 
 struct msr_range {
arch/x86/kernel/cpu/cpu.h: +3 −0
@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+
+extern void x86_spec_ctrl_setup_ap(void);
+
 #endif /* ARCH_X86_CPU_H */
arch/x86/kernel/cpu/intel.c: +1 −0
@@ -154,6 +154,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+		setup_clear_cpu_cap(X86_FEATURE_RDS);
 	}
 
 	/*