Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cc6810e3 authored by Linus Torvalds
Browse files

Merge branch 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull cpu hotplug fixes from Thomas Gleixner:
 "Two fixes for the cpu hotplug machinery:

   - Replace the overly clever 'SMT disabled by BIOS' detection logic as
     it breaks KVM scenarios and prevents speculation control updates
     when the Hyperthreads are brought online late after boot.

   - Remove a redundant invocation of the speculation control update
     function"

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Fix "SMT disabled by BIOS" detection for KVM
  x86/speculation: Remove redundant arch_smt_update() invocation
parents 58f6d428 b284909a
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -71,7 +71,7 @@ void __init check_bugs(void)
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
 	 */
-	cpu_smt_check_topology_early();
+	cpu_smt_check_topology();
 
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
+2 −1
Original line number Original line Diff line number Diff line
@@ -26,6 +26,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/trace_events.h>
@@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
 			 * Warn upon starting the first VM in a potentially
 			 * insecure environment.
 			 */
-			if (cpu_smt_control == CPU_SMT_ENABLED)
+			if (sched_smt_active())
 				pr_warn_once(L1TF_MSG_SMT);
 			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
 				pr_warn_once(L1TF_MSG_L1D);
+0 −2
Original line number Original line Diff line number Diff line
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
 #else
 # define cpu_smt_control		(CPU_SMT_ENABLED)
 static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif


+5 −33
Original line number Original line Diff line number Diff line
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
-
 void __init cpu_smt_disable(bool force)
 {
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
 
 /*
  * The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
-	if (!topology_smt_supported())
-		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
  */
 void __init cpu_smt_check_topology(void)
 {
-	if (!cpu_smt_available)
+	if (!topology_smt_supported())
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 }


@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-	if (topology_is_primary_thread(cpu))
+	if (cpu_smt_control == CPU_SMT_ENABLED)
 		return true;
 
-	/*
-	 * If the CPU is not a 'primary' thread and the booted_once bit is
-	 * set then the processor has SMT support. Store this information
-	 * for the late check of SMT support in cpu_smt_check_topology().
-	 */
-	if (per_cpu(cpuhp_state, cpu).booted_once)
-		cpu_smt_available = true;
-
-	if (cpu_smt_control == CPU_SMT_ENABLED)
+	if (topology_is_primary_thread(cpu))
 		return true;
 
 	/*
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 		 */
 		cpuhp_offline_cpu_device(cpu);
 	}
-	if (!ret) {
+	if (!ret)
 		cpu_smt_control = ctrlval;
-		arch_smt_update();
-	}
 	cpu_maps_update_done();
 	return ret;
 }
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void)
 
 	cpu_maps_update_begin();
 	cpu_smt_control = CPU_SMT_ENABLED;
-	arch_smt_update();
 	for_each_present_cpu(cpu) {
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+1 −0
Original line number Original line Diff line number Diff line
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 
 #ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
 
 static inline void set_idle_cores(int cpu, int val)
 {
Loading