
Commit 047ac82a authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/cpu: Switch to arch_cpu_finalize_init()



commit 7c7077a72674402654f3291354720cd73cdf649e upstream

check_bugs() is a dumping ground for finalizing the CPU bringup. Only parts of
it have to do with actual CPU bugs.

Split it apart into arch_cpu_finalize_init() and cpu_select_mitigations().

Fixup the bogus 32bit comments while at it.

No functional change.
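[Editor's note] This backport only carries the x86 side of the split. The generic hook it plugs into comes from a companion patch in the same series; a minimal sketch of that generic side, assuming the usual Kconfig-gated prototype with an empty inline fallback for architectures that do not select ARCH_HAS_CPU_FINALIZE_INIT, would look roughly like this (sketch only, not part of this commit):

/* Sketch, not part of this commit: assumed generic declaration of the hook. */
#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
void arch_cpu_finalize_init(void);
#else
static inline void arch_cpu_finalize_init(void) { }
#endif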

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230613224545.019583869@linutronix.de


Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ecc9d725
+1 −0
@@ -50,6 +50,7 @@ config X86
	select ARCH_CLOCKSOURCE_DATA
	select ARCH_DISCARD_MEMBLOCK
	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
+	select ARCH_HAS_CPU_FINALIZE_INIT
	select ARCH_HAS_DEBUG_VIRTUAL
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
+0 −2
@@ -4,8 +4,6 @@

#include <asm/processor.h>

-extern void check_bugs(void);
-
#if defined(CONFIG_CPU_SUP_INTEL)
void check_mpx_erratum(struct cpuinfo_x86 *c);
#else
+1 −50
@@ -9,7 +9,6 @@
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
-#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
@@ -25,9 +24,7 @@
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
-#include <asm/alternative.h>
#include <asm/pgtable.h>
-#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
@@ -115,21 +112,8 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

-void __init check_bugs(void)
+void __init cpu_select_mitigations(void)
{
-	identify_boot_cpu();
-
-	/*
-	 * identify_boot_cpu() initialized SMT support information, let the
-	 * core code know.
-	 */
-	cpu_smt_check_topology();
-
-	if (!IS_ENABLED(CONFIG_SMP)) {
-		pr_info("CPU: ");
-		print_cpu_info(&boot_cpu_data);
-	}
-
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
@@ -165,39 +149,6 @@ void __init check_bugs(void)
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
-
-	arch_smt_update();
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Check whether we are able to run this kernel safely on SMP.
-	 *
-	 * - i386 is no longer supported.
-	 * - In order to run on anything without a TSC, we need to be
-	 *   compiled for a i486.
-	 */
-	if (boot_cpu_data.x86 < 4)
-		panic("Kernel requires i486+ for 'invlpg' and other features");
-
-	init_utsname()->machine[1] =
-		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-	alternative_instructions();
-
-	fpu__init_check_bugs();
-#else /* CONFIG_X86_64 */
-	alternative_instructions();
-
-	/*
-	 * Make sure the first 2MB area is not mapped by huge pages
-	 * There are typically fixed size MTRRs in there and overlapping
-	 * MTRRs into large pages causes slow downs.
-	 *
-	 * Right now we don't do that with gbpages because there seems
-	 * very little benefit for that case.
-	 */
-	if (!direct_gbpages)
-		set_memory_4k((unsigned long)__va(0), 1);
-#endif
}

/*
+55 −0
@@ -13,14 +13,19 @@
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
+#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
+#include <linux/utsname.h>
+
+#include <asm/alternative.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
@@ -56,6 +61,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif
+#include <asm/set_memory.h>

#include "cpu.h"

@@ -2097,3 +2103,52 @@ void microcode_check(void)
	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
+
+void __init arch_cpu_finalize_init(void)
+{
+	identify_boot_cpu();
+
+	/*
+	 * identify_boot_cpu() initialized SMT support information, let the
+	 * core code know.
+	 */
+	cpu_smt_check_topology();
+
+	if (!IS_ENABLED(CONFIG_SMP)) {
+		pr_info("CPU: ");
+		print_cpu_info(&boot_cpu_data);
+	}
+
+	cpu_select_mitigations();
+
+	arch_smt_update();
+
+	if (IS_ENABLED(CONFIG_X86_32)) {
+		/*
+		 * Check whether this is a real i386 which is not longer
+		 * supported and fixup the utsname.
+		 */
+		if (boot_cpu_data.x86 < 4)
+			panic("Kernel requires i486+ for 'invlpg' and other features");
+
+		init_utsname()->machine[1] =
+			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+	}
+
+	alternative_instructions();
+
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/*
+		 * Make sure the first 2MB area is not mapped by huge pages
+		 * There are typically fixed size MTRRs in there and overlapping
+		 * MTRRs into large pages causes slow downs.
+		 *
+		 * Right now we don't do that with gbpages because there seems
+		 * very little benefit for that case.
+		 */
+		if (!direct_gbpages)
+			set_memory_4k((unsigned long)__va(0), 1);
+	} else {
+		fpu__init_check_bugs();
+	}
+}
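[Editor's note] Note how the old #ifdef CONFIG_X86_32 / #else blocks become IS_ENABLED() checks in the new function above, which is what the "fixup the bogus 32bit comments" part of the changelog refers to. Below is a standalone illustration of that pattern; the IS_ENABLED() define is a simplified stand-in for the kernel macro (which really tests whether a CONFIG_* symbol is defined), and the helper names are made up for the demo:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(); here a plain 0/1 value. */
#define CONFIG_X86_32	0
#define IS_ENABLED(option)	(option)

/* Hypothetical helpers standing in for the 32-bit and 64-bit tails above. */
static void utsname_fixup(void)      { puts("32-bit: clamp utsname machine field"); }
static void fpu_bug_checks(void)     { puts("32-bit: FPU bug checks"); }
static void keep_low_2mb_small(void) { puts("64-bit: keep first 2MB out of huge pages"); }

int main(void)
{
	/*
	 * Unlike #ifdef, both branches are parsed and type-checked on every
	 * configuration; the compiler then discards the dead branch.
	 */
	if (IS_ENABLED(CONFIG_X86_32)) {
		utsname_fixup();
		fpu_bug_checks();
	} else {
		keep_low_2mb_small();
	}
	return 0;
}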
+1 −0
@@ -79,6 +79,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);

unsigned int aperfmperf_get_khz(int cpu);
+void cpu_select_mitigations(void);

extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
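
[Editor's note] Taken together, the boot-time flow after this patch can be summarized as below. This is derived from the hunks above; the start_kernel() call site comes from the companion generic patch and is an assumption here, not part of this commit.

/*
 * start_kernel()                          generic code (companion patch, assumed)
 *   arch_cpu_finalize_init()              new, cpu/common.c
 *     identify_boot_cpu()
 *     cpu_smt_check_topology()
 *     cpu_select_mitigations()            was check_bugs(), cpu/bugs.c
 *     arch_smt_update()
 *     32-bit: i486 check + utsname fixup
 *     alternative_instructions()
 *     64-bit: set_memory_4k() on the first 2MB
 *     32-bit: fpu__init_check_bugs()
 */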