
Commit 1ce42845 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Last minute x86 fixes:

   - Fix a softlockup detector warning and long delays if using ptdump
     with KASAN enabled.

   - Two more TSC-adjust fixes for interesting firmware interactions.

   - Two commits to fix an AMD CPU topology enumeration bug that caused
     a measurable gaming performance regression"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/ptdump: Fix soft lockup in page table walker
  x86/tsc: Make the TSC ADJUST sanitizing work for tsc_reliable
  x86/tsc: Avoid the large time jump when sanitizing TSC ADJUST
  x86/CPU/AMD: Fix Zen SMT topology
  x86/CPU/AMD: Bring back Compute Unit ID
parents fdb0ee7c 146fbb76
arch/x86/include/asm/processor.h  +1 −0
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
arch/x86/kernel/cpu/amd.c  +15 −1
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id  = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
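Note on the masks above: they decode AMD's extended topology leaf, CPUID 0x8000001e, where ECX[7:0] is the node ID and EBX carries the core/compute-unit identifiers. A minimal user-space sketch (not part of the commit; assumes GCC/Clang's <cpuid.h> on an AMD CPU exposing TOPOEXT) that reads the same fields:

/*
 * Illustrative only: decode the CPUID 0x8000001e fields that the
 * patched amd_get_topology() reads.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is not supported */
	if (!__get_cpuid(0x8000001e, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x8000001e not available\n");
		return 1;
	}

	/* same masks as the hunk above */
	unsigned int node_id  = ecx & 0xff;               /* was "& 7" before the fix */
	unsigned int siblings = ((ebx >> 8) & 0xff) + 1;  /* threads per core (Zen) /
	                                                     cores per compute unit (Fam 15h) */
	unsigned int ebx_id   = ebx & 0xff;               /* cu_id on Fam 15h, core id on Fam 17h+ */

	printf("node_id=%u siblings=%u ebx[7:0]=%u\n", node_id, siblings, ebx_id);
	return 0;
}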
arch/x86/kernel/cpu/common.c  +1 −0
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
arch/x86/kernel/smpboot.c  +9 −3
@@ -433,10 +433,16 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->cpu_core_id == o->cpu_core_id)
-			return topology_sane(c, o, "smt");
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+			if (c->cpu_core_id == o->cpu_core_id)
+				return topology_sane(c, o, "smt");
+
+			if ((c->cu_id != 0xff) &&
+			    (o->cu_id != 0xff) &&
+			    (c->cu_id == o->cu_id))
+				return topology_sane(c, o, "smt");
+		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
 		return topology_sane(c, o, "smt");
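The new branch treats two threads as SMT siblings when they share a compute unit even though their core IDs differ, with 0xff acting as the "no compute unit ID" sentinel set in identify_cpu() above. A standalone sketch of that predicate (hypothetical struct topo, not the kernel's cpuinfo_x86):

#include <stdio.h>

struct topo {
	unsigned int  pkg;      /* phys_proc_id */
	unsigned int  llc;      /* cpu_llc_id   */
	unsigned int  core_id;  /* cpu_core_id  */
	unsigned char cu_id;    /* compute unit id, 0xff if absent */
};

static int is_smt_sibling(const struct topo *a, const struct topo *b)
{
	if (a->pkg != b->pkg || a->llc != b->llc)
		return 0;
	if (a->core_id == b->core_id)
		return 1;
	/* compute-unit match only counts when both ids are valid */
	return a->cu_id != 0xff && b->cu_id != 0xff && a->cu_id == b->cu_id;
}

int main(void)
{
	/* two Fam 15h threads: different core ids, same compute unit */
	struct topo t0 = { .pkg = 0, .llc = 0, .core_id = 0, .cu_id = 0 };
	struct topo t1 = { .pkg = 0, .llc = 0, .core_id = 1, .cu_id = 0 };

	printf("siblings: %d\n", is_smt_sibling(&t0, &t1)); /* prints 1 */
	return 0;
}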
arch/x86/kernel/tsc.c  +3 −2
@@ -1356,6 +1356,9 @@ void __init tsc_init(void)
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);
 
+	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+	tsc_store_and_check_tsc_adjust(true);
+
 	/*
 	 * Secondary CPUs do not run through tsc_init(), so set up
 	 * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1389,6 @@
 
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
-	else
-		tsc_store_and_check_tsc_adjust(true);
 
 	check_system_tsc_reliable();
 
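What gets sanitized here is the per-CPU IA32_TSC_ADJUST MSR (0x3b), now checked unconditionally and before the cyc2ns scale factors are set up rather than only on the unsynchronized_tsc() path. A minimal user-space sketch (not part of the commit; assumes the msr module is loaded, root privileges, and a CPU with the TSC_ADJUST feature) that reads the same MSR on CPU 0:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_TSC_ADJUST 0x3b

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	uint64_t adj;
	/* /dev/cpu/N/msr uses the MSR index as the file offset */
	if (pread(fd, &adj, sizeof(adj), MSR_IA32_TSC_ADJUST) != sizeof(adj)) {
		perror("pread MSR_IA32_TSC_ADJUST");
		close(fd);
		return 1;
	}

	printf("CPU0 IA32_TSC_ADJUST = %lld\n", (long long)adj);
	close(fd);
	return 0;
}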