Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2003cd90 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/pageattr: Prevent PSE and GLOABL leftovers to confuse pmd/pte_present and pmd_huge
  Revert "x86, mm: Make spurious_fault check explicitly check the PRESENT bit"
  x86/mm/numa: Don't check if node is NUMA_NO_NODE
  x86, efi: Make "noefi" really disable EFI runtime services
  x86/apic: Fix parsing of the 'lapic' cmdline option
parents 24e55910 a8aed3e0
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
{
	if (config_enabled(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (!strncmp(arg, "notscdeadline", 13))
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
+1 −7
Original line number Diff line number Diff line
@@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages give false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
+1 −2
Original line number Diff line number Diff line
@@ -97,7 +97,6 @@ void numa_set_node(int cpu, int node)
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
	set_cpu_numa_node(cpu, node);
}

+47 −3
Original line number Diff line number Diff line
@@ -472,6 +472,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
	 * set otherwise pmd_present/pmd_huge will return true even on
	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
	 * for the ancient hardware that doesn't support it.
	 */
	if (pgprot_val(new_prot) & _PAGE_PRESENT)
		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
	else
		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);

	new_prot = canon_pgprot(new_prot);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
@@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
@@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		/*
		 * Set the PSE flags only if the PRESENT flag is set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (pgprot_val(ref_prot) & _PAGE_PRESENT)
			pgprot_val(ref_prot) |= _PAGE_PSE;
		else
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
	}
#endif

	/*
	 * Set the GLOBAL flags only if the PRESENT flag is set
	 * otherwise pmd/pte_present will return true even on a non
	 * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
	 * for the ancient hardware that doesn't support it.
	 */
	if (pgprot_val(ref_prot) & _PAGE_PRESENT)
		pgprot_val(ref_prot) |= _PAGE_GLOBAL;
	else
		pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
		set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));

	if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
				PFN_DOWN(__pa(address)) + 1))
@@ -684,6 +716,18 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * Set the GLOBAL flags only if the PRESENT flag is
		 * set otherwise pte_present will return true even on
		 * a non present pte. The canon_pgprot will clear
		 * _PAGE_GLOBAL for the ancient hardware that doesn't
		 * support it.
		 */
		if (pgprot_val(new_prot) & _PAGE_PRESENT)
			pgprot_val(new_prot) |= _PAGE_GLOBAL;
		else
			pgprot_val(new_prot) &= ~_PAGE_GLOBAL;

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change it's attributes
+3 −2
Original line number Diff line number Diff line
@@ -85,9 +85,10 @@ int efi_enabled(int facility)
}
EXPORT_SYMBOL(efi_enabled);

static bool disable_runtime = false;
static int __init setup_noefi(char *arg)
{
	clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);
@@ -734,7 +735,7 @@ void __init efi_init(void)
	if (!efi_is_native())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	else {
		if (efi_runtime_init())
		if (disable_runtime || efi_runtime_init())
			return;
		set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
	}