Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d551aaa2 authored by Toshi Kani, committed by Thomas Gleixner
Browse files

x86/mm: Fix __split_large_page() to handle large PAT bit



__split_large_page() is called from __change_page_attr() to change
the mapping attribute by splitting a given large page into smaller
pages.  This function uses pte_pfn() and pte_pgprot() for PUD/PMD,
which do not handle the large PAT bit properly.

Fix __split_large_page() by using the corresponding pud/pmd pfn/
pgprot interfaces.

Also remove '#ifdef CONFIG_X86_64', which is not necessary.

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Robert Elliot <elliott@hpe.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1442514264-12475-11-git-send-email-toshi.kani@hpe.com


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3a19109e
Loading
Loading
Loading
Loading
+19 −12
Original line number Diff line number Diff line
@@ -605,7 +605,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned long pfn, pfninc = 1;
	unsigned long ref_pfn, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *tmp;
	pgprot_t ref_prot;
@@ -622,26 +622,33 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

	/* promote PAT bit to correct position */
	if (level == PG_LEVEL_2M)
	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/* clear PSE and promote PAT bit to correct position */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;

		/*
		 * Set the PSE flags only if the PRESENT flag is set
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (pgprot_val(ref_prot) & _PAGE_PRESENT)
			pgprot_val(ref_prot) |= _PAGE_PSE;
		else
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}
#endif

	/*
	 * Set the GLOBAL flags only if the PRESENT flag is set
@@ -657,7 +664,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));