
Commit 982d789a authored by Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>, committed by H. Peter Anvin

x86: PAT: remove follow_pfnmap_pte in favor of follow_phys



Impact: Cleanup - removes a new function in favor of a recently modified older one.

Replace follow_pfnmap_pte in pat code with follow_phys. follow_phys also
returns the protection, eliminating the need for a pte_pgprot() call. Using
follow_phys also eliminates the need for pte_pa().
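For context, a minimal sketch of the caller-side pattern this enables (a hypothetical helper for illustration, not part of this diff; it assumes the follow_phys() interface as modified by parent commit d87fe660):

	/* Hypothetical example, not in the kernel tree: reserve one page. */
	static int example_reserve_one_page(struct vm_area_struct *vma,
					    unsigned long addr)
	{
		unsigned long prot;
		u64 paddr;

		/*
		 * Old pattern -- three steps on the looked-up pte:
		 *
		 *	pte_t pte;
		 *	if (follow_pfnmap_pte(vma, addr, &pte))
		 *		return -EINVAL;
		 *	paddr = pte_pa(pte);
		 *	... pte_pgprot(pte) ...
		 *
		 * New pattern -- follow_phys() returns the physical
		 * address and the raw protection bits in one call:
		 */
		if (follow_phys(vma, addr, 0, &prot, &paddr))
			return -EINVAL;

		/* prot is raw bits; wrap with __pgprot() where pgprot_t is expected. */
		return reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
	}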

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent d87fe660
arch/x86/include/asm/pgtable.h  +0 −5
@@ -230,11 +230,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
-static inline u64 pte_pa(pte_t pte)
-{
-	return pte_val(pte) & PTE_PFN_MASK;
-}
-
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
arch/x86/mm/pat.c  +11 −19
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	int retval = 0;
 	unsigned long i, j;
 	u64 paddr;
-	pgprot_t prot;
-	pte_t pte;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 
 	if (is_linear_pfn_mapping(vma)) {
 		/*
-		 * reserve the whole chunk starting from vm_pgoff,
-		 * But, we have to get the protection from pte.
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
 		 */
-		if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
-			return -1;
+			return -EINVAL;
 		}
-		prot = pte_pgprot(pte);
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
 	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
-		prot = pte_pgprot(pte);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
 	/* Reserve error: Cleanup partial reservation and return error */
 	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
 		free_pfn_range(paddr, PAGE_SIZE);
 	}
 
@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
 	unsigned long i;
 	u64 paddr;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	} else {
 		/* free entire vma, page by page, using the pfn from pte */
 		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			pte_t pte;
-
-			if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 				continue;
 
-			paddr = pte_pa(pte);
 			free_pfn_range(paddr, PAGE_SIZE);
 		}
 	}
include/linux/mm.h  +0 −3
@@ -1239,9 +1239,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
 
-int follow_pfnmap_pte(struct vm_area_struct *vma,
-				unsigned long address, pte_t *ret_ptep);
-
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
mm/memory.c  +0 −43
@@ -1168,49 +1168,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	return page;
 }
 
-int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
-			pte_t *ret_ptep)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-	struct page *page;
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (!is_pfn_mapping(vma))
-		goto err;
-
-	page = NULL;
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto err;
-
-	pud = pud_offset(pgd, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto err;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto err;
-
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-	pte = *ptep;
-	if (!pte_present(pte))
-		goto err_unlock;
-
-	*ret_ptep = pte;
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-
-err_unlock:
-	pte_unmap_unlock(ptep, ptl);
-err:
-	return -EINVAL;
-}
-
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {