Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 10876376 authored by Venkatesh Pallipadi, committed by H. Peter Anvin
Browse files

x86, pat: Lookup the protection from memtype list on vm_insert_pfn()



Lookup the reserved memtype during vm_insert_pfn and use that memtype
for the new mapping. This takes care of the handling of the vm_insert_pfn()
interface in track_pfn_vma*/untrack_pfn_vma.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 637b86e7
Loading
Loading
Loading
Loading
+9 −15
Original line number Diff line number Diff line
@@ -848,11 +848,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
@@ -880,20 +875,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
/*
 * track_pfn_vma_new - register/derive the PAT memtype for a new pfn mapping.
 *
 * Called when a vma gains a new raw-pfn mapping. Two cases are handled:
 *
 *  - remap_pfn_range() style vmas (is_linear_pfn_mapping() == TRUE): the
 *    whole physical chunk covering the vma, starting at vm_pgoff, is
 *    reserved via reserve_pfn_range(), which receives @prot and may
 *    adjust the caching bits in it.
 *
 *  - vm_insert_pfn() style mappings: no reservation is made here;
 *    instead the memtype already recorded for this pfn is looked up
 *    (lookup_memtype) and folded into *prot, so the new PTE gets a
 *    cache attribute consistent with the existing reservation.
 *
 * Returns 0 on success, or the error from reserve_pfn_range().
 * NOTE(review): @size is unused in the vm_insert_pfn path — presumably
 * that path is always a single page; confirm against callers.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() vmas: reserve the full physical range up
	 * front. (vm_insert_pfn() mappings are handled below via a
	 * memtype lookup instead of a reservation.)
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	/* Without PAT there are no per-range memtypes to honor. */
	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	/* Replace only the cache-attribute bits; keep the rest of vm_page_prot. */
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

@@ -908,11 +907,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;