
Commit e4b866ed authored by venkatesh.pallipadi@intel.com, committed by Ingo Molnar

x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param



Impact: cleanup

Change the protection parameter for track_pfn_vma_new() into a pgprot_t pointer.
A subsequent patch changes the x86 PAT handling to return a compatible
memtype in the pgprot_t, if what was requested cannot be allowed due to conflicts.
No functionality change in this patch; a sketch of the resulting
pointer-parameter pattern follows the sign-offs below.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent afc7d20c
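
To make the calling convention concrete, here is a minimal, self-contained
userspace sketch of the pointer-parameter pattern this patch prepares for.
Everything in it (demo_pgprot_t, demo_track, the DEMO_* memtypes) is
hypothetical and exists only to show the shape of the API: the callee may
overwrite *prot with a compatible protection, and the caller must use the
possibly-updated value.

#include <stdio.h>

/* Stand-in for the kernel's pgprot_t; illustrative only. */
typedef struct { unsigned long val; } demo_pgprot_t;

#define DEMO_WB 0x0UL	/* write-back: what the caller asked for */
#define DEMO_UC 0x2UL	/* uncached: what a conflict forces instead */

/* The callee may write a compatible memtype back through *prot,
 * mirroring what the follow-up patch lets track_pfn_vma_new() do. */
static int demo_track(demo_pgprot_t *prot, int conflict)
{
	if (conflict)
		prot->val = DEMO_UC;	/* hand back the compatible type */
	return 0;			/* success either way */
}

int main(void)
{
	demo_pgprot_t prot = { DEMO_WB };

	if (demo_track(&prot, 1) == 0)
		printf("mapping with memtype %#lx\n", prot.val);  /* 0x2 */
	return 0;
}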
arch/x86/mm/pat.c +3 −3
@@ -741,7 +741,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 * Note that this function can be called with caller trying to map only a
 * subrange/page inside the vma.
 */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, *prot);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
		if (retval)
			goto cleanup_ret;
	}
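
Note that reserve_pfn_range() itself still takes the protection by value, so
the converted call sites above simply dereference the new pointer. As a rough
sketch of where this is headed, the follow-up PAT patch can then fold the
memtype it actually obtained back into the caller's pgprot through that
pointer. The helper below is hypothetical (not part of this patch); the
macros are the x86 ones of this era.

/* Hypothetical helper: write the memtype PAT actually granted back
 * into the caller's pgprot via the new pointer parameter. */
static void demo_set_memtype(pgprot_t *prot, unsigned long got_memtype)
{
	*prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
			 got_memtype);
}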
include/asm-generic/pgtable.h +2 −2
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
					unsigned long pfn, unsigned long size)
{
	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
{
}
#else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
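
This keeps the usual asm-generic arrangement: architectures with PFN-map
tracking see the extern declaration, all others get a no-op inline, so
generic mm code can call track_pfn_vma_new() unconditionally. Condensed, the
pattern looks like the sketch below (the __HAVE_PFNMAP_TRACKING guard matches
what x86 defines in this era, but treat the exact spelling as an assumption).

#ifndef __HAVE_PFNMAP_TRACKING
/* No tracking on this architecture: report success unconditionally. */
static inline int track_pfn_vma_new(struct vm_area_struct *vma,
				    pgprot_t *prot, unsigned long pfn,
				    unsigned long size)
{
	return 0;
}
#else
/* Tracking architectures (x86 PAT) supply the real implementation. */
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn, unsigned long size);
#endif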
mm/memory.c +4 −3
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;

-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
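
For context, vm_insert_pfn() is typically reached from a driver's .fault
handler, which is unaffected by this change; only the internals now route the
protection through a local copy that track_pfn_vma_new() may adjust. A
minimal hypothetical caller (driver name, base pfn, and error handling all
invented; 2.6.28-era fault API, assuming <linux/mm.h>):

static unsigned long mydrv_base_pfn;	/* set at probe time; illustrative */

/* Hypothetical driver fault handler: maps one device page. */
static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = mydrv_base_pfn + vmf->pgoff;

	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}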
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not