
Commit 238f58d8 authored by Linus Torvalds

Support strange discontiguous PFN remappings



These get created by some drivers that don't generally even want a pfn
remapping at all, but would really mostly prefer to just map pages
they've allocated individually instead.

For now, create a helper function that turns such an incomplete PFN
remapping call into a loop that does that explicit mapping.  In the long
run we almost certainly want to export a totally different interface for
that, though.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent eca35133
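
A minimal sketch of the kind of caller this change targets (hypothetical, not part of the commit): a driver mmap handler that remaps a single page into the middle of its vma. Because addr != vma->vm_start, remap_pfn_range() now routes the call through the new incomplete_pfn_remap() fallback, which inserts the page via insert_page() and warns about the bad form. The names example_pfn and example_mmap are invented for illustration.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: the PFN of a reserved page set up elsewhere. */
static unsigned long example_pfn;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * Remap one page into the middle of the vma instead of covering
	 * [vm_start, vm_end).  After this commit, remap_pfn_range()
	 * detects the partial range and falls back to the per-page loop.
	 */
	return remap_pfn_range(vma, vma->vm_start + PAGE_SIZE,
			       example_pfn, PAGE_SIZE, vma->vm_page_prot);
}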
include/linux/mm.h +1 −0
@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
mm/memory.c +92 −0
@@ -1146,6 +1146,95 @@ int zeromap_page_range(struct vm_area_struct *vma,
 	return err;
 }
 
+/*
+ * This is the old fallback for page remapping.
+ *
+ * For historical reasons, it only allows reserved pages. Only
+ * old drivers should use this, and they needed to mark their
+ * pages reserved for the old functions anyway.
+ */
+static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int retval;
+	pgd_t * pgd;
+	pud_t * pud;
+	pmd_t * pmd;
+	pte_t * pte;
+	spinlock_t *ptl;
+
+	retval = -EINVAL;
+	if (PageAnon(page) || !PageReserved(page))
+		goto out;
+	retval = -ENOMEM;
+	flush_dcache_page(page);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		goto out;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+	if (!pte)
+		goto out;
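+	/* Don't overwrite an existing mapping; the caller gets -EBUSY. */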
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	get_page(page);
+	inc_mm_counter(mm, file_rss);
+	page_add_file_rmap(page);
+	set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
+/*
+ * Somebody does a pfn remapping that doesn't actually work as a vma.
+ *
+ * Do it as individual pages instead, and warn about it. It's bad form,
+ * and very inefficient.
+ */
+static int incomplete_pfn_remap(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		unsigned long pfn, pgprot_t prot)
+{
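+	/* Rate-limit the warning: complain at most ten times in total. */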
+	static int warn = 10;
+	struct page *page;
+	int retval;
+
+	if (!(vma->vm_flags & VM_INCOMPLETE)) {
+		if (warn) {
+			warn--;
			printk("%s does an incomplete pfn remapping", current->comm);
+			dump_stack();
+		}
+	}
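+	/* Remember that we warned, and keep the core VM away from this vma. */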
+	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
+
+	if (start < vma->vm_start || end > vma->vm_end)
+		return -EINVAL;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	retval = 0;
+	page = pfn_to_page(pfn);
+	while (start < end) {
+		retval = insert_page(vma->vm_mm, start, page, prot);
+		if (retval < 0)
+			break;
+		start += PAGE_SIZE;
+		page++;
+	}
+	return retval;
+}
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1220,6 +1309,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
 
+	if (addr != vma->vm_start || end != vma->vm_end)
+		return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+
 	/*
 	 * Physically remapped pages are special. Tell the
 	 * rest of the world about it:
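
One driver-side consequence worth noting (the sketch below is assumed, not from the commit): insert_page() rejects anonymous and non-reserved pages, so a driver whose remap lands in this fallback must have marked its pages reserved beforehand, which is exactly what the commit message says old drivers already did for the old interfaces. example_alloc_mappable_page() is an invented name.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate a page that insert_page() will accept. */
static struct page *example_alloc_mappable_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	/* insert_page() fails with -EINVAL unless PageReserved() is set. */
	if (page)
		SetPageReserved(page);
	return page;
}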