
Commit ee498ed7 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] unpaged: anon in VM_UNPAGED



copy_one_pte needs to copy the anonymous COWed pages in a VM_UNPAGED area,
zap_pte_range needs to free them, do_wp_page needs to COW them: just like
ordinary pages, not like the unpaged.

But recognizing them is a little subtle: because PageReserved is no longer a
condition for remap_pfn_range, we can now mmap all of /dev/mem (whether the
distro permits, and whether it's advisable on this or that architecture, is
another matter).  So if we can see a PageAnon, it may not be ours to mess with
(or may be ours from elsewhere in the address space).  I suspect there's an
entertaining insoluble self-referential problem here, but the page_is_anon
function does a good practical job, and MAP_PRIVATE PROT_WRITE VM_UNPAGED will
always be an odd choice.
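
In code terms the check boils down to the small page_is_anon() helper added by this patch plus one caller pattern; a condensed sketch, paraphrasing the copy_one_pte() and zap_pte_range() hunks in the diff below (not standalone code):

	/* "ours" means: a valid, anonymous, still-mapped page whose
	 * anon_vma and index place it at exactly this address in this vma. */
	static inline int page_is_anon(struct page *page,
				struct vm_area_struct *vma, unsigned long addr)
	{
		return page && PageAnon(page) && page_mapped(page) &&
			page_address_in_vma(page, vma) == addr;
	}

	/* Caller pattern (as in copy_one_pte; zap_pte_range instead sets
	 * page = NULL so the pte is cleared without touching the page):
	 * resolve the pfn to a struct page if it is valid, then in a
	 * VM_UNPAGED vma only treat it as an ordinary page when it is ours. */
	pfn = pte_pfn(pte);
	page = pfn_valid(pfn) ? pfn_to_page(pfn) : NULL;
	if ((vm_flags & VM_UNPAGED) && !page_is_anon(page, vma, addr))
		goto out_set_pte;	/* not ours: duplicate the pte as is */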

While updating the comment on page_address_in_vma, I noticed a potential NULL
dereference in a path we don't actually take, and fixed it.
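
That fix amounts to guarding the vm_file dereference; a minimal sketch of the resulting check in page_address_in_vma(), mirroring the mm/rmap.c hunk below:

	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		/* vma->vm_file may be NULL: check before dereferencing
		 * f_mapping, and fail the lookup if the mappings differ. */
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	}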

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 920fc356
mm/memory.c  +41 −22
@@ -349,6 +349,22 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
 	dump_stack();
 }
 
+/*
+ * page_is_anon applies strict checks for an anonymous page belonging to
+ * this vma at this address.  It is used on VM_UNPAGED vmas, which are
+ * usually populated with shared originals (which must not be counted),
+ * but occasionally contain private COWed copies (when !VM_SHARED, or
+ * perhaps via ptrace when VM_SHARED).  An mmap of /dev/mem might window
+ * free pages, pages from other processes, or from other parts of this:
+ * it's tricky, but try not to be deceived by foreign anonymous pages.
+ */
+static inline int page_is_anon(struct page *page,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	return page && PageAnon(page) && page_mapped(page) &&
+		page_address_in_vma(page, vma) == addr;
+}
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -381,23 +397,22 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_set_pte;
 	}
 
-	/* If the region is VM_UNPAGED, the mapping is not
-	 * mapped via rmap - duplicate the pte as is.
-	 */
-	if (vm_flags & VM_UNPAGED)
+	pfn = pte_pfn(pte);
+	page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
+
+	if (unlikely(vm_flags & VM_UNPAGED))
+		if (!page_is_anon(page, vma, addr))
 			goto out_set_pte;
 
-	pfn = pte_pfn(pte);
-	/* If the pte points outside of valid memory but
+	/*
+	 * If the pte points outside of valid memory but
 	 * the region is not VM_UNPAGED, we have a problem.
 	 */
-	if (unlikely(!pfn_valid(pfn))) {
+	if (unlikely(!page)) {
 		print_bad_pte(vma, pte, addr);
 		goto out_set_pte; /* try to do something sane */
 	}
 
-	page = pfn_to_page(pfn);
-
 	/*
 	 * If it's a COW mapping, write protect it both
 	 * in the parent and the child
@@ -568,17 +583,20 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			continue;
 		}
 		if (pte_present(ptent)) {
-			struct page *page = NULL;
+			struct page *page;
+			unsigned long pfn;
 
 			(*zap_work) -= PAGE_SIZE;
 
-			if (!(vma->vm_flags & VM_UNPAGED)) {
-				unsigned long pfn = pte_pfn(ptent);
-				if (unlikely(!pfn_valid(pfn)))
+			pfn = pte_pfn(ptent);
+			page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
+
+			if (unlikely(vma->vm_flags & VM_UNPAGED)) {
+				if (!page_is_anon(page, vma, addr))
+					page = NULL;
+			} else if (unlikely(!page))
 				print_bad_pte(vma, ptent, addr);
-				else
-					page = pfn_to_page(pfn);
-			}
 
 			if (unlikely(details) && page) {
 				/*
 				 * unmap_shared_mapping_pages() wants to
@@ -1295,7 +1313,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	old_page = pfn_to_page(pfn);
 	src_page = old_page;
 
-	if (unlikely(vma->vm_flags & VM_UNPAGED)) {
+	if (unlikely(vma->vm_flags & VM_UNPAGED))
+		if (!page_is_anon(old_page, vma, address)) {
 			old_page = NULL;
 			goto gotten;
 		}
mm/rmap.c  +5 −2
@@ -225,7 +225,9 @@ vma_address(struct page *page, struct vm_area_struct *vma)

 /*
  * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used by unuse_process, on anon pages.
+ * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking
+ * care that an mmap of /dev/mem might window free and foreign pages.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -234,7 +236,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		    (void *)page->mapping - PAGE_MAPPING_ANON)
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
-		if (vma->vm_file->f_mapping != page->mapping)
+		if (!vma->vm_file ||
+		    vma->vm_file->f_mapping != page->mapping)
 			return -EFAULT;
 	} else
 		return -EFAULT;