Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e5e657e4 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "gup: document and work around "COW can break either way" issue"

parents 0afc5ff0 c1bdf658
Loading
Loading
Loading
Loading
+38 −6
Original line number Diff line number Diff line
@@ -61,13 +61,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
}

/*
 * NOTE(review): this span is a rendered diff hunk, not compilable C — it
 * shows the pre-patch and post-patch versions of the comment and of the
 * return statement side by side (the web capture dropped the +/- markers).
 */
/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
 * but only after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	/* Pre-patch form: needed both FOLL_FORCE and FOLL_COW plus a dirty pte. */
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
	/*
	 * Post-patch form: FOLL_COW + dirty alone suffices — presumably because
	 * the forced COW break (see commit title) sets FOLL_COW without
	 * FOLL_FORCE; confirm against the upstream commit.
	 */
	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
}

/*
 * A (separate) COW fault might break the page the other way and
 * get_user_pages() would return the page from what is now the wrong
 * VM. So we need to force a COW break at GUP time even for reads.
 */
static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
{
	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -712,12 +721,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				if (should_force_cow_break(vma, foll_flags))
					foll_flags |= FOLL_WRITE;
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
						foll_flags, nonblocking);
				continue;
			}
		}

		if (should_force_cow_break(vma, foll_flags))
			foll_flags |= FOLL_WRITE;

retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
@@ -1808,6 +1823,10 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
 * the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 *
 * Careful, careful! COW breaking can go either way, so a non-write
 * access can get ambiguous page results. If you call this function without
 * 'write' set, you'd better be sure that you're ok with that ambiguity.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
@@ -1835,6 +1854,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 *
	 * NOTE! We allow read-only gup_fast() here, but you'd better be
	 * careful about possible COW pages. You'll get _a_ COW page, but
	 * not necessarily the one you intended to get depending on what
	 * COW event happens after this. COW may break the page copy in a
	 * random direction.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
@@ -1880,9 +1905,16 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
					(void __user *)start, len)))
		return -EFAULT;

	/*
	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
	 * because get_user_pages() may need to cause an early COW in
	 * order to avoid confusing the normal COW routines. So only
	 * targets that are already writable are safe to do by just
	 * looking at the page tables.
	 */
	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		gup_pgd_range(addr, end, 1, pages, &nr);
		local_irq_enable();
		ret = nr;
	}
+3 −4
Original line number Diff line number Diff line
@@ -1433,13 +1433,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
}

/*
 * NOTE(review): this span is a rendered diff hunk, not compilable C — it
 * shows the pre-patch and post-patch versions of the comment and of the
 * return statement side by side (the web capture dropped the +/- markers).
 */
/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
 * but only after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	/* Pre-patch form: needed both FOLL_FORCE and FOLL_COW plus a dirty pmd. */
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
	/*
	 * Post-patch form: FOLL_COW + dirty alone suffices — mirrors the pte
	 * variant earlier in this diff; the forced COW break presumably sets
	 * FOLL_COW without FOLL_FORCE. Confirm against the upstream commit.
	 */
	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,