Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 669adc0e authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: fix misplaced unlock_page in do_wp_page()"

parents a616f540 16a3a08d
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -619,14 +619,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		/*
		 * Using __get_user_pages_fast() with a read-only
		 * access is questionable. A read-only page may be
		 * COW-broken, and then this might end up giving
		 * the wrong side of the COW..
		 *
		 * We may or may not care.
		 */
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
+6 −18
Original line number Diff line number Diff line
@@ -161,12 +161,13 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	/*
	 * Writing through an unwritable pte is permitted only when the
	 * caller explicitly asked to override protection (FOLL_FORCE),
	 * the pte has already been COW-broken during this GUP walk
	 * (FOLL_COW), and the private copy was written to (pte_dirty) —
	 * so we never hand out the wrong side of a pending COW.
	 */
	return pte_write(pte) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

/*
@@ -832,18 +833,12 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				if (should_force_cow_break(vma, foll_flags))
					foll_flags |= FOLL_WRITE;
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						foll_flags, nonblocking);
						gup_flags, nonblocking);
				continue;
			}
		}

		if (should_force_cow_break(vma, foll_flags))
			foll_flags |= FOLL_WRITE;

retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
@@ -2440,17 +2435,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	/*
	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
	 * because get_user_pages() may need to cause an early COW in
	 * order to avoid confusing the normal COW routines. So only
	 * targets that are already writable are safe to do by just
	 * looking at the page tables.
	 */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
	    gup_fast_permitted(start, end)) {
		local_irq_disable();
		gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr);
		gup_pgd_range(addr, end, gup_flags, pages, &nr);
		local_irq_enable();
		ret = nr;
	}
+4 −3
Original line number Diff line number Diff line
@@ -1454,12 +1454,13 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
}

/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	/*
	 * Huge-page analogue of can_follow_write_pte(): allow a forced
	 * write through an unwritable pmd only when FOLL_FORCE was
	 * requested, the pmd was already COW-broken for this GUP walk
	 * (FOLL_COW), and the copy is dirty — otherwise the caller could
	 * observe the wrong side of a pending COW.
	 */
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+1 −1
Original line number Diff line number Diff line
@@ -2771,8 +2771,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
		 * page count reference, and the page is locked,
		 * it's dark out, and we're wearing sunglasses. Hit it.
		 */
		wp_page_reuse(vmf);
		unlock_page(page);
		wp_page_reuse(vmf);
		return VM_FAULT_WRITE;
	} else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {