
Commit c7da82b8 authored by Dan Williams, committed by Linus Torvalds

mm: replace pmd_write with pmd_access_permitted in fault + gup paths

The 'access_permitted' helper is used in the gup-fast path and goes
beyond the simple _PAGE_RW check to also:

 - validate that the mapping is writable from a protection keys
   standpoint

 - validate that the pte has _PAGE_USER set, since all fault paths where
   pmd_write() is checked must be referencing user memory (a sketch of the
   combined check follows below).
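For illustration, the check amounts to roughly the following. This is an
editor's self-contained sketch modeled on the x86 __pte_access_permitted()
of this era, not the kernel's exact code: the flag values mirror x86's low
page-table bits, while pkru_allows_pkey_stub() and
pmd_access_permitted_sketch() are simplified stand-in names.

#include <stdbool.h>

/* illustrative x86-style page-table flag bits */
#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_USER	0x004UL

/* stand-in for the protection-keys test; the kernel consults PKRU here */
static bool pkru_allows_pkey_stub(int pkey, bool write)
{
	(void)pkey;
	(void)write;
	return true;
}

/*
 * Beyond _PAGE_RW for writes, also require _PAGE_PRESENT and
 * _PAGE_USER, then consult protection keys.
 */
static bool pmd_access_permitted_sketch(unsigned long pmdval, bool write)
{
	unsigned long need = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		need |= _PAGE_RW;

	if ((pmdval & need) != need)
		return false;

	return pkru_allows_pkey_stub(0, write);
}

int main(void)
{
	/* a kernel mapping (no _PAGE_USER) is refused despite _PAGE_RW,
	 * where a bare pmd_write()-style test would have passed */
	unsigned long kpmd = _PAGE_PRESENT | _PAGE_RW;
	return pmd_access_permitted_sketch(kpmd, true) ? 1 : 0;
}

Callers accordingly convert open-coded tests such as
"write && !pmd_write(pmd)" into "!pmd_access_permitted(pmd, write)", as
the hunks below show.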

Link: http://lkml.kernel.org/r/151043111049.2842.15241454964150083466.stgit@dwillia2-desk3.amr.corp.intel.com

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e7fe7b5c
arch/sparc/mm/gup.c +1 −1
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
	if (!(pmd_val(pmd) & _PAGE_VALID))
		return 0;

-	if (write && !pmd_write(pmd))
+	if (!pmd_access_permitted(pmd, write))
		return 0;

	refs = 0;
fs/dax.c +2 −1
@@ -627,7 +627,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
-			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+			if (!pmd_dirty(*pmdp)
+					&& !pmd_access_permitted(*pmdp, WRITE))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
mm/hmm.c +2 −2
@@ -391,11 +391,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
		if (pmd_protnone(pmd))
			return hmm_vma_walk_clear(start, end, walk);

-		if (write_fault && !pmd_write(pmd))
+		if (!pmd_access_permitted(pmd, write_fault))
			return hmm_vma_walk_clear(start, end, walk);

		pfn = pmd_pfn(pmd) + pte_index(addr);
-		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
+		flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
		for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
			pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
		return 0;
mm/huge_memory.c +2 −2
@@ -877,7 +877,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+	if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1393,7 +1393,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
-	return pmd_write(pmd) ||
+	return pmd_access_permitted(pmd, WRITE) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

mm/memory.c +1 −1
@@ -4046,7 +4046,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
				return do_huge_pmd_numa_page(&vmf, orig_pmd);

-			if (dirty && !pmd_write(orig_pmd)) {
+			if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
				ret = wp_huge_pmd(&vmf, orig_pmd);
				if (!(ret & VM_FAULT_FALLBACK))
					return ret;