Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f87e6b8d authored by Suren Baghdasaryan, committed by Gerrit - the friendly Code Review server
Browse files

ANDROID: mm: prevent reads of unstable pmd during speculation



Checks of pmd during speculative page fault handling are racy because
pmd is unprotected and might be modified or cleared. This might cause
use-after-free reads from speculative path, therefore prevent such
checks. At the beginning of speculation the pmd is checked to be valid, and
if it is changed before the page fault is handled, the change will be detected
and the page fault will be retried under mmap_lock protection.

Bug: 257443051
Change-Id: I0cbd3b0b44e8296cf0d6cb298fae48c696580068
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Git-commit: 2bb39b912175c3c087978ae5547e277a8422c601
Git-repo: https://android.googlesource.com/kernel/common/


[quic_c_spathi@quicinc.com: resolve merge conflicts]
Signed-off-by: Srinivasarao Pathipati <quic_c_spathi@quicinc.com>
parent cb68c255
Loading
Loading
Loading
Loading
+13 −12
Original line number Diff line number Diff line
@@ -3427,6 +3427,10 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret;

	/* Do not check unstable pmd, if it's changed will retry later */
	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
		goto skip_pmd_checks;

	/*
	 * Preallocate pte before we take page_lock because this might lead to
	 * deadlocks for memcg reclaim which waits for pages under writeback:
@@ -3449,6 +3453,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
		smp_wmb(); /* See comment in __pte_alloc() */
	}

skip_pmd_checks:
	ret = vma->vm_ops->fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
			    VM_FAULT_DONE_COW)))
@@ -3822,7 +3827,8 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
			start_pgoff + nr_pages - 1);

	if (pmd_none(*vmf->pmd)) {
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
	    pmd_none(*vmf->pmd)) {
		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
		if (!vmf->prealloc_pte)
			goto out;
@@ -4189,16 +4195,11 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
	pte_t entry;
	vm_fault_t ret = 0;

	if (unlikely(pmd_none(*vmf->pmd))) {
		/*
		 * In the case of the speculative page fault handler we abort
		 * the speculative path immediately as the pmd is probably
		 * in the way to be converted in a huge one. We will try
		 * again holding the mmap_sem (which implies that the collapse
		 * operation is done).
		 */
	/* Do not check unstable pmd, if it's changed will retry later */
	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
			return VM_FAULT_RETRY;
		goto skip_pmd_checks;

	if (unlikely(pmd_none(*vmf->pmd))) {
		/*
		 * Leave __pte_alloc() until later: because vm_ops->fault may
		 * want to allocate huge page, and if we expose page table
@@ -4206,8 +4207,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
		 * concurrent faults and from rmap lookups.
		 */
		vmf->pte = NULL;
	} else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		/* See comment in pte_alloc_one_map() */
	} else {
		if (pmd_devmap_trans_unstable(vmf->pmd))
			return 0;
		/*
@@ -4237,6 +4237,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
		}
	}

skip_pmd_checks:
	if (!vmf->pte) {
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);