Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Unverified Commit 440e1675 authored by Alexander Winkowski, committed by basamaryan
Browse files

Revert "BACKPORT: FROMLIST: mm: implement speculative handling in filemap_fault()"



This reverts commit 9debe341.

Change-Id: I3197cb64766cd5e102794d24c946bb88a8e73652
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
parent 873a2dc3
Loading
Loading
Loading
Loading
+1 −44
Original line number Diff line number Diff line
@@ -2474,9 +2474,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * If FAULT_FLAG_SPECULATIVE is set, this function runs with elevated vma
 * refcount and with mmap lock not held.
 * Otherwise, vma->vm_mm->mmap_sem must be held on entry.
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
@@ -2501,47 +2499,6 @@ int filemap_fault(struct vm_fault *vmf)
	struct page *page;
	int ret = 0;

	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
		page = find_get_page(mapping, offset);
		if (unlikely(!page) || unlikely(PageReadahead(page)))
			return VM_FAULT_RETRY;

		if (!trylock_page(page))
			return VM_FAULT_RETRY;

		if (unlikely(compound_head(page)->mapping != mapping))
			goto page_unlock;
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
		if (unlikely(!PageUptodate(page)))
			goto page_unlock;

		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		if (unlikely(offset >= max_off))
			goto page_unlock;

		/*
		 * Update readahead mmap_miss statistic.
		 *
		 * Note that we are not sure if finish_fault() will
		 * manage to complete the transaction. If it fails,
		 * we'll come back to filemap_fault() non-speculative
		 * case which will update mmap_miss a second time.
		 * This is not ideal, we would prefer to guarantee the
		 * update will happen exactly once.
		 */
		if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) {
			unsigned int mmap_miss = READ_ONCE(ra->mmap_miss);
			if (mmap_miss)
				WRITE_ONCE(ra->mmap_miss, --mmap_miss);
		}

		vmf->page = page;
		return VM_FAULT_LOCKED;
page_unlock:
		unlock_page(page);
		return VM_FAULT_RETRY;
	}

	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		return VM_FAULT_SIGBUS;