Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2530fd59, authored by qctecmdr and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "mm: allow vmas with vm_ops to be speculatively handled"

parents 65290d68 0947fbfe
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -111,7 +111,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
		SWAP_RA_HIT,
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
		SPECULATIVE_PGFAULT,
		SPECULATIVE_PGFAULT_ANON,
		SPECULATIVE_PGFAULT_FILE,
#endif
		NR_VM_EVENT_ITEMS
};
+4 −4
Original line number Diff line number Diff line
@@ -2477,12 +2477,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
	pgoff_t offset = vmf->pgoff;

	/* If we don't want any read-ahead, don't bother */
	if (vmf->vma->vm_flags & VM_RAND_READ)
	if (vmf->vma_flags & VM_RAND_READ)
		return fpin;
	if (!ra->ra_pages)
		return fpin;

	if (vmf->vma->vm_flags & VM_SEQ_READ) {
	if (vmf->vma_flags & VM_SEQ_READ) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
@@ -2526,7 +2526,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
	pgoff_t offset = vmf->pgoff;

	/* If we don't want any read-ahead, don't bother */
	if (vmf->vma->vm_flags & VM_RAND_READ)
	if (vmf->vma_flags & VM_RAND_READ)
		return fpin;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
@@ -2549,7 +2549,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 * vma->vm_mm->mmap_sem must be held on entry (except FAULT_FLAG_SPECULATIVE).
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
+9 −15
Original line number Diff line number Diff line
@@ -4220,6 +4220,7 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
{
	pte_t entry;
	int ret = 0;

	if (unlikely(pmd_none(*vmf->pmd))) {
		/*
@@ -4272,8 +4273,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
	if (!vmf->pte) {
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);
		else if (vmf->flags & FAULT_FLAG_SPECULATIVE)
			return VM_FAULT_RETRY;
		else
			return do_fault(vmf);
	}
@@ -4307,10 +4306,12 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
		 */
		if (vmf->flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
			ret = VM_FAULT_RETRY;
	}
unlock:
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
	return ret;
}

/*
@@ -4454,22 +4455,12 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
		return VM_FAULT_RETRY;
	}

	/*
	 * Can't call vm_ops since we don't know what they would do
	 * with the VMA.
	 * This includes huge pages from hugetlbfs.
	 */
	if (vmf.vma->vm_ops) {
		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
		return VM_FAULT_RETRY;
	}

	/*
	 * __anon_vma_prepare() requires the mmap_sem to be held
	 * because vm_next and vm_prev must be safe. This can't be guaranteed
	 * in the speculative path.
	 */
	if (unlikely(!vmf.vma->anon_vma)) {
	if (unlikely(vma_is_anonymous(vmf.vma) && !vmf.vma->anon_vma)) {
		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
		return VM_FAULT_RETRY;
	}
@@ -4611,7 +4602,10 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
	 * If there is no need to retry, don't return the vma to the caller.
	 */
	if (ret != VM_FAULT_RETRY) {
		count_vm_event(SPECULATIVE_PGFAULT);
		if (vma_is_anonymous(vmf.vma))
			count_vm_event(SPECULATIVE_PGFAULT_ANON);
		else
			count_vm_event(SPECULATIVE_PGFAULT_FILE);
		put_vma(vmf.vma);
		*vma = NULL;
	}
+2 −2
Original line number Diff line number Diff line
@@ -2023,10 +2023,10 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)

	sgp = SGP_CACHE;

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	if ((vmf->vma_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		sgp = SGP_NOHUGE;
	else if (vma->vm_flags & VM_HUGEPAGE)
	else if (vmf->vma_flags & VM_HUGEPAGE)
		sgp = SGP_HUGE;

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+2 −1
Original line number Diff line number Diff line
@@ -1290,7 +1290,8 @@ const char * const vmstat_text[] = {
	"swap_ra_hit",
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	"speculative_pgfault"
	"speculative_pgfault_anon",
	"speculative_pgfault_file",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};