
Commit e655fb29 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: introduce do_read_fault()



Introduce do_read_fault().  The function does what do_fault() does for
read page faults.

Unlike do_fault(), do_read_fault() is pretty clean and straightforward.
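
For context, do_read_fault() serves the most common fault of all: the first read access to a page of a file-backed mapping. A minimal userspace sketch that exercises this path on a kernel carrying this patch (the file name is arbitrary; any non-empty regular file will do):

/* read-fault-demo.c: the first read of a private file mapping takes the
 * do_linear_fault() -> do_read_fault() path on a kernel with this patch.
 * Build: cc -o read-fault-demo read-fault-demo.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any non-empty regular file */
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Map one page read-only; no PTEs are populated yet. */
	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* This load faults with FAULT_FLAG_WRITE clear, so the kernel
	 * dispatches to do_read_fault() rather than the write paths. */
	printf("first byte: %c\n", p[0]);

	munmap(p, 4096);
	close(fd);
	return EXIT_SUCCESS;
}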

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7eae74af
+43 −0
@@ -3317,6 +3317,43 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
	return ret;
}

static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	struct page *fault_page;
	spinlock_t *ptl;
	pte_t entry, *pte;
	int ret;

	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*pte, orig_pte))) {
		pte_unmap_unlock(pte, ptl);
		unlock_page(fault_page);
		page_cache_release(fault_page);
		return ret;
	}

	flush_icache_page(vma, fault_page);
	entry = mk_pte(fault_page, vma->vm_page_prot);
	if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
		pte_mksoft_dirty(entry);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(fault_page);
	set_pte_at(mm, address, pte, entry);

	/* no need to invalidate: a not-present page won't be cached */
	update_mmu_cache(vma, address, pte);
	pte_unmap_unlock(pte, ptl);
	unlock_page(fault_page);

	return ret;
}

/*
 * do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
@@ -3510,6 +3547,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	if (!(flags & FAULT_FLAG_WRITE))
		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
				orig_pte);
	return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

@@ -3542,6 +3582,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	}

	pgoff = pte_to_pgoff(orig_pte);
	if (!(flags & FAULT_FLAG_WRITE))
		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
				orig_pte);
	return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
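
A note on the pte_same() recheck in do_read_fault() above: __do_fault() runs without the page-table lock held and may sleep (for example, waiting for the page to be read in from disk), so by the time the lock is re-taken another thread may already have serviced the same fault. The recheck detects that and backs out. The same lines from the function, annotated (the comments are explanatory and not part of the commit):

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*pte, orig_pte))) {
		/* Raced with a concurrent fault: a PTE was installed while
		 * we were out of the lock; back out and drop our reference. */
		pte_unmap_unlock(pte, ptl);
		unlock_page(fault_page);
		page_cache_release(fault_page);
		return ret;
	}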