
Commit 3bb97794 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: consolidate code to setup pte

Extract and consolidate the code that sets up the pte from do_read_fault(),
do_cow_fault() and do_shared_fault() into a new helper, do_set_pte().
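
At a glance, all three handlers now delegate pte construction to the one
helper. The signature and call sites below are excerpted from the diff that
follows (every identifier is taken from the diff; nothing here is new code):
the write flag selects the dirty/maybe_mkwrite path versus the soft-dirty
path, and the anon flag selects between anon and file rmap and counter
accounting. Note that, as in the code it replaces, the soft-dirty branch
discards pte_mksoft_dirty()'s return value.

	static void do_set_pte(struct vm_area_struct *vma, unsigned long address,
			struct page *page, pte_t *pte, bool write, bool anon);

	do_set_pte(vma, address, fault_page, pte, false, false);	/* do_read_fault(): read-only file page */
	do_set_pte(vma, address, new_page, pte, true, true);		/* do_cow_fault(): writable private copy (anon) */
	do_set_pte(vma, address, fault_page, pte, true, false);	/* do_shared_fault(): writable shared file page */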

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fb09a464
+30 −36
@@ -3318,13 +3318,37 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+static void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+		struct page *page, pte_t *pte, bool write, bool anon)
+{
+	pte_t entry;
+
+	flush_icache_page(vma, page);
+	entry = mk_pte(page, vma->vm_page_prot);
+	if (write)
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
+		pte_mksoft_dirty(entry);
+	if (anon) {
+		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+		page_add_new_anon_rmap(page, vma, address);
+	} else {
+		inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+		page_add_file_rmap(page);
+	}
+	set_pte_at(vma->vm_mm, address, pte, entry);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache(vma, address, pte);
+}
+
 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int ret;
 
 	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
@@ -3338,20 +3362,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		return ret;
 	}
-
-	flush_icache_page(vma, fault_page);
-	entry = mk_pte(fault_page, vma->vm_page_prot);
-	if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
-		pte_mksoft_dirty(entry);
-	inc_mm_counter_fast(mm, MM_FILEPAGES);
-	page_add_file_rmap(fault_page);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	pte_unmap_unlock(pte, ptl);
 	unlock_page(fault_page);
-
 	return ret;
 }
 
@@ -3361,7 +3374,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *fault_page, *new_page;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
@@ -3390,17 +3403,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		goto uncharge_out;
 	}
-
-	flush_icache_page(vma, new_page);
-	entry = mk_pte(new_page, vma->vm_page_prot);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	inc_mm_counter_fast(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(new_page, vma, address);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
-
+	do_set_pte(vma, address, new_page, pte, true, true);
 	pte_unmap_unlock(pte, ptl);
 	unlock_page(fault_page);
 	page_cache_release(fault_page);
@@ -3418,7 +3421,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *fault_page;
 	struct address_space *mapping;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int dirtied = 0;
 	int ret, tmp;
 
@@ -3447,16 +3450,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		return ret;
 	}
-
-	flush_icache_page(vma, fault_page);
-	entry = mk_pte(fault_page, vma->vm_page_prot);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	inc_mm_counter_fast(mm, MM_FILEPAGES);
-	page_add_file_rmap(fault_page);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))