
Commit f55e1014 authored by Linus Torvalds

Revert "mm, thp: Do not make pmd/pud dirty without a reason"



This reverts commit 152e93af.

It was a nice cleanup in theory, but as Nicolai Stange points out, we do
need to make the page dirty for the copy-on-write case even when we
didn't end up making it writable, since the dirty bit is what we use to
check that we've gone through a COW cycle.
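
For reference, the COW-cycle check the message refers to is the FOLL_FORCE/FOLL_COW test in the huge-page GUP path, which allows following an unwritable pmd for a forced write only once the entry is dirty. A sketch of that helper as it appears in mm/huge_memory.c around this release, shown here for context only and not part of this diff:

/*
 * FOLL_FORCE can write through even an unwritable pmd, but only after
 * a COW cycle has run and left the entry dirty -- which is why the
 * fault paths changed below must keep marking the pmd dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}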

Reported-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 43570f03
mm/huge_memory.c  +12 −19
@@ -474,13 +474,10 @@ static int __init setup_transparent_hugepage(char *str)
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma, bool dirty)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
-	if (likely(vma->vm_flags & VM_WRITE)) {
+	if (likely(vma->vm_flags & VM_WRITE))
 		pmd = pmd_mkwrite(pmd);
-		if (dirty)
-			pmd = pmd_mkdirty(pmd);
-	}
 	return pmd;
 }
 
@@ -602,7 +599,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 		}
 
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(entry, vma, true);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		mem_cgroup_commit_charge(page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(page, vma);
@@ -744,8 +741,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (pfn_t_devmap(pfn))
 		entry = pmd_mkdevmap(entry);
 	if (write) {
-		entry = pmd_mkyoung(entry);
-		entry = maybe_pmd_mkwrite(entry, vma, true);
+		entry = pmd_mkyoung(pmd_mkdirty(entry));
+		entry = maybe_pmd_mkwrite(entry, vma);
 	}
 
 	if (pgtable) {
@@ -791,14 +788,10 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma,
-		bool dirty)
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 {
-	if (likely(vma->vm_flags & VM_WRITE)) {
+	if (likely(vma->vm_flags & VM_WRITE))
 		pud = pud_mkwrite(pud);
-		if (dirty)
-			pud = pud_mkdirty(pud);
-	}
 	return pud;
 }
 
@@ -814,8 +807,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 	if (pfn_t_devmap(pfn))
 		entry = pud_mkdevmap(entry);
 	if (write) {
-		entry = pud_mkyoung(entry);
-		entry = maybe_pud_mkwrite(entry, vma, true);
+		entry = pud_mkyoung(pud_mkdirty(entry));
+		entry = maybe_pud_mkwrite(entry, vma);
 	}
 	set_pud_at(mm, addr, pud, entry);
 	update_mmu_cache_pud(vma, addr, pud);
@@ -1286,7 +1279,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 	if (reuse_swap_page(page, NULL)) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
-		entry = maybe_pmd_mkwrite(entry, vma, true);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		ret |= VM_FAULT_WRITE;
@@ -1356,7 +1349,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 	} else {
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(entry, vma, true);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr, true);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
@@ -2935,7 +2928,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	if (pmd_swp_soft_dirty(*pvmw->pmd))
 		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
-		pmde = maybe_pmd_mkwrite(pmde, vma, false);
+		pmde = maybe_pmd_mkwrite(pmde, vma);
 
 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
 	page_add_anon_rmap(new, vma, mmun_start, true);
mm/internal.h  +1 −2
@@ -328,8 +328,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma,
-		bool dirty);
+extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
  * At what user virtual address is page expected in @vma?
mm/khugepaged.c  +1 −1
@@ -1057,7 +1057,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pgtable = pmd_pgtable(_pmd);
 
 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(_pmd, vma, false);
+	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so
mm/memory.c  +1 −1
@@ -3335,7 +3335,7 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
 
 	entry = mk_huge_pmd(page, vma->vm_page_prot);
 	if (write)
-		entry = maybe_pmd_mkwrite(entry, vma, true);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
 	page_add_file_rmap(page, true);
mm/migrate.c  +1 −1
@@ -2068,7 +2068,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	}
 
 	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-	entry = maybe_pmd_mkwrite(entry, vma, false);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	/*
 	 * Clear the old entry under pagetable lock and establish the new PTE.