
Commit 12ebc158 authored by Vineet Gupta

mm,thp: introduce flush_pmd_tlb_range



ARCHes with special requirements for evicting THP backing TLB entries
can implement this.

Even otherwise, it can help optimize TLB flushing in the THP regime:
the stock flush_tlb_range() typically has an optimization to nuke the
entire TLB if the flush span is greater than a certain threshold, which
will likely be true for a single huge page. Thus a single THP flush
would invalidate the entire TLB, which is not desirable.

e.g. see arch/arc: flush_pmd_tlb_range

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20151009100816.GC7873@node


Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent bd5e88ad
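To make the override path concrete, here is a rough sketch of how an architecture could supply its own flush_pmd_tlb_range() instead of the generic fallback this patch adds. It is only an illustration, not the actual arch/arc code: arch_evict_huge_tlb_entry() is a hypothetical stand-in for whatever per-MMU primitive the architecture really provides.

#include <linux/mm.h>

/*
 * Illustrative sketch only -- not the arch/arc implementation.
 * An arch opts out of the generic fallback by defining
 * __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its pgtable headers and
 * providing its own flush_pmd_tlb_range().
 */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	unsigned long addr;

	/*
	 * Evict only the PMD-sized (huge page) TLB entries covering the
	 * range, instead of calling flush_tlb_range(), whose
	 * "span > threshold => nuke whole TLB" heuristic would throw
	 * away every entry for a single THP.
	 *
	 * arch_evict_huge_tlb_entry() is a hypothetical helper, not a
	 * real kernel API.
	 */
	for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
		arch_evict_huge_tlb_entry(vma->vm_mm, addr);
}

With such an override in place, the generic pmd helpers changed below (pmdp_set_access_flags(), pmdp_clear_flush_young(), pmdp_invalidate(), etc.) automatically pick up the cheaper flush, since they now call flush_pmd_tlb_range() instead of flush_tlb_range().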
+1 −1
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
 		 * here). But it is generally safer to never allow
 		 * small and huge TLB entries for the same virtual
 		 * address to be loaded simultaneously. So instead of
-		 * doing "pmd_populate(); flush_tlb_range();" we first
+		 * doing "pmd_populate(); flush_pmd_tlb_range();" we first
 		 * mark the current pmd notpresent (atomically because
 		 * here the pmd_trans_huge and pmd_trans_splitting
 		 * must remain set at all times on the pmd until the
+20 −6
@@ -84,6 +84,20 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pmd_t *pmdp,
@@ -93,7 +107,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	if (changed) {
 		set_pmd_at(vma->vm_mm, address, pmdp, entry);
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
@@ -107,7 +121,7 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	young = pmdp_test_and_clear_young(vma, address, pmdp);
 	if (young)
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return young;
 }
 #endif
@@ -120,7 +134,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(!pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif
@@ -133,7 +147,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 	/* tlb flush only to serialize against gup-fast */
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif

@@ -179,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 {
 	pmd_t entry = *pmdp;
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif

@@ -196,7 +210,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif