
Commit e180377f authored by Kirill A. Shutemov, committed by Linus Torvalds

thp: change split_huge_page_pmd() interface



Pass vma instead of mm and add address parameter.

In most cases we already have the vma on the stack. We provide
split_huge_page_pmd_mm() for the few cases where we have the mm but not
the vma.

This change is preparation for the huge zero pmd splitting implementation.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cad7f613
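
In caller terms the change looks like this (a summary distilled from the hunks below, with illustrative variable names; it is not code from the commit itself):

	/* Before: only the mm was passed down. */
	split_huge_page_pmd(mm, pmd);

	/* After: the vma and the address identify the mapping being split. */
	split_huge_page_pmd(vma, address, pmd);

	/* Callers that hold only an mm use the new wrapper, which looks
	 * the vma up with find_vma() internally. */
	split_huge_page_pmd_mm(mm, address, pmd);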
Documentation/vm/transhuge.txt  +2 −2
@@ -276,7 +276,7 @@ unaffected. libhugetlbfs will also work fine as usual.
== Graceful fallback ==

Code walking pagetables but unaware about huge pmds can simply call
-split_huge_page_pmd(mm, pmd) where the pmd is the one returned by
+split_huge_page_pmd(vma, addr, pmd) where the pmd is the one returned by
pmd_offset. It's trivial to make the code transparent hugepage aware
by just grepping for "pmd_offset" and adding split_huge_page_pmd where
missing after pmd_offset returns the pmd. Thanks to the graceful
@@ -299,7 +299,7 @@ diff --git a/mm/mremap.c b/mm/mremap.c
		return NULL;

	pmd = pmd_offset(pud, addr);
-+	split_huge_page_pmd(mm, pmd);
++	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

arch/x86/kernel/vm86_32.c  +1 −1
@@ -182,7 +182,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
-	split_huge_page_pmd(mm, pmd);
+	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
fs/proc/task_mmu.c  +1 −1
@@ -643,7 +643,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
	spinlock_t *ptl;
	struct page *page;

-	split_huge_page_pmd(walk->mm, pmd);
+	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

include/linux/huge_mm.h  +10 −4
@@ -95,12 +95,14 @@ extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
-extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
-#define split_huge_page_pmd(__mm, __pmd)				\
+extern void __split_huge_page_pmd(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd);
+#define split_huge_page_pmd(__vma, __address, __pmd)			\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
-			__split_huge_page_pmd(__mm, ____pmd);		\
+			__split_huge_page_pmd(__vma, __address,		\
+					____pmd);			\
	}  while (0)
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
@@ -110,6 +112,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
+extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd);
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
@@ -177,10 +181,12 @@ static inline int split_huge_page(struct page *page)
{
	return 0;
}
-#define split_huge_page_pmd(__mm, __pmd)	\
+#define split_huge_page_pmd(__vma, __address, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
+#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
+	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
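
For reference, a call such as split_huge_page_pmd(vma, addr, pmd) under CONFIG_TRANSPARENT_HUGEPAGE expands to roughly the following (hand-expanded here for clarity, not output from the commit), so the pmd_trans_huge() test stays inline and the out-of-line __split_huge_page_pmd() is reached only for a genuinely huge pmd:

	do {
		pmd_t *____pmd = (pmd);
		if (unlikely(pmd_trans_huge(*____pmd)))
			__split_huge_page_pmd(vma, addr, ____pmd);
	} while (0);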
mm/huge_memory.c  +17 −2
@@ -2475,9 +2475,14 @@ static int khugepaged(void *none)
	return 0;
}

-void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd)
{
	struct page *page;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct mm_struct *mm = vma->vm_mm;
+
+	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2495,6 +2500,16 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
	BUG_ON(pmd_trans_huge(*pmd));
}

+void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(mm, address);
+	BUG_ON(vma == NULL);
+	split_huge_page_pmd(vma, address, pmd);
+}
+
static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
@@ -2509,7 +2524,7 @@ static void split_huge_page_address(struct mm_struct *mm,
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
-	split_huge_page_pmd(mm, pmd);
+	split_huge_page_pmd_mm(mm, address, pmd);
}

void __vma_adjust_trans_huge(struct vm_area_struct *vma,