Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da146769 authored by Kirill A. Shutemov, committed by Linus Torvalds
Browse files

thp: fix zap_huge_pmd() for DAX



The original DAX code assumed that pgtable_t was a pointer, which isn't
true on all architectures.  Restructure the code to not rely on that
assumption.

[willy@linux.intel.com: further fixes integrated into this patch]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5b701b84
Loading
Loading
Loading
Loading
+31 −40
Original line number Diff line number Diff line
@@ -1456,12 +1456,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/*
 * Tear down a huge PMD mapping at @addr.
 *
 * Returns 1 if a huge PMD was present and zapped, 0 if the PMD was not
 * huge (caller should fall back to the normal PTE path).
 *
 * NOTE(review): this span was a diff rendering with the +/- markers
 * stripped, interleaving pre- and post-patch lines; reconstructed here as
 * the post-commit version ("thp: fix zap_huge_pmd() for DAX"), which no
 * longer assumes pgtable_t is a pointer on all architectures.
 */
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	/* Not a huge PMD (or raced with a split): nothing for us to do. */
	if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
			tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_dax(vma)) {
		/* DAX mappings deposit no page table, so withdraw nothing. */
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			put_huge_zero_page();
	} else if (is_huge_zero_pmd(orig_pmd)) {
		/* Huge zero page: free the deposited table, drop the ref. */
		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
		atomic_long_dec(&tlb->mm->nr_ptes);
		spin_unlock(ptl);
		put_huge_zero_page();
	} else {
		/* Regular anonymous THP: unmap, adjust counters, free. */
		struct page *page = pmd_page(orig_pmd);
		page_remove_rmap(page);
		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(!PageHead(page), page);
		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
		atomic_long_dec(&tlb->mm->nr_ptes);
		spin_unlock(ptl);
		tlb_remove_page(tlb, page);
	}
	return 1;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,