Commit 17766dde authored by David Rientjes, committed by Linus Torvalds

mm, thp: count thp_fault_fallback anytime thp fault fails



Currently, thp_fault_fallback in vmstat only gets incremented if a
hugepage allocation fails.  If current's memcg hits its limit or the page
fault handler returns an error, it is incorrectly accounted as a
successful thp_fault_alloc.
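Concretely, the pre-patch ordering in do_huge_pmd_anonymous_page() counted the event before the remaining failure points; a simplified sketch of the old flow (not the verbatim kernel code):

	count_vm_event(THP_FAULT_ALLOC);	/* counted as soon as the hugepage is allocated... */
	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
		put_page(page);
		/* ...yet this path falls back to regular pages without
		 * counting THP_FAULT_FALLBACK, so the stats overcount
		 * ALLOC and undercount FALLBACK. */
		return VM_FAULT_FALLBACK;
	}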

Count thp_fault_fallback anytime the page fault handler falls back to
using regular pages and only count thp_fault_alloc when a hugepage has
actually been faulted.
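Both counters are exported through /proc/vmstat, so the corrected accounting can be observed from userspace. A minimal sketch of such a reader (error handling kept short):

	#include <stdio.h>
	#include <string.h>

	/* Print the THP fault counters from /proc/vmstat.  After this
	 * patch, a fault that falls back to regular pages bumps
	 * thp_fault_fallback, and only a successfully faulted hugepage
	 * bumps thp_fault_alloc. */
	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "thp_fault_", 10))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}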

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c0292554
mm/huge_memory.c +7 −3
@@ -820,17 +820,19 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 		mem_cgroup_uncharge_page(page);
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	return 0;
 }

@@ -1143,7 +1145,6 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
-		count_vm_event(THP_FAULT_FALLBACK);
 		if (is_huge_zero_pmd(orig_pmd)) {
 			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
 					address, pmd, orig_pmd, haddr);
@@ -1154,9 +1155,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1164,10 +1165,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret |= VM_FAULT_OOM;
 		goto out;
 	}
 
+	count_vm_event(THP_FAULT_ALLOC);
+
 	if (is_huge_zero_pmd(orig_pmd))
 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
 	else