
Commit fd6a03ed authored by Naoya Horiguchi, committed by Andi Kleen

HWPOISON, hugetlb: detect hwpoison in hugetlb code



This patch adds the ability to block access to a hwpoisoned hugepage
and to block unmapping for it.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent 93f70f90
mm/hugetlb.c  +40 −0
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -2149,6 +2151,19 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
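
For context, is_hugetlb_entry_hwpoisoned() recognizes entries planted by
the memory error handler when it unmaps a poisoned page. A simplified
excerpt of that counterpart in try_to_unmap_one() (mm/rmap.c, not part of
this patch):

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		/* replace the pte with a hwpoison swap entry */
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	}

Since such a pte is neither none nor present, the new helper rules out
huge_pte_none() and pte_present() before decoding the swap entry.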
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * HWPoisoned hugepage is already unmapped and dropped reference
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -2490,6 +2511,18 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_dup_rmap(page);
 	}
 
+	/*
+	 * Since memory error handler replaces pte into hwpoison swap entry
+	 * at the time of error handling, a process which reserved but not have
+	 * the mapping to the error hugepage does not have hwpoison swap entry.
+	 * So we need to block accesses from such a process by checking
+	 * PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
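
Both VM_FAULT_HWPOISON returns added here are turned into a SIGBUS by the
architecture fault handler. Roughly, as a simplified sketch of the x86
do_sigbus() (arch/x86/mm/fault.c) from this era, not part of this patch:

	static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address, unsigned int fault)
	{
		struct task_struct *tsk = current;
		int code = BUS_ADRERR;

		up_read(&tsk->mm->mmap_sem);

	#ifdef CONFIG_MEMORY_FAILURE
		if (fault & VM_FAULT_HWPOISON) {
			printk(KERN_ERR
		"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
				tsk->comm, tsk->pid, address);
			/* "action required": the fault hit the poisoned page */
			code = BUS_MCEERR_AR;
		}
	#endif
		force_sig_info_fault(SIGBUS, code, address, tsk);
	}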
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
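
The effect can be exercised from userspace with madvise(MADV_HWPOISON),
which injects a poison through the same memory_failure() path. A minimal,
hedged test sketch (assumes root, CONFIG_MEMORY_FAILURE, x86 constants,
and 2MB hugepages preallocated via /proc/sys/vm/nr_hugepages; none of
this is part of the patch):

	#include <signal.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#ifndef MADV_HWPOISON
	#define MADV_HWPOISON 100		/* asm-generic/mman-common.h */
	#endif
	#ifndef MAP_HUGETLB
	#define MAP_HUGETLB 0x40000		/* x86 value; assumption */
	#endif

	#define LEN (2UL * 1024 * 1024)		/* assumed hugepage size */

	static void on_sigbus(int sig)
	{
		/* hugetlb_fault() returned VM_FAULT_HWPOISON */
		write(2, "SIGBUS: poisoned hugepage access blocked\n", 41);
		_exit(0);
	}

	int main(void)
	{
		char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		signal(SIGBUS, on_sigbus);
		p[0] = 1;			/* fault the hugepage in */
		if (madvise(p, LEN, MADV_HWPOISON)) {
			perror("madvise(MADV_HWPOISON)");
			return 1;
		}
		p[0] = 1;			/* should now raise SIGBUS */
		fprintf(stderr, "hugepage access was not blocked\n");
		return 1;
	}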