Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0ebff32c authored by Xishi Qiu, committed by Linus Torvalds
Browse files

memory-failure: fix an error of mce_bad_pages statistics



When doing

    $ echo paddr > /sys/devices/system/memory/soft_offline_page

to offline a *free* page, the value of mce_bad_pages will be incremented,
and the page is set with the HWPoison flag, but it is still managed by the
page buddy allocator.

   $ cat /proc/meminfo | grep HardwareCorrupted

shows the value.

If we offline the same page again, the value of mce_bad_pages will be
incremented *again*, which means the value is now incorrect.  Assume the
page is still free during this short window.

  soft_offline_page()
    get_any_page()
      "else if (is_free_buddy_page(p))" branch return 0
        "goto done";
           "atomic_long_add(1, &mce_bad_pages);"

This patch:

Move poisoned page check at the beginning of the function in order to
fix the error.

Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 194159fb
Loading
Loading
Loading
Loading
+17 −21
Original line number Diff line number Diff line
@@ -1419,18 +1419,17 @@ static int soft_offline_huge_page(struct page *page, int flags)
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);

	if (PageHWPoison(hpage)) {
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	if (PageHWPoison(hpage)) {
		put_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}

	/* Keep page count to indicate a given hugepage is isolated. */
	ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false,
				MIGRATE_SYNC);
@@ -1441,12 +1440,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
		return ret;
	}
done:
	if (!PageHWPoison(hpage))
		atomic_long_add(1 << compound_trans_order(hpage),
				&mce_bad_pages);
	/* keep elevated page count for bad page */
	atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
	set_page_hwpoison_huge_page(hpage);
	dequeue_hwpoisoned_huge_page(hpage);
	/* keep elevated page count for bad page */

	return ret;
}

@@ -1488,6 +1486,11 @@ int soft_offline_page(struct page *page, int flags)
		}
	}

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
@@ -1519,19 +1522,11 @@ int soft_offline_page(struct page *page, int flags)
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	lock_page(page);
	wait_on_page_writeback(page);
	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
@@ -1583,8 +1578,9 @@ int soft_offline_page(struct page *page, int flags)
		return ret;

done:
	/* keep elevated page count for bad page */
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */

	return ret;
}