Commit 8cc3b392 authored by Hugh Dickins, committed by Linus Torvalds

badpage: keep any bad page out of circulation

Until now the bad_page() checkers have special-cased PageReserved, keeping
those pages out of circulation thereafter.  Now extend the special case to
all: we want to keep ANY page with bad state out of circulation - the
"free" page may well be in use by something.

Leave the bad state of those pages untouched, for examination by
debuggers; except for PageBuddy - leaving that set would risk bringing the
page back.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79f4b7bf
mm/page_alloc.c +24 −28

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -231,9 +231,9 @@ static void bad_page(struct page *page)
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
-	set_page_count(page, 0);
-	reset_page_mapcount(page);
-	page->mapping = NULL;
+
+	/* Leave bad fields for debug, except PageBuddy could make trouble */
+	__ClearPageBuddy(page);
 	add_taint(TAINT_BAD_PAGE);
 }
 
@@ -290,25 +290,31 @@ void prep_compound_gigantic_page(struct page *page, unsigned long order)
 }
 #endif
 
-static void destroy_compound_page(struct page *page, unsigned long order)
+static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	int bad = 0;
 
-	if (unlikely(compound_order(page) != order))
+	if (unlikely(compound_order(page) != order) ||
+	    unlikely(!PageHead(page))) {
 		bad_page(page);
+		bad++;
+	}
 
-	if (unlikely(!PageHead(page)))
-			bad_page(page);
 	__ClearPageHead(page);
+
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageTail(p) |
-				(p->first_page != page)))
+		if (unlikely(!PageTail(p) | (p->first_page != page))) {
 			bad_page(page);
+			bad++;
+		}
 		__ClearPageTail(p);
 	}
+
+	return bad;
 }
 
 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
@@ -428,7 +434,8 @@ static inline void __free_one_page(struct page *page,
 	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
-		destroy_compound_page(page, order);
+		if (unlikely(destroy_compound_page(page, order)))
+			return;
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
@@ -465,15 +472,10 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
 		(page_count(page) != 0)  |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
 		bad_page(page);
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not free the page.  But we shall soon need
-	 * to do more, for when the ZERO_PAGE count wraps negative.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
@@ -521,11 +523,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
 	int i;
-	int reserved = 0;
+	int bad = 0;
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		reserved += free_pages_check(page + i);
-	if (reserved)
+		bad += free_pages_check(page + i);
+	if (bad)
 		return;
 
 	if (!PageHighMem(page)) {
@@ -610,17 +612,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
 		(page_count(page) != 0)  |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
 		bad_page(page);
-
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not allocate the page: as a safety net.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	set_page_private(page, 0);
 	set_page_refcounted(page);
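
The pattern the patch converges on - check, report via bad_page(), count, and bail out of the free or alloc path - is easy to see in isolation. Below is a minimal, self-contained C sketch of that control flow; the struct, flag bits and function names are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

#define PG_BUDDY  (1u << 0)	/* stand-in for the kernel's PG_buddy */
#define PG_LOCKED (1u << 1)	/* stand-in for a "must be clear at free" flag */

struct page {
	unsigned int flags;
	int mapcount;
	int count;
};

/* Like the patched bad_page(): report, leave the bad fields in place for
 * debuggers, and clear only the buddy flag so the allocator can never
 * merge or hand out this page again. */
static void bad_page(struct page *page)
{
	printf("Bad page state: flags=%#x mapcount=%d count=%d\n",
	       page->flags, page->mapcount, page->count);
	page->flags &= ~PG_BUDDY;
}

/* Like the patched free_pages_check(): ANY bad state now returns 1,
 * where the old code only refused PageReserved pages. */
static int free_pages_check(struct page *page)
{
	if (page->mapcount | page->count | (page->flags & PG_LOCKED)) {
		bad_page(page);
		return 1;
	}
	return 0;
}

/* Like the patched __free_pages_ok(): if any page of the block is bad,
 * return early and leak the whole block instead of freeing it. */
static void free_pages_ok(struct page *pages, int nr)
{
	int i;
	int bad = 0;

	for (i = 0; i < nr; i++)
		bad += free_pages_check(&pages[i]);
	if (bad)
		return;			/* keep the block out of circulation */

	printf("block of %d pages freed normally\n", nr);
}

int main(void)
{
	struct page good[2] = { { 0, 0, 0 }, { 0, 0, 0 } };
	struct page oops[2] = { { 0, 0, 0 }, { PG_LOCKED, 1, 0 } };

	free_pages_ok(good, 2);		/* freed normally */
	free_pages_ok(oops, 2);		/* reported, then leaked */
	return 0;
}

Built with a plain cc invocation, the second call reports the bad page and returns without "freeing" the block, mirroring how __free_pages_ok() now leaks any block that fails free_pages_check() rather than risk reusing a page something may still hold.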