Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ec95f53a authored by KOSAKI Motohiro, committed by Linus Torvalds
Browse files

mm: introduce free_pages_prepare()



free_hot_cold_page() and __free_pages_ok() have very similar freeing
preparation.  Consolidate them.

[akpm@linux-foundation.org: fix busted coding style]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5f53e762
Loading
Loading
Loading
Loading
+21 −19
Original line number Diff line number Diff line
@@ -620,20 +620,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;
	int wasMlocked = __TestClearPageMlocked(page);

	trace_mm_page_free_direct(page, order);
	kmemcheck_free_shadow(page, order);

	for (i = 0 ; i < (1 << order) ; ++i)
		bad += free_pages_check(page + i);
	for (i = 0; i < (1 << order); i++) {
		struct page *pg = page + i;

		if (PageAnon(pg))
			pg->mapping = NULL;
		bad += free_pages_check(pg);
	}
	if (bad)
		return;
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +646,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
@@ -1128,21 +1142,9 @@ void free_hot_cold_page(struct page *page, int cold)
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	trace_mm_page_free_direct(page, 0);
	kmemcheck_free_shadow(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
	if (!free_pages_prepare(page, 0))
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
	}
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);