Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b12c4ad1 authored by Minchan Kim's avatar Minchan Kim Committed by Linus Torvalds
Browse files

mm: page_alloc: use get_freepage_migratetype() instead of page_private()



The page allocator uses set_page_private and page_private for handling
migratetype when it frees page.  Let's replace them with
[set|get]_freepage_migratetype to make it more clear.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d95ea5d1
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -237,6 +237,18 @@ struct inode;
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/*
 * Record the migratetype in page->private.  The stored value is
 * meaningful only while the page is on the free path or sitting on a
 * buddy free_list; once the page is allocated, page->private is reused
 * for other purposes.
 */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	set_page_private(page, migratetype);
}

/*
 * Read back the migratetype stored in page->private by
 * set_freepage_migratetype().  The value is meaningful only while the
 * page is on the free path or sitting on a buddy free_list; once the
 * page is allocated, page->private is reused for other purposes.
 */
static inline int get_freepage_migratetype(struct page *page)
{
	return page_private(page);
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
+3 −3
Original line number Diff line number Diff line
@@ -674,7 +674,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = page_private(page);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
@@ -1143,7 +1143,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
				mt = migratetype;
		}
		set_page_private(page, mt);
		set_freepage_migratetype(page, mt);
		list = &page->lru;
		if (is_migrate_cma(mt))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
@@ -1313,7 +1313,7 @@ void free_hot_cold_page(struct page *page, int cold)
		return;

	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
+1 −1
Original line number Diff line number Diff line
@@ -203,7 +203,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;