
Commit a6ffdc07 authored by Xishi Qiu, committed by Linus Torvalds

mm: use is_migrate_highatomic() to simplify the code

Introduce two helpers, is_migrate_highatomic() and is_migrate_highatomic_page().

Simplify the code; no functional changes.

[akpm@linux-foundation.org: use static inlines rather than macros, per mhocko]
Link: http://lkml.kernel.org/r/58B94F15.6060606@huawei.com


Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 322b8afe
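
Before the diffs, a minimal standalone sketch of what the commit introduces: the two helpers, with the kernel's enum, struct page, and get_pageblock_migratetype() stubbed so the snippet compiles on its own. Only the two static inlines match the kernel code; everything else here is an illustrative assumption.

/* Standalone sketch: kernel context stubbed for illustration. */
#include <stdbool.h>
#include <stdio.h>

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_HIGHATOMIC,	/* stub: the kernel defines more types */
};

struct page { int dummy; };	/* stand-in for the kernel's struct page */

/* Stub: the kernel derives this from pageblock flag bits. */
static enum migratetype get_pageblock_migratetype(struct page *page)
{
	(void)page;
	return MIGRATE_HIGHATOMIC;
}

/* The two helpers this commit adds to mm/internal.h. */
static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

int main(void)
{
	struct page page = { 0 };

	/* Call sites now test intent by name instead of comparing enums. */
	printf("%d %d\n", is_migrate_highatomic(MIGRATE_MOVABLE),
	       is_migrate_highatomic_page(&page));	/* prints: 0 1 */
	return 0;
}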
include/linux/mmzone.h (+1 −1)
@@ -35,7 +35,7 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3

-enum {
+enum migratetype {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
mm/internal.h (+10 −0)
@@ -510,4 +510,14 @@ extern const struct trace_print_flags pageflag_names[];
 extern const struct trace_print_flags vmaflag_names[];
 extern const struct trace_print_flags gfpflag_names[];

+static inline bool is_migrate_highatomic(enum migratetype migratetype)
+{
+	return migratetype == MIGRATE_HIGHATOMIC;
+}
+
+static inline bool is_migrate_highatomic_page(struct page *page)
+{
+	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
+}
+
 #endif	/* __MM_INTERNAL_H */
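
Naming the previously anonymous enum in the mmzone.h hunk above is what lets is_migrate_highatomic() take enum migratetype rather than a bare int; the mm/page_alloc.c hunks below then convert each open-coded MIGRATE_HIGHATOMIC comparison to one of these two helpers.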
mm/page_alloc.c (+6 −8)
@@ -2036,8 +2036,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,

 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
-	if (mt != MIGRATE_HIGHATOMIC &&
-			!is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
+	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
+	    && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
@@ -2094,8 +2094,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * from highatomic to ac->migratetype. So we should
 			 * adjust the count once.
 			 */
-			if (get_pageblock_migratetype(page) ==
-							MIGRATE_HIGHATOMIC) {
+			if (is_migrate_highatomic_page(page)) {
 				/*
 				 * It should never happen but changes to
 				 * locking could inadvertently allow a per-cpu
@@ -2152,8 +2151,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)

 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal &&
-			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
+		if (can_steal && !is_migrate_highatomic_page(page))
 			steal_suitable_fallback(zone, page, start_migratetype);

 		/* Remove the page from the freelists */
@@ -2493,7 +2491,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
 	 * Free ISOLATE pages back to the allocator because they are being
-	 * offlined but treat RESERVE as movable pages so we can get those
+	 * offlined but treat HIGHATOMIC as movable pages so we can get those
 	 * areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
@@ -2603,7 +2601,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-				&& mt != MIGRATE_HIGHATOMIC)
+			    && !is_migrate_highatomic(mt))
 				set_pageblock_migratetype(page,
 							  MIGRATE_MOVABLE);
 		}