Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83358ece authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds
Browse files

mm/page_owner: initialize page owner without holding the zone lock

It's not necessary to initialize page_owner while holding the zone lock.
Doing so would cause more contention on the zone lock, although it's not
a big problem since it is just a debug feature.  But it is better than
before, so do it.  This is also a preparation step for using stackdepot
in the page owner feature.  Stackdepot allocates new pages when there is
no reserved space, and holding the zone lock in this case would cause
deadlock.

Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com


Signed-off-by: default avatarJoonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 66c64223
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
@@ -79,6 +80,8 @@ static void map_pages(struct list_head *list)
		arch_alloc_page(page, order);
		kernel_map_pages(page, nr_pages, 1);
		kasan_alloc_pages(page, order);

		set_page_owner(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

+0 −2
Original line number Diff line number Diff line
@@ -2509,8 +2509,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	set_page_owner(page, order, __GFP_MOVABLE);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
+6 −3
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
@@ -108,8 +109,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
@@ -128,9 +127,13 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
	if (isolated_page) {
		kernel_map_pages(page, (1 << order), 1);
		set_page_refcounted(page);
		set_page_owner(page, order, __GFP_MOVABLE);
		__free_pages(isolated_page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)