Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 66c64223 authored by Joonsoo Kim, committed by Linus Torvalds
Browse files

mm/compaction: split freepages without holding the zone lock

We don't need to split freepages while holding the zone lock.  Doing so
causes more contention on the zone lock, which is not desirable.

[rientjes@google.com: if __isolate_free_page() fails, avoid adding to freelist so we don't call map_pages() with it]
  Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1606211447001.43430@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/1464230275-25791-1-git-send-email-iamjoonsoo.kim@lge.com


Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3b1d9ca6
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -537,7 +537,6 @@ void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
+33 −14
Original line number Diff line number Diff line
@@ -64,15 +64,33 @@ static unsigned long release_freepages(struct list_head *freelist)

/*
 * NOTE(review): this span is a flattened diff rendering, not valid C — it
 * interleaves the OLD and NEW bodies of map_pages() with the +/- markers
 * stripped (note the duplicate declarations of 'page' and the two loop
 * headers, list_for_each_entry_safe vs list_for_each_entry).  The new
 * version detaches each page, maps/splits it by its recorded order, and
 * collects the order-0 pages on tmp_list before splicing back.  The text
 * below is reproduced verbatim from the page capture.
 */
static void map_pages(struct list_head *list)
{
	struct page *page;
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
		order = page_private(page);
		nr_pages = 1 << order;
		set_page_private(page, 0);
		set_page_refcounted(page);

		arch_alloc_page(page, order);
		kernel_map_pages(page, nr_pages, 1);
		kasan_alloc_pages(page, order);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
@@ -405,12 +423,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		int isolated;
		struct page *page = cursor;

		/*
@@ -476,17 +495,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
@@ -605,7 +624,7 @@ isolate_freepages_range(struct compact_control *cc,
		 */
	}

	/* split_free_page does not map the pages */
	/* __isolate_free_page() does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
@@ -1102,7 +1121,7 @@ static void isolate_freepages(struct compact_control *cc)
		}
	}

	/* split_free_page does not map the pages */
	/* __isolate_free_page() does not map the pages */
	map_pages(freelist);

	/*
+0 −27
Original line number Diff line number Diff line
@@ -2526,33 +2526,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
	return 1UL << order;
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	/* Buddy order of this free page, read before it is isolated. */
	order = page_order(page);

	/* Returns 1UL << order on success, 0 on failure. */
	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

/*
 * Update NUMA hit/miss statistics
 *