Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d6b4382b authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm/page_alloc: introduce post allocation processing on page allocator"

parents 329e421a e448e4e5
Loading
Loading
Loading
Loading
+1 −7
Original line number Diff line number Diff line
@@ -69,14 +69,8 @@ static void map_pages(struct list_head *list)

		order = page_private(page);
		nr_pages = 1 << order;
		set_page_private(page, 0);
		set_page_refcounted(page);

		kasan_alloc_pages(page, order);
		arch_alloc_page(page, order);
		kernel_map_pages(page, nr_pages, 1);

		set_page_owner(page, order, __GFP_MOVABLE);
		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

+2 −0
Original line number Diff line number Diff line
@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+15 −10
Original line number Diff line number Diff line
@@ -1443,6 +1443,19 @@ static inline bool free_pages_prezeroed(void)
		page_poisoning_enabled();
}

/*
 * Common post-allocation processing for a page leaving the buddy
 * allocator.  Factors out the bookkeeping previously open-coded in
 * prep_new_page() and the compaction/isolation paths: it clears the
 * page's private field, sets the initial reference count, and re-arms
 * the per-page debug machinery (KASAN, the arch hook, kernel page
 * mapping and poisoning) for all 1 << order pages before recording
 * the allocation in page_owner with the caller-supplied gfp_flags.
 *
 * NOTE(review): callers pass a block of 2^order contiguous pages with
 * `page` as the head page — confirm against the call sites outside
 * this view (prep_new_page, compaction's map_pages,
 * unset_migratetype_isolate).
 */
inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	kasan_alloc_pages(page, order);
	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	set_page_owner(page, order, gfp_flags);
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                               int alloc_flags)
{
@@ -1454,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	kasan_alloc_pages(page, order);
	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
@@ -1469,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	set_page_owner(page, order, gfp_flags);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
+1 −5
Original line number Diff line number Diff line
@@ -126,11 +126,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		kasan_alloc_pages(page, order);
		arch_alloc_page(page, order);
		kernel_map_pages(page, (1 << order), 1);
		set_page_refcounted(page);
		set_page_owner(page, order, __GFP_MOVABLE);
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(isolated_page, order);
	}
}