mm/compaction.c  +1 −7

@@ -69,14 +69,8 @@ static void map_pages(struct list_head *list)
 
 		order = page_private(page);
 		nr_pages = 1 << order;
-		set_page_private(page, 0);
-		set_page_refcounted(page);
 
-		kasan_alloc_pages(page, order);
-		arch_alloc_page(page, order);
-		kernel_map_pages(page, nr_pages, 1);
-
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		if (order)
 			split_page(page, order);
 
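After the change, the body of the scan loop in map_pages() collapses to the following (a sketch reconstructed from the hunk above; the comments are editorial annotations, not part of the patch). Note that routing through the hook also picks up kernel_poison_pages(), which this path did not call before:

		order = page_private(page);	/* order was stashed in page->private */
		nr_pages = 1 << order;

		/* all post-allocation setup happens in one place now */
		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);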
mm/internal.h  +2 −0

@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
mm/page_alloc.c  +15 −10

@@ -1443,6 +1443,19 @@ static inline bool free_pages_prezeroed(void)
 		page_poisoning_enabled();
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	kasan_alloc_pages(page, order);
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 							int alloc_flags)
 {
@@ -1454,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 			return 1;
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	kasan_alloc_pages(page, order);
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1469,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
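Taken together, these hunks hoist the duplicated post-allocation sequence into a single helper. For readability, here is the new hook again with editorial comments on what each step does (the comments are annotations added here, not part of the patch):

/*
 * Post-allocation processing shared by every path that hands a page
 * back out of the buddy allocator: normal allocation, compaction's
 * split of isolated free pages, and undoing page isolation.
 */
inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);		/* clear buddy-order scratch space */
	set_page_refcounted(page);		/* refcount 0 -> 1 for the new owner */

	kasan_alloc_pages(page, order);		/* unpoison the range for KASAN */
	arch_alloc_page(page, order);		/* arch-specific allocation hook */
	kernel_map_pages(page, 1 << order, 1);	/* remap for DEBUG_PAGEALLOC */
	kernel_poison_pages(page, 1 << order, 1); /* verify/clear poison pattern */
	set_page_owner(page, order, gfp_flags);	/* record owner for page_owner */
}

One ordering change is visible in the third hunk: set_page_owner() now runs inside the hook, i.e. before the __GFP_ZERO clearing loop and prep_compound_page(), rather than after them.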
mm/page_isolation.c  +1 −5

@@ -126,11 +126,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (isolated_page) {
-		kasan_alloc_pages(page, order);
-		arch_alloc_page(page, order);
-		kernel_map_pages(page, (1 << order), 1);
-		set_page_refcounted(page);
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
 	}
 }
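Worth noting from this hunk: the isolation path previously skipped set_page_private(page, 0) and kernel_poison_pages(), so routing it through post_alloc_hook() makes its preparation steps uniform with the other callers. The tail of unset_migratetype_isolate() after the patch reads as follows (reconstructed from the hunk; the comment is an editorial annotation):

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		/* Prepare the page as if freshly allocated, then release it
		 * back to the buddy allocator as a movable page. */
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(isolated_page, order);
	}
}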