include/linux/page_ext.h +2 −2

@@ -3,6 +3,7 @@

 #include <linux/types.h>
 #include <linux/stacktrace.h>
+#include <linux/stackdepot.h>

 struct pglist_data;
 struct page_ext_operations {

@@ -44,9 +45,8 @@ struct page_ext {
 #ifdef CONFIG_PAGE_OWNER
 	unsigned int order;
 	gfp_t gfp_mask;
-	unsigned int nr_entries;
 	int last_migrate_reason;
-	unsigned long trace_entries[8];
+	depot_stack_handle_t handle;
 #endif
 };

include/linux/page_idle.h +36 −7

@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;

 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }

 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }

 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }

 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }

 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }

 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */

lib/Kconfig.debug +1 −0

@@ -244,6 +244,7 @@ config PAGE_OWNER
 	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
 	select DEBUG_FS
 	select STACKTRACE
+	select STACKDEPOT
 	select PAGE_EXTENSION
 	help
 	  This keeps track of what call chain is the owner of a page, may

mm/page_alloc.c +6 −0

@@ -584,6 +584,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;

 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

 	INIT_LIST_HEAD(&page->lru);

@@ -601,6 +604,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;

 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

 	set_page_private(page, 0);
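Two patterns recur in the hunks above and carry through the rest of the series. First, struct page_ext sheds its fixed per-page trace buffer: on a 64-bit build, nr_entries (4 bytes) plus trace_entries[8] (64 bytes) are replaced by a single 4-byte depot_stack_handle_t, with identical stacks stored once in the shared depot rather than once per page. Second, lookup_page_ext() can return NULL, for instance when the extension array's boot-time allocation failed, so every call site now bails out instead of dereferencing the result. A minimal sketch of that defensive pattern (my_page_flag_test() is a hypothetical name, not from the patch):

/*
 * Illustrative only: my_page_flag_test() is a hypothetical wrapper
 * showing the defensive pattern this series applies at every
 * lookup_page_ext() call site.
 */
static inline bool my_page_flag_test(struct page *page, int bit)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	/* No extension storage: report "flag clear" rather than oops. */
	if (unlikely(!page_ext))
		return false;

	return test_bit(bit, &page_ext->flags);
}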
mm/page_owner.c +151 −19

@@ -7,12 +7,23 @@
 #include <linux/page_owner.h>
 #include <linux/jump_label.h>
 #include <linux/migrate.h>
+#include <linux/stackdepot.h>
+
 #include "internal.h"

+/*
+ * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
+ * to use off stack temporal storage
+ */
+#define PAGE_OWNER_STACK_DEPTH (16)
+
 static bool page_owner_disabled =
 	!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);

+static depot_stack_handle_t dummy_handle;
+static depot_stack_handle_t failure_handle;
+
 static void init_early_allocated_pages(void);

 static int early_page_owner_param(char *buf)

@@ -38,11 +49,41 @@ static bool need_page_owner(void)
 	return true;
 }

+static noinline void register_dummy_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace dummy;
+
+	dummy.nr_entries = 0;
+	dummy.max_entries = ARRAY_SIZE(entries);
+	dummy.entries = &entries[0];
+	dummy.skip = 0;
+
+	save_stack_trace(&dummy);
+	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+}
+
+static noinline void register_failure_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace failure;
+
+	failure.nr_entries = 0;
+	failure.max_entries = ARRAY_SIZE(entries);
+	failure.entries = &entries[0];
+	failure.skip = 0;
+
+	save_stack_trace(&failure);
+	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+}
+
 static void init_page_owner(void)
 {
 	if (page_owner_disabled)
 		return;

+	register_dummy_stack();
+	register_failure_stack();
 	static_branch_enable(&page_owner_inited);
 	init_early_allocated_pages();
 }
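The dummy and failure stacks are registered once at init time, while GFP_KERNEL allocations are still easy to satisfy; save_stack() can then always return a valid handle, falling back to failure_handle when the depot is out of memory and dummy_handle when it detects recursion. The depot round trip the rest of the patch builds on looks roughly like the sketch below (demo_save() and demo_print() are illustrative names; the depot_save_stack()/depot_fetch_stack() signatures are those of the stackdepot API this series selects):

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/*
 * Capture the current stack and deduplicate it into the depot.
 * depot_save_stack() returns 0 if it could not allocate storage.
 */
static depot_stack_handle_t demo_save(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
		.skip = 0,
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);
}

/*
 * Rebuild and print a stack from its 4-byte handle.  Note that
 * depot_fetch_stack() points the trace at the depot's own storage,
 * so no local entries buffer is needed on the fetch side.
 */
static void demo_print(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);
}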
@@ -59,25 +100,72 @@ void __reset_page_owner(struct page *page, unsigned int order)

 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }

-void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+static inline bool check_recursive_alloc(struct stack_trace *trace,
+					unsigned long ip)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	int i, count;
+
+	if (!trace->nr_entries)
+		return false;
+
+	for (i = 0, count = 0; i < trace->nr_entries; i++) {
+		if (trace->entries[i] == ip && ++count == 2)
+			return true;
+	}
+
+	return false;
+}
+
+static noinline depot_stack_handle_t save_stack(gfp_t flags)
+{
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
 		.nr_entries = 0,
-		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
-		.entries = &page_ext->trace_entries[0],
-		.skip = 3,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};
+	depot_stack_handle_t handle;

 	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	/*
+	 * We need to check recursion here because our request to stackdepot
+	 * could trigger memory allocation to save new entry. New memory
+	 * allocation would reach here and call depot_save_stack() again
+	 * if we don't catch it. There is still not enough memory in stackdepot
+	 * so it would try to allocate memory again and loop forever.
+	 */
+	if (check_recursive_alloc(&trace, _RET_IP_))
+		return dummy_handle;
+
+	handle = depot_save_stack(&trace, flags);
+	if (!handle)
+		handle = failure_handle;
+
+	return handle;
+}
+
+noinline void __set_page_owner(struct page *page, unsigned int order,
+					gfp_t gfp_mask)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;

+	page_ext->handle = save_stack(gfp_mask);
 	page_ext->order = order;
 	page_ext->gfp_mask = gfp_mask;
-	page_ext->nr_entries = trace.nr_entries;
 	page_ext->last_migrate_reason = -1;

 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);

@@ -86,6 +174,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;

 	page_ext->last_migrate_reason = reason;
 }

@@ -94,6 +184,12 @@ void __split_page_owner(struct page *page, unsigned int order)
 {
 	int i;
 	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns if no valid gfp
+		 * So return here too.
+		 */
+		return;

 	page_ext->order = 0;
 	for (i = 1; i < (1 << order); i++)

@@ -104,15 +200,14 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 {
 	struct page_ext *old_ext = lookup_page_ext(oldpage);
 	struct page_ext *new_ext = lookup_page_ext(newpage);
-	int i;
+
+	if (unlikely(!old_ext || !new_ext))
+		return;

 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
-	new_ext->nr_entries = old_ext->nr_entries;
-
-	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
-		new_ext->trace_entries[i] = old_ext->trace_entries[i];
+	new_ext->handle = old_ext->handle;

 	/*
 	 * We don't clear the bit on the oldpage as it's going to be freed
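The recursion guard deserves a note: depot_save_stack() may itself allocate a page for depot storage, and that allocation re-enters __set_page_owner() and save_stack(). check_recursive_alloc() breaks the loop by scanning the freshly captured trace for the caller's own return address: one occurrence is the normal call, a second means we are already inside such a re-entry. A worked illustration (the addresses are made up, and the two-hit test is restated locally because the original helper is static to mm/page_owner.c):

/* The two-hit test, restated for illustration. */
static bool demo_seen_twice(const unsigned long *entries, int n,
			    unsigned long ip)
{
	int i, count = 0;

	for (i = 0; i < n; i++) {
		if (entries[i] == ip && ++count == 2)
			return true;
	}
	return false;
}

static void demo(void)
{
	unsigned long ip = 0xffffffffa0001000UL;	/* pretend _RET_IP_ */
	/*
	 * save_stack()'s return address appears twice: the depot
	 * allocated memory and page_owner re-entered itself.
	 */
	unsigned long trace[] = { 0xa1, ip, 0xb2, 0xc3, ip };

	if (demo_seen_twice(trace, ARRAY_SIZE(trace), ip))
		pr_info("recursive allocation detected\n");	/* taken */
}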
@@ -128,14 +223,18 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)

 static ssize_t
 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
-		struct page *page, struct page_ext *page_ext)
+		struct page *page, struct page_ext *page_ext,
+		depot_stack_handle_t handle)
 {
 	int ret;
 	int pageblock_mt, page_mt;
 	char *kbuf;
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
-		.nr_entries = page_ext->nr_entries,
-		.entries = &page_ext->trace_entries[0],
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};

 	kbuf = kmalloc(count, GFP_KERNEL);

@@ -164,6 +263,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 	if (ret >= count)
 		goto err;

+	depot_fetch_stack(handle, &trace);
 	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
 	if (ret >= count)
 		goto err;

@@ -194,18 +294,36 @@ err:
 void __dump_page_owner(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
-		.nr_entries = page_ext->nr_entries,
-		.entries = &page_ext->trace_entries[0],
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};
-	gfp_t gfp_mask = page_ext->gfp_mask;
-	int mt = gfpflags_to_migratetype(gfp_mask);
+	depot_stack_handle_t handle;
+	gfp_t gfp_mask;
+	int mt;
+
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+	gfp_mask = page_ext->gfp_mask;
+	mt = gfpflags_to_migratetype(gfp_mask);

 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
 	}

+	handle = READ_ONCE(page_ext->handle);
+	if (!handle) {
+		pr_alert("page_owner info is not active (free page?)\n");
+		return;
+	}
+
+	depot_fetch_stack(handle, &trace);
 	pr_alert("page allocated via order %u, migratetype %s, "
 		"gfp_mask %#x(%pGg)\n", page_ext->order,
 		migratetype_names[mt], gfp_mask, &gfp_mask);

@@ -222,6 +340,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 	unsigned long pfn;
 	struct page *page;
 	struct page_ext *page_ext;
+	depot_stack_handle_t handle;

 	if (!static_branch_unlikely(&page_owner_inited))
 		return -EINVAL;

@@ -260,6 +379,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}

 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;

 		/*
 		 * Some pages could be missed by concurrent allocation or free,

@@ -268,10 +389,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;

+		/*
+		 * Access to page_ext->handle isn't synchronous so we should
+		 * be careful to access it.
+		 */
+		handle = READ_ONCE(page_ext->handle);
+		if (!handle)
+			continue;
+
 		/* Record the next PFN to read in the file offset */
 		*ppos = (pfn - min_low_pfn) + 1;

-		return print_page_owner(buf, count, pfn, page, page_ext);
+		return print_page_owner(buf, count, pfn, page, page_ext,
+				handle);
 	}

 	return 0;

@@ -326,6 +456,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 				continue;

 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;

 			/* Maybe overraping zone */
 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
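On the read side nothing serializes the debugfs file against concurrent allocation and free, which is why both __dump_page_owner() and read_page_owner() snapshot page_ext->handle exactly once with READ_ONCE() and treat 0 as "not recorded yet". Since save_stack() never returns 0 (it falls back to a sentinel handle instead), a zero handle reliably identifies a page whose owner info has not been published. A sketch of that hand-off (record_stack() and report_stack() are hypothetical names, not taken from the patch):

/*
 * Illustrative publish/consume pair for the lock-free hand-off the
 * reader relies on.
 */
static void record_stack(struct page_ext *page_ext, gfp_t gfp_mask)
{
	/*
	 * save_stack() never returns 0: it falls back to dummy_handle
	 * or failure_handle, so 0 reliably means "nothing recorded".
	 */
	page_ext->handle = save_stack(gfp_mask);
}

static bool report_stack(struct page_ext *page_ext,
			 struct stack_trace *trace)
{
	depot_stack_handle_t handle = READ_ONCE(page_ext->handle);

	if (!handle)
		return false;	/* raced with an unrecorded page */

	depot_fetch_stack(handle, trace);
	return true;
}

As before, the data is consumed by booting with page_owner=on and reading /sys/kernel/debug/page_owner; with this series each record's stack is rebuilt from the depot at read time instead of being copied out of struct page_ext.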