Loading include/linux/page_ext.h +1 −0 Original line number Diff line number Diff line Loading @@ -19,6 +19,7 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_DEBUG_GUARD, PAGE_EXT_OWNER, PAGE_EXT_PG_FREE, #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, Loading mm/page_owner.c +19 −0 Original line number Diff line number Diff line Loading @@ -28,6 +28,7 @@ struct page_owner { depot_stack_handle_t handle; int pid; u64 ts_nsec; u64 free_ts_nsec; }; static bool page_owner_disabled = Loading Loading @@ -119,12 +120,15 @@ void __reset_page_owner(struct page *page, unsigned int order) { int i; struct page_ext *page_ext; u64 free_ts_nsec = local_clock(); for (i = 0; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); if (unlikely(!page_ext)) continue; get_page_owner(page_ext)->free_ts_nsec = free_ts_nsec; __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); __set_bit(PAGE_EXT_PG_FREE, &page_ext->flags); } } Loading Loading @@ -189,8 +193,10 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext, page_owner->last_migrate_reason = -1; page_owner->pid = current->pid; page_owner->ts_nsec = local_clock(); page_owner->free_ts_nsec = 0; __set_bit(PAGE_EXT_OWNER, &page_ext->flags); __clear_bit(PAGE_EXT_PG_FREE, &page_ext->flags); } noinline void __set_page_owner(struct page *page, unsigned int order, Loading @@ -198,12 +204,24 @@ noinline void __set_page_owner(struct page *page, unsigned int order, { struct page_ext *page_ext = lookup_page_ext(page); depot_stack_handle_t handle; int i; if (unlikely(!page_ext)) return; handle = save_stack(gfp_mask); __set_page_owner_handle(page_ext, handle, order, gfp_mask); /* set page owner for tail pages if any */ for (i = 1; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); if (unlikely(!page_ext)) continue; /* mark tail pages as order 0 individual pages */ __set_page_owner_handle(page_ext, handle, 0, gfp_mask); } } void 
__set_page_owner_migrate_reason(struct page *page, int reason) Loading Loading @@ -251,6 +269,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) new_page_owner->handle = old_page_owner->handle; new_page_owner->pid = old_page_owner->pid; new_page_owner->ts_nsec = old_page_owner->ts_nsec; new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec; /* * We don't clear the bit on the oldpage as it's going to be freed Loading Loading
include/linux/page_ext.h +1 −0 Original line number Diff line number Diff line Loading @@ -19,6 +19,7 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_DEBUG_GUARD, PAGE_EXT_OWNER, PAGE_EXT_PG_FREE, #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, Loading
mm/page_owner.c +19 −0 Original line number Diff line number Diff line Loading @@ -28,6 +28,7 @@ struct page_owner { depot_stack_handle_t handle; int pid; u64 ts_nsec; u64 free_ts_nsec; }; static bool page_owner_disabled = Loading Loading @@ -119,12 +120,15 @@ void __reset_page_owner(struct page *page, unsigned int order) { int i; struct page_ext *page_ext; u64 free_ts_nsec = local_clock(); for (i = 0; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); if (unlikely(!page_ext)) continue; get_page_owner(page_ext)->free_ts_nsec = free_ts_nsec; __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); __set_bit(PAGE_EXT_PG_FREE, &page_ext->flags); } } Loading Loading @@ -189,8 +193,10 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext, page_owner->last_migrate_reason = -1; page_owner->pid = current->pid; page_owner->ts_nsec = local_clock(); page_owner->free_ts_nsec = 0; __set_bit(PAGE_EXT_OWNER, &page_ext->flags); __clear_bit(PAGE_EXT_PG_FREE, &page_ext->flags); } noinline void __set_page_owner(struct page *page, unsigned int order, Loading @@ -198,12 +204,24 @@ noinline void __set_page_owner(struct page *page, unsigned int order, { struct page_ext *page_ext = lookup_page_ext(page); depot_stack_handle_t handle; int i; if (unlikely(!page_ext)) return; handle = save_stack(gfp_mask); __set_page_owner_handle(page_ext, handle, order, gfp_mask); /* set page owner for tail pages if any */ for (i = 1; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); if (unlikely(!page_ext)) continue; /* mark tail pages as order 0 individual pages */ __set_page_owner_handle(page_ext, handle, 0, gfp_mask); } } void __set_page_owner_migrate_reason(struct page *page, int reason) Loading Loading @@ -251,6 +269,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) new_page_owner->handle = old_page_owner->handle; new_page_owner->pid = old_page_owner->pid; new_page_owner->ts_nsec = old_page_owner->ts_nsec; new_page_owner->free_ts_nsec = 
old_page_owner->free_ts_nsec; /* * We don't clear the bit on the oldpage as it's going to be freed Loading