include/trace/events/cma.h +39 −1

@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -61,6 +61,44 @@ TRACE_EVENT(cma_release,
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
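Note: the conversion to DECLARE_EVENT_CLASS keeps the existing cma_alloc event intact while letting the new cma_alloc_busy_retry event share its argument layout. To observe the new events, something like the following userspace sketch works; it is not part of the patch, and it assumes tracefs is mounted at /sys/kernel/tracing (older kernels expose it at /sys/kernel/debug/tracing):

/* Hedged userspace sketch, not part of the patch: enable the new cma
 * tracepoints and stream the results from trace_pipe. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	(void)write(fd, s, strlen(s));
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* The per-event enable files are generated from the TRACE_EVENT /
	 * DEFINE_EVENT names in include/trace/events/cma.h. */
	write_str(TRACEFS "/events/cma/cma_alloc_start/enable", "1");
	write_str(TRACEFS "/events/cma/cma_alloc_busy_retry/enable", "1");
	write_str(TRACEFS "/events/cma/cma_alloc/enable", "1");

	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}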
include/trace/events/migrate.h +21 −0

@@ -97,6 +97,27 @@ TRACE_EVENT(mm_numa_migrate_ratelimit,
 		  __entry->dst_nid,
 		  __entry->nr_pages)
 );
 
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+		__entry->reason = reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		  __print_symbolic(__entry->mode, MIGRATE_MODE),
+		  __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
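Note: this hunk only adds the event definition; the call site is not part of the diff shown here. A hedged sketch of where the generated trace_mm_migrate_pages_start() hook would naturally fire in mm/migrate.c (signature and placement assumed, body abbreviated):

/* Hypothetical call-site sketch, not shown in this diff. */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		  free_page_t put_new_page, unsigned long private,
		  enum migrate_mode mode, int reason)
{
	/* Emit once per call, before the retry loop, so the start event
	 * pairs with the existing mm_migrate_pages completion event. */
	trace_mm_migrate_pages_start(mode, reason);

	/* ... existing migration loop unchanged ... */
}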
mm/Kconfig +9 −0

@@ -659,6 +659,15 @@ config MAX_STACK_SIZE_MB
 
 	  A sane initial value is 80 MB.
 
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim treat anonymous and file backed
+	  pages equally. Swapping anonymous pages out to memory can be
+	  efficient enough to justify treating anonymous and file backed
+	  pages equally.
+
 # For architectures that support deferred memory initialisation
 config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	bool
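Note: this hunk only introduces the symbol; its consumer (typically the anon/file balancing heuristic in mm/vmscan.c) is not part of this diff. A hedged sketch of the usual consumption pattern, with a hypothetical helper name:

/* Hypothetical consumer sketch; the real hunk would live in
 * mm/vmscan.c and is not shown in this diff. */
static bool reclaim_balances_anon_and_file(void)
{
	/*
	 * IS_ENABLED() folds to a compile-time 0/1, so when the option
	 * is off the compiler drops the "treat them equally" branch
	 * entirely and the file-first reclaim heuristic is kept.
	 */
	return IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM);
}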
mm/cma.c +4 −0

@@ -447,6 +447,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -514,6 +516,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
+
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
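Note: once enabled, the events render through their TP_printk formats, so an allocation that hits a busy range before succeeding would produce a sequence along these lines. Task, CPU, timestamp, and all values below are illustrative, and the pfn/page fields follow from the cma_alloc_class arguments:

cma_test-1234  [001] ....   53.786521: cma_alloc_start: count=16 align=4
cma_test-1234  [001] ....   53.786900: cma_alloc_busy_retry: pfn=... page=... count=16 align=4
cma_test-1234  [001] ....   53.787310: cma_alloc: pfn=... page=... count=16 align=4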
mm/memblock.c +5 −2

@@ -707,7 +707,8 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
 		     &base, &end, (void *)_RET_IP_);
 
-	kmemleak_free_part_phys(base, size);
+	if (base < memblock.current_limit)
+		kmemleak_free_part(__va(base), size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1148,7 +1149,9 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		if (found < memblock.current_limit)
+			kmemleak_alloc(__va(found), size, 0, 0);
+
 		return found;
 	}
 	return 0;
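Note: the guard appears to exist because kmemleak scans virtual addresses, and __va() is only meaningful for physical memory below memblock.current_limit (the boundary memblock allocates under, i.e. within the linear map). Ranges above it are skipped rather than handed to kmemleak as mis-translated pointers. A hedged restatement of the pattern, with a hypothetical helper name:

/* Hedged restatement of the pattern above; helper name is hypothetical. */
static void kmemleak_track_lowmem(phys_addr_t base, phys_addr_t size)
{
	/*
	 * kmemleak works on virtual addresses, and __va() is only valid
	 * below memblock.current_limit; anything above it (e.g. highmem)
	 * is simply not reported to kmemleak.
	 */
	if (base < memblock.current_limit)
		kmemleak_alloc(__va(base), size, 0, 0);
}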