include/linux/slub_def.h +16 −3

@@ -121,11 +121,24 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
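To follow the new constants: the general caches are indexed by the power of two of the object size, so with SLUB_MAX_SIZE equal to PAGE_SIZE the highest valid index is PAGE_SHIFT, and the array needs PAGE_SHIFT + 1 slots, which is what SLUB_PAGE_SHIFT spells out. The sketch below is a minimal standalone userspace illustration of that boundary, not kernel code: the hard-coded PAGE_SHIFT of 12 and the cache_index() helper are assumptions for illustration only, and the helper only approximates what the real kmalloc_slab()/get_slab() lookup does (it ignores the special 96- and 192-byte caches, for instance).

/*
 * Userspace sketch of the SLUB_MAX_SIZE / SLUB_PAGE_SHIFT sizing logic.
 * Not kernel code; PAGE_SHIFT is assumed to be 12 (4 KiB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(PAGE_SIZE)
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 1)

/* Smallest power-of-two cache index that can hold 'size' bytes. */
static int cache_index(unsigned long size)
{
	int i = 3;			/* assume an 8-byte minimum object */

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	unsigned long sizes[] = { 8, 100, 4096, 4097, 8192 };

	for (int i = 0; i < (int)(sizeof(sizes) / sizeof(sizes[0])); i++) {
		unsigned long size = sizes[i];

		if (size > SLUB_MAX_SIZE)
			printf("%5lu bytes -> page allocator (kmalloc_large)\n",
			       size);
		else
			printf("%5lu bytes -> kmalloc_caches[%d] (kmalloc-%lu)\n",
			       size, cache_index(size),
			       1UL << cache_index(size));
	}
	/*
	 * The largest slab-backed request is SLUB_MAX_SIZE == 2^PAGE_SHIFT,
	 * so the highest valid index is PAGE_SHIFT and the array needs
	 * SLUB_PAGE_SHIFT == PAGE_SHIFT + 1 entries.
	 */
	return 0;
}

With these assumed values, 4096-byte requests still come out of kmalloc_caches[12] while 4097-byte and larger requests fall through to the page allocator, which is exactly the cutover the comment above describes.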
init/Kconfig +1 −1

@@ -945,7 +945,7 @@ config TRACEPOINTS
 
 config MARKERS
 	bool "Activate markers"
-	depends on TRACEPOINTS
+	select TRACEPOINTS
 	help
 	  Place an empty function call at each marker site. Can be
 	  dynamically changed for a probe function.
mm/slub.c +8 −8

@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
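The loop-bound rewrites in kmem_cache_init() are purely mechanical: because SLUB_PAGE_SHIFT is defined as PAGE_SHIFT + 1, "i < SLUB_PAGE_SHIFT" visits exactly the same indices as the old "i <= PAGE_SHIFT". The standalone check below is not kernel code; the KMALLOC_SHIFT_LOW and PAGE_SHIFT values are assumed here purely for illustration.

/*
 * Standalone sketch showing the two loop bounds are equivalent.
 * KMALLOC_SHIFT_LOW = 3 and PAGE_SHIFT = 12 are assumed values.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define SLUB_PAGE_SHIFT		(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW	3

int main(void)
{
	/* Old bound: i <= PAGE_SHIFT.  New bound: i < SLUB_PAGE_SHIFT. */
	for (int i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
		printf("cache %2d: kmalloc-%d\n", i, 1 << i);

	/* Both forms end at i == PAGE_SHIFT, the page-sized cache. */
	return 0;
}

With the assumed values the last cache created and named is kmalloc-4096, matching the largest size still served by SLUB under SLUB_MAX_SIZE.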