Patch: pass the allocation size as the last argument to kmem_cache_alloc_node_trace(),
and fix a _RET_IP typo. 3 files changed, 9 insertions(+), 9 deletions(-).

--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -159,16 +159,16 @@
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-					 struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid);
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid,
+					 size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-			    struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid)
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid,
+			    size_t size)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -200,7 +200,7 @@ found:
 #endif
 		cachep = malloc_sizes[i].cs_cachep;
 
-		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3824,7 +3824,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 {
 	void *ret;
 
-	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP);
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, cachep->size,

--- a/mm/slob.c
+++ b/mm/slob.c
@@ -482,7 +482,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 				  int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, gfp, node, caller);
include/linux/slab_def.h (+7 −7):

@@ -159,16 +159,16 @@
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-					 struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid);
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid,
+					 size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-			    struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid)
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid,
+			    size_t size)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -200,7 +200,7 @@ found:
 #endif
 		cachep = malloc_sizes[i].cs_cachep;
 
-		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
mm/slab.c (+1 −1):

@@ -3824,7 +3824,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 {
 	void *ret;
 
-	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP);
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, cachep->size,
mm/slob.c (+1 −1):

@@ -482,7 +482,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 				  int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, gfp, node, caller);