Documentation/vm/slub.txt  +1 −0

@@ -41,6 +41,7 @@ Possible debug options are
 	P		Poisoning (object and padding)
 	U		User tracking (free and alloc)
 	T		Trace (please only use on single slabs)
+	A		Toggle failslab filter mark for the cache
 	O		Switch debugging off for caches that would have
 			caused higher minimum slab orders
 	-		Switch all debugging off (useful if the kernel is
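A brief usage sketch (the cache name kmalloc-4096 is only an example, and the
slub_debug parsing that honours the new letter lives in mm/slub.c, which is not
part of this diff): like the other option characters, 'A' can be combined with
a cache name on the kernel command line so that only the marked cache carries
the failslab mark once the cache filter below is enabled:

	slub_debug=A,kmalloc-4096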
include/linux/fault-inject.h  +3 −2

@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+				unsigned long flags)
 {
 	return false;
 }
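The extra argument carries the per-cache flags into the fault-injection core.
A minimal caller sketch, assuming the usual SLUB kmem_cache layout (s->objsize,
s->flags); the real call sites in the slab allocators are not part of this hunk
and the helper name here is made up:

	/* Sketch: pass the cache's flags so the cache filter can test
	 * SLAB_FAILSLAB before deciding whether to inject a failure.
	 */
	static inline bool slab_should_fail(struct kmem_cache *s, gfp_t gfpflags)
	{
		return should_failslab(s->objsize, gfpflags, s->flags);
	}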
include/linux/slab.h  +5 −0

@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK		0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
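Because SLAB_FAILSLAB is defined to 0 when CONFIG_FAILSLAB is off, callers can
pass it unconditionally. A hypothetical sketch of marking a cache as a
fault-injection target at creation time (the struct and cache name are made up
for illustration):

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo { int val; };
	static struct kmem_cache *foo_cache;

	static int __init foo_init(void)
	{
		/* Only this cache sees injected failures once the failslab
		 * cache-filter is enabled.
		 */
		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
					      0, SLAB_FAILSLAB, NULL);
		return foo_cache ? 0 : -ENOMEM;
	}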
include/linux/slub_def.h  +12 −15

@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
@@ -135,11 +129,21 @@ struct kmem_cache {
 #define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA	__GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES	(2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA	(__force gfp_t)0
+#define KMALLOC_CACHES	SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA	__GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA	(__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
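With a 4K PAGE_SIZE, SLUB_PAGE_SHIFT is 14, so KMALLOC_CACHES becomes
2 * 14 - 6 = 22, i.e. eight extra array slots are reserved for DMA kmalloc
caches. The cpu_slab member is now a single percpu allocation instead of an
NR_CPUS array; a sketch of how the fast path is expected to reach the current
CPU's state (the helper name is made up, and the real accessors live in
mm/slub.c, which is not shown here):

	#include <linux/percpu.h>

	/* Sketch: look up this CPU's kmem_cache_cpu via the percpu pointer. */
	static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s)
	{
		return this_cpu_ptr(s->cpu_slab);
	}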
mm/failslab.c  +15 −3

 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 static struct {
 	struct fault_attr attr;
 	u32 ignore_gfp_wait;
+	int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 	struct dentry *ignore_gfp_wait_file;
+	struct dentry *cache_filter_file;
 #endif
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
 	.ignore_gfp_wait = 1,
+	.cache_filter = 0,
 };
 
-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
@@ -20,6 +24,9 @@ bool should_failslab(size_t size, gfp_t gfpflags)
 	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
 		return false;
 
+	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+		return false;
+
 	return should_fail(&failslab.attr, size);
 }
 
@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
 static int __init failslab_debugfs_init(void)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
 				      &failslab.ignore_gfp_wait);
 
-	if (!failslab.ignore_gfp_wait_file) {
+	failslab.cache_filter_file =
+		debugfs_create_bool("cache-filter", mode, dir,
+				      &failslab.cache_filter);
+
+	if (!failslab.ignore_gfp_wait_file ||
+	    !failslab.cache_filter_file) {
 		err = -ENOMEM;
+		debugfs_remove(failslab.cache_filter_file);
 		debugfs_remove(failslab.ignore_gfp_wait_file);
 		cleanup_fault_attr_dentries(&failslab.attr);
 	}
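A hedged usage sketch from userspace, assuming debugfs is mounted at
/sys/kernel/debug and the usual fault_attr knobs (probability, times) are
created alongside the new cache-filter file: with cache-filter set, only
allocations from caches whose flags carry SLAB_FAILSLAB remain candidates for
injected failures; all other caches are skipped by the new check above.

	echo 1   > /sys/kernel/debug/failslab/cache-filter
	echo 10  > /sys/kernel/debug/failslab/probability
	echo 100 > /sys/kernel/debug/failslab/times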