
Commit d50112ed authored by Alexey Dobriyan, committed by Linus Torvalds

slab, slub, slob: add slab_flags_t

Add sparse-checked slab_flags_t for struct kmem_cache::flags (SLAB_POISON,
etc).

Switching to "unsigned long" bloats SLAB, but only temporarily.
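
For context, the sparse idiom being applied: __bitwise makes slab_flags_t a distinct type when the code is checked with sparse ("make C=1") while compiling to an ordinary integer otherwise, and a __force cast is the one sanctioned way to turn a raw constant into the type. Below is a minimal sketch of the pattern, modeled on how gfp_t already works; the slab_flags_t typedef itself is added by this commit in a file not loaded on this page.

/* Sparse annotations as the kernel defines them (compiler_types.h):
 * no-ops for the real compiler, type attributes for the sparse checker.
 */
#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

/* The new type: plain unsigned long to gcc, a distinct type to sparse */
typedef unsigned long __bitwise slab_flags_t;

/* Constants are __force-cast once, at the definition site... */
#define SLAB_RED_ZONE	((slab_flags_t __force)0x00000400UL)
#define SLAB_POISON	((slab_flags_t __force)0x00000800UL)

/* ...after which bitwise combinations stay well-typed... */
static slab_flags_t debug_flags = SLAB_RED_ZONE | SLAB_POISON;

/* ...while mixing with plain integers or gfp_t draws a sparse warning:
 *	unsigned long raw = SLAB_POISON;  // warning: incorrect type
 */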

Link: http://lkml.kernel.org/r/20171021100225.GA22428@avx2


Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a3ba0744
fs/ecryptfs/main.c (+1 −1)
@@ -660,7 +660,7 @@ static struct ecryptfs_cache_info {
 	struct kmem_cache **cache;
 	const char *name;
 	size_t size;
-	unsigned long flags;
+	slab_flags_t flags;
 	void (*ctor)(void *obj);
 } ecryptfs_cache_infos[] = {
 	{
fs/xfs/kmem.h (+1 −1)
@@ -104,7 +104,7 @@ kmem_zone_init(int size, char *zone_name)
 }
 
 static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
 		     void (*construct)(void *))
 {
 	return kmem_cache_create(zone_name, size, 0, flags, construct);
include/linux/kasan.h (+2 −2)
@@ -46,7 +46,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
 void kasan_cache_create(struct kmem_cache *cache, size_t *size,
-			unsigned long *flags);
+			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
 
@@ -95,7 +95,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}

 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      size_t *size,
-				      unsigned long *flags) {}
+				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 
include/linux/kmemleak.h (+4 −4)
@@ -48,14 +48,14 @@ extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
 extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-					    int min_count, unsigned long flags,
+					    int min_count, slab_flags_t flags,
 					    gfp_t gfp)
 {
 	if (!(flags & SLAB_NOLEAKTRACE))
 		kmemleak_alloc(ptr, size, min_count, gfp);
 }
 
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
 	if (!(flags & SLAB_NOLEAKTRACE))
 		kmemleak_free(ptr);
@@ -76,7 +76,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
 {
 }
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-					    int min_count, unsigned long flags,
+					    int min_count, slab_flags_t flags,
 					    gfp_t gfp)
 {
 }
@@ -94,7 +94,7 @@ static inline void kmemleak_free(const void *ptr)
 static inline void kmemleak_free_part(const void *ptr, size_t size)
 {
 }
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
 }
 static inline void kmemleak_free_percpu(const void __percpu *ptr)
include/linux/slab.h (+37 −23)
@@ -21,13 +21,20 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100UL)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400UL)
+/* DEBUG: Poison objects */
+#define SLAB_POISON		((slab_flags_t __force)0x00000800UL)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000UL)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000UL)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000UL)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC		((slab_flags_t __force)0x00040000UL)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -65,44 +72,51 @@
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
-#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000UL)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000UL)
+/* Trace allocations and frees */
+#define SLAB_TRACE		((slab_flags_t __force)0x00200000UL)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	0x00400000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000UL)
 #else
-# define SLAB_DEBUG_OBJECTS	0x00000000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00000000UL)
 #endif
 
-#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000UL)
 
 /* Don't track use of uninitialized memory */
 #ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK		0x01000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x01000000UL)
 #else
-# define SLAB_NOTRACK		0x00000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000UL)
 #else
-# define SLAB_FAILSLAB		0x00000000UL
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000UL)
 #else
-# define SLAB_ACCOUNT		0x00000000UL
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x00000000UL)
 #endif
 
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN		0x08000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x08000000UL)
 #else
-#define SLAB_KASAN		0x00000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x00000000UL)
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000UL)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -128,7 +142,7 @@ void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
+			slab_flags_t,
 			void (*)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
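
As the final hunk shows, only the kmem_cache_create() prototype changes type; existing callers keep passing the same SLAB_* macros, which now carry the slab_flags_t type themselves. A hypothetical caller, identical before and after this commit (struct example_obj and the cache name are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct example_obj {
	int id;
	char name[32];
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* The flags argument is now slab_flags_t; the call site is unchanged */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cachep)
		return -ENOMEM;
	return 0;
}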