
Commit 023dc704 authored by Pekka Enberg

Merge branch 'slab/next' into slab/for-linus

parents a0d271cb 608da7e3
include/linux/slab.h  +4 −2
@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
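Note: the widened #if above means SLOB with tracing enabled now also gets the out-of-line caller-tracking variants. These macros exist so that allocation wrappers are charged to their real caller rather than to the wrapper itself. A minimal sketch of the pattern, with my_strdup() as a hypothetical wrapper (the kernel's own kstrdup() follows the same approach):

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Because kmalloc_track_caller() expands to
 * __kmalloc_track_caller(size, flags, _RET_IP_), the allocation below
 * is attributed to whoever called my_strdup(), not to my_strdup()
 * itself, in both the CONFIG_DEBUG_SLAB and CONFIG_TRACING builds.
 */
static char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	char *buf = kmalloc_track_caller(len, gfp);

	if (buf)
		memcpy(buf, s, len);
	return buf;
}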
include/linux/slab_def.h  +3 −10
@@ -45,7 +45,6 @@ struct kmem_cache {
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
#endif
			cachep = malloc_sizes[i].cs_cachep;

-		ret = kmem_cache_alloc_trace(size, cachep, flags);
+		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
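Note: the reordered prototype (cache first, size last) matches SLUB's existing kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t), so call sites are identical under either allocator. A sketch of such a call site, assuming a hypothetical buf_cache:

#include <linux/slab.h>

/*
 * Hypothetical call site: `size` is the caller's originally requested
 * size (at most the cache's object size), recorded by the trace event
 * just as the kmalloc() fast path above records it. Without
 * CONFIG_TRACING the inline stub ignores `size` and falls through to
 * kmem_cache_alloc().
 */
static void *alloc_small_buf(struct kmem_cache *buf_cache, size_t size)
{
	return kmem_cache_alloc_trace(buf_cache, GFP_KERNEL, size);
}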
include/linux/slob_def.h  +4 −2
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H

+#include <linux/numa.h>
+
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
					      gfp_t flags)
{
-	return kmem_cache_alloc_node(cachep, flags, -1);
+	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
-	return __kmalloc_node(size, flags, -1);
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
}

static __always_inline void *__kmalloc(size_t size, gfp_t flags)
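Note: the -1 → NUMA_NO_NODE substitution is purely cosmetic. The constant comes from the newly included <linux/numa.h> and is defined as -1, so "no preferred node" callers behave exactly as before:

/* From include/linux/numa.h (for reference): */
#define	NUMA_NO_NODE	(-1)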
mm/slab.c  +40 −55
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

#endif

-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-	page = compound_head(page);
-	BUG_ON(!PageSlab(page));
-	return page->slab_cache;
-}
-
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
@@ -818,6 +803,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
+	add_taint(TAINT_BAD_PAGE);
}

/*
@@ -1781,9 +1767,6 @@ void __init kmem_cache_init_late(void)

	slab_state = UP;

-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
@@ -1791,6 +1774,9 @@ void __init kmem_cache_init_late(void)
			BUG();
	mutex_unlock(&slab_mutex);

+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
	/* Done! */
	slab_state = FULL;

@@ -2506,8 +2492,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+	    && cachep->object_size > cache_line_size()
+	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
		size = PAGE_SIZE;
	}
#endif
@@ -3098,7 +3085,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
{
	struct page *page;
	unsigned int objnr;
@@ -3118,7 +3105,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;

	objnr = obj_to_index(cachep, slabp, objp);

@@ -3131,7 +3118,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
			kernel_map_pages(virt_to_page(objp),
					 cachep->size / PAGE_SIZE, 0);
		} else {
@@ -3285,7 +3272,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
{
	if (!objp)
		return objp;
@@ -3302,7 +3289,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3576,8 +3563,8 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		   unsigned long caller)
{
	unsigned long save_flags;
	void *ptr;
@@ -3663,7 +3650,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
#endif /* CONFIG_NUMA */

static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;
@@ -3799,7 +3786,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-    void *caller)
+				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

@@ -3839,7 +3826,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);
@@ -3850,14 +3837,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_TRACING
void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3866,8 +3853,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
@@ -3878,17 +3864,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-				  struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
-				  int nodeid)
+				  int nodeid,
+				  size_t size)
{
	void *ret;

-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
-			   size, slab_buffer_size(cachep),
+			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
@@ -3896,34 +3882,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

	cachep = kmem_find_general_cachep(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
-	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3936,7 +3921,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;
@@ -3949,9 +3934,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
	cachep = __find_general_cachep(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
-	ret = __cache_alloc(cachep, flags, caller);
+	ret = slab_alloc(cachep, flags, caller);

-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
@@ -3961,20 +3946,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

#else
void *__kmalloc(size_t size, gfp_t flags)
{
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
}
EXPORT_SYMBOL(__kmalloc);
#endif
@@ -3995,7 +3980,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4026,7 +4011,7 @@ void kfree(const void *objp)
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
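Note: the caller argument's change of type from void * to unsigned long is mechanical. _RET_IP_ already evaluates to the return address as an unsigned long, so the allocation and free paths drop the repeated casts and only the debug store into the object's user word casts back to void *. For reference:

/* From include/linux/kernel.h: */
#define _RET_IP_	(unsigned long)__builtin_return_address(0)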
mm/slab_common.c  +48 −49
@@ -23,6 +23,52 @@ enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	struct kmem_cache *s = NULL;
+
+	if (!name || in_interrupt() || size < sizeof(void *) ||
+		size > KMALLOC_MAX_SIZE) {
+		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(s, &slab_caches, list) {
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module.  Print a warning.
+		 */
+		res = probe_kernel_address(s->name, tmp);
+		if (res) {
+			pr_err("Slab cache with size %d has lost its name\n",
+			       s->object_size);
+			continue;
+		}
+
+		if (!strcmp(s->name, name)) {
+			pr_err("%s (%s): Cache name already exists.\n",
+			       __func__, name);
+			dump_stack();
+			s = NULL;
+			return -EINVAL;
+		}
+	}
+
+	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
+	return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	return 0;
+}
+#endif
+
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -53,60 +99,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
{
	struct kmem_cache *s = NULL;

-#ifdef CONFIG_DEBUG_VM
-	if (!name || in_interrupt() || size < sizeof(void *) ||
-		size > KMALLOC_MAX_SIZE) {
-		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-			" failed\n", name);
-		goto out;
-	}
-#endif
-
	get_online_cpus();
	mutex_lock(&slab_mutex);

-#ifdef CONFIG_DEBUG_VM
-	list_for_each_entry(s, &slab_caches, list) {
-		char tmp;
-		int res;
-
-		/*
-		 * This happens when the module gets unloaded and doesn't
-		 * destroy its slab cache and no-one else reuses the vmalloc
-		 * area of the module.  Print a warning.
-		 */
-		res = probe_kernel_address(s->name, tmp);
-		if (res) {
-			printk(KERN_ERR
-			       "Slab cache with size %d has lost its name\n",
-			       s->object_size);
-			continue;
-		}
-
-		if (!strcmp(s->name, name)) {
-			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-				" already exists.\n",
-				name);
-			dump_stack();
-			s = NULL;
-			goto oops;
-		}
-	}
-
-	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-#endif

+	if (kmem_cache_sanity_check(name, size) == 0)
+		s = __kmem_cache_create(name, size, align, flags, ctor);

-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
	mutex_unlock(&slab_mutex);
	put_online_cpus();

-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: Failed to create slab '%s'\n", name);

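Note: with the CONFIG_DEBUG_VM checks consolidated into kmem_cache_sanity_check(), the locked section of kmem_cache_create() no longer needs conditional goto labels. A sketch of a cache creation that the helper vets (struct foo, foo_cache and foo_init() are hypothetical):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int a, b;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/*
	 * Under CONFIG_DEBUG_VM, kmem_cache_sanity_check() verifies that
	 * the name is non-NULL and not already registered, that the call
	 * is not made from interrupt context, and that the object size
	 * lies within [sizeof(void *), KMALLOC_MAX_SIZE].
	 */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}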