
Commit 125b79d7 authored by Linus Torvalds
Pull SLAB changes from Pekka Enberg:
 "New and noteworthy:

  * More SLAB allocator unification patches from Christoph Lameter and
    others.  This paves the way for slab memcg patches that hopefully
    will land in v3.8.

  * SLAB tracing improvements from Ezequiel Garcia.

  * Kernel tainting upon SLAB corruption from Dave Jones.

  * Miscellaneous SLAB allocator bug fixes and improvements from various
    people."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
  slab: Fix build failure in __kmem_cache_create()
  slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
  mm, slob: fix build breakage in __kmalloc_node_track_caller
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  mm/slab: Fix typo _RET_IP -> _RET_IP_
  mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
  mm, slab: Rename __cache_alloc() -> slab_alloc()
  mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
  mm, slab: Replace 'caller' type, void* -> unsigned long
  mm, slob: Add support for kmalloc_track_caller()
  mm, slab: Remove silly function slab_buffer_size()
  mm, slob: Use NUMA_NO_NODE instead of -1
  mm, sl[au]b: Taint kernel when we detect a corrupted slab
  slab: Only define slab_error for DEBUG
  slab: fix the DEADLOCK issue on l3 alien lock
  slub: Zero initial memory segment for kmem_cache and kmem_cache_node
  Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
  mm/sl[aou]b: Move kmem_cache refcounting to common code
  ...
parents f1c6872e e2087be3
include/linux/slab.h  +4 −2

@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
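
For context on the two slab.h hunks above: kmalloc_track_caller() exists so wrappers such as kstrdup() can charge an allocation to their own caller rather than to the wrapper itself, and _RET_IP_ is the kernel's shorthand for (unsigned long)__builtin_return_address(0). A minimal userspace sketch of the same mechanism, with all names invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Analogue of __kmalloc_track_caller(): takes an explicit call-site
 * address instead of using its own return address. */
static void *__my_malloc_track_caller(size_t size, unsigned long caller)
{
	printf("alloc of %zu bytes for call site %#lx\n", size, caller);
	return malloc(size);
}

/* Analogue of kmalloc_track_caller(): expands in the caller, so the
 * GCC builtin captures the caller's return address (_RET_IP_). */
#define my_malloc_track_caller(size) \
	__my_malloc_track_caller((size), \
		(unsigned long)__builtin_return_address(0))

/* A wrapper in the style of kstrdup(): allocations made here are
 * charged to whoever called my_strdup(), not to my_strdup() itself. */
static char *my_strdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = my_malloc_track_caller(len);

	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *p = my_strdup("slab");

	puts(p);
	free(p);
	return 0;
}
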
include/linux/slab_def.h  +10 −17

@@ -45,7 +45,6 @@ struct kmem_cache {
 	unsigned int colour_off;	/* colour offset */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
 	void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ found:
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_trace(size, cachep, flags);
+		ret = kmem_cache_alloc_trace(cachep, flags, size);
 
 		return ret;
 	}
@@ -166,16 +159,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-					 struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 					 gfp_t flags,
-					 int nodeid);
+					 int nodeid,
+					 size_t size);
#else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-			    struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 			    gfp_t flags,
-			    int nodeid)
+			    int nodeid,
+			    size_t size)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -207,7 +200,7 @@ found:
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
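
Two things happen in the slab_def.h hunks: the unused dflags field and the slab_buffer_size() helper go away, and the _trace prototypes are reordered to (cachep, flags, size) so SLAB matches SLUB. The CONFIG_TRACING split they preserve is a standard kernel idiom: with tracing compiled out, the _trace name collapses to a static inline that forwards to the plain allocator, so call sites need no #ifdefs of their own. A standalone sketch of that idiom, with MY_TRACING and all function names invented:

#include <stdio.h>
#include <stdlib.h>

static void *cache_alloc(size_t size)
{
	return malloc(size);
}

#ifdef MY_TRACING
/* The tracing build records the requested size alongside the result. */
static void *cache_alloc_trace(size_t size)
{
	void *ret = cache_alloc(size);

	printf("trace: alloc(%zu) = %p\n", size, ret);
	return ret;
}
#else
/* Tracing off: forward to the plain allocator with no overhead. */
static inline void *cache_alloc_trace(size_t size)
{
	return cache_alloc(size);
}
#endif

int main(void)
{
	void *p = cache_alloc_trace(32);	/* same call either way */

	free(p);
	return 0;
}
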
include/linux/slob_def.h  +4 −2

@@ -1,12 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
 					      gfp_t flags)
 {
-	return kmem_cache_alloc_node(cachep, flags, -1);
+	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	return __kmalloc_node(size, flags, -1);
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }
 
 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
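
The slob_def.h change swaps a bare -1 for NUMA_NO_NODE, which <linux/numa.h> defines as (-1); hence the added include. It is purely a readability fix: the magic number becomes a named "no node preference" value. A toy illustration (the allocator below is hypothetical; only the constant's value matches the kernel):

#include <stddef.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)	/* the kernel's value in <linux/numa.h> */

/* Hypothetical node-aware allocator entry point. */
static void alloc_on_node(size_t size, int node)
{
	if (node == NUMA_NO_NODE)
		printf("%zu bytes, no node preference\n", size);
	else
		printf("%zu bytes on node %d\n", size, node);
}

int main(void)
{
	alloc_on_node(64, NUMA_NO_NODE);	/* was: alloc_on_node(64, -1) */
	alloc_on_node(64, 1);
	return 0;
}
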
+143 −205  (file changed; diff collapsed: preview size limit exceeded)

mm/slab.h  +18 −1

@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
 
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
+int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
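
The mm/slab.h hunk is the heart of the unification work: __kmem_cache_create() becomes an int-returning hook each allocator implements against a preallocated struct kmem_cache, and __kmem_cache_alias() gets a NULL-returning static inline stub for allocators that cannot merge caches, keeping the shared creation path free of #ifdefs. A hedged sketch of that hook-plus-stub shape (HAVE_MERGING and every name below are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache {
	char name[32];
	size_t size;
};

#ifdef HAVE_MERGING
/* A merging allocator supplies a real implementation elsewhere. */
struct cache *cache_alias(const char *name, size_t size);
#else
/* Non-merging allocators get a stub, keeping common code ifdef-free. */
static inline struct cache *cache_alias(const char *name, size_t size)
{
	(void)name;
	(void)size;
	return NULL;		/* never alias: always build a new cache */
}
#endif

/* Common creation path shared by every allocator flavour. */
static struct cache *cache_create(const char *name, size_t size)
{
	struct cache *c = cache_alias(name, size);

	if (c)
		return c;	/* reuse a compatible existing cache */
	c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	snprintf(c->name, sizeof(c->name), "%s", name);
	c->size = size;
	return c;
}

int main(void)
{
	struct cache *c = cache_create("demo", 64);

	if (c)
		printf("created cache %s (%zu bytes)\n", c->name, c->size);
	free(c);
	return 0;
}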