
Commit f4178cdd authored by Pekka Enberg

Merge branch 'slab/common-for-cgroups' into slab/for-linus



Fix up a trivial conflict with NUMA_NO_NODE cleanups.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
parents 023dc704 f28510d3
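
The two branches merged here converge SLAB, SLOB and SLUB on a shared pair of allocator hooks, with the generic bookkeeping moved into mm/slab_common.c. A condensed view of that contract, paraphrasing the declarations the diff adds to mm/slab.h (comments added here for orientation only):

	/* Declared in mm/slab.h; each of SLAB, SLOB and SLUB supplies both hooks. */
	int __kmem_cache_create(struct kmem_cache *s, unsigned long flags);
	int __kmem_cache_shutdown(struct kmem_cache *s);

	/*
	 * mm/slab_common.c now allocates the struct kmem_cache itself from the
	 * boot cache "kmem_cache", duplicates the name, handles refcounting and
	 * the slab_caches list, and reports errors; the per-allocator hooks are
	 * left with only allocator-specific initialization and teardown.
	 */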
mm/slab.c +103 −150
@@ -570,9 +570,9 @@ static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache cache_cache = {
	.nodelists = cache_cache_nodelists,
static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache kmem_cache_boot = {
	.nodelists = kmem_cache_nodelists,
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
@@ -795,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -805,6 +806,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
@@ -1587,15 +1589,17 @@ void __init kmem_cache_init(void)
	int order;
	int node;

	kmem_cache = &kmem_cache_boot;

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
			kmem_cache->nodelists[i] = NULL;
	}
	set_up_list3s(&cache_cache, CACHE_CACHE);
	set_up_list3s(kmem_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
@@ -1607,9 +1611,9 @@ void __init kmem_cache_init(void)

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
@@ -1618,43 +1622,43 @@ void __init kmem_cache_init(void)
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	node = numa_mem_id();

	/* 1) create the cache_cache */
	/* 1) create the kmem_cache */
	INIT_LIST_HEAD(&slab_caches);
	list_add(&cache_cache.list, &slab_caches);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
	list_add(&kmem_cache->list, &slab_caches);
	kmem_cache->colour_off = cache_line_size();
	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_list3 *);
	cache_cache.object_size = cache_cache.size;
	cache_cache.size = ALIGN(cache_cache.size,
	kmem_cache->object_size = kmem_cache->size;
	kmem_cache->size = ALIGN(kmem_cache->object_size,
					cache_line_size());
	cache_cache.reciprocal_buffer_size =
		reciprocal_value(cache_cache.size);
	kmem_cache->reciprocal_buffer_size =
		reciprocal_value(kmem_cache->size);

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
		cache_estimate(order, kmem_cache->size,
			cache_line_size(), 0, &left_over, &kmem_cache->num);
		if (kmem_cache->num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
	BUG_ON(!kmem_cache->num);
	kmem_cache->gfporder = order;
	kmem_cache->colour = left_over / kmem_cache->colour_off;
	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());

	/* 2+3) create the kmalloc caches */
@@ -1667,19 +1671,22 @@ void __init kmem_cache_init(void)
	 * bug.
	 */

	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);
	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep =
			__kmem_cache_create(names[INDEX_L3].name,
				sizes[INDEX_L3].cs_size,
				ARCH_KMALLOC_MINALIGN,
				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
				NULL);
		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
	}

	slab_early_init = 0;
@@ -1693,20 +1700,23 @@ void __init kmem_cache_init(void)
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep = __kmem_cache_create(names->name,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);
			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
			sizes->cs_cachep->name = names->name;
			sizes->cs_cachep->size = sizes->cs_size;
			sizes->cs_cachep->object_size = sizes->cs_size;
			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
			list_add(&sizes->cs_cachep->list, &slab_caches);
		}
#ifdef CONFIG_ZONE_DMA
		sizes->cs_dmacachep = __kmem_cache_create(
					names->name_dma,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
					NULL);
		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
		sizes->cs_dmacachep->name = names->name_dma;
		sizes->cs_dmacachep->size = sizes->cs_size;
		sizes->cs_dmacachep->object_size = sizes->cs_size;
		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
		__kmem_cache_create(sizes->cs_dmacachep,
			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
		list_add(&sizes->cs_dmacachep->list, &slab_caches);
#endif
		sizes++;
		names++;
@@ -1717,15 +1727,15 @@ void __init kmem_cache_init(void)

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(&cache_cache),
		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(kmem_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		cache_cache.array[smp_processor_id()] = ptr;
		kmem_cache->array[smp_processor_id()] = ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

@@ -1746,7 +1756,7 @@ void __init kmem_cache_init(void)
		int nid;

		for_each_online_node(nid) {
			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);

			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -2195,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
	}
}

static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;

	for_each_online_cpu(i)
	    kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	kmem_cache_free(&cache_cache, cachep);
}


/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
@@ -2352,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2367,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
__kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL;
	gfp_t gfp;
	int err;
	size_t size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
@@ -2445,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < align) {
		ralign = align;
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
@@ -2454,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
	/*
	 * 4) Store it.
	 */
	align = ralign;
	cachep->align = ralign;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

	/* Get cache's description obj. */
	cachep = kmem_cache_zalloc(&cache_cache, gfp);
	if (!cachep)
		return NULL;

	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
	cachep->object_size = size;
	cachep->align = align;
#if DEBUG

	/*
@@ -2514,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
		 */
		flags |= CFLGS_OFF_SLAB;

	size = ALIGN(size, align);
	size = ALIGN(size, cachep->align);

	left_over = calculate_slab_order(cachep, size, align, flags);
	left_over = calculate_slab_order(cachep, size, cachep->align, flags);

	if (!cachep->num)
		return -E2BIG;

	if (!cachep->num) {
		printk(KERN_ERR
		       "kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		return NULL;
	}
	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), align);
			  + sizeof(struct slab), cachep->align);

	/*
	 * If the slab has been placed off-slab, and we have enough space then
@@ -2553,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < align)
		cachep->colour_off = align;
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
	cachep->colour = left_over / cachep->colour_off;
	cachep->slab_size = slab_size;
	cachep->flags = flags;
@@ -2575,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}
	cachep->ctor = ctor;
	cachep->name = name;

	if (setup_cpu_cache(cachep, gfp)) {
		__kmem_cache_destroy(cachep);
		return NULL;
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_shutdown(cachep);
		return err;
	}

	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2593,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
		slab_set_debugobj_lock_classes(cachep);
	}

	/* cache setup completed, link it into the list */
	list_add(&cachep->list, &slab_caches);
	return cachep;
	return 0;
}

#if DEBUG
@@ -2754,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
}
EXPORT_SYMBOL(kmem_cache_shrink);

/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a &struct kmem_cache object from the slab cache.
 *
 * It is expected this function will be called by a module when it is
 * unloaded.  This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The cache must be empty before calling this function.
 *
 * The caller must guarantee that no one will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
void kmem_cache_destroy(struct kmem_cache *cachep)
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	BUG_ON(!cachep || in_interrupt());
	int i;
	struct kmem_list3 *l3;
	int rc = __cache_shrink(cachep);

	/* Find the cache in the chain of caches. */
	get_online_cpus();
	mutex_lock(&slab_mutex);
	/*
	 * the chain is never empty, cache_cache is never destroyed
	 */
	list_del(&cachep->list);
	if (__cache_shrink(cachep)) {
		slab_error(cachep, "Can't free all objects");
		list_add(&cachep->list, &slab_caches);
		mutex_unlock(&slab_mutex);
		put_online_cpus();
		return;
	}
	if (rc)
		return rc;

	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
		rcu_barrier();
	for_each_online_cpu(i)
	    kfree(cachep->array[i]);

	__kmem_cache_destroy(cachep);
	mutex_unlock(&slab_mutex);
	put_online_cpus();
	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	return 0;
}
EXPORT_SYMBOL(kmem_cache_destroy);

/*
 * Get the memory for a slab management obj.
@@ -3330,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,

static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
	if (cachep == &cache_cache)
	if (cachep == kmem_cache)
		return false;

	return should_failslab(cachep->object_size, flags, cachep->flags);
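
The bootstrap side follows the same pattern in SLAB and SLOB after this merge: a statically allocated descriptor is installed as the global kmem_cache before dynamic allocation is possible (SLUB keeps its page-allocator bootstrap). A minimal sketch, condensed from the kmem_cache_init() hunks elsewhere in this diff rather than copied from any one of them:

	/* Statically allocated boot descriptor, usable before any cache exists. */
	static struct kmem_cache kmem_cache_boot = {
		/* allocator-specific static initializers, see the hunks above and below */
	};

	void __init kmem_cache_init(void)
	{
		kmem_cache = &kmem_cache_boot;
		/* ... allocator-specific cache setup continues here ... */
	}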
mm/slab.h +18 −1
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif


int __kmem_cache_shutdown(struct kmem_cache *);

#endif
mm/slab_common.c +76 −4
@@ -22,6 +22,7 @@
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
@@ -98,21 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);
	if (kmem_cache_sanity_check(name, size) == 0)
		s = __kmem_cache_create(name, size, align, flags, ctor);

	if (!kmem_cache_sanity_check(name, size) == 0)
		goto out_locked;


	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = align;
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		err = __kmem_cache_create(s, flags);
		if (!err) {

			s->refcount = 1;
			list_add(&s->list, &slab_caches);

		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else
		err = -ENOMEM;

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: Failed to create slab '%s'\n", name);
	if (err) {

		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

int slab_is_available(void)
{
	return slab_state >= UP;
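
The externally visible API is unchanged by this consolidation: modules still create and destroy caches the same way, only the common implementation now lives in this file. A hypothetical caller, with illustrative names that are not part of this commit, might look like:

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/list.h>

	/* "foo" and foo_cachep are illustrative names only. */
	struct foo {
		int a;
		struct list_head list;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
					       0, SLAB_HWCACHE_ALIGN, NULL);
		if (!foo_cachep)
			return -ENOMEM;	/* SLAB_PANIC was not requested */
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* The cache must be empty before kmem_cache_destroy(). */
		kmem_cache_destroy(foo_cachep);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");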
mm/slob.c +27 −33
@@ -529,23 +529,15 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);

struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	struct kmem_cache *c;
	size_t align = c->size;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);

	if (c) {
		c->name = name;
		c->size = size;
	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
		c->ctor = ctor;
	/* ignore alignment unless it's forced */
	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	if (c->align < ARCH_SLAB_MINALIGN)
@@ -553,20 +545,8 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	if (c->align < align)
		c->align = align;

		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
		c->refcount = 1;
	}
	return c;
}

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
	return 0;
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
@@ -634,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
}
EXPORT_SYMBOL(kmem_cache_size);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

mm/slub.c +56 −89
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s->name);
	kfree(s);
}
static inline void sysfs_slab_remove(struct kmem_cache *s) { }

#endif

@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
{
	va_list args;
	char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)

	page = virt_to_head_page(x);

	if (kmem_cache_debug(s) && page->slab != s) {
		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
			" is from  %s\n", page->slab->name, s->name);
		WARN_ON_ONCE(1);
		return;
	}

	slab_free(s, page, x, _RET_IP_);

	trace_kmem_cache_free(_RET_IP_, x);
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)

}

static int kmem_cache_open(struct kmem_cache *s,
		const char *name, size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *))
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
{
	memset(s, 0, kmem_size);
	s->name = name;
	s->ctor = ctor;
	s->object_size = size;
	s->align = align;
	s->flags = kmem_cache_flags(size, flags, name, ctor);
	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
	s->reserved = 0;

	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
	else
		s->cpu_partial = 30;

	s->refcount = 1;
#ifdef CONFIG_NUMA
	s->remote_node_defrag_ratio = 1000;
#endif
@@ -3121,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
		goto error;

	if (alloc_kmem_cache_cpus(s))
		return 1;
		return 0;

	free_kmem_cache_nodes(s);
error:
	if (flags & SLAB_PANIC)
		panic("Cannot create slab %s size=%lu realsize=%u "
			"order=%u offset=%u flags=%lx\n",
			s->name, (unsigned long)size, s->size, oo_order(s->oo),
			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
			s->offset, flags);
	return 0;
	return -EINVAL;
}

/*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
				     sizeof(long), GFP_ATOMIC);
	if (!map)
		return;
	slab_err(s, page, "%s", text);
	slab_err(s, page, text, s->name);
	slab_lock(page);

	get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
			discard_slab(s, page);
		} else {
			list_slab_objects(s, page,
				"Objects remaining on kmem_cache_close()");
			"Objects remaining in %s on kmem_cache_close()");
		}
	}
}
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
	int node;

	flush_all(s);
	free_percpu(s->cpu_slab);
	/* Attempt to free all objects */
	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
		if (n->nr_partial || slabs_node(s, node))
			return 1;
	}
	free_percpu(s->cpu_slab);
	free_kmem_cache_nodes(s);
	return 0;
}

/*
 * Close a cache and release the kmem_cache structure
 * (must be used for caches created using kmem_cache_create)
 */
void kmem_cache_destroy(struct kmem_cache *s)
int __kmem_cache_shutdown(struct kmem_cache *s)
{
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);
		mutex_unlock(&slab_mutex);
		if (kmem_cache_close(s)) {
			printk(KERN_ERR "SLUB %s: %s called for cache that "
				"still has objects.\n", s->name, __func__);
			dump_stack();
		}
		if (s->flags & SLAB_DESTROY_BY_RCU)
			rcu_barrier();
	int rc = kmem_cache_close(s);

	if (!rc)
		sysfs_slab_remove(s);
	} else
		mutex_unlock(&slab_mutex);

	return rc;
}
EXPORT_SYMBOL(kmem_cache_destroy);

/********************************************************************
 *		Kmalloc subsystem
@@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches);

static struct kmem_cache *kmem_cache;

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
{
	struct kmem_cache *s;

	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	s->name = name;
	s->size = s->object_size = size;
	s->align = ARCH_KMALLOC_MINALIGN;

	/*
	 * This function is called with IRQs disabled during early-boot on
	 * single CPU so there's no need to take slab_mutex here.
	 */
	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
								flags, NULL))
	if (kmem_cache_open(s, flags))
		goto panic;

	list_add(&s->list, &slab_caches);
@@ -3739,7 +3720,7 @@ void __init kmem_cache_init(void)
	/* Allocate two kmem_caches from the page allocator */
	kmalloc_size = ALIGN(kmem_size, cache_line_size());
	order = get_order(2 * kmalloc_size);
	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);

	/*
	 * Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@ void __init kmem_cache_init(void)
	 */
	kmem_cache_node = (void *)kmem_cache + kmalloc_size;

	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node),
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	kmem_cache_node->name = "kmem_cache_node";
	kmem_cache_node->size = kmem_cache_node->object_size =
		sizeof(struct kmem_cache_node);
	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);

@@ -3758,8 +3740,10 @@ void __init kmem_cache_init(void)
	slab_state = PARTIAL;

	temp_kmem_cache = kmem_cache;
	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	kmem_cache->name = "kmem_cache";
	kmem_cache->size = kmem_cache->object_size = kmem_size;
	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	memcpy(kmem_cache, temp_kmem_cache, kmem_size);

@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
	return NULL;
}

struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *n;

	s = find_mergeable(size, align, flags, name, ctor);
	if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,

		if (sysfs_slab_alias(s, name)) {
			s->refcount--;
			return NULL;
			s = NULL;
		}
	}

	return s;
}

	n = kstrdup(name, GFP_KERNEL);
	if (!n)
		return NULL;
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
	int err;

	s = kmalloc(kmem_size, GFP_KERNEL);
	if (s) {
		if (kmem_cache_open(s, n,
				size, align, flags, ctor)) {
			int r;
	err = kmem_cache_open(s, flags);
	if (err)
		return err;

			list_add(&s->list, &slab_caches);
	mutex_unlock(&slab_mutex);
			r = sysfs_slab_add(s);
	err = sysfs_slab_add(s);
	mutex_lock(&slab_mutex);

			if (!r)
				return s;

			list_del(&s->list);
	if (err)
		kmem_cache_close(s);
		}
		kfree(s);
	}
	kfree(n);
	return NULL;

	return err;
}

#ifdef CONFIG_SMP
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s->name);
	kfree(s);
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)