Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 12c3667f authored by Christoph Lameter's avatar Christoph Lameter Committed by Pekka Enberg
Browse files

mm/sl[aou]b: Get rid of __kmem_cache_destroy



What is done there can be done in __kmem_cache_shutdown.

This affects RCU handling somewhat. On RCU free, none of the slab allocators
refer to management structures other than the kmem_cache structure itself.
Therefore these other structures can be freed before the rcu deferred
free to the page allocator occurs.

Reviewed-by: default avatarJoonsoo Kim <js1304@gmail.com>
Signed-off-by: default avatarChristoph Lameter <cl@linux.com>
Signed-off-by: default avatarPekka Enberg <penberg@kernel.org>
parent 8f4c765c
Loading
Loading
Loading
Loading
+21 −25
Original line number Diff line number Diff line
@@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
	}
}

/*
 * Release the per-cpu array caches and the per-node list3 management
 * structures owned by @cachep.  The kmem_cache itself is not freed here.
 */
void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int cpu;
	int node;

	/* Drop each online cpu's array cache. */
	for_each_online_cpu(cpu)
		kfree(cachep->array[cpu]);

	/* NUMA: free the list3 structures */
	for_each_online_node(node) {
		struct kmem_list3 *l3 = cachep->nodelists[node];

		if (!l3)
			continue;

		kfree(l3->shared);
		free_alien_cache(l3->alien);
		kfree(l3);
	}
}


/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
@@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
	cachep->refcount = 1;

	if (setup_cpu_cache(cachep, gfp)) {
		__kmem_cache_destroy(cachep);
		__kmem_cache_shutdown(cachep);
		return NULL;
	}

@@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink);

/*
 * Tear down @cachep: shrink the cache to release all slabs, then free
 * the per-cpu array caches and per-node list3 structures.
 *
 * Returns 0 on success, or the nonzero result of __cache_shrink() if
 * the cache still holds live objects (in which case nothing is freed).
 */
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;
	int rc = __cache_shrink(cachep);

	if (rc)
		return rc;

	for_each_online_cpu(i)
	    kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	return 0;
}

/*
+0 −1
Original line number Diff line number Diff line
@@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_destroy(struct kmem_cache *);

#endif
+0 −1
Original line number Diff line number Diff line
@@ -153,7 +153,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			__kmem_cache_destroy(s);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
+0 −4
Original line number Diff line number Diff line
@@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	return c;
}

/*
 * No-op for this allocator: there are no auxiliary per-cache structures
 * to release here, so teardown requires no work beyond what the caller
 * already does.
 */
void __kmem_cache_destroy(struct kmem_cache *c)
{
}

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;
+5 −5
Original line number Diff line number Diff line
@@ -3205,12 +3205,12 @@ static inline int kmem_cache_close(struct kmem_cache *s)

/*
 * Shut down @s: close the cache and, on success, remove its sysfs entry.
 *
 * Returns 0 on success, or the nonzero result of kmem_cache_close() if
 * the cache could not be torn down; in that case the sysfs entry is
 * left in place.
 */
int __kmem_cache_shutdown(struct kmem_cache *s)
{
	int rc = kmem_cache_close(s);

	if (!rc)
		sysfs_slab_remove(s);

	return rc;
}

/********************************************************************