
Commit afc0cedb authored by Nick Piggin, committed by Linus Torvalds

slob: implement RCU freeing



The SLOB allocator should implement SLAB_DESTROY_BY_RCU correctly, because
even on UP, RCU freeing semantics are not equivalent to simply freeing
immediately.  This also allows SLOB to be used on SMP.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b2cd6415
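
The commit message's point that RCU freeing is not the same as freeing immediately is easiest to see from the reader side. Below is a minimal, illustrative sketch (not part of this patch) of how a cache created with SLAB_DESTROY_BY_RCU is typically used; the cache, structure and hash-list names are hypothetical. Readers inside rcu_read_lock() may still be dereferencing an object after it has been handed to kmem_cache_free(), so the allocator must keep that memory valid as an object of the same cache until a grace period has passed, and readers must revalidate whatever they find.

/*
 * Illustrative only -- not part of this patch.  Lockless lookup against a
 * cache created with SLAB_DESTROY_BY_RCU; "item_cache", "struct item" and
 * the hash list are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int key;
	struct hlist_node node;
};

/* item_cache = kmem_cache_create("item", sizeof(struct item), 0,
 *                                SLAB_DESTROY_BY_RCU, NULL, NULL); */
static struct kmem_cache *item_cache;

static struct item *item_lookup(struct hlist_head *head, int key)
{
	struct item *it;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, pos, head, node) {
		/*
		 * A writer may already have called
		 * kmem_cache_free(item_cache, it).  SLAB_DESTROY_BY_RCU only
		 * guarantees the memory remains a "struct item" until
		 * rcu_read_unlock(), so the key must be rechecked (and
		 * normally a reference taken) before the object is used
		 * outside the read-side critical section.
		 */
		if (it->key == key) {
			rcu_read_unlock();
			return it;
		}
	}
	rcu_read_unlock();
	return NULL;
}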
+2 −5
@@ -577,14 +577,11 @@ config SLUB
 	   and has enhanced diagnostics.
 
 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator.  SLOB is more space efficient that SLAB but does not
+	   allocator.  SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
+45 −7
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
@@ -266,6 +277,7 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	void (*dtor)(void *, struct kmem_cache *, unsigned long);
@@ -283,6 +295,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			BUG_ON(dtor);
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
@@ -328,15 +346,35 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
+static void __kmem_cache_free(void *b, int size)
+{
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
+	else
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
-	else
-		free_pages((unsigned long)b, get_order(c->size));
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		if (c->dtor)
+			c->dtor(b, c, 0);
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
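
A note on the footer arithmetic above (a reading aid, not part of the patch): kmem_cache_create() grows c->size by sizeof(struct slob_rcu) for SLAB_DESTROY_BY_RCU caches, so the footer occupies the last bytes of each object. kmem_cache_free() locates it at b + c->size - sizeof(struct slob_rcu), and after the grace period kmem_rcu_free() inverts that computation to recover the start of the block, which is then freed whole. A stand-alone userspace sketch of the round trip, with a placeholder standing in for struct rcu_head:

#include <assert.h>
#include <stdlib.h>

/* Mirrors the footer added by the patch; a placeholder replaces
 * struct rcu_head so this compiles outside the kernel. */
struct slob_rcu {
	void *head_placeholder;
	int size;
};

int main(void)
{
	size_t object_size = 64;	/* size the cache user asked for */
	size_t c_size = object_size + sizeof(struct slob_rcu);	/* c->size after create */
	char *b = malloc(c_size);	/* the block SLOB would hand out */

	if (!b)
		return 1;

	/* kmem_cache_free(): the footer sits in the last bytes of the object */
	struct slob_rcu *slob_rcu =
		(struct slob_rcu *)(b + c_size - sizeof(struct slob_rcu));
	slob_rcu->size = (int)c_size;

	/* kmem_rcu_free(): recover the block start from the footer pointer */
	char *start = (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
	assert(start == b);		/* exact inverse: the whole block is freed */

	free(b);
	return 0;
}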