
Commit 8da3430d authored by Eric Dumazet, committed by Linus Torvalds

slab: NUMA kmem_cache diet



Some NUMA machines have a big MAX_NUMNODES (possibly 1024) but fewer
possible nodes.  This patch dynamically sizes 'struct kmem_cache' so that
only the needed space is allocated.

I moved the nodelists[] field to the end of struct kmem_cache and use the
following computation in kmem_cache_init():

cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
                                 nr_node_ids * sizeof(struct kmem_list3 *);

On my two-node x86_64 machine, kmem_cache.obj_size is now 192 bytes instead
of 704 (this is because MAX_NUMNODES is 64 on x86_64).

On bigger NUMA setups, this might reduce the gfporder of "cache_cache".
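
The same trick works for any struct that ends in a per-node pointer array:
put the array last, then allocate only offsetof(struct, array) plus
n * sizeof(element) bytes.  Below is a minimal userspace sketch of that
pattern; the names (struct cache, struct node_list, MAX_NODES, nr_node_ids)
are illustrative stand-ins, not the kernel's types.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 64			/* compile-time worst case, like MAX_NUMNODES */

struct node_list;			/* opaque stand-in for struct kmem_list3 */

struct cache {
	unsigned int buffer_size;
	unsigned int flags;
	/* Must stay last: allocations are cut short after this array. */
	struct node_list *nodelists[MAX_NODES];
};

int main(void)
{
	int nr_node_ids = 2;		/* nodes actually present at boot */
	size_t sz = offsetof(struct cache, nodelists) +
		    nr_node_ids * sizeof(struct node_list *);
	struct cache *c = calloc(1, sz);

	if (!c)
		return 1;
	/* On a 64-bit build the trimmed object saves 62 unused pointers. */
	printf("full size %zu, trimmed size %zu\n", sizeof(struct cache), sz);
	free(c);
	return 0;
}

The one rule callers must then respect is never to touch nodelists[i] for
i >= nr_node_ids, which is what the "Do not add fields after nodelists[]"
comment in the patch below guards.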

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 63109846
+20 −4
@@ -389,7 +389,6 @@ struct kmem_cache {
 	unsigned int buffer_size;
 	u32 reciprocal_buffer_size;
 /* 3) touched by every alloc & free from the backend */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
 
 	unsigned int flags;		/* constant flags */
 	unsigned int num;		/* # of objs per slab */
@@ -444,6 +443,17 @@ struct kmem_cache {
 	int obj_offset;
 	int obj_size;
 #endif
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
 };
 
 #define CFLGS_OFF_SLAB		(0x80000000UL)
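
The new comment pins down a subtlety: cache_cache itself is defined
statically, so its array needs compile-time storage.  A C99 flexible array
member would express the dynamic sizing more directly, but a static
definition of such a struct reserves no array storage at all.  A small
compilable sketch of that trade-off, using illustrative names rather than
the kernel's:

#define MAX_NODES 64

struct node_list;

/*
 * With a flexible array member, a static definition reserves no
 * storage for the trailing array:
 */
struct cache_fam {
	unsigned int buffer_size;
	struct node_list *nodelists[];	/* zero slots when static */
};
static struct cache_fam boot_fam;

/*
 * Keeping the full-size array instead lets the boot-time instance be a
 * plain static object with worst-case storage, while heap allocations
 * are trimmed to nr_node_ids entries as in the patch.
 */
struct cache_full {
	unsigned int buffer_size;
	struct node_list *nodelists[MAX_NODES];
};
static struct cache_full boot_full;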
@@ -678,9 +688,6 @@ static struct kmem_cache cache_cache = {
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
 	.name = "kmem_cache",
-#if DEBUG
-	.obj_size = sizeof(struct kmem_cache),
-#endif
 };
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
@@ -1440,6 +1447,15 @@ void __init kmem_cache_init(void)
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
+	/*
+	 * struct kmem_cache size depends on nr_node_ids, which
+	 * can be less than MAX_NUMNODES.
+	 */
+	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
+				 nr_node_ids * sizeof(struct kmem_list3 *);
+#if DEBUG
+	cache_cache.obj_size = cache_cache.buffer_size;
+#endif
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
 	cache_cache.reciprocal_buffer_size =