
Commit 3b89d7d8 authored by David Rientjes, committed by Pekka Enberg

slub: move min_partial to struct kmem_cache



Although it allows for better cacheline use, it is unnecessary to save a
copy of the cache's min_partial value in each kmem_cache_node.
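The heuristic being relocated derives each cache's partial-list floor from its object size: ilog2(s->size), clamped to [MIN_PARTIAL, MAX_PARTIAL]. The standalone user-space sketch below (not kernel code; the constant values 5 and 10 and the open-coded ilog2 are assumptions for illustration) mirrors that clamping. With this patch the value is computed once per cache in kmem_cache_open() rather than once per node in init_kmem_cache_node().

/*
 * Standalone sketch of the clamping done by calculate_min_partial() in the
 * patch below: min_partial = ilog2(object size), bounded to
 * [MIN_PARTIAL, MAX_PARTIAL].  The constants 5 and 10 are assumed here.
 */
#include <stdio.h>

#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

static unsigned long ilog2_ul(unsigned long x)	/* open-coded ilog2() */
{
	unsigned long r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned long calc_min_partial(unsigned long size)
{
	unsigned long min = ilog2_ul(size);

	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	return min;
}

int main(void)
{
	/* Larger objects keep more slabs on the partial list, up to the cap. */
	printf("%lu %lu %lu\n",
	       calc_min_partial(32),	/* ->  5 */
	       calc_min_partial(512),	/* ->  9 */
	       calc_min_partial(8192));	/* -> 10 (capped) */
	return 0;
}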

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent b578f3fc
include/linux/slub_def.h +1 −1
@@ -46,7 +46,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +88,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
mm/slub.c +16 −13
@@ -1335,7 +1335,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1387,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1928,17 +1928,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2170,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void calculate_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2317,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	calculate_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;