
Commit 97654dfa authored by Joonsoo Kim, committed by Linus Torvalds

slab: defer slab_destroy in free_block()



In free_block(), if freeing an object leaves its slab completely free and
the number of free objects exceeds free_limit, we destroy that newly freed
slab while still holding the kmem_cache node lock.  Holding the lock for
the destruction is unnecessary and, in general, holding a lock for as
short a time as possible is a good thing.  I have not measured the
performance effect of this, but we are better off not holding the lock
longer than we need to.

Commented by Christoph:
  This is also good because kmem_cache_free is no longer called while
  holding the node lock. So we avoid one case of recursion.
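
For illustration only (not part of the patch): below is a minimal userspace
sketch of the same deferral pattern, using a plain singly linked list and a
pthread mutex in place of the kernel's list_head and the kmem_cache_node
list_lock.  All names in the sketch are made up for the example.

/*
 * Illustrative userspace analogue of the pattern; not kernel code.
 * Free "slabs" are collected onto a local list while the lock is held
 * and destroyed only after the lock has been dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_slab {			/* stands in for a completely free slab */
	struct fake_slab *next;
	int id;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_slab *slabs_free;	/* protected by node_lock */

/* Detach all free slabs onto a caller-provided list; call with node_lock held. */
static void detach_free_slabs(struct fake_slab **list)
{
	*list = slabs_free;
	slabs_free = NULL;
}

/* Destroy the detached slabs; runs without node_lock, like slabs_destroy(). */
static void destroy_detached(struct fake_slab *list)
{
	while (list) {
		struct fake_slab *next = list->next;
		printf("destroying slab %d outside the lock\n", list->id);
		free(list);
		list = next;
	}
}

int main(void)
{
	/* Populate a few free slabs under the lock. */
	for (int i = 0; i < 3; i++) {
		struct fake_slab *s = malloc(sizeof(*s));
		if (!s)
			return 1;
		s->id = i;
		pthread_mutex_lock(&node_lock);
		s->next = slabs_free;
		slabs_free = s;
		pthread_mutex_unlock(&node_lock);
	}

	struct fake_slab *doomed;
	pthread_mutex_lock(&node_lock);
	detach_free_slabs(&doomed);	/* cheap: pointer moves only */
	pthread_mutex_unlock(&node_lock);
	destroy_detached(doomed);	/* expensive work, lock not held */
	return 0;
}

The point is the same as in the patch: only cheap list manipulation happens
under the lock, while the expensive destruction runs after the unlock, which
also means no free routine is re-entered while the lock is held.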

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 25c063fb
mm/slab.c: +41 −19
@@ -242,7 +242,8 @@ static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_cache_node *n, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
-			int node);
+			int node, struct list_head *list);
+static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
@@ -1030,6 +1031,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
 	struct kmem_cache_node *n = get_node(cachep, node);
+	LIST_HEAD(list);
 
 	if (ac->avail) {
 		spin_lock(&n->list_lock);
@@ -1041,9 +1043,10 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		if (n->shared)
 			transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node);
+		free_block(cachep, ac->entry, ac->avail, node, &list);
 		ac->avail = 0;
 		spin_unlock(&n->list_lock);
+		slabs_destroy(cachep, &list);
 	}
 }
 
@@ -1087,6 +1090,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	struct kmem_cache_node *n;
 	struct array_cache *alien = NULL;
 	int node;
+	LIST_HEAD(list);
 
 	node = numa_mem_id();
 
@@ -1111,8 +1115,9 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid);
+		free_block(cachep, &objp, 1, nodeid, &list);
 		spin_unlock(&n->list_lock);
+		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
@@ -1182,6 +1187,7 @@ static void cpuup_canceled(long cpu)
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
+		LIST_HEAD(list);
 
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
@@ -1196,7 +1202,7 @@ static void cpuup_canceled(long cpu)
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
 		if (nc)
-			free_block(cachep, nc->entry, nc->avail, node);
+			free_block(cachep, nc->entry, nc->avail, node, &list);
 
 		if (!cpumask_empty(mask)) {
 			spin_unlock_irq(&n->list_lock);
@@ -1206,7 +1212,7 @@ static void cpuup_canceled(long cpu)
 		shared = n->shared;
 		if (shared) {
 			free_block(cachep, shared->entry,
-				   shared->avail, node);
+				   shared->avail, node, &list);
 			n->shared = NULL;
 		}
 
@@ -1221,6 +1227,7 @@ static void cpuup_canceled(long cpu)
 			free_alien_cache(alien);
 		}
 free_array_cache:
+		slabs_destroy(cachep, &list);
 		kfree(nc);
 	}
 	/*
@@ -2056,6 +2063,16 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
 		kmem_cache_free(cachep->freelist_cache, freelist);
 }
 
+static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
+{
+	struct page *page, *n;
+
+	list_for_each_entry_safe(page, n, list, lru) {
+		list_del(&page->lru);
+		slab_destroy(cachep, page);
+	}
+}
+
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2459,13 +2476,15 @@ static void do_drain(void *arg)
 	struct array_cache *ac;
 	int node = numa_mem_id();
 	struct kmem_cache_node *n;
+	LIST_HEAD(list);
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
 	spin_lock(&n->list_lock);
-	free_block(cachep, ac->entry, ac->avail, node);
+	free_block(cachep, ac->entry, ac->avail, node, &list);
 	spin_unlock(&n->list_lock);
+	slabs_destroy(cachep, &list);
 	ac->avail = 0;
 }
 
@@ -3393,9 +3412,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 
 /*
  * Caller needs to acquire correct kmem_cache_node's list_lock
+ * @list: List of detached free slabs should be freed by caller
  */
-static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
-		       int node)
+static void free_block(struct kmem_cache *cachep, void **objpp,
+			int nr_objects, int node, struct list_head *list)
 {
 	int i;
 	struct kmem_cache_node *n = get_node(cachep, node);
@@ -3418,13 +3438,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (page->active == 0) {
 			if (n->free_objects > n->free_limit) {
 				n->free_objects -= cachep->num;
-				/* No need to drop any previously held
-				 * lock here, even if we have a off-slab slab
-				 * descriptor it is guaranteed to come from
-				 * a different cache, refer to comments before
-				 * alloc_slabmgmt.
-				 */
-				slab_destroy(cachep, page);
+				list_add_tail(&page->lru, list);
 			} else {
 				list_add(&page->lru, &n->slabs_free);
 			}
@@ -3443,6 +3457,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 	int batchcount;
 	struct kmem_cache_node *n;
 	int node = numa_mem_id();
+	LIST_HEAD(list);
 
 	batchcount = ac->batchcount;
 #if DEBUG
@@ -3464,7 +3479,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		}
 	}
 
-	free_block(cachep, ac->entry, batchcount, node);
+	free_block(cachep, ac->entry, batchcount, node, &list);
 free_done:
 #if STATS
 	{
@@ -3485,6 +3500,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 	}
 #endif
 	spin_unlock(&n->list_lock);
+	slabs_destroy(cachep, &list);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
 }
@@ -3765,12 +3781,13 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 		n = get_node(cachep, node);
 		if (n) {
 			struct array_cache *shared = n->shared;
+			LIST_HEAD(list);
 
 			spin_lock_irq(&n->list_lock);
 
 			if (shared)
 				free_block(cachep, shared->entry,
-						shared->avail, node);
+						shared->avail, node, &list);
 
 			n->shared = new_shared;
 			if (!n->alien) {
@@ -3780,6 +3797,7 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 			n->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 			spin_unlock_irq(&n->list_lock);
+			slabs_destroy(cachep, &list);
 			kfree(shared);
 			free_alien_cache(new_alien);
 			continue;
@@ -3869,6 +3887,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	cachep->shared = shared;
 
 	for_each_online_cpu(i) {
+		LIST_HEAD(list);
 		struct array_cache *ccold = new->new[i];
 		int node;
 		struct kmem_cache_node *n;
@@ -3879,8 +3898,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		node = cpu_to_mem(i);
 		n = get_node(cachep, node);
 		spin_lock_irq(&n->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail, node);
+		free_block(cachep, ccold->entry, ccold->avail, node, &list);
 		spin_unlock_irq(&n->list_lock);
+		slabs_destroy(cachep, &list);
 		kfree(ccold);
 	}
 	kfree(new);
@@ -3988,6 +4008,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
 			 struct array_cache *ac, int force, int node)
 {
+	LIST_HEAD(list);
 	int tofree;
 
 	if (!ac || !ac->avail)
@@ -4000,12 +4021,13 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
 			if (tofree > ac->avail)
 				tofree = (ac->avail + 1) / 2;
-			free_block(cachep, ac->entry, tofree, node);
+			free_block(cachep, ac->entry, tofree, node, &list);
 			ac->avail -= tofree;
 			memmove(ac->entry, &(ac->entry[tofree]),
 				sizeof(void *) * ac->avail);
 		}
 		spin_unlock_irq(&n->list_lock);
+		slabs_destroy(cachep, &list);
 	}
 }