
Commit fbf1e473 authored by Akinobu Mita, committed by Linus Torvalds

cpu hotplug: slab: cleanup cpuup_callback()

cpuup_callback() is too long.  This patch factors out the CPU_UP_CANCELED
and CPU_UP_PREPARE handling from cpuup_callback() into two new helpers,
cpuup_canceled() and cpuup_prepare().

Cc: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6c72ffaa
+160 −143
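
The shape of the change, as a minimal self-contained C sketch (an illustration of the pattern, not the kernel code: NOTIFY_OK/NOTIFY_BAD carry the kernel's values, while the enum, the printf placeholders, and main() are local stand-ins so the example compiles in userspace): a monolithic switch-based callback becomes a thin dispatcher over per-event helpers, with errno-style results translated to a notifier status in one place.

#include <stdio.h>

#define NOTIFY_OK	0x0001	/* kernel value: event handled */
#define NOTIFY_BAD	0x8002	/* kernel value: veto the event */

enum cpu_action { CPU_UP_PREPARE, CPU_UP_CANCELED, CPU_ONLINE };

/* CPU_UP_PREPARE work can fail (it allocates), so it returns 0 or -errno. */
static int cpuup_prepare(long cpu)
{
	printf("prepare: set up per-cpu caches for cpu %ld\n", cpu);
	return 0;	/* would be a negative errno, e.g. -ENOMEM, on failure */
}

/* CPU_UP_CANCELED work only tears state down and cannot fail. */
static void cpuup_canceled(long cpu)
{
	printf("canceled: free per-cpu caches for cpu %ld\n", cpu);
}

/*
 * The callback is reduced to event dispatch plus one translation of err
 * into a notifier status, mirroring the patched cpuup_callback() below.
 */
static int cpuup_callback(enum cpu_action action, void *hcpu)
{
	long cpu = (long)hcpu;	/* hotplug passes the cpu id as a pointer */
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		err = cpuup_prepare(cpu);
		break;
	case CPU_UP_CANCELED:
		cpuup_canceled(cpu);
		break;
	default:
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

int main(void)
{
	void *hcpu = (void *)1L;

	if (cpuup_callback(CPU_UP_PREPARE, hcpu) != NOTIFY_OK)
		cpuup_callback(CPU_UP_CANCELED, hcpu);
	return 0;
}

Compared with the pre-patch code, the error path no longer needs a bad: label jumping out of the switch; the single return err ? NOTIFY_BAD : NOTIFY_OK; at the bottom does the translation, which is exactly the final hunk's change.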
@@ -1156,21 +1156,79 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 }
 #endif
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
+static void __cpuinit cpuup_canceled(long cpu)
+{
+	struct kmem_cache *cachep;
+	struct kmem_list3 *l3 = NULL;
+	int node = cpu_to_node(cpu);
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct array_cache *nc;
+		struct array_cache *shared;
+		struct array_cache **alien;
+		cpumask_t mask;
+
+		mask = node_to_cpumask(node);
+		/* cpu is dead; no one can alloc from it. */
+		nc = cachep->array[cpu];
+		cachep->array[cpu] = NULL;
+		l3 = cachep->nodelists[node];
+
+		if (!l3)
+			goto free_array_cache;
+
+		spin_lock_irq(&l3->list_lock);
+
+		/* Free limit for this kmem_list3 */
+		l3->free_limit -= cachep->batchcount;
+		if (nc)
+			free_block(cachep, nc->entry, nc->avail, node);
+
+		if (!cpus_empty(mask)) {
+			spin_unlock_irq(&l3->list_lock);
+			goto free_array_cache;
+		}
+
+		shared = l3->shared;
+		if (shared) {
+			free_block(cachep, shared->entry,
+				   shared->avail, node);
+			l3->shared = NULL;
+		}
+
+		alien = l3->alien;
+		l3->alien = NULL;
+
+		spin_unlock_irq(&l3->list_lock);
+
+		kfree(shared);
+		if (alien) {
+			drain_alien_cache(cachep, alien);
+			free_alien_cache(alien);
+		}
+free_array_cache:
+		kfree(nc);
+	}
+	/*
+	 * In the previous loop, all the objects were freed to
+	 * the respective cache's slabs,  now we can go ahead and
+	 * shrink each nodelist to its limit.
+	 */
+	list_for_each_entry(cachep, &cache_chain, next) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+		drain_freelist(cachep, l3, l3->free_objects);
+	}
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
 {
-	long cpu = (long)hcpu;
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	const int memsize = sizeof(struct kmem_list3);
 
-	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&cache_chain_mutex);
-		break;
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
 	/*
 	 * We need to do this right in the beginning since
 	 * alloc_arraycache's are going to use this list.
@@ -1255,6 +1313,24 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	return 0;
+bad:
+	return -ENOMEM;
+}
+
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+	int err = 0;
+
+	switch (action) {
+	case CPU_LOCK_ACQUIRE:
+		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpuup_prepare(cpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1291,72 +1367,13 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		list_for_each_entry(cachep, &cache_chain, next) {
-			struct array_cache *nc;
-			struct array_cache *shared;
-			struct array_cache **alien;
-			cpumask_t mask;
-
-			mask = node_to_cpumask(node);
-			/* cpu is dead; no one can alloc from it. */
-			nc = cachep->array[cpu];
-			cachep->array[cpu] = NULL;
-			l3 = cachep->nodelists[node];
-
-			if (!l3)
-				goto free_array_cache;
-
-			spin_lock_irq(&l3->list_lock);
-
-			/* Free limit for this kmem_list3 */
-			l3->free_limit -= cachep->batchcount;
-			if (nc)
-				free_block(cachep, nc->entry, nc->avail, node);
-
-			if (!cpus_empty(mask)) {
-				spin_unlock_irq(&l3->list_lock);
-				goto free_array_cache;
-			}
-
-			shared = l3->shared;
-			if (shared) {
-				free_block(cachep, shared->entry,
-					   shared->avail, node);
-				l3->shared = NULL;
-			}
-
-			alien = l3->alien;
-			l3->alien = NULL;
-
-			spin_unlock_irq(&l3->list_lock);
-
-			kfree(shared);
-			if (alien) {
-				drain_alien_cache(cachep, alien);
-				free_alien_cache(alien);
-			}
-free_array_cache:
-			kfree(nc);
-		}
-		/*
-		 * In the previous loop, all the objects were freed to
-		 * the respective cache's slabs,  now we can go ahead and
-		 * shrink each nodelist to its limit.
-		 */
-		list_for_each_entry(cachep, &cache_chain, next) {
-			l3 = cachep->nodelists[node];
-			if (!l3)
-				continue;
-			drain_freelist(cachep, l3, l3->free_objects);
-		}
+		cpuup_canceled(cpu);
 		break;
 	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
-	return NOTIFY_OK;
-bad:
-	return NOTIFY_BAD;
+	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {
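
For context beyond this hunk: cpucache_notifier, whose definition begins on the final context line above, is the notifier_block that delivers hotplug events to cpuup_callback(). In kernels of this era, slab.c registers it with register_cpu_notifier(&cpucache_notifier) from kmem_cache_init(), so the NOTIFY_BAD produced when cpuup_prepare() fails is what the CPU hotplug core actually sees and acts on.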