
Commit ded0ecf6 authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: factor out kmem_cache_node initialization code



It can be reused in other places, so factor it out.  A following patch will
use it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a5aa63a5
mm/slab.c: +45 −29
@@ -848,50 +848,66 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
-/*
- * Allocates and initializes node for a node on each slab cache, used for
- * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
- * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing node are not replaced if
- * already in use.
- *
- * Must hold slab_mutex.
- */
-static int init_cache_node_node(int node)
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 {
-        struct kmem_cache *cachep;
         struct kmem_cache_node *n;
-        const size_t memsize = sizeof(struct kmem_cache_node);
 
-        list_for_each_entry(cachep, &slab_caches, list) {
-                /*
-                 * Set up the kmem_cache_node for cpu before we can
-                 * begin anything. Make sure some other cpu on this
-                 * node has not already allocated this
-                 */
-                n = get_node(cachep, node);
-                if (!n) {
-                        n = kmalloc_node(memsize, GFP_KERNEL, node);
-                        if (!n)
-                                return -ENOMEM;
-                        kmem_cache_node_init(n);
-                        n->next_reap = jiffies + REAPTIMEOUT_NODE +
-                            ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-                        /*
-                         * The kmem_cache_nodes don't come and go as CPUs
-                         * come and go.  slab_mutex is sufficient
-                         * protection here.
-                         */
-                        cachep->node[node] = n;
-                }
-
-                spin_lock_irq(&n->list_lock);
-                n->free_limit =
-                        (1 + nr_cpus_node(node)) *
-                        cachep->batchcount + cachep->num;
-                spin_unlock_irq(&n->list_lock);
+        /*
+         * Set up the kmem_cache_node for cpu before we can
+         * begin anything. Make sure some other cpu on this
+         * node has not already allocated this
+         */
+        n = get_node(cachep, node);
+        if (n) {
+                spin_lock_irq(&n->list_lock);
+                n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+                                cachep->num;
+                spin_unlock_irq(&n->list_lock);
+
+                return 0;
+        }
+
+        n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+        if (!n)
+                return -ENOMEM;
+
+        kmem_cache_node_init(n);
+        n->next_reap = jiffies + REAPTIMEOUT_NODE +
+                    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+        n->free_limit =
+                (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+        /*
+         * The kmem_cache_nodes don't come and go as CPUs
+         * come and go.  slab_mutex is sufficient
+         * protection here.
+         */
+        cachep->node[node] = n;
+
+        return 0;
+}
+
+/*
+ * Allocates and initializes node for a node on each slab cache, used for
+ * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
+ * will be allocated off-node since memory is not yet online for the new node.
+ * When hotplugging memory or a cpu, existing node are not replaced if
+ * already in use.
+ *
+ * Must hold slab_mutex.
+ */
+static int init_cache_node_node(int node)
+{
+        int ret;
+        struct kmem_cache *cachep;
+
+        list_for_each_entry(cachep, &slab_caches, list) {
+                ret = init_cache_node(cachep, node, GFP_KERNEL);
+                if (ret)
+                        return ret;
         }
 
         return 0;
 }
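
For readers outside the kernel tree, the refactoring above can be reduced to a small standalone sketch. The program below is not kernel code: struct cache, struct cache_node, the ncpus parameter, and the simplified init_cache_node() signature are stand-ins invented for illustration, and the second caller in main() only stands for whatever reuse the follow-up patch introduces. It compiles as an ordinary C program and shows why pulling the per-node setup out of the loop in init_cache_node_node() makes that step reusable by other callers.

/* Simplified, standalone illustration of the refactoring pattern.
 * Stand-in types only; the real kernel code also sets next_reap and
 * takes n->list_lock, which this sketch omits. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 4

struct cache_node {                     /* stand-in for struct kmem_cache_node */
        unsigned long free_limit;
};

struct cache {                          /* stand-in for struct kmem_cache */
        const char *name;
        unsigned int batchcount;
        unsigned int num;
        struct cache_node *node[MAX_NUMNODES];
};

/* Factored-out helper: initialize (or re-tune) one cache's data for one
 * NUMA node. Mirrors the shape of init_cache_node() in the diff above. */
static int init_cache_node(struct cache *cachep, int node, int ncpus)
{
        struct cache_node *n = cachep->node[node];

        if (n) {
                /* Node data already exists: just update the free limit. */
                n->free_limit = (1 + ncpus) * cachep->batchcount + cachep->num;
                return 0;
        }

        n = calloc(1, sizeof(*n));
        if (!n)
                return -1;

        n->free_limit = (1 + ncpus) * cachep->batchcount + cachep->num;
        cachep->node[node] = n;
        return 0;
}

/* Original caller: walk every cache and set up one node for each. */
static int init_cache_node_node(struct cache **caches, int nr, int node)
{
        for (int i = 0; i < nr; i++) {
                /* ncpus=2 stands in for nr_cpus_node(node). */
                int ret = init_cache_node(caches[i], node, 2);
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        struct cache c = { .name = "demo", .batchcount = 16, .num = 32 };
        struct cache *caches[] = { &c };

        /* Hotplug-style path: bring node 1 online for every cache. */
        if (init_cache_node_node(caches, 1, 1) == 0)
                printf("%s: node 1 free_limit=%lu\n", c.name, c.node[1]->free_limit);

        /* Because the per-node step is factored out, another caller
         * (whatever the "following patch" adds in the kernel) can reuse
         * init_cache_node() directly for a single cache. */
        init_cache_node(&c, 0, 4);
        printf("%s: node 0 free_limit=%lu\n", c.name, c.node[0]->free_limit);

        free(c.node[0]);
        free(c.node[1]);
        return 0;
}

The free_limit arithmetic matches the kernel formula, (1 + nr_cpus_node(node)) * batchcount + num; everything else is deliberately stripped down so the factoring itself is easy to see.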