Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e0528da authored by Christoph Lameter, committed by Pekka Enberg
Browse files

slub: Push irq disable into allocate_slab()



Do the irq handling in allocate_slab() instead of __slab_alloc().

__slab_alloc() is already cluttered and allocate_slab() is already
fiddling around with gfp flags.

v6->v7:
	Only increment ORDER_FALLBACK if we get a page during fallback

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent e4a46182
Loading
Loading
Loading
Loading
+13 −10
Original line number Diff line number Diff line
@@ -1187,6 +1187,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;

	flags &= gfp_allowed_mask;

	if (flags & __GFP_WAIT)
		local_irq_enable();

	flags |= s->allocflags;

	/*
@@ -1203,12 +1208,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(flags, node, oo);
		if (!page)
			return NULL;

		if (page)
			stat(s, ORDER_FALLBACK);
	}

	if (flags & __GFP_WAIT)
		local_irq_disable();

	if (!page)
		return NULL;

	if (kmemcheck_enabled
		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
		int pages = 1 << oo_order(oo);
@@ -1849,15 +1859,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		goto load_freelist;
	}

	gfpflags &= gfp_allowed_mask;
	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

	page = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

	if (page) {
		c = __this_cpu_ptr(s->cpu_slab);
		stat(s, ALLOC_SLAB);