
Commit 84e554e6 authored by Christoph Lameter, committed by Pekka Enberg

SLUB: Make slub statistics use this_cpu_inc



this_cpu_inc() translates into a single instruction on x86 and does not
need any extra register, so use it in stat(). We also want to avoid the
calculation of the per-cpu kmem_cache_cpu structure pointer at each call
site, so pass stat() a kmem_cache pointer instead of a kmem_cache_cpu
pointer.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent ff12059e
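
The patch is mechanical: every stat(c, ...) call becomes stat(s, ...) and the per-cpu pointer lookup moves inside the helper. As a condensed sketch, here are the two variants of the stat() helper taken from the diff below, shown together for comparison only (they would not coexist in one translation unit):

/* Before: each call site first resolves the per-cpu structure,
 * e.g. c = this_cpu_ptr(s->cpu_slab), and passes it in. */
static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}

/* After: the per-cpu access is folded into the increment itself;
 * on x86 __this_cpu_inc() compiles to a single segment-relative incl,
 * with no intermediate kmem_cache_cpu pointer held in a register. */
static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

Note that the patch uses the non-checking __this_cpu_inc()/__this_cpu_ptr() variants rather than the preemption-checked forms, presumably because these counters are only statistical.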
+20 −23
@@ -217,10 +217,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 
 #endif
 
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-	c->stat[si]++;
+	__this_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
 
@@ -1108,7 +1108,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		if (!page)
 			return NULL;
 
-		stat(this_cpu_ptr(s->cpu_slab), ORDER_FALLBACK);
+		stat(s, ORDER_FALLBACK);
 	}
 
 	if (kmemcheck_enabled
@@ -1406,23 +1406,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
 
 	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
 			add_partial(n, page, tail);
-			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
-			stat(c, DEACTIVATE_FULL);
+			stat(s, DEACTIVATE_FULL);
 			if (SLABDEBUG && PageSlubDebug(page) &&
 						(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
 	} else {
-		stat(c, DEACTIVATE_EMPTY);
+		stat(s, DEACTIVATE_EMPTY);
 		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
@@ -1438,7 +1437,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
-			stat(__this_cpu_ptr(s->cpu_slab), FREE_SLAB);
+			stat(s, FREE_SLAB);
 			discard_slab(s, page);
 		}
 	}
@@ -1453,7 +1452,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	int tail = 1;
 
 	if (page->freelist)
-		stat(c, DEACTIVATE_REMOTE_FREES);
+		stat(s, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1479,7 +1478,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	stat(c, CPUSLAB_FLUSH);
+	stat(s, CPUSLAB_FLUSH);
 	slab_lock(c->page);
 	deactivate_slab(s, c);
 }
@@ -1619,7 +1618,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
-	stat(c, ALLOC_REFILL);
+	stat(s, ALLOC_REFILL);
 
 load_freelist:
 	object = c->page->freelist;
@@ -1634,7 +1633,7 @@ load_freelist:
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
-	stat(c, ALLOC_SLOWPATH);
+	stat(s, ALLOC_SLOWPATH);
 	return object;
 
 another_slab:
@@ -1644,7 +1643,7 @@ new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
 		c->page = new;
-		stat(c, ALLOC_FROM_PARTIAL);
+		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
 
@@ -1658,7 +1657,7 @@ new_slab:
 
 	if (new) {
 		c = __this_cpu_ptr(s->cpu_slab);
-		stat(c, ALLOC_SLAB);
+		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
@@ -1713,7 +1712,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	else {
 		c->freelist = get_freepointer(s, object);
-		stat(c, ALLOC_FASTPATH);
+		stat(s, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
@@ -1780,10 +1779,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
 
-	c = __this_cpu_ptr(s->cpu_slab);
-	stat(c, FREE_SLOWPATH);
+	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
@@ -1796,7 +1793,7 @@ checks_ok:
 	page->inuse--;
 
 	if (unlikely(PageSlubFrozen(page))) {
-		stat(c, FREE_FROZEN);
+		stat(s, FREE_FROZEN);
 		goto out_unlock;
 	}
 
@@ -1809,7 +1806,7 @@ checks_ok:
 	 */
 	if (unlikely(!prior)) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
-		stat(c, FREE_ADD_PARTIAL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 
 out_unlock:
@@ -1822,10 +1819,10 @@ slab_empty:
 		 * Slab still on the partial list.
 		 */
 		remove_partial(s, page);
-		stat(c, FREE_REMOVE_PARTIAL);
+		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-	stat(c, FREE_SLAB);
+	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 	return;
 
@@ -1863,7 +1860,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
-		stat(c, FREE_FASTPATH);
+		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);