
Commit 633b0764 authored by Joonsoo Kim, committed by Pekka Enberg

slub: correctly calculate the number of acquired objects in get_partial_node()



There is a subtle bug when calculating the number of acquired objects.

Currently, we calculate "available = page->objects - page->inuse"
after acquire_slab() is called in get_partial_node().

In acquire_slab() with mode = 1, we always set new.inuse = page->objects.
So,

	acquire_slab(s, n, page, object == NULL);

	if (!object) {
		c->page = page;
		stat(s, ALLOC_FROM_PARTIAL);
		object = t;
		available = page->objects - page->inuse;

		!!! available is always 0 !!!
	...

Therefore, "available > s->cpu_partial / 2" is always false, and
we always proceed to the next iteration.
This patch corrects the problem.
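
To make the ordering concrete, here is a minimal user-space sketch
(not kernel code: struct fake_page and acquire_all() are hypothetical
stand-ins for struct page and acquire_slab() with mode = 1):

	#include <stdio.h>

	struct fake_page { int objects; int inuse; };

	/* Models acquire_slab() with mode = 1: the whole freelist is
	 * taken, so inuse is set to objects before the caller reads it. */
	static void acquire_all(struct fake_page *page, int *objects)
	{
		*objects = page->objects - page->inuse;	/* count first (the fix) */
		page->inuse = page->objects;		/* then mark all in use */
	}

	int main(void)
	{
		struct fake_page page = { .objects = 16, .inuse = 5 };
		int objects;

		acquire_all(&page, &objects);
		printf("old: %d\n", page.objects - page.inuse);	/* always 0 */
		printf("new: %d\n", objects);			/* 11 */
		return 0;
	}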

After that, we don't need the return value of put_cpu_partial(),
so remove it.

Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 7d557b3c
mm/slub.c +9 −8
@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
-		int mode)
+		int mode, int *objects)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
+	*objects = new.objects - new.inuse;
 	if (mode) {
 		new.inuse = page->objects;
 		new.freelist = NULL;
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }
 
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
+	int available = 0;
+	int objects;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-		int available;
 
 		if (!pfmemalloc_match(page, flags))
 			continue;
 
-		t = acquire_slab(s, n, page, object == NULL);
+		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
 			break;
 
+		available += objects;
 		if (!object) {
 			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available =  page->objects - page->inuse;
 		} else {
-			available = put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	return pobjects;
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
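
For illustration, a minimal user-space sketch of the corrected
accumulation, with made-up numbers (s->cpu_partial = 30, 12 free
objects per partial slab); because available now accumulates across
iterations, the scan can stop early instead of walking the whole list:

	#include <stdio.h>

	int main(void)
	{
		const int cpu_partial = 30;	/* stands in for s->cpu_partial */
		int available = 0, scanned = 0;

		for (int slab = 0; slab < 100; slab++) {	/* pretend partial list */
			int objects = 12;	/* free objects reported by acquire_slab() */

			available += objects;	/* accumulate, as the patch does */
			scanned++;
			if (available > cpu_partial / 2)
				break;
		}
		printf("scanned %d slabs, available %d\n", scanned, available);
		return 0;
	}

This prints "scanned 2 slabs, available 24".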