
Commit af4f8ba3 authored by Linus Torvalds
Pull slab updates from Pekka Enberg:
 "Mainly a bunch of SLUB fixes from Joonsoo Kim"

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: use __SetPageSlab function to set PG_slab flag
  slub: fix a memory leak in get_partial_node()
  slub: remove unused argument of init_kmem_cache_node()
  slub: fix a possible memory leak
  Documentations: Fix slabinfo.c directory in vm/slub.txt
  slub: fix incorrect return type of get_any_partial()
parents efff0471 c03f94cc
Documentation/vm/slub.txt  +1 −1
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
slabs that have data in them. See "slabinfo -h" for more options when
running the command. slabinfo can be compiled with

-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c

Some of the modes of operation of slabinfo require that slub debugging
be enabled on the command line. F.e. no tracking information will be
mm/slub.c  +13 −10
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)

	inc_slabs_node(s, page_to_nid(page), page->objects);
	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);

	start = page_address(page);

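The first mm/slub.c hunk replaces the open-coded bit shift on page->flags with the kernel's generated page-flag helper, __SetPageSlab(), which wraps the same non-atomic bit set but keeps the flag name and the bit operation in one place. As a rough, stand-alone illustration of that macro-generated-setter pattern (plain userspace C, not kernel code; the demo_* and __SetDemoPage* names are invented for the example):

/* Illustration only: a macro emits a named, non-atomic flag setter,
 * replacing open-coded "flags |= 1 << BIT" at every call site.
 * Build with: gcc demo_pageflag.c -o demo_pageflag
 */
#include <stdio.h>

enum demo_pageflags { DEMO_PG_locked, DEMO_PG_slab, DEMO_PG_dirty };

struct demo_page { unsigned long flags; };

#define DEMO_SETPAGEFLAG(uname, lname)					\
static void __SetDemoPage##uname(struct demo_page *page)		\
{									\
	page->flags |= 1UL << DEMO_PG_##lname;				\
}

DEMO_SETPAGEFLAG(Slab, slab)	/* generates __SetDemoPageSlab() */

int main(void)
{
	struct demo_page open_coded = { 0 }, via_helper = { 0 };

	open_coded.flags |= 1UL << DEMO_PG_slab;	/* old style */
	__SetDemoPageSlab(&via_helper);			/* new style */

	printf("open_coded=%#lx via_helper=%#lx\n",
	       open_coded.flags, via_helper.flags);
	return 0;
}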
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
		freelist = page->freelist;
		counters = page->counters;
		new.counters = counters;
-		if (mode)
+		if (mode) {
			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}

		VM_BUG_ON(new.frozen);
		new.frozen = 1;

	} while (!__cmpxchg_double_slab(s, page,
			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
			"lock and freeze"));

	remove_partial(n, page);
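The acquire_slab() hunk above is the core of the get_partial_node() leak fix: the new freelist value is now decided inside the cmpxchg loop (NULL only when the slab is frozen as the cpu slab, otherwise the existing list is kept), instead of always publishing NULL and patching page->freelist back afterwards. Below is a much-simplified, stand-alone C11 sketch of that snapshot-then-compare-and-swap shape; it operates on a single pointer, whereas the real code updates freelist and counters together via __cmpxchg_double_slab(), and the demo_* names and the freeze parameter are invented for the example.

/* Simplified snapshot-then-CAS sketch of the acquire pattern.
 * Build with: gcc -std=c11 demo_acquire.c -o demo_acquire
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_object { struct demo_object *next; };

struct demo_slab { _Atomic(struct demo_object *) freelist; };

/*
 * Take the slab's object list.  When freezing the slab for the local
 * cpu the published freelist becomes NULL; otherwise it is left intact
 * inside the same CAS, so a concurrent free cannot be overwritten.
 */
static struct demo_object *demo_acquire(struct demo_slab *slab, int freeze)
{
	struct demo_object *old, *new;

	do {
		old = atomic_load(&slab->freelist);	/* snapshot */
		new = freeze ? NULL : old;		/* decide before the CAS */
	} while (!atomic_compare_exchange_weak(&slab->freelist, &old, new));

	return old;
}

int main(void)
{
	struct demo_object objs[2] = { { &objs[1] }, { NULL } };
	struct demo_slab slab;

	atomic_init(&slab.freelist, &objs[0]);
	printf("acquired list head: %p\n", (void *)demo_acquire(&slab, 1));
	printf("slab freelist now:  %p\n", (void *)atomic_load(&slab.freelist));
	return 0;
}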
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
			object = t;
			available =  page->objects - page->inuse;
		} else {
-			page->freelist = t;
			available = put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
}

static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
{
	n->nr_partial = 0;
	spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
	init_tracking(kmem_cache_node, n);
#endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
	inc_slabs_node(kmem_cache_node, node, page->objects);

	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
		}

		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
	}
	return 1;
}
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
			ret = -ENOMEM;
			goto out;
		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
		s->node[nid] = n;
	}
out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			}
			return s;
		}
-		kfree(n);
		kfree(s);
	}
+	kfree(n);
err:
	up_write(&slub_lock);