
Commit ec3ab083 authored by Christoph Lameter, committed by Pekka Enberg

slub: Get rid of the node field



The node field is always page_to_nid(c->page). So it's rather easy to
replace. Note that there may be slightly more overhead in various hot paths
due to the need to shift the bits from page->flags. However, that is mostly
compensated for by the smaller footprint of the kmem_cache_cpu structure (this
patch reduces it to 3 words per cache), which allows better caching.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 188fd063
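
For context on the hot-path cost mentioned in the message: page_to_nid() recovers the NUMA node id from bits packed into page->flags. Below is a minimal sketch of that lookup, assuming the common configuration where the node id is stored directly in the flags word; NODES_PGSHIFT and NODES_MASK are the kernel's names for the field offset and width, and the real definition in include/linux/mm.h varies with the memory model:

static inline int page_to_nid_sketch(const struct page *page)
{
	/* Shift the node bits down from the top of page->flags and mask
	 * off the rest; this shift/mask pair is the extra work the hot
	 * paths now do instead of reading a cached c->node.
	 */
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}

This trades a per-allocation shift and mask for one less word in a structure that is touched on every allocation, which is the caching argument made above.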
include/linux/slub_def.h +0 −1

@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
-	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
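
For reference, kmem_cache_cpu as it stands after this hunk; the freelist member is not visible in the context above and is assumed from the surrounding source of that era. On a 64-bit kernel without CONFIG_SLUB_STATS, dropping int node removes four bytes plus alignment padding from every per-cpu copy:

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object (assumed) */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};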
mm/slub.c +16 −19

@@ -1561,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available =  page->objects - page->inuse;
@@ -2057,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2152,7 +2151,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		page->freelist = NULL;
 
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
@@ -2269,7 +2267,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (c->partial) {
 		c->page = c->partial;
 		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
@@ -2294,7 +2291,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	c->freelist = get_freepointer(s, freelist);
 	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
 	local_irq_restore(flags);
 	return freelist;
 }
@@ -4507,13 +4503,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;
 
-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
+			if (!page)
+				continue;
+
+			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
 				x = page->objects;
 			else if (flags & SO_OBJECTS)
@@ -4523,14 +4520,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 			total += x;
 			nodes[node] += x;
-			}
-			page = c->partial;
 
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
+
 			per_cpu[node]++;
 		}
 	}