Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 136333d1 authored by Shaohua Li, committed by Pekka Enberg
Browse files

slub: explicitly document position of inserting slab to partial list



Adding a slab to the head or tail of the partial list is performance
sensitive. So explicitly use DEACTIVATE_TO_TAIL/DEACTIVATE_TO_HEAD to
document the position, to avoid getting it wrong.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 130655ef
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	n->nr_partial++;
	if (tail)
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
@@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
	enum slab_modes l = M_NONE, m = M_NONE;
	void *freelist;
	void *nextfree;
	int tail = 0;
	int tail = DEACTIVATE_TO_HEAD;
	struct page new;
	struct page old;

	if (page->freelist) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = 1;
		tail = DEACTIVATE_TO_TAIL;
	}

	c->tid = next_tid(c->tid);
@@ -1893,7 +1893,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
		if (m == M_PARTIAL) {

			add_partial(n, page, tail);
			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
			stat(s, tail);

		} else if (m == M_FULL) {

@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		 */
		if (unlikely(!prior)) {
			remove_full(s, page);
			add_partial(n, page, 1);
			add_partial(n, page, DEACTIVATE_TO_TAIL);
			stat(s, FREE_ADD_PARTIAL);
		}
	}
@@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
	init_kmem_cache_node(n, kmem_cache_node);
	inc_slabs_node(kmem_cache_node, node, page->objects);

	add_partial(n, page, 0);
	add_partial(n, page, DEACTIVATE_TO_HEAD);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)