
Commit 5cc6eee8 authored by Christoph Lameter, committed by Pekka Enberg

slub: explicit list_lock taking



The allocator fastpath rework changes how the list_lock is used. Remove the
list_lock handling from the helper functions, where it was hidden from the
critical sections, and take the lock explicitly inside those critical sections.

This in turn simplifies the support functions (no __ variant needed anymore)
and simplifies the lock handling on bootstrap.

Inline add_partial since it becomes pretty simple.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent b789ef51
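
To illustrate the convention the patch adopts, here is a minimal standalone sketch (ordinary userspace C with pthreads; names such as node_add_partial() and put_on_partial_list() are made up for illustration and are not kernel code). The list helper no longer takes the lock itself and merely documents that list_lock must be held, while the caller wraps the whole critical section in one explicit lock/unlock pair:

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures (illustration only). */
struct pg { struct pg *next; };

struct node {
	pthread_mutex_t list_lock;	/* plays the role of n->list_lock */
	unsigned long nr_partial;
	struct pg *partial;		/* head of a toy "partial list" */
};

/*
 * Helper in the style of the patched add_partial()/remove_partial():
 * it only manipulates the list and assumes list_lock is already held.
 */
static void node_add_partial(struct node *n, struct pg *page)
{
	page->next = n->partial;
	n->partial = page;
	n->nr_partial++;
}

/*
 * Caller in the style of unfreeze_slab()/__slab_free() after the patch:
 * the lock is taken explicitly around the whole critical section, so the
 * locking is visible at the call site instead of being hidden in the helper.
 */
static void put_on_partial_list(struct node *n, struct pg *page)
{
	pthread_mutex_lock(&n->list_lock);
	node_add_partial(n, page);
	/* other list updates could share the same critical section here */
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node n = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
	struct pg page = { 0 };

	put_on_partial_list(&n, &page);
	printf("nr_partial = %lu\n", n.nr_partial);
	return 0;
}

This is also why the old __remove_partial()/remove_partial() pair collapses into a single remove_partial() in the diff below: once every caller holds list_lock itself, the locking wrapper has nothing left to do.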
mm/slub.c +49 −40
@@ -916,26 +916,27 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+	struct kmem_cache_node *n, struct page *page)
 {
-	spin_lock(&n->list_lock);
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
 	list_add(&page->lru, &n->full);
-	spin_unlock(&n->list_lock);
 }
 
+/*
+ * list_lock must be held.
+ */
 static void remove_full(struct kmem_cache *s, struct page *page)
 {
-	struct kmem_cache_node *n;
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
 	list_del(&page->lru);
-	spin_unlock(&n->list_lock);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1060,8 +1061,13 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!page->frozen && !page->freelist)
+	if (!page->frozen && !page->freelist) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		remove_full(s, page);
+		spin_unlock(&n->list_lock);
+	}
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
@@ -1170,7 +1176,8 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1420,38 +1427,33 @@ static __always_inline int slab_trylock(struct page *page)
 }
 
 /*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
 }
 
-static inline void __remove_partial(struct kmem_cache_node *n,
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
-	__remove_partial(n, page);
-	spin_unlock(&n->list_lock);
-}
-
 /*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
  *
  * Must hold list_lock.
  */
@@ -1459,7 +1461,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 							struct page *page)
 {
 	if (slab_trylock(page)) {
-		__remove_partial(n, page);
+		remove_partial(n, page);
 		return 1;
 	}
 	return 0;
@@ -1576,12 +1578,17 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	if (page->inuse) {
 
 		if (page->freelist) {
+			spin_lock(&n->list_lock);
 			add_partial(n, page, tail);
+			spin_unlock(&n->list_lock);
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(s, DEACTIVATE_FULL);
-			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
-				add_full(n, page);
+			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+				spin_lock(&n->list_lock);
+				add_full(s, n, page);
+				spin_unlock(&n->list_lock);
+			}
 		}
 		slab_unlock(page);
 	} else {
@@ -1597,7 +1604,9 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			 * kmem_cache_shrink can reclaim any empty slabs from
 			 * the partial list.
 			 */
+			spin_lock(&n->list_lock);
 			add_partial(n, page, 1);
+			spin_unlock(&n->list_lock);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -2099,7 +2108,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * then add it.
 	 */
 	if (unlikely(!prior)) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 
@@ -2113,7 +2126,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		/*
 		 * Slab still on the partial list.
 		 */
-		remove_partial(s, page);
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
+		remove_partial(n, page);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
@@ -2391,7 +2408,6 @@ static void early_kmem_cache_node_alloc(int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
-	unsigned long flags;
 
 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
@@ -2418,14 +2434,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n, kmem_cache_node);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	/*
-	 * lockdep requires consistent irq usage for each lock
-	 * so even though there cannot be a race this early in
-	 * the boot sequence, we still disable irqs.
-	 */
-	local_irq_save(flags);
 	add_partial(n, page, 0);
-	local_irq_restore(flags);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2709,7 +2718,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
@@ -3047,7 +3056,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 				 * may have freed the last object and be
 				 * waiting to release the slab.
 				 */
-				__remove_partial(n, page);
+				remove_partial(n, page);
 				slab_unlock(page);
 				discard_slab(s, page);
 			} else {