
Commit 34bf6ef9 authored by Dave Hansen, committed by Pekka Enberg

mm: slab/slub: use page->lru consistently instead of page->list



'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing, as with this nugget from
slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab, slob, and slub code use page->lru universally
instead of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep
your page on a list.  Don't like the fact that it's not called ->list?
Too bad.
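
For illustration only (not part of this patch): a minimal user-space sketch of
why the mixed usage happened to work at all. Because 'lru' and 'list' sit in
the same union, both names alias the same storage, so list operations through
either name touch the same list_head. The struct and names below are
simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Simplified stand-in for the relevant part of 'struct page'. */
struct fake_page {
	union {
		struct list_head lru;	/* the name the patch standardizes on */
		struct list_head list;	/* the name being removed */
	};
};

int main(void)
{
	struct fake_page page;

	/* Initialize through one name... */
	page.lru.next = page.lru.prev = &page.lru;

	/* ...and read the very same pointers back through the other. */
	printf("aliases: %d\n", page.list.next == &page.lru);	/* prints 1 */
	return 0;
}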

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 5f0985bb
include/linux/mm_types.h  +2 −1
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};

-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
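
A hedged sketch (editorial, not from this patch) of what the new comment on
page->lru permits: code that owns a page outright can thread it onto its own
list through ->lru. The function and list names here are illustrative, and
error handling is minimal.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm_types.h>

static LIST_HEAD(my_stash);		/* illustrative private list */

static int stash_one_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	/* The page's owner may reuse ->lru as a generic list hook. */
	list_add(&page->lru, &my_stash);
	return 0;
}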
mm/slab.c  +2 −2
@@ -2922,9 +2922,9 @@ retry:
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}

 must_grow:
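
To read the hunk above in isolation, here is a hedged sketch of the same
placement logic as a standalone helper. The helper name is illustrative (the
patched code stays inline in mm/slab.c), and it assumes mm/slab.c's internal
types (struct kmem_cache, struct kmem_cache_node); the behaviour is unchanged:
a fully used slab page goes onto the full list, otherwise onto the partial
list, and both adds now go through page->lru.

/* Illustrative helper, not part of the patch. */
static void move_slab_page(struct kmem_cache *cachep,
			   struct kmem_cache_node *n, struct page *page)
{
	list_del(&page->lru);
	if (page->active == cachep->num)	/* every object allocated */
		list_add(&page->lru, &n->slabs_full);
	else
		list_add(&page->lru, &n->slabs_partial);
}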
mm/slob.c  +5 −5
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)

 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }

 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }

@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)

 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;

 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
-		INIT_LIST_HEAD(&sp->list);
+		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);