Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50d5c41c authored by Christoph Lameter, committed by Pekka Enberg
Browse files

slub: Do not use frozen page flag but a bit in the page counters



Do not use a page flag for the frozen bit. It needs to be part
of the state that is handled with cmpxchg_double(). So use a bit
in the counter struct in the page struct for that purpose.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 7e0528da
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -41,8 +41,9 @@ struct page {
					 * & limit reverse map searches.
					 */
		struct {		/* SLUB */
			u16 inuse;
			u16 objects;
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	union {
+0 −5
Original line number Diff line number Diff line
@@ -124,9 +124,6 @@ enum pageflags {

	/* SLOB */
	PG_slob_free = PG_private,

	/* SLUB */
	PG_slub_frozen = PG_active,
};

#ifndef __GENERATING_BOUNDS_H
@@ -212,8 +209,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

__PAGEFLAG(SlubFrozen, slub_frozen)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
+6 −6
Original line number Diff line number Diff line
@@ -166,7 +166,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
@@ -1025,7 +1025,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
	}

	/* Special debug activities for freeing objects */
	if (!PageSlubFrozen(page) && !page->freelist)
	if (!page->frozen && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
@@ -1424,7 +1424,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
{
	if (slab_trylock(page)) {
		__remove_partial(n, page);
		__SetPageSlubFrozen(page);
		page->frozen = 1;
		return 1;
	}
	return 0;
@@ -1538,7 +1538,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	__ClearPageSlubFrozen(page);
	page->frozen = 0;
	if (page->inuse) {

		if (page->freelist) {
@@ -1868,7 +1868,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			flush_slab(s, c);

		slab_lock(page);
		__SetPageSlubFrozen(page);
		page->frozen = 1;
		c->node = page_to_nid(page);
		c->page = page;
		goto load_freelist;
@@ -2048,7 +2048,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageSlubFrozen(page))) {
	if (unlikely(page->frozen)) {
		stat(s, FREE_FROZEN);
		goto out_unlock;
	}