
Commit 6326fec1 authored by Nicholas Piggin, committed by Linus Torvalds

mm: Use owner_priv bit for PageSwapCache, valid when PageSwapBacked



A page is not added to the swap cache without being swap backed,
so PageSwapBacked mappings can use PG_owner_priv_1 for PageSwapCache.
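
To make the aliasing concrete, here is a minimal sketch of how a caller can rely on the new invariant. It is illustration only, not part of the patch, and the helper name page_swap_entry_example is made up; kernel context is assumed (linux/mm.h, linux/page-flags.h, linux/swap.h). PageSwapCache() now reports true only on swap-backed pages, and the swp_entry_t still lives in page->private.

/*
 * Illustrative sketch, not from this commit: PG_swapcache now aliases
 * PG_owner_priv_1, so PageSwapCache() implies PageSwapBacked(), while
 * the swap entry stays in page->private as before.
 */
static inline swp_entry_t page_swap_entry_example(struct page *page)
{
	swp_entry_t entry = { .val = 0 };

	if (PageSwapCache(page)) {	/* implies PageSwapBacked(page) */
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		entry.val = page_private(page);
	}
	return entry;
}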

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Andrew Lutomirski <luto@kernel.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c0f6ba6
include/linux/page-flags.h  +16 −8
@@ -87,7 +87,6 @@ enum pageflags {
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
-	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
@@ -110,6 +109,9 @@ enum pageflags {
	/* Filesystems */
	PG_checked = PG_owner_priv_1,

+	/* SwapBacked */
+	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
+
	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
@@ -314,7 +316,13 @@ PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
-PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
+static __always_inline int PageSwapCache(struct page *page)
+{
+	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+
+}
+SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
+CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
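
For orientation: the open-coded PageSwapCache() above replaces only the test half of what the old PAGEFLAG() line generated; SETPAGEFLAG() and CLEARPAGEFLAG() still generate plain bit operations. Roughly, simplified and ignoring the PF_NO_COMPOUND policy check, the generated helpers amount to the following sketch:

/* Approximate, simplified expansion for illustration only */
static __always_inline void SetPageSwapCache(struct page *page)
{
	set_bit(PG_swapcache, &page->flags);
}

static __always_inline void ClearPageSwapCache(struct page *page)
{
	clear_bit(PG_swapcache, &page->flags);
}

Only the test needs the PageSwapBacked() qualification, because PG_owner_priv_1 can be set for unrelated reasons on pages that are not swap backed (for example PG_checked on filesystem pages, as shown in the earlier hunk).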
@@ -705,7 +713,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
-	 1UL << PG_slab	 | 1UL << PG_swapcache | 1UL << PG_active | \
+	 1UL << PG_slab		| 1UL << PG_active 	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
include/trace/events/mmflags.h  +0 −1
@@ -95,7 +95,6 @@
	{1UL << PG_private_2,		"private_2"	},		\
	{1UL << PG_writeback,		"writeback"	},		\
	{1UL << PG_head,		"head"		},		\
-	{1UL << PG_swapcache,		"swapcache"	},		\
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},		\
	{1UL << PG_reclaim,		"reclaim"	},		\
	{1UL << PG_swapbacked,		"swapbacked"	},		\
mm/memory-failure.c  +1 −3
@@ -764,12 +764,11 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 */

#define dirty		(1UL << PG_dirty)
-#define sc		(1UL << PG_swapcache)
+#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
-#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)
@@ -819,7 +818,6 @@ static struct page_state {
#undef mlock
#undef writeback
#undef lru
-#undef swapbacked
#undef head
#undef slab
#undef reserved
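
A short sketch of why the sc mask above now carries both bits: hwpoison classifies a page by matching masked flag bits, and PG_swapcache on its own is no longer conclusive since it shares a bit with PG_owner_priv_1; requiring PG_swapbacked as well keeps the match unambiguous. The helper below is made up for illustration and is not the actual memory-failure.c matching code.

/* Illustration only: both bits must be set to mean "in the swap cache" */
static bool page_is_swapcache_state(unsigned long page_flags)
{
	const unsigned long sc = (1UL << PG_swapcache) |
				 (1UL << PG_swapbacked);

	return (page_flags & sc) == sc;
}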
mm/migrate.c  +8 −6
@@ -466,14 +466,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
-	if (PageSwapBacked(page))
-		__SetPageSwapBacked(newpage);
-
	get_page(newpage);	/* add cache reference */
-	if (PageSwapCache(page)) {
-		SetPageSwapCache(newpage);
-		set_page_private(newpage, page_private(page));
+	if (PageSwapBacked(page)) {
+		__SetPageSwapBacked(newpage);
+		if (PageSwapCache(page)) {
+			SetPageSwapCache(newpage);
+			set_page_private(newpage, page_private(page));
+		}
+	} else {
+		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
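
The restructuring in this last hunk nests the swap cache copy inside the swap-backed case, matching the new rule that PG_swapcache is only meaningful on a swap-backed page; the VM_BUG_ON_PAGE documents that a page is never in the swap cache without being swap backed. A condensed sketch of the resulting invariant follows; copy_swap_state_sketch is illustrative, not a real kernel function.

/* Illustration only: mark the new page swap backed before copying swap cache state */
static void copy_swap_state_sketch(struct page *newpage, struct page *page)
{
	if (!PageSwapBacked(page)) {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
		return;
	}
	__SetPageSwapBacked(newpage);	/* must precede SetPageSwapCache() */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
}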