Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37765cc1 authored by Vinayak Menon's avatar Vinayak Menon
Browse files

mm: swap: don't delay swap free for fast swap devices



There are a couple of issues with swapcache usage when ZRAM is used
as a swap device.
1) Kernel does a swap readahead which can be around 6 to 8 pages
depending on total ram, which is not required for zram since
accesses are fast.
2) Kernel delays the freeing up of swapcache expecting a later hit,
which again is useless in the case of zram.
3) This is not related to swapcache, but zram usage itself.
As mentioned in (2) kernel delays freeing of swapcache, but along with
that it delays zram compressed page free also. i.e. there can be 2 copies,
though one is compressed.

This patch addresses these issues using two new flags,
QUEUE_FLAG_FAST and SWP_FAST, to indicate that accesses to the device
will be fast and cheap, and instructs the swap layer to free up
swap space aggressively, and not to do readahead.

Change-Id: I5d2d5176a5f9420300bb2f843f6ecbdb25ea80e4
Signed-off-by: default avatarVinayak Menon <vinmenon@codeaurora.org>
parent 764ae007
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1081,6 +1081,7 @@ static int create_device(struct zram *zram, int device_id)
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	__set_bit(QUEUE_FLAG_FAST, &zram->queue->queue_flags);
	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resembles non-rotational disks */
+2 −0
Original line number Diff line number Diff line
@@ -509,6 +509,7 @@ struct request_queue {
#define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
#define QUEUE_FLAG_FAST        23	/* fast block device (e.g. ram based) */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -596,6 +597,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+12 −3
Original line number Diff line number Diff line
@@ -159,7 +159,8 @@ enum {
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
	SWP_FAST	= (1 << 10),	/* blkdev access is fast and cheap */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
@@ -413,10 +414,18 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern bool is_swap_fast(swp_entry_t entry);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
static inline bool vm_swap_full(struct swap_info_struct *si)
{
	/*
	 * If the swap device is fast, return true
	 * not to delay swap free.
	 */
	if (si->flags & SWP_FAST)
		return true;

	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

@@ -461,7 +470,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0
#define vm_swap_full(si)			0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
+2 −1
Original line number Diff line number Diff line
@@ -2536,7 +2536,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	}

	swap_free(entry);
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
	if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
		(vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (page != swapcache) {
+2 −1
Original line number Diff line number Diff line
@@ -461,7 +461,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	unsigned long mask = is_swap_fast(entry) ? 0 :
				(1UL << page_cluster) - 1;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
Loading