Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c9e44410 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds
Browse files

mm: reuse unused swap entry if necessary



Presently we can tell that a swap entry is used only by the swap cache via
swap_map, without looking up the swap cache itself.

Then, we have a chance to reuse swap-cache-only swap entries in
get_swap_pages().

This patch tries to free swap-cache-only swap entries when free swap space
is running low.

Note: We hit the following path when the swap_cluster code cannot find a
free cluster.  In that case, vm_swap_full() is not the only condition that
allows the kernel to reclaim unused swap.

Signed-off-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: default avatarBalbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Tested-by: default avatarDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 355cfa73
Loading
Loading
Loading
Loading
+47 −0
Original line number Original line Diff line number Diff line
@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
	return ret;
	return ret;
}
}


/*
 * Try to reclaim a swap entry that is used only by the swap cache.
 * Returns 1 if the swap entry was freed, 0 otherwise.
 */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	/* recover the swap type as the index of si within swap_info[] */
	int type = si - swap_info;
	swp_entry_t entry = swp_entry(type, offset);
	struct page *page;
	int ret = 0;

	/* look up the swap-cache page; takes a reference if found */
	page = find_get_page(&swapper_space, entry.val);
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map() and it's called
	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
	 * We have to use trylock for avoiding deadlock. This is a special
	 * case and you should use try_to_free_swap() with explicit lock_page()
	 * in usual operations.
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	/* drop the reference taken by find_get_page() */
	page_cache_release(page);
	return ret;
}


/*
/*
 * We need this because the bdev->unplug_fn can sleep and we cannot
 * We need this because the bdev->unplug_fn can sleep and we cannot
@@ -301,6 +327,19 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si,
		goto no_page;
		goto no_page;
	if (offset > si->highest_bit)
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;
		scan_base = offset = si->lowest_bit;

	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		spin_unlock(&swap_lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset);
		spin_lock(&swap_lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset])
	if (si->swap_map[offset])
		goto scan;
		goto scan;


@@ -382,6 +421,10 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si,
			spin_lock(&swap_lock);
			spin_lock(&swap_lock);
			goto checks;
			goto checks;
		}
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si,
			spin_lock(&swap_lock);
			spin_lock(&swap_lock);
			goto checks;
			goto checks;
		}
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			latency_ration = LATENCY_LIMIT;