Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f116695a authored by Sasha Levin's avatar Sasha Levin Committed by Konrad Rzeszutek Wilk
Browse files

mm: frontswap: split out __frontswap_unuse_pages



An attempt at making frontswap_shrink shorter and more readable. This patch
splits out walking through the swap list to find an entry with enough
pages to unuse.

Also, assert that the internal __frontswap_unuse_pages is called under swap
lock, since that part of the code previously ran directly inside the lock.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 96253444
Loading
Loading
Loading
Loading
+39 −20
Original line number Diff line number Diff line
@@ -230,6 +230,41 @@ static unsigned long __frontswap_curr_pages(void)
	return totalpages;
}

/*
 * Pick a swap device from which frontswap pages should be unused, on
 * behalf of frontswap_shrink().
 *
 * @total:  total number of frontswap pages the caller wants unused
 * @unused: on success, the number of pages to unuse from the selected
 *          device; 0 means "unuse all pages on that device"
 * @swapid: on success, the swap type (index into swap_info[]) selected
 *
 * Must be called with swap_lock held (asserted below), since it walks
 * swap_list/swap_info.
 *
 * Returns 0 on success with *unused and *swapid set; -ENOMEM if the
 * memory-accounting check failed for every candidate device; -EINVAL
 * if the swap list was empty (loop never ran).
 */
static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;

	assert_spin_locked(&swap_lock);
	/* Walk the swap devices in list (priority) order. */
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			/* This device alone covers the remaining request. */
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			/* Not enough memory for this device; try the next. */
			continue;
		}
		/*
		 * security_vm_enough_memory_mm() charged 'pages' to the
		 * committed-memory accounting; we only wanted the check,
		 * so undo the charge before returning.
		 */
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
@@ -240,11 +275,9 @@ static unsigned long __frontswap_curr_pages(void)
 */
void frontswap_shrink(unsigned long target_pages)
{
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages = 0, total_pages_to_unuse;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;
	unsigned long pages_to_unuse = 0;
	int type, ret;
	bool locked = false;

	/*
@@ -258,22 +291,8 @@ void frontswap_shrink(unsigned long target_pages)
	if (total_pages <= target_pages)
		goto out;
	total_pages_to_unuse = total_pages - target_pages;
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages))
			continue;
		vm_unacct_memory(pages);
		break;
	}
	if (type < 0)
	ret = __frontswap_unuse_pages(total_pages_to_unuse, &pages_to_unuse, &type);
	if (ret < 0)
		goto out;
	locked = false;
	spin_unlock(&swap_lock);