Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a834fd7f authored by Sultan Alsawaf, committed by KakatkarAkshay
Browse files

mm: Stop kswapd early when nothing's waiting for it to free pages



Keeping kswapd running when all the failed allocations that invoked it
are satisfied incurs a high overhead due to unnecessary page eviction
and writeback, as well as spurious VM pressure events to various
registered shrinkers. When kswapd doesn't need to work to make an
allocation succeed anymore, stop it prematurely to save resources.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
[kdrag0n: Forward-ported from 4.14 to 4.19]
Signed-off-by: Danny Lin <danny@kdrag0n.dev>
parent a4ca3621
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -698,6 +698,7 @@ typedef struct pglist_data {
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	atomic_t kswapd_waiters;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	/*
+14 −3
Original line number Diff line number Diff line
@@ -4567,6 +4567,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	int no_progress_loops;
	unsigned int cpuset_mems_cookie;
	int reserve_flags;
        pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
        bool woke_kswapd = false;
#ifdef CONFIG_ONEPLUS_MEM_MONITOR
	unsigned long oneplus_alloc_start = jiffies;
#endif
@@ -4603,8 +4605,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	if (!ac->preferred_zoneref->zone)
		goto nopage;

	if (alloc_flags & ALLOC_KSWAPD)
	if (alloc_flags & ALLOC_KSWAPD) {
		if (!woke_kswapd) {
			atomic_inc(&pgdat->kswapd_waiters);
			woke_kswapd = true;
		}
		wake_all_kswapds(order, gfp_mask, ac);
	}

	/*
	 * The adjusted alloc_flags might result in immediate success, so try
@@ -4806,13 +4813,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
		goto retry;
	}
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
#ifdef CONFIG_ONEPLUS_MEM_MONITOR
	memory_alloc_monitor(gfp_mask, order,
			jiffies_to_msecs(jiffies - oneplus_alloc_start));
#endif
	if (woke_kswapd)
		atomic_dec(&pgdat->kswapd_waiters);
	if (!page)
		warn_alloc(gfp_mask, ac->nodemask,
				"page allocation failure: order:%u", order);
	return page;
}

@@ -6784,6 +6794,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
	pgdat_page_ext_init(pgdat);
	spin_lock_init(&pgdat->lru_lock);
	lruvec_init(node_lruvec(pgdat));
	pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0);
}

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
+2 −1
Original line number Diff line number Diff line
@@ -3841,7 +3841,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
		__fs_reclaim_release();
		ret = try_to_freeze();
		__fs_reclaim_acquire();
		if (ret || kthread_should_stop())
		if (ret || kthread_should_stop() ||
		    !atomic_read(&pgdat->kswapd_waiters))
			break;

		/*