Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cf45a00 authored by Charan Teja Reddy's avatar Charan Teja Reddy
Browse files

mm: add a feedback from lowmemorykiller for OOM decisions



In the alloc slow path, for non-costly order allocations, we rely on the
should_reclaim_retry() function to decide whether we should go for page
reclaim before landing into OOM. But when low memory killer is enabled,
the wrong accounting of anon lru pages can result in unnecessary
retries that in turn lead to page allocation stalls for longer
durations. To avoid this situation, get input from the LMK, which
already has the info about whether there are any LMK-killable tasks.
This also fixes commit 23ba63a1 ("mm: retry more before OOM in the
presence of slow shrinkers")

Change-Id: I91416b7646d4545f7762402e50c6193771f62605
Signed-off-by: default avatarCharan Teja Reddy <charante@codeaurora.org>
parent 55a26aeb
Loading
Loading
Loading
Loading
+22 −0
Original line number Diff line number Diff line
@@ -106,6 +106,7 @@ static unsigned long lowmem_count(struct shrinker *s,
		global_node_page_state(NR_INACTIVE_FILE);
}

bool lmk_kill_possible(void);
static atomic_t shift_adj = ATOMIC_INIT(0);
static short adj_max_shift = 353;
module_param_named(adj_max_shift, adj_max_shift, short, 0644);
@@ -128,6 +129,20 @@ module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644);
static int oom_reaper;
module_param_named(oom_reaper, oom_reaper, int, 0644);

/* Variable that helps in feed to the reclaim path  */
static atomic64_t lmk_feed = ATOMIC64_INIT(0);

/*
 * This function can be called whether to include the anon LRU pages
 * for accounting in the page reclaim.
 */
bool lmk_kill_possible(void)
{
	unsigned long val = atomic64_read(&lmk_feed);

	return !val || time_after_eq(jiffies, val);
}

enum {
	VMPRESSURE_NO_ADJUST = 0,
	VMPRESSURE_ADJUST_ENCROACH,
@@ -574,6 +589,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
		long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
		long free = other_free * (long)(PAGE_SIZE / 1024);

		atomic64_set(&lmk_feed, 0);
		if (test_task_lmk_waiting(selected) &&
		    (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
			lowmem_print(2, "'%s' (%d) is already killed\n",
@@ -638,6 +654,12 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
	} else {
		trace_almk_shrink(1, ret, other_free, other_file, 0);
		rcu_read_unlock();
		if (other_free < lowmem_minfree[0] &&
		    other_file < lowmem_minfree[0])
			atomic64_set(&lmk_feed, jiffies + HZ);
		else
			atomic64_set(&lmk_feed, 0);

	}

	lowmem_print(4, "%s %lu, %x, return %lu\n",
+9 −0
Original line number Diff line number Diff line
@@ -557,4 +557,13 @@ static inline bool is_migrate_highatomic_page(struct page *page)
}

void setup_zone_pageset(struct zone *zone);

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER
extern bool lmk_kill_possible(void);
#else
/*
 * Without the low-memory killer there is never an LMK-killable task,
 * so callers fall back to their pre-LMK behavior.
 */
static inline bool lmk_kill_possible(void)
{
	return false;
}
#endif
#endif	/* __MM_INTERNAL_H */
+2 −4
Original line number Diff line number Diff line
@@ -3870,8 +3870,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;
@@ -4172,8 +4171,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if ((did_some_progress > 0 ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) &&
	if ((did_some_progress > 0 || lmk_kill_possible()) &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
+1 −2
Original line number Diff line number Diff line
@@ -220,8 +220,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0
			|| IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
	if (get_nr_swap_pages() > 0 || lmk_kill_possible())
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);