Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c0e04a0f authored by qctecmdr Service, committed via Gerrit (the code review server)
Browse files

Merge "lowmemorykiller: don't take scan_mutex lock when low on memory"

parents 387baf8f 5669bb16
Loading
Loading
Loading
Loading
+40 −8
Original line number Diff line number Diff line
@@ -106,6 +106,7 @@ static unsigned long lowmem_count(struct shrinker *s,
		global_node_page_state(NR_INACTIVE_FILE);
}

bool lmk_kill_possible(void);
static atomic_t shift_adj = ATOMIC_INIT(0);
static short adj_max_shift = 353;
module_param_named(adj_max_shift, adj_max_shift, short, 0644);
@@ -128,6 +129,20 @@ module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644);
static int oom_reaper;
module_param_named(oom_reaper, oom_reaper, int, 0644);

/* Jiffies deadline fed back to the reclaim path; 0 means no throttling */
static atomic64_t lmk_feed = ATOMIC64_INIT(0);

/*
 * This function can be called whether to include the anon LRU pages
 * for accounting in the page reclaim.
 */
bool lmk_kill_possible(void)
{
	unsigned long val = atomic64_read(&lmk_feed);

	return !val || time_after_eq(jiffies, val);
}

enum {
	VMPRESSURE_NO_ADJUST = 0,
	VMPRESSURE_ADJUST_ENCROACH,
@@ -457,9 +472,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;

	if (!mutex_trylock(&scan_mutex))
		return 0;
	bool lock_required = true;

	other_free = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

@@ -473,6 +486,13 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
	else
		other_file = 0;

	if (!get_nr_swap_pages() && (other_free <= lowmem_minfree[0] >> 1) &&
	    (other_file <= lowmem_minfree[0] >> 1))
		lock_required = false;

	if (likely(lock_required) && !mutex_trylock(&scan_mutex))
		return 0;

	tune_lmk_param(&other_free, &other_file, sc);

	scale_percent = get_minfree_scalefactor(sc->gfp_mask);
@@ -498,6 +518,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
		trace_almk_shrink(0, ret, other_free, other_file, 0);
		lowmem_print(5, "%s %lu, %x, return 0\n",
			     __func__, sc->nr_to_scan, sc->gfp_mask);
		if (lock_required)
			mutex_unlock(&scan_mutex);
		return 0;
	}
@@ -529,6 +550,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
						lowmem_deathpending_timeout)) {
					task_unlock(p);
					rcu_read_unlock();
					if (lock_required)
						mutex_unlock(&scan_mutex);
					return 0;
				}
@@ -538,6 +560,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
					   lowmem_deathpending_timeout))
				if (test_task_lmk_waiting(tsk)) {
					rcu_read_unlock();
					if (lock_required)
						mutex_unlock(&scan_mutex);
					return 0;
				}
@@ -574,12 +597,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
		long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
		long free = other_free * (long)(PAGE_SIZE / 1024);

		atomic64_set(&lmk_feed, 0);
		if (test_task_lmk_waiting(selected) &&
		    (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
			lowmem_print(2, "'%s' (%d) is already killed\n",
				     selected->comm,
				     selected->pid);
			rcu_read_unlock();
			if (lock_required)
				mutex_unlock(&scan_mutex);
			return 0;
		}
@@ -638,10 +663,17 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
	} else {
		trace_almk_shrink(1, ret, other_free, other_file, 0);
		rcu_read_unlock();
		if (other_free < lowmem_minfree[0] &&
		    other_file < lowmem_minfree[0])
			atomic64_set(&lmk_feed, jiffies + HZ);
		else
			atomic64_set(&lmk_feed, 0);

	}

	lowmem_print(4, "%s %lu, %x, return %lu\n",
		     __func__, sc->nr_to_scan, sc->gfp_mask, rem);
	if (lock_required)
		mutex_unlock(&scan_mutex);
	return rem;
}
+9 −0
Original line number Diff line number Diff line
@@ -557,4 +557,13 @@ static inline bool is_migrate_highatomic_page(struct page *page)
}

void setup_zone_pageset(struct zone *zone);

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER
/* Implemented by the Android low-memory killer driver. */
extern bool lmk_kill_possible(void);
#else
/*
 * No low-memory killer configured: report that an LMK kill can never
 * happen, so reclaim-path callers keep their default behaviour.
 */
static inline bool lmk_kill_possible(void)
{
	return false;
}
#endif
#endif	/* __MM_INTERNAL_H */
+2 −4
Original line number Diff line number Diff line
@@ -3870,8 +3870,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;
@@ -4172,8 +4171,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if ((did_some_progress > 0 ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) &&
	if ((did_some_progress > 0 || lmk_kill_possible()) &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
+1 −2
Original line number Diff line number Diff line
@@ -220,8 +220,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0
			|| IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
	if (get_nr_swap_pages() > 0 || lmk_kill_possible())
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);