Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 82a0e08e authored by qctecmdr Service, committed by Gerrit — the friendly Code Review server
Browse files

Merge "mm: retry more before OOM in the presence of slow shrinkers"

parents 7cc46fb3 23ba63a1
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -799,3 +799,11 @@ config SPECULATIVE_PAGE_FAULT
	 detected or because underlying PMD or PTE tables are not yet
	 allocating, it is failing its processing and a classic page fault
	 is then tried.

# When enabled, the page allocator retries reclaim/compaction more
# aggressively (see the CONFIG_HAVE_LOW_MEMORY_KILLER branches in
# mm/page_alloc.c) so an external low-memory killer can free memory
# before the kernel OOM killer is invoked.
config HAVE_LOW_MEMORY_KILLER
	bool "Have user/kernel space low memory killer"
	default n
	help
	  Say 'y' if you have a user/kernel low memory killer and thus you
	  want page allocator to provide sufficient time before it triggers
	  Out of Memory killer.
+47 −2
Original line number Diff line number Diff line
@@ -3424,6 +3424,46 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	return NULL;
}

#ifdef CONFIG_HAVE_LOW_MEMORY_KILLER
/*
 * Decide whether a non-costly allocation should keep retrying so a
 * low-memory killer (user or kernel space) has time to free memory
 * before OOM is declared.
 *
 * Returns true when at least one zone in @ac's zonelist could satisfy
 * an order-0 request at the min watermark once reclaimable pages are
 * counted as available; false for costly orders, which fall back to
 * the regular compaction-progress check in should_compact_retry().
 */
static inline bool
should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags)
{
	struct zone *z;
	struct zoneref *zref;

	/* Costly order requests rely on compaction progress instead */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * For 0 < order <= PAGE_ALLOC_COSTLY_ORDER, do not let the
	 * allocation fail while the shrinkers could still free enough
	 * memory; this mirrors the !CONFIG_COMPACTION variant of
	 * should_compact_retry().
	 */
	for_each_zone_zonelist_nodemask(z, zref, ac->zonelist,
				ac->high_zoneidx, ac->nodemask) {
		unsigned long estimate = zone_reclaimable_pages(z) +
			zone_page_state_snapshot(z, NR_FREE_PAGES);

		if (__zone_watermark_ok(z, 0, min_wmark_pages(z),
				ac_classzone_idx(ac), alloc_flags, estimate))
			return true;
	}

	return false;
}
#else
static inline bool
should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags)
{
	return false;
}
#endif

static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
		     enum compact_result compact_result,
@@ -3439,6 +3479,9 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
	if (!order)
		return false;

	if (should_compact_lmk_retry(ac, order, alloc_flags))
		return true;

	if (compaction_made_progress(compact_result))
		(*compaction_retries)++;

@@ -3750,7 +3793,8 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;
@@ -4052,7 +4096,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if (did_some_progress > 0 &&
	if ((did_some_progress > 0 ||
			IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
+2 −1
Original line number Diff line number Diff line
@@ -213,7 +213,8 @@ unsigned long zone_reclaimable_pages(struct zone *zone)

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
	if (get_nr_swap_pages() > 0
			|| IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);