Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e45ac714 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: allow page poisoning to be enabled by default."

parents 86a0648a 5d3323ae
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -63,6 +63,16 @@ config PAGE_POISONING

	  If unsure, say N

config PAGE_POISONING_ENABLE_DEFAULT
	bool "Enable page poisoning by default?"
	default n
	depends on PAGE_POISONING
	---help---
	  Enable page poisoning of free pages by default? This value
	  can be overridden by page_poison=off|on. This can be used
	  to avoid passing the kernel parameter and leave the page
	  poisoning feature enabled by default.
config PAGE_POISONING_NO_SANITY
	depends on PAGE_POISONING
	bool "Only poison, don't sanity check"
+2 −1
Original line number Diff line number Diff line
@@ -7,7 +7,8 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>

static bool want_page_poisoning __read_mostly;
static bool want_page_poisoning __read_mostly
		= IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);

static int early_page_poison_param(char *buf)
{
+46 −19
Original line number Diff line number Diff line
@@ -1647,31 +1647,32 @@ int isolate_lru_page(struct page *page)
	return ret;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there is a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct pglist_data *pgdat, int file,
		struct scan_control *sc)
static int __too_many_isolated(struct pglist_data *pgdat, int file,
	struct scan_control *sc, bool stalled)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!sane_reclaim(sc))
		return 0;

	if (file) {
		if (stalled) {
			inactive = node_page_state_snapshot(pgdat,
					NR_INACTIVE_FILE);
			isolated = node_page_state_snapshot(pgdat,
					NR_ISOLATED_FILE);
		} else {
			inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
			isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
		}
	} else {
		if (stalled) {
			inactive = node_page_state_snapshot(pgdat,
					NR_INACTIVE_ANON);
			isolated = node_page_state_snapshot(pgdat,
					NR_ISOLATED_ANON);
		} else {
			inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
			isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
		}
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
@@ -1684,6 +1685,32 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
	return isolated > inactive;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there is a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct pglist_data *pgdat, int file,
		struct scan_control *sc, bool stalled)
{
	/* kswapd and legacy-memcg reclaim are never throttled here. */
	if (current_is_kswapd() || !sane_reclaim(sc))
		return 0;

	/* Cheap check first, using the plain per-node counters. */
	if (likely(!__too_many_isolated(pgdat, file, sc, false)))
		return 0;

	/*
	 * Only the caller that is about to stall pays for the more
	 * expensive snapshot-based recheck; otherwise report congestion.
	 */
	return stalled ? __too_many_isolated(pgdat, file, sc, true) : 1;
}

static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
@@ -1771,7 +1798,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	bool stalled = false;

	while (unlikely(too_many_isolated(pgdat, file, sc))) {
	while (unlikely(too_many_isolated(pgdat, file, sc, stalled))) {
		if (stalled)
			return 0;