Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 35be952a authored by Mark Salyzyn, committed by Martin Liu
Browse files

Revert "BACKPORT: mm: reclaim small amounts of memory when an external fragmentation event occurs"



This reverts commit 5cbbeadd.

Reason for revert: revert customized code
Bug: 140544941
Test: boot
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Martin Liu <liumartin@google.com>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I65735f27f6a44a112957bcec07e2f63f2d8ccff6
parent 776eba01
Loading
Loading
Loading
Loading
+0 −21
Original line number Diff line number Diff line
@@ -64,7 +64,6 @@ Currently, these files are in /proc/sys/vm:
- swappiness
- user_reserve_kbytes
- vfs_cache_pressure
- watermark_boost_factor
- watermark_scale_factor
- zone_reclaim_mode

@@ -873,26 +872,6 @@ ten times more freeable objects than there are.

=============================================================

watermark_boost_factor:

This factor controls the level of reclaim when memory is being fragmented.
It defines the percentage of the high watermark of a zone that will be
reclaimed if pages of different mobility are being mixed within pageblocks.
The intent is that compaction has less work to do in the future and to
increase the success rate of future high-order allocations such as SLUB
allocations, THP and hugetlbfs pages.

To make it sensible with respect to the watermark_scale_factor parameter,
the unit is in fractions of 10,000. The default value of 15,000 means
that up to 150% of the high watermark will be reclaimed in the event of
a pageblock being mixed due to fragmentation. The level of reclaim is
determined by the number of fragmentation events that occurred in the
recent past. If this value is smaller than a pageblock then a pageblock's
worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
of 0 will disable the feature.

=============================================================

watermark_scale_factor:

This factor controls the aggressiveness of kswapd. It defines the
+0 −1
Original line number Diff line number Diff line
@@ -2239,7 +2239,6 @@ extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;

/* nommu.c */
+4 −7
Original line number Diff line number Diff line
@@ -274,10 +274,10 @@ enum zone_watermarks {
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH])
#define wmark_pages(z, i) (z->_watermark[i])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
@@ -369,7 +369,6 @@ struct zone {

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

@@ -897,8 +896,6 @@ static inline int is_highmem(struct zone *zone)
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
+0 −8
Original line number Diff line number Diff line
@@ -1494,14 +1494,6 @@ static struct ctl_table vm_table[] = {
		.proc_handler	= min_free_kbytes_sysctl_handler,
		.extra1		= &zero,
	},
	{
		.procname	= "watermark_boost_factor",
		.data		= &watermark_boost_factor,
		.maxlen		= sizeof(watermark_boost_factor),
		.mode		= 0644,
		.proc_handler	= watermark_boost_factor_sysctl_handler,
		.extra1		= &zero,
	},
	{
		.procname	= "watermark_scale_factor",
		.data		= &watermark_scale_factor,
+2 −41
Original line number Diff line number Diff line
@@ -318,7 +318,6 @@ compound_page_dtor * const compound_page_dtors[] = {
 */
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

/*
@@ -2219,21 +2218,6 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
	return false;
}

/*
 * boost_watermark - raise a zone's watermark boost in response to an
 * external fragmentation event, so reclaim frees a little extra memory
 * and future high-order allocations are more likely to succeed.
 *
 * No-op when the watermark_boost_factor sysctl is 0 (feature disabled).
 */
static inline void boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	/* Feature disabled via /proc/sys/vm/watermark_boost_factor. */
	if (!watermark_boost_factor)
		return;

	/*
	 * Cap the boost at watermark_boost_factor (in fractions of 10,000)
	 * of the zone's high watermark, but never below one pageblock's
	 * worth of pages.
	 */
	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);
	max_boost = max(pageblock_nr_pages, max_boost);

	/* Accumulate one pageblock per fragmentation event, up to the cap. */
	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
@@ -2243,7 +2227,7 @@ static inline void boost_watermark(struct zone *zone)
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
					int start_type, bool whole_block)
{
	unsigned int current_order = page_order(page);
	struct free_area *area;
@@ -2265,15 +2249,6 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	boost_watermark(zone);
	if (alloc_flags & ALLOC_KSWAPD)
		wakeup_kswapd(zone, 0, 0, zone_idx(zone));

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;
@@ -2557,8 +2532,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
	page = list_first_entry(&area->free_list[fallback_mt],
							struct page, lru);

	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);
	steal_suitable_fallback(zone, page, start_migratetype, can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);
@@ -7594,7 +7568,6 @@ static void __setup_per_zone_wmarks(void)
					low + min;
		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) +
					low + min * 2;
		zone->watermark_boost = 0;

		spin_unlock_irqrestore(&zone->lock, flags);
	}
@@ -7695,18 +7668,6 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	return 0;
}

/*
 * sysctl handler for /proc/sys/vm/watermark_boost_factor.
 *
 * Delegates parsing and range-checking of the written value to
 * proc_dointvec_minmax(), which stores it in watermark_boost_factor
 * (read by boost_watermark() on the next fragmentation event).
 * Returns 0 on success or the proc_dointvec_minmax() error code.
 */
int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	return 0;
}

int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
Loading