Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a458431e authored by Bartlomiej Zolnierkiewicz, committed by Linus Torvalds
Browse files

mm: fix zone_watermark_ok_safe() accounting of isolated pages



Commit 702d1a6e ("memory-hotplug: fix kswapd looping forever
problem") added an isolated pageblocks counter (nr_pageblock_isolate in
struct zone) and used it to adjust the free pages counter in
zone_watermark_ok_safe() to prevent the kswapd-looping-forever problem.

Then later, commit 2139cbe6 ("cma: fix counting of isolated pages")
fixed the accounting of isolated pages in the global free pages counter.
It made the previous zone_watermark_ok_safe() fix unnecessary and
potentially harmful (because isolated pages may now be accounted twice,
making the free pages counter incorrect).

This patch removes the special isolated pageblocks counter altogether,
which fixes the zone_watermark_ok_safe() free pages check.

Reported-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Aaditya Kumar <aaditya.kumar.30@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 358e419f
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -503,14 +503,6 @@ struct zone {
	 * rarely used fields:
	 */
	const char		*name;
#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * the number of MIGRATE_ISOLATE *pageblock*.
	 * We need this for free page counting. Look at zone_watermark_ok_safe.
	 * It's protected by zone->lock
	 */
	int		nr_pageblock_isolate;
#endif
} ____cacheline_internodealigned_in_smp;

typedef enum {
+0 −27
Original line number Diff line number Diff line
@@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes);

int page_group_by_mobility_disabled __read_mostly;

/*
 * NOTE:
 * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
 * Instead, use {un}set_pageblock_isolate.
 */
void set_pageblock_migratetype(struct page *page, int migratetype)
{

@@ -1655,20 +1650,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
	return true;
}

#ifdef CONFIG_MEMORY_ISOLATION
/*
 * Estimate of free pages sitting in MIGRATE_ISOLATE pageblocks of @zone:
 * every isolated pageblock is counted as if it were entirely free.
 * NOTE(review): this is an over-estimate, not an exact count — TODO confirm
 * against zone_watermark_ok_safe() usage.
 */
static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
{
	int nr_isolated = zone->nr_pageblock_isolate;

	return unlikely(nr_isolated) ? nr_isolated * pageblock_nr_pages : 0;
}
#else
/* Without CONFIG_MEMORY_ISOLATION no pageblock can be isolated. */
static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
{
	return 0;
}
#endif

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
@@ -1684,14 +1665,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	/*
	 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
	 * it.  nr_zone_isolate_freepages is never accurate so kswapd might not
	 * sleep although it could do so.  But this is more desirable for memory
	 * hotplug than sleeping which can cause a livelock in the direct
	 * reclaim path.
	 */
	free_pages -= nr_zone_isolate_freepages(z);
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}
+2 −24
Original line number Diff line number Diff line
@@ -8,28 +8,6 @@
#include <linux/memory.h>
#include "internal.h"

/*
 * Flag @page's pageblock as MIGRATE_ISOLATE and bump the owning zone's
 * isolated-pageblock counter.  Does nothing if the pageblock is already
 * isolated.  Caller must hold zone->lock.
 */
static void set_pageblock_isolate(struct page *page)
{
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		page_zone(page)->nr_pageblock_isolate++;
	}
}

/*
 * Return @page's pageblock to @migratetype and drop the owning zone's
 * isolated-pageblock counter.  Warns and bails out if the pageblock is
 * not currently MIGRATE_ISOLATE.  Caller must hold zone->lock.
 */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
	struct zone *z = page_zone(page);

	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
		return;

	/* Counter must be positive if this pageblock really was isolated. */
	BUG_ON(z->nr_pageblock_isolate <= 0);
	set_pageblock_migratetype(page, migratetype);
	z->nr_pageblock_isolate--;
}

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
@@ -80,7 +58,7 @@ int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_isolate(page);
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -103,7 +81,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	restore_pageblock_isolate(page, migratetype);
	set_pageblock_migratetype(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}