Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2b0a7867 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm, vmscan: clear PGDAT_WRITEBACK when zone is balanced"

parents 5059309c 56c7b5c0
Loading
Loading
Loading
Loading
+2 −0
Original line number | Diff line number | Diff line
@@ -645,6 +645,8 @@ typedef struct pglist_data {
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
+6 −0
Original line number | Diff line number | Diff line
@@ -73,6 +73,12 @@ static inline void set_page_refcounted(struct page *page)

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is consider the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
+2 −7
Original line number | Diff line number | Diff line
@@ -3500,12 +3500,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
	return false;
}

/*
 * Maximum number of reclaim retries without any progress before OOM killer
 * is consider as the only way to move forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * Checks whether it makes sense to retry the reclaim to make a forward progress
 * for the given allocation request.
@@ -4466,7 +4460,8 @@ void show_free_areas(unsigned int filter)
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
			node_page_state(pgdat, NR_PAGES_SCANNED),
			!pgdat_reclaimable(pgdat) ? "yes" : "no");
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

	for_each_populated_zone(zone) {
+33 −15
Original line number | Diff line number | Diff line
@@ -2704,6 +2704,15 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
					 sc->nr_scanned - nr_scanned, sc));

	/*
	 * Kswapd gives up on balancing particular nodes after too
	 * many failures to reclaim anything from them and goes to
	 * sleep. On reclaim progress, reset the failure counter. A
	 * successful direct reclaim run will revive a dormant kswapd.
	 */
	if (reclaimable)
		pgdat->kswapd_failures = 0;

	return reclaimable;
}

@@ -2778,10 +2787,6 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
						 GFP_KERNEL | __GFP_HARDWALL))
				continue;

			if (sc->priority != DEF_PRIORITY &&
			    !pgdat_reclaimable(zone->zone_pgdat))
				continue;	/* Let kswapd poll it */

			/*
			 * If we already have plenty of memory free for
			 * compaction in this zone, don't free any more.
@@ -2918,7 +2923,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
	return 0;
}

static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
static bool allow_direct_reclaim(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
@@ -2926,6 +2931,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
	int i;
	bool wmark_ok;

	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!managed_zone(zone) ||
@@ -3006,7 +3014,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,

		/* Throttle based on the first usable node */
		pgdat = zone->zone_pgdat;
		if (pfmemalloc_watermark_ok(pgdat))
		if (allow_direct_reclaim(pgdat))
			goto out;
		break;
	}
@@ -3028,14 +3036,14 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
	 */
	if (!(gfp_mask & __GFP_FS)) {
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			pfmemalloc_watermark_ok(pgdat), HZ);
			allow_direct_reclaim(pgdat), HZ);

		goto check_pending;
	}

	/* Throttle until kswapd wakes the process */
	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
		pfmemalloc_watermark_ok(pgdat));
		allow_direct_reclaim(pgdat));

check_pending:
	if (fatal_signal_pending(current))
@@ -3198,6 +3206,7 @@ static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
	 */
	clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
	clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
	clear_bit(PGDAT_WRITEBACK, &zone->zone_pgdat->flags);

	return true;
}
@@ -3214,7 +3223,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)

	/*
	 * The throttled processes are normally woken up in balance_pgdat() as
	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
	 * soon as allow_direct_reclaim() is true. But there is a potential
	 * race between when kswapd checks the watermarks and a process gets
	 * throttled. There is also a potential race if processes get
	 * throttled, kswapd wakes, a large process exits thereby balancing the
@@ -3228,6 +3237,10 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
		wake_up_all(&pgdat->pfmemalloc_wait);

	/* Hopeless node, leave it to direct reclaim */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	for (i = 0; i <= classzone_idx; i++) {
		struct zone *zone = pgdat->node_zones + i;

@@ -3314,9 +3327,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
	count_vm_event(PAGEOUTRUN);

	do {
		unsigned long nr_reclaimed = sc.nr_reclaimed;
		bool raise_priority = true;

		sc.nr_reclaimed = 0;
		sc.reclaim_idx = classzone_idx;

		/*
@@ -3395,7 +3408,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
		 * able to safely make forward progress. Wake them
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				pfmemalloc_watermark_ok(pgdat))
				allow_direct_reclaim(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending */
@@ -3406,10 +3419,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		if (raise_priority || !sc.nr_reclaimed)
		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
		if (raise_priority || !nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1);

	if (!sc.nr_reclaimed)
		pgdat->kswapd_failures++;

out:
	/*
	 * Return the order kswapd stopped reclaiming at as
@@ -3609,6 +3626,10 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;

	/* Hopeless node, leave it to direct reclaim */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return;

	/* Only wake kswapd if all zones are unbalanced */
	for (z = 0; z <= classzone_idx; z++) {
		zone = pgdat->node_zones + z;
@@ -3879,9 +3900,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
	    sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	if (!pgdat_reclaimable(pgdat))
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
+1 −1
Original line number | Diff line number | Diff line
@@ -1426,7 +1426,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu"
		   "\n  node_inactive_ratio: %u",
		   !pgdat_reclaimable(zone->zone_pgdat),
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn,
		   zone->zone_pgdat->inactive_ratio);
	seq_putc(m, '\n');