
Commit add8c328 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "mm: vmscan: stall page reclaim after a list of pages have been processed"

parents d7fbf1b8 ddff7025
mm/vmscan.c +33 −16
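For context: before this patch, kswapd called congestion_wait() from inside shrink_page_list() each time it met a page that was both under writeback and marked for immediate reclaim, so a list containing many such pages could sleep many times per scan. The patch instead counts those pages in nr_immediate and lets the caller stall once after the whole list has been processed. A minimal userspace C sketch of the two strategies follows; it is illustrative only, with STALL_MS standing in for congestion_wait(BLK_RW_ASYNC, HZ/10):

#include <stdio.h>

#define STALL_MS 100	/* roughly HZ/10 expressed in milliseconds */

/* Old behaviour: sleep once per page under writeback. */
static int stall_per_page(const int *under_writeback, int n)
{
	int total_ms = 0;

	for (int i = 0; i < n; i++)
		if (under_writeback[i])
			total_ms += STALL_MS;	/* one congestion_wait() each */
	return total_ms;
}

/* New behaviour: count the pages, stall once after the list is done. */
static int stall_after_list(const int *under_writeback, int n)
{
	int nr_immediate = 0;

	for (int i = 0; i < n; i++)
		if (under_writeback[i])
			nr_immediate++;
	return nr_immediate ? STALL_MS : 0;	/* single congestion_wait() */
}

int main(void)
{
	int pages[8] = { 1, 0, 1, 1, 0, 1, 1, 1 };

	printf("per-page stall: %d ms\n", stall_per_page(pages, 8));
	printf("batched stall:  %d ms\n", stall_after_list(pages, 8));
	return 0;
}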
@@ -785,6 +785,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_unqueued_dirty,
 				      unsigned long *ret_nr_writeback,
+				      unsigned long *ret_nr_immediate,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
@@ -795,6 +796,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
+	unsigned long nr_immediate = 0;
 
 	cond_resched();
 
@@ -861,8 +863,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 *    IO can complete. Waiting on the page itself risks an
 		 *    indefinite stall if it is impossible to writeback the
 		 *    page due to IO error or disconnected storage so instead
-		 *    block for HZ/10 or until some IO completes then clear the
-		 *    ZONE_WRITEBACK flag to recheck if the condition exists.
+		 *    note that the LRU is being scanned too quickly and the
+		 *    caller can stall after page list has been processed.
 		 *
 		 * 2) Global reclaim encounters a page, memcg encounters a
 		 *    page that is not marked for immediate reclaim or
@@ -892,10 +894,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    zone_is_reclaim_writeback(zone)) {
-				unlock_page(page);
-				congestion_wait(BLK_RW_ASYNC, HZ/10);
-				zone_clear_flag(zone, ZONE_WRITEBACK);
-				goto keep;
+				nr_immediate++;
+				goto keep_locked;
 
 			/* Case 2 above */
 			} else if (global_reclaim(sc) ||
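Note the exit-label change in this hunk: the old code had to unlock_page() itself before sleeping and jump to keep, while the new code leaves the page locked and takes the existing keep_locked path, which in the kernel source unlocks the page before falling through to keep. A hedged sketch of that cleanup-label idiom, with page locking modelled as a plain boolean rather than the real page lock:

#include <stdbool.h>
#include <stdio.h>

struct page { bool locked; };

/* Model only: stands in for the kernel's unlock_page(). */
static void unlock_page(struct page *p) { p->locked = false; }

/* Sketch of the keep/keep_locked exit labels in shrink_page_list():
 * jump to keep_locked while still holding the lock, to keep after
 * releasing it yourself. */
static void process(struct page *p, bool immediate_reclaim)
{
	p->locked = true;		/* trylock_page() succeeded */

	if (immediate_reclaim)
		goto keep_locked;	/* counted; the caller stalls later */

	unlock_page(p);			/* other paths unlock early */
	goto keep;

keep_locked:
	unlock_page(p);
keep:
	printf("page kept, locked=%d\n", p->locked);
}

int main(void)
{
	struct page p = { false };

	process(&p, true);
	process(&p, false);
	return 0;
}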
@@ -1121,6 +1121,7 @@ keep:
 	mem_cgroup_uncharge_end();
 	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
 	*ret_nr_writeback += nr_writeback;
+	*ret_nr_immediate += nr_immediate;
 	return nr_reclaimed;
 }
 
@@ -1132,7 +1133,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2;
+	unsigned long ret, dummy1, dummy2, dummy3;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1146,7 +1147,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 
 	ret = shrink_page_list(&clean_pages, zone, &sc,
 				TTU_UNMAP|TTU_IGNORE_ACCESS,
-				&dummy1, &dummy2, true);
+				&dummy1, &dummy2, &dummy3, true);
 	list_splice(&clean_pages, page_list);
 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
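reclaim_clean_pages_from_list() has no use for the new statistic, so it threads a throwaway dummy3 through, matching the existing dummy1/dummy2 pattern for the other out-parameters. A brief illustration of that convention; the reclaim() function here is a hypothetical stand-in, not kernel API:

#include <stdio.h>

/* Hypothetical stand-in for shrink_page_list(): returns the pages
 * reclaimed and reports extra statistics through out-parameters. */
static unsigned long reclaim(unsigned long *nr_dirty,
			     unsigned long *nr_writeback,
			     unsigned long *nr_immediate)
{
	*nr_dirty = 4;
	*nr_writeback = 2;
	*nr_immediate = 1;
	return 9;
}

int main(void)
{
	/* A caller with no use for the statistics still needs valid
	 * addresses for the stores to land in, hence the dummies. */
	unsigned long ret, dummy1, dummy2, dummy3;

	ret = reclaim(&dummy1, &dummy2, &dummy3);
	printf("reclaimed %lu pages\n", ret);
	return 0;
}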
@@ -1442,6 +1443,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_taken;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_writeback = 0;
+	unsigned long nr_immediate = 0;
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct zone *zone = lruvec_zone(lruvec);
@@ -1483,7 +1485,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-				&nr_unqueued_dirty, &nr_writeback, false);
+			&nr_unqueued_dirty, &nr_writeback, &nr_immediate,
+			false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1536,14 +1539,28 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	}
 
 	/*
-	 * Similarly, if many dirty pages are encountered that are not
-	 * currently being written then flag that kswapd should start
-	 * writing back pages and stall to give a chance for flushers
-	 * to catch up.
+	 * memcg will stall in page writeback so only consider forcibly
+	 * stalling for global reclaim
 	 */
-	if (global_reclaim(sc) && nr_unqueued_dirty == nr_taken) {
-		congestion_wait(BLK_RW_ASYNC, HZ/10);
-		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+	if (global_reclaim(sc)) {
+		/*
+		 * If dirty pages are scanned that are not queued for IO, it
+		 * implies that flushers are not keeping up. In this case, flag
+		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+		 * pages from reclaim context. It will forcibly stall in the
+		 * next check.
+		 */
+		if (nr_unqueued_dirty == nr_taken)
+			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
+		/*
+		 * In addition, if kswapd scans pages marked for
+		 * immediate reclaim and under writeback (nr_immediate), it
+		 * implies that pages are cycling through the LRU faster than
+		 * they are written so also forcibly stall.
+		 */
+		if (nr_unqueued_dirty == nr_taken || nr_immediate)
+			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
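The final hunk is where the deferred stall actually happens: only for global reclaim, shrink_inactive_list() flags the zone when every scanned page was dirty but not queued for IO, and sleeps once when either that holds or any page was counted as immediate. A hedged userspace model of that decision; names mirror the kernel's, but zone_set_flag() and congestion_wait() are stubs here:

#include <stdbool.h>
#include <stdio.h>

static bool zone_tail_lru_dirty;	/* models ZONE_TAIL_LRU_DIRTY */

static void zone_set_flag(void)   { zone_tail_lru_dirty = true; }
static void congestion_wait(void) { puts("stalling ~HZ/10"); }

/* Models the post-scan logic added to shrink_inactive_list(). */
static void maybe_stall(bool global_reclaim, unsigned long nr_taken,
			unsigned long nr_unqueued_dirty,
			unsigned long nr_immediate)
{
	/* memcg reclaim stalls in page writeback, so only global
	 * reclaim considers a forced stall. */
	if (!global_reclaim)
		return;

	/* Flushers are not keeping up: ask kswapd to write pages. */
	if (nr_unqueued_dirty == nr_taken)
		zone_set_flag();

	/* Stall once if flushers lag or pages cycle through the LRU
	 * faster than they are written back. */
	if (nr_unqueued_dirty == nr_taken || nr_immediate)
		congestion_wait();
}

int main(void)
{
	maybe_stall(true, 32, 32, 0);	/* all dirty-unqueued: flag + stall */
	maybe_stall(true, 32, 5, 3);	/* immediate pages seen: stall only */
	maybe_stall(false, 32, 32, 9);	/* memcg reclaim: never stalls here */
	printf("zone flagged: %d\n", zone_tail_lru_dirty);
	return 0;
}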