Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7fbf1b8 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: vmscan: stall page reclaim and writeback pages based on dirty/writepage pages encountered"

Merge "mm: vmscan: stall page reclaim and writeback pages based on dirty/writepage pages encountered"
parents 122b8c2d 8a1c1c90
Loading
Loading
Loading
Loading
+48 −13
Original line number Diff line number Diff line
@@ -757,6 +757,25 @@ static enum page_references page_check_references(struct page *page,
	return PAGEREF_RECLAIM;
}

/*
 * Report whether a page is dirty and/or currently under writeback.
 *
 * Only file-backed pages are considered: anonymous pages are written
 * out from reclaim context itself rather than by the flusher threads,
 * so reclaim must not stall waiting on them — they always report
 * clean and not-under-writeback here.
 */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	bool file_backed = page_is_file_cache(page);

	/*
	 * For anonymous pages file_backed is false and the short-circuit
	 * keeps both results false without consulting the page flags;
	 * for file pages the flags are assumed accurate.
	 */
	*dirty = file_backed && PageDirty(page);
	*writeback = file_backed && PageWriteback(page);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
@@ -785,6 +804,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
		struct page *page;
		int may_enter_fs;
		enum page_references references = PAGEREF_RECLAIM_CLEAN;
		bool dirty, writeback;

		cond_resched();

@@ -812,6 +832,24 @@ static unsigned long shrink_page_list(struct list_head *page_list,
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a zone is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			nr_dirty++;

		if (dirty && !writeback)
			nr_unqueued_dirty++;

		/* Treat this page as congested if underlying BDI is */
		mapping = page_mapping(page);
		if (mapping && bdi_write_congested(mapping->backing_dev_info))
			nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
@@ -907,9 +945,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
			if (!add_to_swap(page, page_list))
				goto activate_locked;
			may_enter_fs = 1;
		}

			/* Adding to swap updated mapping */
			mapping = page_mapping(page);
		}

		/*
		 * The page is mapped into the page tables of one or more
@@ -929,11 +968,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
		}

		if (PageDirty(page)) {
			nr_dirty++;

			if (!PageWriteback(page))
				nr_unqueued_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but only writeback
@@ -964,7 +998,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
@@ -1407,7 +1440,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_dirty = 0;
	unsigned long nr_unqueued_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
@@ -1450,7 +1483,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
					&nr_dirty, &nr_writeback, false);
				&nr_unqueued_dirty, &nr_writeback, false);

	spin_lock_irq(&zone->lru_lock);

@@ -1505,11 +1538,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
	/*
	 * Similarly, if many dirty pages are encountered that are not
	 * currently being written then flag that kswapd should start
	 * writing back pages.
	 * writing back pages and stall to give a chance for flushers
	 * to catch up.
	 */
	if (global_reclaim(sc) && nr_dirty &&
			nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
	if (global_reclaim(sc) && nr_unqueued_dirty == nr_taken) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);
		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
	}

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),