mm/vmscan.c (+48 −13)

@@ -757,6 +757,25 @@ static enum page_references page_check_references(struct page *page,
 	return PAGEREF_RECLAIM;
 }
 
+/* Check if a page is dirty or under writeback */
+static void page_check_dirty_writeback(struct page *page,
+				       bool *dirty, bool *writeback)
+{
+	/*
+	 * Anonymous pages are not handled by flushers and must be written
+	 * from reclaim context. Do not stall reclaim based on them
+	 */
+	if (!page_is_file_cache(page)) {
+		*dirty = false;
+		*writeback = false;
+		return;
+	}
+
+	/* By default assume that the page flags are accurate */
+	*dirty = PageDirty(page);
+	*writeback = PageWriteback(page);
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -785,6 +804,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		struct page *page;
 		int may_enter_fs;
 		enum page_references references = PAGEREF_RECLAIM_CLEAN;
+		bool dirty, writeback;
 
 		cond_resched();
 
@@ -812,6 +832,24 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
+		/*
+		 * The number of dirty pages determines if a zone is marked
+		 * reclaim_congested which affects wait_iff_congested. kswapd
+		 * will stall and start writing pages if the tail of the LRU
+		 * is all dirty unqueued pages.
+		 */
+		page_check_dirty_writeback(page, &dirty, &writeback);
+		if (dirty || writeback)
+			nr_dirty++;
+
+		if (dirty && !writeback)
+			nr_unqueued_dirty++;
+
+		/* Treat this page as congested if underlying BDI is */
+		mapping = page_mapping(page);
+		if (mapping && bdi_write_congested(mapping->backing_dev_info))
+			nr_congested++;
+
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
 		 * are three cases to consider.
@@ -907,9 +945,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (!add_to_swap(page, page_list))
 				goto activate_locked;
 			may_enter_fs = 1;
-		}
 
-		mapping = page_mapping(page);
+			/* Adding to swap updated mapping */
+			mapping = page_mapping(page);
+		}
 
 		/*
 		 * The page is mapped into the page tables of one or more
@@ -929,11 +968,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		}
 
 		if (PageDirty(page)) {
-			nr_dirty++;
-
-			if (!PageWriteback(page))
-				nr_unqueued_dirty++;
-
 			/*
 			 * Only kswapd can writeback filesystem pages to
 			 * avoid risk of stack overflow but only writeback
@@ -964,7 +998,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Page is dirty, try to write it out here */
 			switch (pageout(page, mapping, sc)) {
 			case PAGE_KEEP:
-				nr_congested++;
 				goto keep_locked;
 			case PAGE_ACTIVATE:
 				goto activate_locked;
@@ -1407,7 +1440,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
+	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
@@ -1450,7 +1483,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-					&nr_dirty, &nr_writeback, false);
+					&nr_unqueued_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1505,11 +1538,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	/*
 	 * Similarly, if many dirty pages are encountered that are not
 	 * currently being written then flag that kswapd should start
-	 * writing back pages.
+	 * writing back pages and stall to give a chance for flushers
+	 * to catch up.
 	 */
-	if (global_reclaim(sc) && nr_dirty &&
-			nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
+	if (global_reclaim(sc) && nr_unqueued_dirty == nr_taken) {
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+	}
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
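For readers skimming the diff, here is a minimal standalone sketch of the heuristic it introduces: count pages at the tail of the LRU that are dirty but not yet queued for writeback (ignoring anonymous pages, as page_check_dirty_writeback does), and stall reclaim only when every page taken is such a page. This is illustrative user-space C, not kernel code; struct lru_page, check_dirty_writeback, and should_stall are hypothetical stand-ins for the kernel structures and helpers.

	/* Sketch of the dirty/unqueued stall heuristic; names are hypothetical. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct lru_page {
		bool file_backed;	/* page cache vs. anonymous */
		bool dirty;		/* PageDirty() analogue */
		bool writeback;		/* PageWriteback() analogue */
	};

	/* Mirrors page_check_dirty_writeback(): anon pages never stall reclaim */
	static void check_dirty_writeback(const struct lru_page *page,
					  bool *dirty, bool *writeback)
	{
		if (!page->file_backed) {
			*dirty = false;
			*writeback = false;
			return;
		}
		*dirty = page->dirty;
		*writeback = page->writeback;
	}

	/*
	 * True when every page taken off the LRU tail is dirty but not yet
	 * queued for writeback -- the condition under which the patch has
	 * kswapd stall (congestion_wait) and set ZONE_TAIL_LRU_DIRTY. The
	 * nr_taken guard is a sketch-level addition to avoid 0 == 0.
	 */
	static bool should_stall(const struct lru_page *pages, size_t nr_taken)
	{
		size_t nr_unqueued_dirty = 0;

		for (size_t i = 0; i < nr_taken; i++) {
			bool dirty, writeback;

			check_dirty_writeback(&pages[i], &dirty, &writeback);
			if (dirty && !writeback)
				nr_unqueued_dirty++;
		}
		return nr_taken && nr_unqueued_dirty == nr_taken;
	}

	int main(void)
	{
		struct lru_page tail[] = {
			{ .file_backed = true, .dirty = true, .writeback = false },
			{ .file_backed = true, .dirty = true, .writeback = false },
		};

		printf("stall: %d\n", should_stall(tail, 2));	/* prints "stall: 1" */
		return 0;
	}

The all-or-nothing test (nr_unqueued_dirty == nr_taken) replaces the earlier priority-scaled threshold on nr_dirty: a stall is taken only when the entire tail of the LRU is unqueued dirty pages, giving flusher threads a chance to catch up instead of reacting to a partially dirty tail.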