mm/vmscan.c (+49 −19)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1343,31 +1343,32 @@ int isolate_lru_page(struct page *page)
 	return ret;
 }
 
-/*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
-static int too_many_isolated(struct zone *zone, int file,
-		struct scan_control *sc)
+static int __too_many_isolated(struct zone *zone, int file,
+		struct scan_control *sc, int safe)
 {
 	unsigned long inactive, isolated;
 
-	if (current_is_kswapd())
-		return 0;
-
-	if (!global_reclaim(sc))
-		return 0;
-
 	if (file) {
-		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
-		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+		if (safe) {
+			inactive = zone_page_state_snapshot(zone,
+					NR_INACTIVE_FILE);
+			isolated = zone_page_state_snapshot(zone,
+					NR_ISOLATED_FILE);
+		} else {
+			inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+			isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+		}
 	} else {
-		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
-		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+		if (safe) {
+			inactive = zone_page_state_snapshot(zone,
+					NR_INACTIVE_ANON);
+			isolated = zone_page_state_snapshot(zone,
+					NR_ISOLATED_ANON);
+		} else {
+			inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+			isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+		}
 	}
 
 	/*
 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
@@ -1380,6 +1381,32 @@ static int too_many_isolated(struct zone *zone, int file,
 	return isolated > inactive;
 }
 
+/*
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get resheduled. When there are massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
+ */
+static int too_many_isolated(struct zone *zone, int file,
+		struct scan_control *sc, int safe)
+{
+	if (current_is_kswapd())
+		return 0;
+
+	if (!global_reclaim(sc))
+		return 0;
+
+	if (unlikely(__too_many_isolated(zone, file, sc, 0))) {
+		if (safe)
+			return __too_many_isolated(zone, file, sc, safe);
+		else
+			return 1;
+	}
+
+	return 0;
+}
+
 static noinline_for_stack void
 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
@@ -1453,15 +1480,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_immediate = 0;
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
+	int safe = 0;
 	struct zone *zone = lruvec_zone(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
-	while (unlikely(too_many_isolated(zone, file, sc))) {
+	while (unlikely(too_many_isolated(zone, file, sc, safe))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/* We are about to die and free our memory. Return now. */
 		if (fatal_signal_pending(current))
 			return SWAP_CLUSTER_MAX;
+
+		safe = 1;
 	}
 
 	lru_add_drain();
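A note on why the patch distinguishes the two read paths: zone_page_state() reads only the zone-wide counter, while updates to NR_INACTIVE_*/NR_ISOLATED_* are batched in each CPU's vm_stat_diff[] and folded into the zone counter only once they cross a per-CPU threshold, so the cheap read can lag behind the true value by several pages per CPU. zone_page_state_snapshot() additionally sums those pending per-CPU deltas, which is accurate but has to touch every online CPU. The sketch below is a self-contained userspace model of that batching, not kernel code; the names mod_counter, read_fast, read_snapshot, STAT_THRESHOLD and the numbers in main() are made up for illustration, and the real per-CPU threshold is sized dynamically by the kernel.

/*
 * Userspace model (illustration only) of the vmstat batching that
 * too_many_isolated() reads. Each CPU accumulates updates in a private
 * delta and folds it into the shared counter only once it crosses a
 * threshold, so a cheap read of the shared counter alone can be stale.
 */
#include <stdio.h>

#define NR_CPUS		4
#define STAT_THRESHOLD	32	/* stand-in for the per-cpu stat threshold */

static long global_counter;	/* stand-in for zone->vm_stat[item] */
static long cpu_diff[NR_CPUS];	/* stand-in for per-cpu vm_stat_diff[item] */

/* Model of a batched counter update issued from one CPU. */
static void mod_counter(int cpu, long delta)
{
	cpu_diff[cpu] += delta;
	if (cpu_diff[cpu] > STAT_THRESHOLD || cpu_diff[cpu] < -STAT_THRESHOLD) {
		global_counter += cpu_diff[cpu];	/* fold the batch in */
		cpu_diff[cpu] = 0;
	}
}

/* Cheap read, as zone_page_state() does: misses the pending deltas. */
static long read_fast(void)
{
	long x = global_counter;
	return x < 0 ? 0 : x;
}

/* Snapshot read, as zone_page_state_snapshot() does: adds the deltas. */
static long read_snapshot(void)
{
	long x = global_counter;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		x += cpu_diff[cpu];
	return x < 0 ? 0 : x;
}

int main(void)
{
	/* Each CPU isolates 20 pages; no delta crosses the threshold. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		mod_counter(cpu, 20);

	printf("fast read:     %ld\n", read_fast());		/* prints 0 */
	printf("snapshot read: %ld\n", read_snapshot());	/* prints 80 */
	return 0;
}

This lag is also why the new wrapper falls back to __too_many_isolated(..., safe) only after the cheap check has already reported "too many", and why shrink_inactive_list() sets safe = 1 only after the first congestion_wait() nap: the accurate snapshot walks every online CPU, so it stays off the common path and is used only once the throttling loop is suspected of spinning on stale counters.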