mm/vmscan.c: +2 −11

@@ -87,9 +87,6 @@ struct scan_control {
 	/* Can pages be swapped as part of reclaim? */
 	unsigned int may_swap:1;
 
-	/* e.g. boosted watermark reclaim leaves slabs alone */
-	unsigned int may_shrinkslab:1;
-
 	/*
 	 * Cgroups are not reclaimed below their configured memory.low,
 	 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2807,10 +2804,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
-			if (sc->may_shrinkslab) {
-				shrink_slab(sc->gfp_mask, pgdat->node_id,
-				    memcg, sc->priority);
-			}
+			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+				    sc->priority);
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -3295,7 +3290,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3340,7 +3334,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
-		.may_shrinkslab = 1,
 	};
 	unsigned long lru_pages;
 
@@ -3387,7 +3380,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = may_swap,
-		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3699,7 +3691,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 	 */
 	sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
 	sc.may_swap = !nr_boost_reclaim;
-	sc.may_shrinkslab = !nr_boost_reclaim;
 
 	/*
 	 * Do some background aging of the anon list, to give