Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca315166 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: don't avoid high-priority reclaim on memcg limit reclaim"

parents 2b0a7867 c383d089
Loading
Loading
Loading
Loading
+0 −3
Original line number | Diff line number | Diff line
@@ -1738,9 +1738,6 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
{
	int z;

	if (!pgdat_reclaimable(pgdat))
		return false;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

+42 −69
Original line number | Diff line number | Diff line
@@ -2199,30 +2199,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
	unsigned long anon_prio, file_prio;
	enum scan_balance scan_balance;
	unsigned long anon, file;
	bool force_scan = false;
	unsigned long ap, fp;
	enum lru_list lru;
	bool some_scanned;
	int pass;

	/*
	 * If the zone or memcg is small, nr[l] can be 0.  This
	 * results in no scanning on this priority and a potential
	 * priority drop.  Global direct reclaim can go to the next
	 * zone and tends to have no problems. Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount. When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (current_is_kswapd()) {
		if (!pgdat_reclaimable(pgdat))
			force_scan = true;
		if (!mem_cgroup_online(memcg))
			force_scan = true;
	}
	if (!global_reclaim(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
@@ -2354,9 +2332,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	some_scanned = false;
	/* Only use force_scan on second pass. */
	for (pass = 0; !some_scanned && pass < 2; pass++) {
	*lru_pages = 0;
	for_each_evictable_lru(lru) {
		int file = is_file_lru(lru);
@@ -2365,8 +2340,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,

		size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
		scan = size >> sc->priority;

			if (!scan && pass && force_scan)
		/*
		 * If the cgroup's already been deleted, make sure to
		 * scrape out the remaining cache.
		 */
		if (!scan && !mem_cgroup_online(memcg))
			scan = min(size, SWAP_CLUSTER_MAX);

		switch (scan_balance) {
@@ -2396,13 +2374,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,

		*lru_pages += size;
		nr[lru] = scan;

			/*
			 * Skip the second pass and don't force_scan,
			 * if we found something to scan.
			 */
			some_scanned |= !!scan;
		}
	}
}

@@ -2936,8 +2907,10 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!managed_zone(zone) ||
		    pgdat_reclaimable_pages(pgdat) == 0)
		if (!managed_zone(zone))
			continue;

		if (!zone_reclaimable_pages(zone))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
@@ -3384,7 +3357,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
		 * If we're getting trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/* Call soft limit reclaim before calling shrink_node. */