
Commit 5a1c84b4 authored by Mel Gorman, committed by Linus Torvalds

mm: remove reclaim and compaction retry approximations

If per-zone LRU accounting is available then there is no point
approximating whether reclaim and compaction should retry based on pgdat
statistics.  This is effectively a revert of "mm, vmstat: remove zone
and node double accounting by approximating retries" with the difference
that inactive/active stats are still available.  This preserves the
history of why the approximation was tried and why it had to be
reverted to handle OOM kills on 32-bit systems.

Link: http://lkml.kernel.org/r/1469110261-7365-4-git-send-email-mgorman@techsingularity.net


Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bb4cc2be
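
Parts of the diff did not render on this page, so for context, here is a sketch of the zone_reclaimable_pages() helper that the swap.h hunk below re-declares. This is a reconstruction built from the per-zone NR_ZONE_* LRU counters this series provides, not the patch text itself:

/* Reconstruction (not quoted from the patch): zone_reclaimable_pages()
 * as re-introduced in mm/vmscan.c, summing per-zone LRU counters. */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	/* File pages are reclaimable regardless of swap */
	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
	     zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);

	/* Anonymous pages only count when there is swap to put them in */
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
		      zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}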
include/linux/mmzone.h +1 −0
@@ -116,6 +116,7 @@ enum zone_stat_item {
 	NR_ZONE_INACTIVE_FILE,
 	NR_ZONE_ACTIVE_FILE,
 	NR_ZONE_UNEVICTABLE,
+	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
include/linux/swap.h +1 −0
@@ -307,6 +307,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
 						struct vm_area_struct *vma);
 
 /* linux/mm/vmscan.c */
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
mm/compaction.c +1 −19
@@ -1438,11 +1438,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 {
 	struct zone *zone;
 	struct zoneref *z;
-	pg_data_t *last_pgdat = NULL;
-
-	/* Do not retry compaction for zone-constrained allocations */
-	if (ac->high_zoneidx < ZONE_NORMAL)
-		return false;
 
 	/*
 	 * Make sure at least one zone would pass __compaction_suitable if we continue
@@ -1453,27 +1448,14 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 		unsigned long available;
 		enum compact_result compact_result;
 
-		if (last_pgdat == zone->zone_pgdat)
-			continue;
-
-		/*
-		 * This over-estimates the number of pages available for
-		 * reclaim/compaction but walking the LRU would take too
-		 * long. The consequences are that compaction may retry
-		 * longer than it should for a zone-constrained allocation
-		 * request.
-		 */
-		last_pgdat = zone->zone_pgdat;
-		available = pgdat_reclaimable_pages(zone->zone_pgdat) / order;
-
 		/*
 		 * Do not consider all the reclaimable memory because we do not
 		 * want to trash just for a single high order allocation which
 		 * is even not guaranteed to appear even if __compaction_suitable
 		 * is happy about the watermark check.
 		 */
+		available = zone_reclaimable_pages(zone) / order;
 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
-		available = min(zone->managed_pages, available);
 		compact_result = __compaction_suitable(zone, order, alloc_flags,
 				ac_classzone_idx(ac), available);
 		if (compact_result != COMPACT_SKIPPED &&
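
The deleted comment spells out the trade-off this hunk removes: the node-wide figure over-estimated what a zone-constrained request could actually use. A hedged illustration with invented numbers (the order, page counts, and zone split below are assumptions, not values from the commit):

/* Invented numbers: contrast the old node-wide estimate with the
 * per-zone one now used in compaction_zonelist_suitable(). */
#include <stdio.h>

int main(void)
{
	int order = 3;				 /* a 2^3 = 8 page request */
	unsigned long node_reclaimable = 400000; /* whole pgdat, mostly in other zones */
	unsigned long zone_reclaimable = 40000;	 /* the one zone the request fits in */
	unsigned long zone_free = 2000;		 /* NR_FREE_PAGES snapshot */

	/* Old: pgdat_reclaimable_pages() counts memory in zones the
	 * allocation cannot use, so compaction keeps retrying. */
	printf("pgdat-based estimate: %lu pages\n",
	       node_reclaimable / order + zone_free);

	/* New: zone_reclaimable_pages() keeps the estimate honest, which
	 * is what lets the ZONE_NORMAL special case above be deleted. */
	printf("zone-based estimate:  %lu pages\n",
	       zone_reclaimable / order + zone_free);
	return 0;
}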
mm/migrate.c +2 −0
@@ -513,7 +513,9 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
 			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
+			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
 			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
+			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
 		}
 	}
 	local_irq_enable();
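
Migration moves a dirty page between zones without passing through the dirty/writeback accounting paths, so the new per-zone counter has to be transferred by hand here, mirroring the existing node-level NR_FILE_DIRTY transfer; otherwise NR_ZONE_WRITE_PENDING would drift on both the source and destination zones.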
mm/page-writeback.c +5 −0
@@ -2462,6 +2462,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
+		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_node_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
@@ -2483,6 +2484,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 	if (mapping_cap_account_dirty(mapping)) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		dec_node_page_state(page, NR_FILE_DIRTY);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_SIZE);
 	}
@@ -2739,6 +2741,7 @@ int clear_page_dirty_for_io(struct page *page)
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 			dec_node_page_state(page, NR_FILE_DIRTY);
+			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
@@ -2785,6 +2788,7 @@ int test_clear_page_writeback(struct page *page)
 	if (ret) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
@@ -2839,6 +2843,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	if (!ret) {
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		inc_node_page_state(page, NR_WRITEBACK);
+		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
 	unlock_page_memcg(page);
 	return ret;
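
Taken together, these page-writeback.c hooks keep NR_ZONE_WRITE_PENDING equal to the number of pages in a zone that are dirty or under writeback, matching the counter's "dirty, writeback and unstable" comment. A minimal runnable model of that lifecycle; the function names here are invented stand-ins for the kernel paths named in the comments (account_page_cleaned covers the cancelled-write path not modelled):

#include <assert.h>
#include <stdio.h>

static long nr_zone_write_pending;

/* Invented stand-ins for the kernel hooks patched above. */
static void page_dirtied(void)         { nr_zone_write_pending++; } /* account_page_dirtied */
static void dirty_cleared_for_io(void) { nr_zone_write_pending--; } /* clear_page_dirty_for_io */
static void writeback_started(void)    { nr_zone_write_pending++; } /* __test_set_page_writeback */
static void writeback_finished(void)   { nr_zone_write_pending--; } /* test_clear_page_writeback */

int main(void)
{
	/* One file page: write(2) dirties it, the flusher writes it back. */
	page_dirtied();
	dirty_cleared_for_io();
	writeback_started();
	writeback_finished();

	/* Every increment has a matching decrement, so the counter balances. */
	assert(nr_zone_write_pending == 0);
	printf("NR_ZONE_WRITE_PENDING model balanced: %ld\n",
	       nr_zone_write_pending);
	return 0;
}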