Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 93ea9964 authored by Mel Gorman, committed by Linus Torvalds
Browse files

mm, page_alloc: remove field from alloc_context



The classzone_idx can always be derived from preferred_zoneref, so remove the
now-redundant field and save stack space.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c33d6c06
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1602,7 +1602,7 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
				ac_classzone_idx(ac));
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
@@ -1612,7 +1612,7 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
					ac_classzone_idx(ac), alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
+2 −1
Original line number Diff line number Diff line
@@ -103,12 +103,13 @@ struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int classzone_idx;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Zone index the allocation is effectively targeting: inferred from the
 * preferred zoneref instead of being stored as a separate field in
 * struct alloc_context. The argument is parenthesized so the macro
 * expands correctly for any expression passed as 'ac' (e.g. a ternary).
 */
#define ac_classzone_idx(ac) zonelist_zone_idx((ac)->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
+3 −5
Original line number Diff line number Diff line
@@ -2771,7 +2771,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,

		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		if (!zone_watermark_fast(zone, order, mark,
				       ac->classzone_idx, alloc_flags)) {
				       ac_classzone_idx(ac), alloc_flags)) {
			int ret;

			/* Checked here to keep the fast path fast */
@@ -2794,7 +2794,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
						ac->classzone_idx, alloc_flags))
						ac_classzone_idx(ac), alloc_flags))
					goto try_this_zone;

				continue;
@@ -3114,7 +3114,7 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
						ac->high_zoneidx, ac->nodemask)
		wakeup_kswapd(zone, order, zonelist_zone_idx(ac->preferred_zoneref));
		wakeup_kswapd(zone, order, ac_classzone_idx(ac));
}

static inline unsigned int
@@ -3422,8 +3422,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		goto no_zone;
	}

	ac.classzone_idx = zonelist_zone_idx(ac.preferred_zoneref);

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
	if (likely(page))