
Commit 82d4e597 authored by Mark Salyzyn, committed by Martin Liu

Revert "UPSTREAM: mm, page_alloc: spread allocations across zones before introducing fragmentation"



This reverts commit 8ad4b225.

Reason for revert: revert customized code
Bug: 140544941
Test: boot
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Martin Liu <liumartin@google.com>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I340fb8af98b3f6e033fd5463b082fbca43abe941
parent 5e86f20f
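
For context, the change being reverted made the page allocator prefer spreading allocations across local zones (e.g. from Normal into DMA32) before resorting to fallbacks that mix pageblock types. Below is a standalone model of that two-pass scheme, reconstructed from the hunks that follow; the struct fields and helper names are invented for illustration, and none of this is kernel code.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */

struct zone {
	const char *name;
	bool has_free;        /* zone has free pages at all */
	bool would_fragment;  /* allocating here needs a fragmenting fallback */
};

static bool zone_satisfies(const struct zone *z, unsigned int alloc_flags)
{
	if (!z->has_free)
		return false;
	/* First pass: refuse zones where we would have to steal pages
	 * from pageblocks of another migratetype. */
	if ((alloc_flags & ALLOC_NOFRAGMENT) && z->would_fragment)
		return false;
	return true;
}

static const struct zone *get_page_from_zones(const struct zone *zones,
					      int n, unsigned int alloc_flags)
{
retry:
	for (int i = 0; i < n; i++)
		if (zone_satisfies(&zones[i], alloc_flags))
			return &zones[i];
	/* Every zone was fragmented: drop the restriction and retry,
	 * mirroring the removed "reset and try again" hunk below. */
	if (alloc_flags & ALLOC_NOFRAGMENT) {
		alloc_flags &= ~ALLOC_NOFRAGMENT;
		goto retry;
	}
	return NULL;
}

int main(void)
{
	const struct zone zones[] = {
		{ "Normal", true, true },  /* free pages only via fragmenting fallback */
		{ "DMA32",  true, false }, /* free pages of the requested type */
	};
	const struct zone *z = get_page_from_zones(zones, 2, ALLOC_NOFRAGMENT);

	printf("allocated from %s\n", z ? z->name : "nowhere");
	return 0;
}

With ALLOC_NOFRAGMENT set, the model skips Normal and allocates from DMA32; with the flag cleared, as after this revert, the first zone with free pages wins regardless of fragmentation.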
mm/internal.h +4 −9
@@ -494,11 +494,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
-#ifdef CONFIG_ZONE_DMA32
-#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
-#else
-#define ALLOC_NOFRAGMENT	  0x0
-#endif

enum ttu_flags;
struct tlbflush_unmap_batch;
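
One property of the removed definitions above: when CONFIG_ZONE_DMA32 is not set, ALLOC_NOFRAGMENT is defined as 0x0, so every alloc_flags & ALLOC_NOFRAGMENT test is constant-false and the compiler can discard the fragmentation-avoidance branches entirely. A minimal standalone illustration of that pattern follows; nothing here is kernel code.

#include <stdio.h>

/* Compile with -DCONFIG_ZONE_DMA32 to enable the flag, as the kernel
 * config would; without it, the test below folds to false at compile time. */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT 0x0
#endif

int main(void)
{
	unsigned int alloc_flags = ALLOC_NOFRAGMENT;

	if (alloc_flags & ALLOC_NOFRAGMENT) /* dead code when the flag is 0x0 */
		puts("first pass avoids fragmenting fallbacks");
	else
		puts("no fragmentation avoidance configured");
	return 0;
}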
mm/page_alloc.c +15 −98
@@ -2464,30 +2464,20 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 * condition simpler.
 */
static __always_inline bool
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
-						unsigned int alloc_flags)
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area *area;
	int current_order;
-	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

-	/*
-	 * Do not steal pages from freelists belonging to other pageblocks
-	 * i.e. orders < pageblock_order. If there are no local zones free,
-	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
-	 */
-	if (alloc_flags & ALLOC_NOFRAGMENT)
-		min_order = pageblock_order;
-
	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
-	for (current_order = MAX_ORDER - 1; current_order >= min_order;
+	for (current_order = MAX_ORDER - 1; current_order >= order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
@@ -2546,16 +2536,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
-__rmqueue(struct zone *zone, unsigned int order, int migratetype,
-						unsigned int alloc_flags)
+__rmqueue(struct zone *zone, unsigned int order, int migratetype)
{
	struct page *page;

retry:
	page = __rmqueue_smallest(zone, order, migratetype);

-	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
-						  alloc_flags))
+	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
		goto retry;

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
@@ -2587,7 +2575,7 @@ static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
-			int migratetype, unsigned int alloc_flags)
+			int migratetype)
{
	int i, alloced = 0;

@@ -2603,7 +2591,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
		if (is_migrate_cma(migratetype))
			page = __rmqueue_cma(zone, order);
		else
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+			page = __rmqueue(zone, order, migratetype);

		if (unlikely(page == NULL))
			break;
@@ -2646,14 +2634,14 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 */
static struct list_head *get_populated_pcp_list(struct zone *zone,
			unsigned int order, struct per_cpu_pages *pcp,
-			int migratetype, unsigned int alloc_flags)
+			int migratetype)
{
	struct list_head *list = &pcp->lists[migratetype];

	if (list_empty(list)) {
		pcp->count += rmqueue_bulk(zone, order,
				pcp->batch, list,
-				migratetype, alloc_flags);
+				migratetype);

		if (list_empty(list))
			list = NULL;
@@ -3082,7 +3070,6 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)

/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
-			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
			gfp_t gfp_flags)
{
@@ -3094,7 +3081,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
		if (migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA) {
			list = get_populated_pcp_list(zone, 0, pcp,
-					get_cma_migrate_type(), alloc_flags);
+					get_cma_migrate_type());
		}

		if (list == NULL) {
@@ -3103,7 +3090,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			 * free CMA pages.
			 */
			list = get_populated_pcp_list(zone, 0, pcp,
-					migratetype, alloc_flags);
+					migratetype);
			if (unlikely(list == NULL) ||
					unlikely(list_empty(list)))
				return NULL;
@@ -3120,8 +3107,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
-			gfp_t gfp_flags, int migratetype,
-			unsigned int alloc_flags)
+			gfp_t gfp_flags, int migratetype)
{
	struct per_cpu_pages *pcp;
	struct page *page;
@@ -3129,7 +3115,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp,
+	page = __rmqueue_pcplist(zone,  migratetype, pcp,
				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
@@ -3153,7 +3139,7 @@ struct page *rmqueue(struct zone *preferred_zone,

	if (likely(order == 0)) {
		page = rmqueue_pcplist(preferred_zone, zone, order,
-				gfp_flags, migratetype, alloc_flags);
+				gfp_flags, migratetype);
		goto out;
	}

@@ -3178,7 +3164,7 @@ struct page *rmqueue(struct zone *preferred_zone,
			page = __rmqueue_cma(zone, order);

		if (!page)
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+			page = __rmqueue(zone, order, migratetype);
	} while (page && check_new_pages(page, order));

	spin_unlock(&zone->lock);
@@ -3430,40 +3416,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
}
#endif	/* CONFIG_NUMA */

-#ifdef CONFIG_ZONE_DMA32
-/*
- * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
- * fragmentation is subtle. If the preferred zone was HIGHMEM then
- * premature use of a lower zone may cause lowmem pressure problems that
- * are worse than fragmentation. If the next zone is ZONE_DMA then it is
- * probably too small. It only makes sense to spread allocations to avoid
- * fragmentation between the Normal and DMA32 zones.
- */
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	if (zone_idx(zone) != ZONE_NORMAL)
-		return 0;
-
-	/*
-	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
-	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
-	 * on UMA that if Normal is populated then so is DMA32.
-	 */
-	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
-	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		return 0;
-
-	return ALLOC_NOFRAGMENT;
-}
-#else
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	return 0;
-}
-#endif
-
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
@@ -3472,18 +3424,14 @@ static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
						const struct alloc_context *ac)
{
-	struct zoneref *z;
+	struct zoneref *z = ac->preferred_zoneref;
	struct zone *zone;
	struct pglist_data *last_pgdat_dirty_limit = NULL;
-	bool no_fallback;

-retry:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
-	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
-	z = ac->preferred_zoneref;
	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		struct page *page;
@@ -3522,22 +3470,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
			}
		}

-		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
-			int local_nid;
-
-			/*
-			 * If moving to a remote node, retry but allow
-			 * fragmenting fallbacks. Locality is more important
-			 * than fragmentation avoidance.
-			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
-			if (zone_to_nid(zone) != local_nid) {
-				alloc_flags &= ~ALLOC_NOFRAGMENT;
-				goto retry;
-			}
-		}
-
		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		if (!zone_watermark_fast(zone, order, mark,
				       ac_classzone_idx(ac), alloc_flags)) {
@@ -3605,15 +3537,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
		}
	}

-	/*
-	 * It's possible on a UMA machine to get through all zones that are
-	 * fragmented. If avoiding fragmentation, reset and try again.
-	 */
-	if (no_fallback) {
-		alloc_flags &= ~ALLOC_NOFRAGMENT;
-		goto retry;
-	}
-
	return NULL;
}

@@ -4623,12 +4546,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,

	finalise_ac(gfp_mask, &ac);

-	/*
-	 * Forbid the first pass from falling back to types that fragment
-	 * memory until all local zones are considered.
-	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
-
	/* First allocation attempt */
	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
	if (likely(page))