
Commit 5e86f20f authored by Mark Salyzyn, committed by Martin Liu

Revert "UPSTREAM: mm: use alloc_flags to record if kswapd can wake"



This reverts commit 112ced56.

Reason for revert: revert customized code
Bug: 140544941
Test: boot
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Martin Liu <liumartin@google.com>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I400cabb69dc9e24ea5d84f677315dbb3bfd34e45
parent fbc355c1
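
For readers following the change: the patch being reverted cached the caller's __GFP_KSWAPD_RECLAIM bit in the allocator-internal alloc_flags word (as ALLOC_KSWAPD), so the slowpath tested alloc_flags instead of re-testing gfp_mask at each wake point; the revert restores the direct gfp_mask test. The standalone C sketch below illustrates the two equivalent checks. It is a simplified illustration, not the kernel source: gfp_t is a plain typedef here, and the __GFP_KSWAPD_RECLAIM value is a stand-in (only ALLOC_KSWAPD's 0x200 comes from the diff itself).

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_KSWAPD_RECLAIM	0x400u	/* stand-in value: caller allows kswapd wakeups */
#define ALLOC_KSWAPD		0x200u	/* value from the diff: cached copy in alloc_flags */

/* Reverted approach: translate the gfp bit into alloc_flags once up front... */
static unsigned int gfp_to_alloc_flags_sketch(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;
	return alloc_flags;
}

int main(void)
{
	gfp_t gfp_mask = __GFP_KSWAPD_RECLAIM;
	unsigned int alloc_flags = gfp_to_alloc_flags_sketch(gfp_mask);

	/* ...then test the cached bit at each wake point (pre-revert), */
	bool wake_cached = alloc_flags & ALLOC_KSWAPD;
	/* or re-test the gfp mask directly each time (post-revert). */
	bool wake_direct = gfp_mask & __GFP_KSWAPD_RECLAIM;

	printf("cached: %d, direct: %d\n", wake_cached, wake_direct);
	return 0;
}

Both tests answer the same question; the reverted patch only changed where the answer was stored, which is why the revert is behavior-neutral and needed nothing beyond a boot test.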
mm/internal.h +0 −1
@@ -499,7 +499,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT	  0x0
 #endif
-#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
mm/page_alloc.c +15 −18
@@ -3430,6 +3430,7 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif	/* CONFIG_NUMA */
 
+#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3439,16 +3440,10 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
+alloc_flags_nofragment(struct zone *zone)
 {
-	unsigned int alloc_flags = 0;
-
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		alloc_flags |= ALLOC_KSWAPD;
-
-#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		goto out;
+		return 0;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3457,12 +3452,17 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		goto out;
+		return 0;
 
-out:
-#endif /* CONFIG_ZONE_DMA32 */
-	return alloc_flags;
+	return ALLOC_NOFRAGMENT;
 }
+#else
+static inline unsigned int
+alloc_flags_nofragment(struct zone *zone)
+{
+	return 0;
+}
+#endif
 
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
@@ -4115,9 +4115,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		alloc_flags |= ALLOC_KSWAPD;
-
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -4349,7 +4346,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4407,7 +4404,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4630,7 +4627,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
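
An aside on the restored alloc_flags_nofragment: the BUILD_BUG_ON and the --zone decrement rely on a node's zones being laid out contiguously in node_zones[], with ZONE_DMA32 immediately before ZONE_NORMAL, so stepping the zone pointer back one array slot lands on the same node's DMA32 zone. Below is a standalone sketch of that pointer trick using a mock two-zone layout, not the kernel's structures; the kernel's BUILD_BUG_ON fails the build if the adjacency ever breaks, while the assert here is only a runtime stand-in.

#include <assert.h>
#include <stdio.h>

/* Mock zone indices: DMA32 sits immediately before Normal, as the diff assumes. */
enum zone_type { ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };

struct zone {
	enum zone_type idx;
	unsigned long present_pages;	/* nonzero == populated */
};

int main(void)
{
	/* node_zones[]: one contiguous entry per zone type within a node */
	struct zone node_zones[MAX_NR_ZONES] = {
		[ZONE_DMA32]  = { ZONE_DMA32,  1024 },
		[ZONE_NORMAL] = { ZONE_NORMAL, 4096 },
	};
	struct zone *zone = &node_zones[ZONE_NORMAL];

	/* Mirrors the BUILD_BUG_ON in the diff: the trick only works
	 * when the two zone indices are adjacent. */
	assert(ZONE_NORMAL - ZONE_DMA32 == 1);

	--zone;	/* step back one array slot: now the same node's DMA32 zone */
	printf("idx=%d populated=%d\n", zone->idx, zone->present_pages != 0);
	return 0;
}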