Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81227ff5 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: Increase number of GFP masks"

parents 4d9ba243 e1f0edb1
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -40,12 +40,12 @@ struct vm_area_struct;
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
#define ___GFP_CMA		0x4000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
#define ___GFP_CMA		0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
@@ -211,7 +211,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
+4 −2
Original line number Diff line number Diff line
@@ -40,8 +40,6 @@ enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -58,6 +56,8 @@ enum migratetype {
	 */
	MIGRATE_CMA,
#endif
	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
@@ -77,9 +77,11 @@ extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#  define get_cma_migrate_type() MIGRATE_CMA
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#  define get_cma_migrate_type() MIGRATE_MOVABLE
#endif

static inline bool is_migrate_movable(int mt)
+73 −35
Original line number Diff line number Diff line
@@ -239,10 +239,10 @@ char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
	"HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
@@ -2329,25 +2329,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
	return page;
}

static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
					int migratetype)
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	struct page *page = 0;

retry:
#ifdef CONFIG_CMA
	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
	if (IS_ENABLED(CONFIG_CMA))
		if (!zone->cma_alloc)
			page = __rmqueue_cma_fallback(zone, order);
	else
#endif
		page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
		goto retry;

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
	return page;
}
#else
static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	return NULL;
}
#endif

/*
 * Obtain a specified number of elements from the buddy allocator, all under
@@ -2356,7 +2354,7 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, bool cold, int cma)
			int migratetype, bool cold)
{
	int i, alloced = 0;

@@ -2364,8 +2362,13 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
	for (i = 0; i < count; ++i) {
		struct page *page;

		if (cma)
			page = __rmqueue_cma(zone, order, migratetype);
		/*
		 * If migrate type CMA is being requested only try to
		 * satisfy the request with CMA pages to try and increase
		 * CMA utilization.
		 */
		if (is_migrate_cma(migratetype))
			page = __rmqueue_cma(zone, order);
		else
			page = __rmqueue(zone, order, migratetype);

@@ -2406,6 +2409,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
	return alloced;
}

/*
 * Return the pcp list that corresponds to the migrate type if that list isn't
 * empty.
 * If the list is empty return NULL.
 *
 * @zone:        zone whose buddy allocator is used to refill the list
 * @order:       allocation order passed through to rmqueue_bulk()
 * @pcp:         this CPU's per-cpu pages structure holding the lists
 * @migratetype: index into pcp->lists selecting which list to return
 * @cold:        passed through to rmqueue_bulk() (cold/hot page placement)
 *
 * NOTE(review): callers appear to invoke this with order 0 only — confirm
 * against the per-cpu list usage at the call sites.
 */
static struct list_head *get_populated_pcp_list(struct zone *zone,
			unsigned int order, struct per_cpu_pages *pcp,
			int migratetype, int cold)
{
	struct list_head *list = &pcp->lists[migratetype];

	if (list_empty(list)) {
		/* Refill the empty per-cpu list from the buddy allocator. */
		pcp->count += rmqueue_bulk(zone, order,
				pcp->batch, list,
				migratetype, cold);

		/* Still empty after the refill attempt: report failure. */
		if (list_empty(list))
			list = NULL;
	}
	return list;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
@@ -2793,18 +2818,30 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			bool cold, struct per_cpu_pages *pcp,
			struct list_head *list, gfp_t gfp_flags)
			gfp_t gfp_flags)
{
	struct page *page;
	struct page *page = NULL;
	struct list_head *list = NULL;

	do {
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold,
					gfp_flags & __GFP_CMA);
			if (unlikely(list_empty(list)))
		/* First try to get CMA pages */
		if (migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA) {
			list = get_populated_pcp_list(zone, 0, pcp,
					get_cma_migrate_type(), cold);
		}

		if (list == NULL) {
			/*
			 * Either CMA is not suitable or there are no
			 * free CMA pages.
			 */
			list = get_populated_pcp_list(zone, 0, pcp,
					migratetype, cold);
			if (unlikely(list == NULL) ||
					unlikely(list_empty(list)))
				return NULL;

		}

		if (cold)
@@ -2825,15 +2862,13 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			gfp_t gfp_flags, int migratetype)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);
	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list,
	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp,
				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
@@ -2870,18 +2905,21 @@ struct page *rmqueue(struct zone *preferred_zone,

	do {
		page = NULL;

		if (alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page) {
			if (gfp_flags & __GFP_CMA)
				page = __rmqueue_cma(zone, order, migratetype);
			else

		if (!page && migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA)
			page = __rmqueue_cma(zone, order);

		if (!page)
			page = __rmqueue(zone, order, migratetype);
		}
	} while (page && check_new_pages(page, order));

	spin_unlock(&zone->lock);
	if (!page)
		goto failed;