Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b6569be0 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: add cma pcp list"

parents 654fadad 0caf6be3
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -39,8 +39,9 @@ struct vm_area_struct;
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_DIRECT_RECLAIM	0x200000u
#define ___GFP_KSWAPD_RECLAIM	0x400000u
#define ___GFP_CMA		0x800000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x800000u
#define ___GFP_NOLOCKDEP	0x1000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
@@ -57,6 +58,7 @@ struct vm_area_struct;
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/**
@@ -217,7 +219,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/**
+5 −0
Original line number Diff line number Diff line
@@ -188,7 +188,12 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
#ifndef CONFIG_CMA
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	/*
	 * With CMA enabled, additionally pass __GFP_CMA so the zeroed
	 * user page may be satisfied from CMA pageblocks, increasing
	 * CMA utilization.
	 */
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
						vaddr);
#endif
}

static inline void clear_highpage(struct page *page)
+8 −2
Original line number Diff line number Diff line
@@ -40,8 +40,6 @@ enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -58,6 +56,8 @@ enum migratetype {
	 */
	MIGRATE_CMA,
#endif
	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
@@ -70,9 +70,11 @@ extern char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#  define get_cma_migrate_type() MIGRATE_CMA
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#  define get_cma_migrate_type() MIGRATE_MOVABLE
#endif

static inline bool is_migrate_movable(int mt)
@@ -381,6 +383,10 @@ struct zone {
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifdef CONFIG_CMA
	bool			cma_alloc;
#endif

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+85 −17
Original line number Diff line number Diff line
@@ -242,10 +242,10 @@ char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
	"HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
@@ -2455,18 +2455,32 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype)

retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype))
	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
		goto retry;
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

#ifdef CONFIG_CMA
/*
 * Allocate a page of @order from the zone's CMA free lists.
 *
 * Returns the page, or NULL when no CMA page is available or a
 * contiguous allocation is in flight (zone->cma_alloc is set by
 * alloc_contig_range() to keep CMA pageblocks untouched meanwhile).
 * Caller must hold zone->lock, as for __rmqueue().
 */
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	struct page *page = NULL;

	/*
	 * The IS_ENABLED(CONFIG_CMA) guard is redundant here: this
	 * definition is only compiled when CONFIG_CMA is set.
	 */
	if (!zone->cma_alloc)
		page = __rmqueue_cma_fallback(zone, order);

	trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
	return page;
}
#else
static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	return NULL;
}
#endif

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -2480,7 +2494,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		struct page *page;

		/*
		 * If migrate type CMA is being requested only try to
		 * satisfy the request with CMA pages to try and increase
		 * CMA utilization.
		 */
		if (is_migrate_cma(migratetype))
			page = __rmqueue_cma(zone, order);
		else
			page = __rmqueue(zone, order, migratetype);

		if (unlikely(page == NULL))
			break;

@@ -2515,6 +2540,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
	return alloced;
}

/*
 * Return the per-cpu free list for @migratetype, refilling it from
 * the buddy allocator when it has run dry.
 *
 * Returns NULL when the list is empty and no pages could be pulled
 * in for that migrate type.
 */
static struct list_head *get_populated_pcp_list(struct zone *zone,
			unsigned int order, struct per_cpu_pages *pcp,
			int migratetype)
{
	struct list_head *list = &pcp->lists[migratetype];

	if (!list_empty(list))
		return list;

	/* List drained: grab a batch of pages from the buddy allocator. */
	pcp->count += rmqueue_bulk(zone, order, pcp->batch, list,
			migratetype);

	return list_empty(list) ? NULL : list;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
@@ -2937,17 +2984,30 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			struct per_cpu_pages *pcp,
			struct list_head *list)
			gfp_t gfp_flags)
{
	struct page *page;
	struct page *page = NULL;
	struct list_head *list = NULL;

	do {
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
		/* First try to get CMA pages */
		if (migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA) {
			list = get_populated_pcp_list(zone, 0, pcp,
					get_cma_migrate_type());
		}

		if (list == NULL) {
			/*
			 * Either CMA is not suitable or there are no
			 * free CMA pages.
			 */
			list = get_populated_pcp_list(zone, 0, pcp,
					migratetype);
			if (unlikely(list_empty(list)))
			if (unlikely(list == NULL) ||
					unlikely(list_empty(list)))
				return NULL;

		}

		page = list_first_entry(list, struct page, lru);
@@ -2964,14 +3024,13 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			gfp_t gfp_flags, int migratetype)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone,  migratetype, pcp, list);
	page = __rmqueue_pcplist(zone,  migratetype, pcp,
				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
		zone_statistics(preferred_zone, zone);
@@ -3007,14 +3066,21 @@ struct page *rmqueue(struct zone *preferred_zone,

	do {
		page = NULL;

		if (alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}

		if (!page && migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA)
			page = __rmqueue_cma(zone, order);

		if (!page)
			page = __rmqueue(zone, order, migratetype);
	} while (page && check_new_pages(page, order));

	spin_unlock(&zone->lock);
	if (!page)
		goto failed;
@@ -7951,6 +8017,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
	if (ret)
		return ret;

	cc.zone->cma_alloc = 1;
	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
@@ -8033,6 +8100,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	cc.zone->cma_alloc = 0;
	return ret;
}