Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49689bac authored by qctecmdr Service and committed by Gerrit — the friendly Code Review server
Browse files

Merge "cma: redirect page allocation to CMA"

parents d5a30f11 d29bd29a
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ struct vm_area_struct;
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
#define ___GFP_CMA		0x4000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#else
@@ -58,8 +59,8 @@ struct vm_area_struct;
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
+5 −0
Original line number Diff line number Diff line
@@ -193,7 +193,12 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
#ifndef CONFIG_CMA
	/* Without CMA: a plain movable, zeroed user page. */
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	/*
	 * With CONFIG_CMA, additionally pass __GFP_CMA so this movable user
	 * page may be served from CMA pageblocks (the allocator's CMA path
	 * keys off this flag; see __rmqueue_cma() in mm/page_alloc.c).
	 */
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
						vaddr);
#endif
}

static inline void clear_highpage(struct page *page)
+4 −0
Original line number Diff line number Diff line
@@ -388,6 +388,10 @@ struct zone {
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifdef CONFIG_CMA
	bool			cma_alloc;
#endif

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+43 −13
Original line number Diff line number Diff line
@@ -2321,14 +2321,30 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,

retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype))
	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
		goto retry;

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * __rmqueue_cma - remove a page from the free lists, preferring the CMA
 * free area for movable requests.
 *
 * Like __rmqueue(), but when the request is MIGRATE_MOVABLE and no
 * contiguous allocation is in flight on this zone (!zone->cma_alloc,
 * set by alloc_contig_range()), CMA pageblocks are tried first via
 * __rmqueue_cma_fallback(); otherwise the normal per-migratetype free
 * lists are used.  Caller must hold zone->lock (see rmqueue_bulk()).
 *
 * NOTE(review): on retry after a successful __rmqueue_fallback(), the
 * CMA branch is re-entered for movable requests even though fallback
 * stole pages into the requested migratetype's free list — confirm
 * this cannot spin without making progress when the CMA area is empty.
 */
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
					int migratetype)
{
	/* Use NULL, not the bare integer 0, for pointer initialization. */
	struct page *page = NULL;

retry:
#ifdef CONFIG_CMA
	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
		page = __rmqueue_cma_fallback(zone, order);
	else
#endif
		page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
		goto retry;

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
@@ -2340,13 +2356,19 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, bool cold)
			int migratetype, bool cold, int cma)
{
	int i, alloced = 0;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		struct page *page;

		if (cma)
			page = __rmqueue_cma(zone, order, migratetype);
		else
			page = __rmqueue(zone, order, migratetype);

		if (unlikely(page == NULL))
			break;

@@ -2771,7 +2793,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			bool cold, struct per_cpu_pages *pcp,
			struct list_head *list)
			struct list_head *list, gfp_t gfp_flags)
{
	struct page *page;

@@ -2779,7 +2801,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
					migratetype, cold,
					gfp_flags & __GFP_CMA);
			if (unlikely(list_empty(list)))
				return NULL;
		}
@@ -2810,7 +2833,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list,
				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
		zone_statistics(preferred_zone, zone);
@@ -2851,8 +2875,12 @@ struct page *rmqueue(struct zone *preferred_zone,
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page)
		if (!page) {
			if (gfp_flags & __GFP_CMA)
				page = __rmqueue_cma(zone, order, migratetype);
			else
				page = __rmqueue(zone, order, migratetype);
		}
	} while (page && check_new_pages(page, order));
	spin_unlock(&zone->lock);
	if (!page)
@@ -7638,6 +7666,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
	if (ret)
		return ret;

	cc.zone->cma_alloc = 1;
	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
@@ -7720,6 +7749,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	cc.zone->cma_alloc = 0;
	return ret;
}