Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e51aa630 authored by Isaac J. Manjarres
Browse files

mm: add cma pcp list



Add a cma pcp list in order to increase cma memory utilization.

Increased cma memory utilization will improve overall memory
utilization because free cma pages are ignored when memory reclaim
is done with gfp mask GFP_KERNEL.

Since most memory reclaim is done by kswapd, which uses a gfp mask
of GFP_KERNEL, by increasing cma memory utilization we are therefore
ensuring that less aggressive memory reclaim takes place.

Increased cma memory utilization will improve performance,
for example it will increase app concurrency.

Change-Id: I809589a25c6abca51f1c963f118adfc78e955cf9
Signed-off-by: Liam Mark <lmark@codeaurora.org>
[vinmenon@codeaurora.org: fix !CONFIG_CMA compile time issues]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
[swatsrid@codeaurora.org: Fix merge conflicts]
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
[isaacm@codeaurora.org: Introduce CONFIG_CMA_PCP_LISTS]
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent 095c51f2
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -42,6 +42,7 @@ enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
#ifndef CONFIG_CMA_PCP_LISTS
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
@@ -60,6 +61,11 @@ enum migratetype {
	 */
	MIGRATE_CMA,
#endif
#else
	MIGRATE_CMA,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
@@ -72,9 +78,15 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#ifdef CONFIG_CMA_PCP_LISTS
#  define get_cma_migrate_type() MIGRATE_CMA
#else
#  define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#  define get_cma_migrate_type() MIGRATE_MOVABLE
#endif

static inline bool is_migrate_movable(int mt)
+12 −0
Original line number Diff line number Diff line
@@ -556,6 +556,18 @@ config CMA_DIRECT_UTILIZATION
	  CMA pages, so that CMA pages can be used directly for allocations,
	  instead of as a fallback, thus, improving CMA utilization.

config CMA_PCP_LISTS
	bool "Create PCP lists for CMA memory"
	depends on CMA && CMA_DIRECT_UTILIZATION && QGKI
	help
	  Improve memory utilization by creating PCP lists that contain CMA
	  pages to satisfy order-0 allocations when appropriate. Increased
	  CMA usage results in better memory utilization, as the system can
	  use CMA pages to satisfy movable allocations. This results in
	  more efficient reclaim when trying to satisfy non-movable
	  allocations, as the amount of non-CMA memory to be reclaimed
	  should be smaller.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
+75 −38
Original line number Diff line number Diff line
@@ -294,10 +294,15 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
#ifndef CONFIG_CMA_PCP_LISTS
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#else
	"CMA",
	"HighAtomic",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
@@ -2753,27 +2758,23 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
	return page;
}

static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
					int migratetype,
					unsigned int alloc_flags)
#ifdef CONFIG_CMA_DIRECT_UTILIZATION
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	struct page *page = 0;

retry:
#ifdef CONFIG_CMA_DIRECT_UTILIZATION
	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
	if (!zone->cma_alloc)
		page = __rmqueue_cma_fallback(zone, order);
	else
#endif
		page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
						  alloc_flags))
		goto retry;

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
	return page;
}
#else
/*
 * !CONFIG_CMA_DIRECT_UTILIZATION: there is no direct CMA allocation path,
 * so this stub never produces a page; the caller then falls through to the
 * regular __rmqueue() path.
 */
static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	return NULL;
}
#endif

/*
 * Obtain a specified number of elements from the buddy allocator, all under
@@ -2782,7 +2783,7 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags, int cma)
			int migratetype, unsigned int alloc_flags)
{
	int i, alloced = 0;

@@ -2790,9 +2791,13 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
	for (i = 0; i < count; ++i) {
		struct page *page;

		if (cma)
			page = __rmqueue_cma(zone, order, migratetype,
					     alloc_flags);
		/*
		 * If migrate type CMA is being requested only try to
		 * satisfy the request with CMA pages to try and increase
		 * CMA utilization.
		 */
		if (is_migrate_cma(migratetype))
			page = __rmqueue_cma(zone, order);
		else
			page = __rmqueue(zone, order, migratetype, alloc_flags);

@@ -2830,6 +2835,27 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
	return alloced;
}

/*
 * Look up the per-cpu free list for @migratetype, refilling it from the
 * buddy allocator when it is empty.
 *
 * Returns the list, or NULL if it could not be populated.
 */
static struct list_head *get_populated_pcp_list(struct zone *zone,
			unsigned int order, struct per_cpu_pages *pcp,
			int migratetype, unsigned int alloc_flags)
{
	struct list_head *list = &pcp->lists[migratetype];

	if (!list_empty(list))
		return list;

	/* List was empty: pull a batch of pages from the buddy allocator. */
	pcp->count += rmqueue_bulk(zone, order, pcp->batch, list,
				   migratetype, alloc_flags);

	return list_empty(list) ? NULL : list;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
@@ -3258,19 +3284,30 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
			struct list_head *list, gfp_t gfp_flags)
			struct per_cpu_pages *pcp, gfp_t gfp_flags)
{
	struct page *page;
	struct page *page = NULL;
	struct list_head *list = NULL;

	do {
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, alloc_flags,
					gfp_flags & __GFP_CMA);
			if (unlikely(list_empty(list)))
		/* First try to get CMA pages */
		if (migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA) {
			list = get_populated_pcp_list(zone, 0, pcp,
					get_cma_migrate_type(), alloc_flags);
		}

		if (list == NULL) {
			/*
			 * Either CMA is not suitable or there are no
			 * free CMA pages.
			 */
			list = get_populated_pcp_list(zone, 0, pcp,
					migratetype, alloc_flags);
			if (unlikely(list == NULL) ||
					unlikely(list_empty(list)))
				return NULL;

		}

		page = list_first_entry(list, struct page, lru);
@@ -3287,14 +3324,12 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			int migratetype, unsigned int alloc_flags)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list,
	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp,
				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
@@ -3331,20 +3366,22 @@ struct page *rmqueue(struct zone *preferred_zone,

	do {
		page = NULL;

		if (alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page) {
			if (gfp_flags & __GFP_CMA)
				page = __rmqueue_cma(zone, order, migratetype,
						     alloc_flags);
			else
				page = __rmqueue(zone, order, migratetype,
						 alloc_flags);
		}

		if (!page && migratetype == MIGRATE_MOVABLE &&
				gfp_flags & __GFP_CMA)
			page = __rmqueue_cma(zone, order);

		if (!page)
			page = __rmqueue(zone, order, migratetype, alloc_flags);

	} while (page && check_new_pages(page, order));

	spin_unlock(&zone->lock);
	if (!page)
		goto failed;