Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b6a68df4, authored by Linux Build Service Account, committed by Gerrit — the friendly Code Review server
Browse files

Merge "staging: ion: shrink page-pool by page unit"

parents c7c3623d 4ac998d8
Loading
Loading
Loading
Loading
+7 −3
Original line number | Diff line number | Diff line
@@ -128,7 +128,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
				int nr_to_scan)
{
	int i;
	int freed = 0;
	bool high;

	if (current_is_kswapd())
@@ -136,7 +136,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
	else
		high = !!(gfp_mask & __GFP_HIGHMEM);

	for (i = 0; i < nr_to_scan; i++) {
	if (nr_to_scan == 0)
		return ion_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		struct page *page;

		mutex_lock(&pool->mutex);
@@ -150,9 +153,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
		}
		mutex_unlock(&pool->mutex);
		ion_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return ion_page_pool_total(pool, high);
	return freed;
}

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+16 −3
Original line number Diff line number Diff line
@@ -365,16 +365,29 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
{
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i;
	int i, nr_freed = 0;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->uncached_pools[i];
		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_total += nr_freed;

		pool = sys_heap->cached_pools[i];
		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_total += nr_freed;

		if (!only_scan) {
			nr_to_scan -= nr_freed;
			/* shrink completed */
			if (nr_to_scan <= 0)
				break;
		}
	}

	return nr_total;
+45 −37
Original line number Diff line number Diff line
@@ -995,11 +995,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
@@ -1012,6 +1010,17 @@ int *get_migratetype_fallbacks(int mtype)
	return fallbacks[mtype];
}

#ifdef CONFIG_CMA
/*
 * Allocate a page of the requested order directly from the MIGRATE_CMA
 * free lists, bypassing the regular fallback table.  May return NULL on
 * failure (the caller in __rmqueue() checks for !page before trying
 * __rmqueue_fallback()).
 */
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
/* !CONFIG_CMA: there are no CMA free lists, so the fallback never succeeds. */
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
@@ -1094,39 +1103,34 @@ static void change_pageblock_range(struct page *pageblock_page,
}

/*
 * If breaking a large block of pages, move all free pages to the preferred
 * allocation list. If falling back for a reclaimable kernel allocation, be
 * more aggressive about taking ownership of free pages.
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * On the other hand, never change migration type of MIGRATE_CMA pageblocks
 * nor move CMA pages to different free lists. We don't want unmovable pages
 * to be allocated from MIGRATE_CMA areas.
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 *
 * Returns the allocation migratetype if free pages were stolen, or the
 * fallback migratetype if it was decided not to steal.
 * If we claim more than half of the pageblock, change pageblock's migratetype
 * as well.
 */
static int try_to_steal_freepages(struct zone *zone, struct page *page,
static void try_to_steal_freepages(struct zone *zone, struct page *page,
				  int start_type, int fallback_type)
{
	int current_order = page_order(page);

	/*
	 * When borrowing from MIGRATE_CMA, we need to release the excess
	 * buddy pages to CMA itself. We also ensure the freepage_migratetype
	 * is set to CMA so it is returned to the correct freelist in case
	 * the page ends up being not actually allocated from the pcp lists.
	 */
	if (is_migrate_cma(fallback_type))
		return fallback_type;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		return start_type;
		return;
	}

	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE ||
	    start_type == MIGRATE_UNMOVABLE ||
	    page_group_by_mobility_disabled) {
		int pages;

@@ -1136,11 +1140,7 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
		if (pages >= (1 << (pageblock_order-1)) ||
				page_group_by_mobility_disabled)
			set_pageblock_migratetype(page, start_type);

		return start_type;
	}

	return fallback_type;
}

/* Remove an element from the buddy allocator from the fallback list */
@@ -1150,14 +1150,15 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
	struct free_area *area;
	unsigned int current_order;
	struct page *page;
	int migratetype, new_type, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1;
				current_order >= order && current_order <= MAX_ORDER-1;
				--current_order) {
		int i;
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];
			int migratetype = fallbacks[start_migratetype][i];
			int buddy_type = start_migratetype;

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
@@ -1171,8 +1172,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
					struct page, lru);
			area->nr_free--;

			new_type = try_to_steal_freepages(zone, page,
							  start_migratetype,
			try_to_steal_freepages(zone, page, start_migratetype,
								migratetype);

			/* Remove the page from the freelists */
@@ -1180,13 +1180,17 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
			rmv_page_order(page);

			expand(zone, page, order, current_order, area,
			       new_type);
			/* The freepage_migratetype may differ from pageblock's
					buddy_type);

			/*
			 * The freepage_migratetype may differ from pageblock's
			 * migratetype depending on the decisions in
			 * try_to_steal_freepages. This is OK as long as it does
			 * not differ for MIGRATE_CMA type.
			 * try_to_steal_freepages(). This is OK as long as it
			 * does not differ for MIGRATE_CMA pageblocks. For CMA
			 * we need to make sure unallocated pages flushed from
			 * pcp lists are returned to the correct freelist.
			 */
			set_freepage_migratetype(page, new_type);
			set_freepage_migratetype(page, buddy_type);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);
@@ -1211,6 +1215,10 @@ retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype);

		/*