Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c35e9dee authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm/page_alloc: factor out fallback freepage checking"

parents 8443b263 c15a64aa
Loading
Loading
Loading
Loading
+93 −54
Original line number Diff line number Diff line
@@ -1152,14 +1152,40 @@ static void change_pageblock_range(struct page *pageblock_page,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 *
 * If we claim more than half of the pageblock, change pageblock's migratetype
 * as well.
 */
static void try_to_steal_freepages(struct zone *zone, struct page *page,
				  int start_type, int fallback_type)
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * An allocation covering a whole pageblock may always take the
	 * block over.  This check is deliberately kept separate from the
	 * heuristic below: when it is satisfied we are guaranteed to be
	 * able to steal the entire pageblock, whereas the heuristic only
	 * makes stealing likely and could be tuned at any time.
	 */
	if (order >= pageblock_order)
		return true;

	/*
	 * Heuristic: allow stealing for large-ish requests, for
	 * migratetypes whose fragmentation is less harmful than being
	 * polluted by movable allocations, or when grouping by mobility
	 * is disabled entirely.
	 */
	return order >= pageblock_order / 2 ||
	       start_mt == MIGRATE_RECLAIMABLE ||
	       start_mt == MIGRATE_UNMOVABLE ||
	       page_group_by_mobility_disabled;
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock and check whether half of pages are moved or not. If half of
 * pages are moved, we can change migratetype of pageblock and permanently
 * use it's pages as requested migratetype in the future.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
							  int start_type)
{
	int current_order = page_order(page);
	int pages;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
@@ -1167,20 +1193,40 @@ static void try_to_steal_freepages(struct zone *zone, struct page *page,
		return;
	}

	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE ||
	    start_type == MIGRATE_UNMOVABLE ||
	    page_group_by_mobility_disabled) {
		int pages;

		pages = move_freepages_block(zone, page,
				start_type, 0);
	pages = move_freepages_block(zone, page, start_type, 0);

	/* Claim the whole block if over half of it is free */
	if (pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);
}

/* Check whether there is a suitable fallback freepage with requested order. */
static int find_suitable_fallback(struct free_area *area, unsigned int order,
					int migratetype, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_RESERVE)
			break;

		if (list_empty(&area->free_list[fallback_mt]))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		return fallback_mt;
	}

	return -1;
}

/* Remove an element from the buddy allocator from the fallback list */
@@ -1190,40 +1236,34 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
	struct free_area *area;
	unsigned int current_order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1;
				current_order >= order && current_order <= MAX_ORDER-1;
				--current_order) {
		int i;
		for (i = 0;; i++) {
			int migratetype = fallbacks[start_migratetype][i];
			int buddy_type = start_migratetype;

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

		area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, &can_steal);
		if (fallback_mt == -1)
			continue;

			page = list_entry(area->free_list[migratetype].next,
		page = list_entry(area->free_list[fallback_mt].next,
						struct page, lru);
			area->nr_free--;
			if (is_migrate_cma(migratetype))
				area->nr_free_cma--;

			try_to_steal_freepages(zone, page, start_migratetype,
								migratetype);
		if (can_steal)
			steal_suitable_fallback(zone, page, start_migratetype);

		/* Remove the page from the freelists */
		area->nr_free--;

                if (is_migrate_cma(fallback_mt))
                        area->nr_free_cma--;
		list_del(&page->lru);
		rmv_page_order(page);

		expand(zone, page, order, current_order, area,
					buddy_type);

					start_migratetype);
		/*
		 * The freepage_migratetype may differ from pageblock's
		 * migratetype depending on the decisions in
@@ -1232,14 +1272,13 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
		 * we need to make sure unallocated pages flushed from
		 * pcp lists are returned to the correct freelist.
		 */
			set_freepage_migratetype(page, buddy_type);
		set_freepage_migratetype(page, start_migratetype);

		trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);
			start_migratetype, fallback_mt);

		return page;
	}
	}

	return NULL;
}