
Commit 8dad76b5 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mm, compaction: round-robin the order while searching the free lists for a target"

parents 0b07cbdd f50303ed
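For context on the merged change: the heart of the patch is next_search_order(), which walks the free-list orders round-robin, starting from the order that last yielded a target instead of always restarting at cc->order - 1, so repeated fast searches spread their work across orders. The standalone userspace sketch below only illustrates that walk; the search_state struct, the values in main(), and the printf output are assumptions made for this example, not part of the kernel patch.

/*
 * Illustrative sketch only, not kernel code: it mimics the round-robin
 * order walk that the patch's next_search_order() performs.
 */
#include <stdio.h>

struct search_state {
	int order;		/* highest order of interest (cc->order) */
	int search_order;	/* order where the previous search left off */
};

/* Step to the next lower order, wrapping around to order - 1; return -1
 * once the walk has come full circle without finding anything. */
static int next_search_order(struct search_state *s, int order)
{
	order--;
	if (order < 0)
		order = s->order - 1;

	/* Wrapped back to the start: rotate the starting order for the
	 * next search and end this one. */
	if (order == s->search_order) {
		s->search_order--;
		if (s->search_order < 0)
			s->search_order = s->order - 1;
		return -1;
	}

	return order;
}

int main(void)
{
	struct search_state s = { .order = 5, .search_order = 2 };
	int order;

	/* Visits orders 2, 1, 0, 4, 3: each order exactly once. */
	for (order = s.search_order; order >= 0;
	     order = next_search_order(&s, order))
		printf("searching free list of order %d\n", order);

	printf("next search will start at order %d\n", s.search_order);
	return 0;
}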
mm/compaction.c +48 −58
@@ -330,10 +330,9 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated)
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->no_set_skip_hint)
		return;
@@ -341,13 +340,8 @@ static void update_pageblock_skip(struct compact_control *cc,
	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
@@ -365,7 +359,7 @@ static inline bool pageblock_skip_persistent(struct page *page)
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated)
			struct page *page, unsigned long pfn)
{
}

@@ -432,33 +426,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;

		cond_resched();
	}

	return false;
}
@@ -475,7 +443,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
@@ -502,9 +470,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
@@ -592,10 +557,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
@@ -747,8 +708,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;
	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
@@ -1187,6 +1147,24 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
@@ -1223,9 +1201,15 @@ fast_isolate_freepages(struct compact_control *cc)
	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	for (order = cc->order - 1;
	     order >= 0 && !page;
	     order--) {
	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
@@ -1249,6 +1233,7 @@ fast_isolate_freepages(struct compact_control *cc)

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}
@@ -1320,8 +1305,10 @@ fast_isolate_freepages(struct compact_control *cc)
		}
	}

	if (highest && highest > cc->zone->compact_cached_free_pfn)
	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
@@ -1379,12 +1366,10 @@ static void isolate_freepages(struct compact_control *cc)
				isolate_start_pfn = block_start_pfn) {
		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
@@ -1403,6 +1388,10 @@ static void isolate_freepages(struct compact_control *cc)
		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
					freelist, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
@@ -1677,11 +1666,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 * need to schedule.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
@@ -2175,6 +2163,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.order = order,
		.search_order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
@@ -2414,6 +2403,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.search_order = pgdat->kcompactd_max_order,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
mm/internal.h +2 −1
@@ -211,7 +211,8 @@ struct compact_control {
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned int fast_search_fail;	/* failures to use free list searches */
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */