
Commit bc357f43 authored by Marek Szyprowski, committed by Linus Torvalds

mm: cma: remove watermark hacks



Commits 2139cbe6 ("cma: fix counting of isolated pages") and
d95ea5d1 ("cma: fix watermark checking") introduced a reliable
method of free page accounting when memory is being allocated from CMA
regions, so the workaround introduced earlier by commit 49f223a9
("mm: trigger page reclaim in alloc_contig_range() to stabilise
watermarks") can finally be removed.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e30abd1
include/linux/mmzone.h  +0 −9
@@ -63,10 +63,8 @@ enum {

 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
-#  define cma_wmark_pages(zone)	zone->min_cma_pages
 #else
 #  define is_migrate_cma(migratetype) false
-#  define cma_wmark_pages(zone) 0
 #endif

 #define for_each_migratetype_order(order, type) \
@@ -382,13 +380,6 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
-#ifdef CONFIG_CMA
-	/*
-	 * CMA needs to increase watermark levels during the allocation
-	 * process to make sure that the system is not starved.
-	 */
-	unsigned long		min_cma_pages;
-#endif
 	struct free_area	free_area[MAX_ORDER];

mm/page_alloc.c  +0 −58
@@ -5218,10 +5218,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

-		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
-
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5766,54 +5762,6 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	return ret > 0 ? 0 : ret;
 }

-/*
- * Update zone's cma pages counter used for watermark level calculation.
- */
-static inline void __update_cma_watermarks(struct zone *zone, int count)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	zone->min_cma_pages += count;
-	spin_unlock_irqrestore(&zone->lock, flags);
-	setup_per_zone_wmarks();
-}
-
-/*
- * Trigger memory pressure bump to reclaim some pages in order to be able to
- * allocate 'count' pages in single page units. Does similar work as
- *__alloc_pages_slowpath() function.
- */
-static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
-{
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
-	int did_some_progress = 0;
-	int order = 1;
-
-	/*
-	 * Increase level of watermarks to force kswapd do his job
-	 * to stabilise at new watermark level.
-	 */
-	__update_cma_watermarks(zone, count);
-
-	/* Obey watermarks as if the page was being allocated */
-	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
-		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
-
-		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-						      NULL);
-		if (!did_some_progress) {
-			/* Exhausted what can be done so it's blamo time */
-			out_of_memory(zonelist, gfp_mask, order, NULL, false);
-		}
-	}
-
-	/* Restore original watermark levels. */
-	__update_cma_watermarks(zone, -count);
-
-	return count;
-}
-
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
@@ -5837,7 +5785,6 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
 int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
-	struct zone *zone = page_zone(pfn_to_page(start));
 	unsigned long outer_start, outer_end;
 	int ret = 0, order;

@@ -5922,11 +5869,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}

-	/*
-	 * Reclaim enough pages to make sure that contiguous allocation
-	 * will not starve the system.
-	 */
-	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
-
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);