
Commit 108bcc96 authored by Cody P Schafer, committed by Linus Torvalds

mm: add & use zone_end_pfn() and zone_spans_pfn()



Add 2 helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code
duplication.

This also switches to using them in compaction (where a local variable had
to be renamed so it would not shadow the new helper), page_alloc, vmstat,
memory_hotplug, and kmemleak.
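The helpers encode a half-open pfn range: zone_end_pfn() is exclusive, so a
zone spans [zone_start_pfn, zone_end_pfn()). A minimal userspace sketch of
that contract, with struct zone mocked down to just the two fields the
helpers read (the real one in include/linux/mmzone.h has many more members):

#include <assert.h>
#include <stdbool.h>

struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

/* Same bodies as the helpers added to include/linux/mmzone.h below. */
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x100 };

	/* The open-coded form this patch replaces ... */
	assert(z.zone_start_pfn + z.spanned_pages == 0x1100);
	/* ... and the equivalent helper calls. */
	assert(zone_end_pfn(&z) == 0x1100);
	assert(zone_spans_pfn(&z, 0x10ff));	/* last spanned pfn */
	assert(!zone_spans_pfn(&z, 0x1100));	/* end pfn is exclusive */
	return 0;
}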

Note that in compaction.c I avoid calling zone_end_pfn() repeatedly because
I expect that at some point the synchronization issues with start_pfn &
spanned_pages will need fixing, either by actually using the seqlock or by
clever memory barrier usage.
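To make that concern concrete, here is a hedged sketch (not part of this
patch) of what a seqlock-based stable read could look like.
zone_span_seqbegin()/zone_span_seqretry() already exist and appear in the
page_alloc.c hunk below; zone_end_pfn_snapshot() is a hypothetical name:

/* Hypothetical helper, for illustration only: retry the read until the
 * zone's span was not resized mid-read, so the result is a consistent
 * snapshot even against a concurrent memory-hotplug resize. */
static unsigned long zone_end_pfn_snapshot(struct zone *zone)
{
	unsigned long end_pfn;
	unsigned int seq;

	do {
		seq = zone_span_seqbegin(zone);
		end_pfn = zone_end_pfn(zone);
	} while (zone_span_seqretry(zone, seq));

	return end_pfn;
}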

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: David Hansen <dave@linux.vnet.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9127ab4f
include/linux/mmzone.h: +10 −0
@@ -527,6 +527,16 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+static inline unsigned long zone_end_pfn(const struct zone *zone)
+{
+	return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
mm/compaction.c: +5 −5
@@ -86,7 +86,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
 	zone->compact_cached_migrate_pfn = start_pfn;
@@ -647,7 +647,7 @@ static void isolate_freepages(struct zone *zone,
 				struct compact_control *cc)
 {
 	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -666,7 +666,7 @@ static void isolate_freepages(struct zone *zone,
 	 */
 	high_pfn = min(low_pfn, pfn);
 
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	z_end_pfn = zone_end_pfn(zone);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -709,7 +709,7 @@ static void isolate_freepages(struct zone *zone,
 		 * only scans within a pageblock
 		 */
 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-		end_pfn = min(end_pfn, zone_end_pfn);
+		end_pfn = min(end_pfn, z_end_pfn);
 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
 						   freelist, false);
 		nr_freepages += isolated;
@@ -923,7 +923,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 
 	ret = compaction_suitable(zone, cc->order);
 	switch (ret) {
mm/kmemleak.c: +2 −3
@@ -1300,9 +1300,8 @@ static void kmemleak_scan(void)
 	 */
 	lock_memory_hotplug();
 	for_each_online_node(i) {
-		pg_data_t *pgdat = NODE_DATA(i);
-		unsigned long start_pfn = pgdat->node_start_pfn;
-		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+		unsigned long start_pfn = node_start_pfn(i);
+		unsigned long end_pfn = node_end_pfn(i);
 		unsigned long pfn;
 
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
mm/memory_hotplug.c: +5 −5
@@ -299,7 +299,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
 	/* can't move pfns which are higher than @z2 */
-	if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
+	if (end_pfn > zone_end_pfn(z2))
 		goto out_fail;
 	/* the move out part mast at the left most of @z2 */
 	if (start_pfn > z2->zone_start_pfn)
@@ -315,7 +315,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 		z1_start_pfn = start_pfn;
 
 	resize_zone(z1, z1_start_pfn, end_pfn);
-	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
+	resize_zone(z2, end_pfn, zone_end_pfn(z2));
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
 
@@ -347,15 +347,15 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
 	if (z1->zone_start_pfn > start_pfn)
 		goto out_fail;
 	/* the move out part mast at the right most of @z1 */
-	if (z1->zone_start_pfn + z1->spanned_pages >  end_pfn)
+	if (zone_end_pfn(z1) >  end_pfn)
 		goto out_fail;
 	/* must included/overlap */
-	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
+	if (start_pfn >= zone_end_pfn(z1))
 		goto out_fail;
 
 	/* use end_pfn for z2's end_pfn if z2 is empty */
 	if (z2->spanned_pages)
-		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+		z2_end_pfn = zone_end_pfn(z2);
 	else
 		z2_end_pfn = end_pfn;
 
mm/page_alloc.c: +9 −13
@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 	do {
 		seq = zone_span_seqbegin(zone);
-		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-			ret = 1;
-		else if (pfn < zone->zone_start_pfn)
+		if (!zone_spans_pfn(zone, pfn))
 			ret = 1;
 	} while (zone_span_seqretry(zone, seq));
 
@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone_spans_pfn(zone, start_pfn))
 		start_page = page;
-	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
 	return move_freepages(zone, start_page, end_page, migratetype);
@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
 
 	spin_lock_irqsave(&zone->lock, flags);
 
-	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	max_zone_pfn = zone_end_pfn(zone);
 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * the block.
 	 */
 	start_pfn = zone->zone_start_pfn;
-	end_pfn = start_pfn + zone->spanned_pages;
+	end_pfn = zone_end_pfn(zone);
 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;
@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * pfn out of zone.
 		 */
 		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && (pfn < zone_end_pfn(z))
 		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		 * for the buddy allocator to function correctly.
 		 */
 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size =  (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(pfn < zone->zone_start_pfn);
-	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 
 	zone = page_zone(page);
 	pfn = page_to_pfn(page);
-	if (zone->zone_start_pfn > pfn ||
-			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
 	return !has_unmovable_pages(zone, page, 0, true);