
Commit 1e8ce83c authored by Robin Holt, committed by Linus Torvalds

mm: meminit: move page initialization into a separate function



Currently, memmap_init_zone() has all the smarts for initializing a single
page.  A subset of this is required for parallel page initialisation and
so this patch breaks up the monolithic function in preparation.
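
The new helper keeps the per-page work in one place so that a later parallel initialiser can drive it over disjoint pfn ranges. As a rough sketch of that intent (the init_pfn_range() helper below is purely illustrative and not part of this patch; it also omits the early_pfn_valid()/early_pfn_in_nid() checks that memmap_init_zone() performs), each worker would simply loop over its slice of the zone:

	static void __meminit init_pfn_range(unsigned long start_pfn,
					     unsigned long end_pfn,
					     unsigned long zone, int nid)
	{
		unsigned long pfn;

		/* Each worker covers a disjoint [start_pfn, end_pfn) slice. */
		for (pfn = start_pfn; pfn < end_pfn; pfn++)
			__init_single_pfn(pfn, zone, nid);
	}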

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nathan Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8e7a7f86
mm/page_alloc.c +46 −33
@@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+				unsigned long zone, int nid)
+{
+	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
+
+	set_page_links(page, zone, nid, pfn);
+	mminit_verify_page_links(page, zone, nid, pfn);
+	init_page_count(page);
+	page_mapcount_reset(page);
+	page_cpupid_reset_last(page);
+	SetPageReserved(page);
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made. Later some blocks near
+	 * the start are marked MIGRATE_RESERVE by
+	 * setup_zone_migrate_reserve()
+	 *
+	 * bitmap is created for zone's valid pfn range. but memmap
+	 * can be created for invalid pages (for alignment)
+	 * check here not to call set_pageblock_migratetype() against
+	 * pfn out of zone.
+	 */
+	if ((z->zone_start_pfn <= pfn)
+	    && (pfn < zone_end_pfn(z))
+	    && !(pfn & (pageblock_nr_pages - 1)))
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+	INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+	if (!is_highmem_idx(zone))
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+					int nid)
+{
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
@@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn, enum memmap_context context)
 {
-	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 	struct zone *z;
@@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			if (!early_pfn_in_nid(pfn, nid))
 				continue;
 		}
-		page = pfn_to_page(pfn);
-		set_page_links(page, zone, nid, pfn);
-		mminit_verify_page_links(page, zone, nid, pfn);
-		init_page_count(page);
-		page_mapcount_reset(page);
-		page_cpupid_reset_last(page);
-		SetPageReserved(page);
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
-		 *
-		 * bitmap is created for zone's valid pfn range. but memmap
-		 * can be created for invalid pages (for alignment)
-		 * check here not to call set_pageblock_migratetype() against
-		 * pfn out of zone.
-		 */
-		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < zone_end_pfn(z))
-		    && !(pfn & (pageblock_nr_pages - 1)))
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-		INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
-			set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+		__init_single_pfn(pfn, zone, nid);
 	}
 }