Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e8216da5 authored by Johannes Weiner, committed by Linus Torvalds
Browse files

x86-64: use vmemmap_populate_basepages() for !pse setups



We already have generic code to allocate vmemmap with regular pages, use
it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6c7a2ca4
Loading
Loading
Loading
Loading
+38 −40
Original line number Diff line number Diff line
@@ -1281,7 +1281,8 @@ static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
static int __meminit vmemmap_populate_hugepages(unsigned long start,
						unsigned long end, int node)
{
	unsigned long addr;
	unsigned long next;
@@ -1290,7 +1291,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		void *p = NULL;
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
@@ -1300,23 +1301,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;
		} else {
			next = pmd_addr_end(addr, end);

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
@@ -1341,10 +1329,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	}

	return 0;
}

/*
 * Populate the vmemmap for the page-struct range [start, end) on @node.
 *
 * Uses 2MB PMD mappings when the CPU supports PSE, otherwise falls back
 * to the generic base-page populator.  On success, propagates the new
 * kernel mappings to every pgd via sync_global_pgds().
 *
 * Returns 0 on success or a negative errno (-ENOMEM) from the populator.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	int err;

	if (cpu_has_pse)
		err = vmemmap_populate_hugepages(start, end, node);
	else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	/*
	 * Fix: the diff rendering had merged the removed "return 0;" with
	 * the added "return err;", leaving dead code and dropping the error.
	 * Propagate the populator's result to the caller.
	 */
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)