
Commit 5b8443b2 authored by Tejun Heo, committed by H. Peter Anvin

x86-32, numa: Remove redundant top-down alloc code from remap initialization



memblock_find_in_range() now does top-down allocation by default, so
there's no reason for its callers to explicitly implement it by
gradually lowering the start address.
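
A toy model of that top-down default (illustrative only -- the real
memblock_find_in_range() walks struct memblock_region tables and honors
existing reservations; the region array and values below are made up):

#include <stdint.h>
#include <stdio.h>

#define ERR (~(uint64_t)0)		/* stand-in for MEMBLOCK_ERROR */

struct region { uint64_t base, size; };

/* Toy top-down search over a sorted free-region array: scan from the
 * highest region and return the highest aligned fit inside [start, end). */
static uint64_t find_top_down(const struct region *r, int nr,
			      uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	for (int i = nr - 1; i >= 0; i--) {
		uint64_t lo = r[i].base > start ? r[i].base : start;
		uint64_t top = r[i].base + r[i].size;
		uint64_t hi = top < end ? top : end;

		if (hi >= lo + size) {
			uint64_t cand = (hi - size) & ~(align - 1);

			if (cand >= lo)
				return cand;	/* highest aligned fit */
		}
	}
	return ERR;
}

int main(void)
{
	struct region avail[] = {	/* sorted by base, made-up values */
		{ 0x00100000, 0x00700000 },
		{ 0x01000000, 0x07000000 },
	};
	uint64_t got = find_top_down(avail, 2, 0x00100000, 0x08000000,
				     0x200000, 0x200000);

	printf("picked %#llx\n", (unsigned long long)got);	/* 0x7e00000 */
	return 0;
}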

Remove redundant top-down allocation logic from initmem_init() and
calculate_numa_remap_pages().
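
The caller-side effect, as a self-contained sketch (not the kernel code:
find_in_range(), ERR, and the single hard-coded free region are hypothetical
stand-ins for memblock_find_in_range(), MEMBLOCK_ERROR, and memblock's
region tables):

#include <stdint.h>
#include <stdio.h>

#define ERR (~(uint64_t)0)			/* stand-in failure sentinel */

static const uint64_t rg_base = 0x00100000;	/* one toy free region */
static const uint64_t rg_end  = 0x08000000;
static int top_down;				/* 0 = old default, 1 = new */

/* Stand-in for memblock_find_in_range(): lowest fit when bottom-up,
 * highest fit when top-down, ERR when nothing fits. */
static uint64_t find_in_range(uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	uint64_t lo = start > rg_base ? start : rg_base;
	uint64_t hi = end < rg_end ? end : rg_end;
	uint64_t cand;

	if (hi < lo + size)
		return ERR;
	cand = top_down ? (hi - size) & ~(align - 1)
			: (lo + align - 1) & ~(align - 1);
	return (cand >= lo && cand + size <= hi) ? cand : ERR;
}

/* Old idiom (what this commit deletes): force a high address out of a
 * bottom-up allocator by walking the window base downwards on failure. */
static uint64_t alloc_high_old(uint64_t start, uint64_t end,
			       uint64_t size, uint64_t align)
{
	uint64_t target = (end - size) & ~(align - 1);
	uint64_t got;

	do {
		got = find_in_range(target, end, size, align);
		target -= align;
	} while (got == ERR && target > start);
	return got;
}

int main(void)
{
	uint64_t a, b;

	top_down = 0;		/* old world: retry loop needed */
	a = alloc_high_old(rg_base, rg_end, 0x200000, 0x200000);
	top_down = 1;		/* new world: one call suffices */
	b = find_in_range(rg_base, rg_end, 0x200000, 0x200000);
	printf("old loop: %#llx, new call: %#llx\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}

Both paths pick the same highest fit; the difference is purely that the
search now lives inside the allocator instead of every caller.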

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-4-git-send-email-tj@kernel.org


Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent a6c24f7a
+14 −29
@@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 	unsigned long size, reserve_pages = 0;
 
 	for_each_online_node(nid) {
-		u64 node_kva_target;
-		u64 node_kva_final;
+		u64 node_kva;
 
 		/*
 		 * The acpi/srat node info can show hot-add memroy zones
@@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
 
-		node_kva_target = round_down(node_end_pfn[nid] - size,
-						 PTRS_PER_PTE);
-		node_kva_target <<= PAGE_SHIFT;
-		do {
-			node_kva_final = memblock_find_in_range(node_kva_target,
+		node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
 					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
 					((u64)size)<<PAGE_SHIFT,
 					LARGE_PAGE_BYTES);
-			node_kva_target -= LARGE_PAGE_BYTES;
-		} while (node_kva_final == MEMBLOCK_ERROR &&
-			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-		if (node_kva_final == MEMBLOCK_ERROR)
+		if (node_kva == MEMBLOCK_ERROR)
 			panic("Can not get kva ram\n");
 
 		node_remap_size[nid] = size;
@@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		reserve_pages += size;
 		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
 				  " node %d at %llx\n",
-				size, nid, node_kva_final>>PAGE_SHIFT);
+				size, nid, node_kva >> PAGE_SHIFT);
 
 		/*
 		 *  prevent kva address below max_low_pfn want it on system
@@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		 *  to use it as free.
 		 *  So memblock_x86_reserve_range here, hope we don't run out of that array
 		 */
-		memblock_x86_reserve_range(node_kva_final,
-			      node_kva_final+(((u64)size)<<PAGE_SHIFT),
+		memblock_x86_reserve_range(node_kva,
+					   node_kva + (((u64)size)<<PAGE_SHIFT),
 					   "KVA RAM");
 
-		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+		node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
 	}
 	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
 			reserve_pages);
@@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
 void __init initmem_init(void)
 {
 	int nid;
-	long kva_target_pfn;
 
 	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -371,15 +361,10 @@ void __init initmem_init(void)
 
 	kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	do {
-		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
+	kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
 				max_low_pfn << PAGE_SHIFT,
 				kva_pages << PAGE_SHIFT,
 				PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
-		kva_target_pfn -= PTRS_PER_PTE;
-	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
 	if (kva_start_pfn == MEMBLOCK_ERROR)
 		panic("Can not get kva space\n");