
Commit af7c1a6e authored by Tejun Heo and committed by H. Peter Anvin

x86-32, numa: Make @size in init_alloc_remap() represent bytes



The @size variable in init_alloc_remap() is confusing: it starts out as a number of bytes, as its name implies, and then becomes a number of pages. Make it consistently represent bytes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-7-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent c4d4f577
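
For context, here is a standalone sketch (not part of the commit) of why the old pages-based arithmetic and the new bytes-based arithmetic describe the same amount of memory. The constants mirror non-PAE x86-32 (PAGE_SIZE = 4 KiB, PTRS_PER_PTE = 1024), and LARGE_PAGE_BYTES is assumed to be PTRS_PER_PTE * PAGE_SIZE as on x86-32; adjust if your configuration differs.

/*
 * Sketch only: shows that rounding up to large pages and counting 4 KiB
 * pages (old code) yields the same region size as aligning the byte count
 * to a large page (new code).  Assumes LARGE_PAGE_BYTES == PTRS_PER_PTE *
 * PAGE_SIZE, as on non-PAE x86-32.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define PAGE_SHIFT       12
#define PTRS_PER_PTE     1024UL
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)   /* 4 MiB */

/* ALIGN() in the kernel's style: round x up to a multiple of a (power of 2) */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long size;

	for (size = 1; size < 64UL * 1024 * 1024; size += 12345) {
		/* Old code: convert to large pages, then to 4 KiB pages */
		unsigned long pages = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		pages *= PTRS_PER_PTE;

		/* New code: keep @size in bytes, aligned to a large page */
		unsigned long bytes = ALIGN(size, LARGE_PAGE_BYTES);

		/* Both describe the same region; only the unit differs */
		assert(bytes == pages << PAGE_SHIFT);
	}
	printf("pages-based and bytes-based sizing agree\n");
	return 0;
}

This is why the patch can drop the PTRS_PER_PTE multiplication entirely and only shift by PAGE_SHIFT at the points where a page count is still needed (node_remap_size[], the printk, and the return value).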
+7 −11
@@ -286,22 +286,19 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	size = node_remap_size[nid];
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 
-	/* convert size to large (pmd size) pages, rounding up */
-	size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
-	/* now the roundup is correct, convert to PAGE_SIZE pages */
-	size = size * PTRS_PER_PTE;
+	/* align to large page */
+	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
 					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					 (u64)size << PAGE_SHIFT,
-					 LARGE_PAGE_BYTES);
+					 size, LARGE_PAGE_BYTES);
 	if (node_pa == MEMBLOCK_ERROR)
 		panic("Can not get kva ram\n");
 
-	node_remap_size[nid] = size;
+	node_remap_size[nid] = size >> PAGE_SHIFT;
 	node_remap_offset[nid] = offset;
 	printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
-	       size, nid, node_pa >> PAGE_SHIFT);
+	       size >> PAGE_SHIFT, nid, node_pa >> PAGE_SHIFT);
 
 	/*
 	 *  prevent kva address below max_low_pfn want it on system
@@ -315,12 +312,11 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	 *  So memblock_x86_reserve_range here, hope we don't run out
 	 *  of that array
 	 */
-	memblock_x86_reserve_range(node_pa, node_pa + ((u64)size << PAGE_SHIFT),
-				   "KVA RAM");
+	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
 
 	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
 
-	return size;
+	return size >> PAGE_SHIFT;
 }
 
 static void init_remap_allocator(int nid)