arch/x86/mm/numa_64.c  +111 −102

@@ -24,6 +24,8 @@
 #endif
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+
 bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct memnode memnode;
@@ -31,16 +33,19 @@ struct memnode memnode;
 unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
+EXPORT_SYMBOL(cpu_to_node);
 
 unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
+
 cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_to_cpumask);
 
 int numa_off __initdata;
 unsigned long __initdata nodemap_addr;
 unsigned long __initdata nodemap_size;
 
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -48,12 +53,11 @@ unsigned long __initdata nodemap_size;
  * 0 if memnodmap[] too small (of shift too small)
  * -1 if node overlap or lost ram (shift too big)
  */
-static int __init
-populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
+static int __init populate_memnodemap(const struct bootnode *nodes,
+                                      int numnodes, int shift)
 {
-       int i;
-       int res = -1;
        unsigned long addr, end;
+       int i, res = -1;
 
        memset(memnodemap, 0xff, memnodemapsize);
        for (i = 0; i < numnodes; i++) {
@@ -105,8 +109,8 @@ static int __init allocate_cachealigned_memnodemap(void)
  * The LSB of all start and end addresses in the node map is the value of the
  * maximum possible shift.
  */
-static int __init
-extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes)
+static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
+                                         int numnodes)
 {
        int i, nodes_used = 0;
        unsigned long start, end;
@@ -141,10 +145,9 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
                 shift);
        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
                printk(KERN_INFO
-                      "Your memory is not aligned you need to "
-                      "rebuild your kernel with a bigger NODEMAPSIZE "
-                      "shift=%d\n",
+                      "Your memory is not aligned you need to rebuild your kernel "
+                      "with a bigger NODEMAPSIZE shift=%d\n",
                       shift);
                return -1;
        }
        return shift;
@@ -157,12 +160,12 @@ int early_pfn_to_nid(unsigned long pfn)
 }
 #endif
 
-static void * __init
-early_node_mem(int nodeid, unsigned long start, unsigned long end,
-              unsigned long size)
+static void * __init early_node_mem(int nodeid, unsigned long start,
+                                    unsigned long end, unsigned long size)
 {
        unsigned long mem = find_e820_area(start, end, size);
        void *ptr;
+
        if (mem != -1L)
                return __va(mem);
        ptr = __alloc_bootmem_nopanic(size,
@@ -176,16 +179,18 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end,
 }
 
 /* Initialize bootmem allocator for a node */
-void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
+void __init setup_node_bootmem(int nodeid, unsigned long start,
+                               unsigned long end)
 {
-       unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
-       unsigned long nodedata_phys;
+       unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
+       unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
 
        start = round_up(start, ZONE_ALIGN);
 
-       printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
+       printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
+              start, end);
 
        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;
@@ -207,7 +212,8 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
                                         bootmap_pages<<PAGE_SHIFT);
        if (bootmap == NULL)  {
                if (nodedata_phys < start || nodedata_phys >= end)
-                       free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
+                       free_bootmem((unsigned long)node_data[nodeid],
+                                    pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
@@ -221,7 +227,8 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
        free_bootmem_with_active_regions(nodeid, end);
 
        reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
-       reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
+       reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
+                            bootmap_pages<<PAGE_SHIFT);
 #ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
 #endif
@@ -239,8 +246,10 @@ void __init setup_node_zones(int nodeid)
        Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
                nodeid, start_pfn, end_pfn);
 
-       /* Try to allocate mem_map at end to not fill up precious <4GB
-          memory. */
+       /*
+        * Try to allocate mem_map at end to not fill up precious <4GB
+        * memory.
+        */
        memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
        limit = end_pfn << PAGE_SHIFT;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -252,14 +261,17 @@ void __init setup_node_zones(int nodeid)
 #endif
 }
 
+/*
+ * There are unfortunately some poorly designed mainboards around that
+ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
+ * mapping. To avoid this fill in the mapping for all possible CPUs,
+ * as the number of CPUs is not known yet. We round robin the existing
+ * nodes.
+ */
 void __init numa_init_array(void)
 {
        int rr, i;
-       /* There are unfortunately some poorly designed mainboards around
-          that only connect memory to a single CPU. This breaks the 1:1
-          cpu->node mapping. To avoid this fill in the mapping for all
-          possible CPUs, as the number of CPUs is not known yet.
-          We round robin the existing nodes. */
+
        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node(i) != NUMA_NO_NODE)
@@ -269,7 +281,6 @@ void __init numa_init_array(void)
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
-
 }
 
 #ifdef CONFIG_NUMA_EMU
@@ -277,15 +288,17 @@ void __init numa_init_array(void)
 char *cmdline __initdata;
 
 /*
- * Setups up nid to range from addr to addr + size. If the end boundary is
- * greater than max_addr, then max_addr is used instead. The return value is 0
- * if there is additional memory left for allocation past addr and -1 otherwise.
- * addr is adjusted to be at the end of the node.
+ * Setups up nid to range from addr to addr + size. If the end
+ * boundary is greater than max_addr, then max_addr is used instead.
+ * The return value is 0 if there is additional memory left for
+ * allocation past addr and -1 otherwise. addr is adjusted to be at
+ * the end of the node.
 */
 static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
 {
        int ret = 0;
+
        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
@@ -336,6 +349,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
 
        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;
+
                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
@@ -381,14 +395,9 @@ static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
 static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
        struct bootnode nodes[MAX_NUMNODES];
-       u64 addr = start_pfn << PAGE_SHIFT;
+       u64 size, addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = end_pfn << PAGE_SHIFT;
-       int num_nodes = 0;
-       int coeff_flag;
-       int coeff = -1;
-       int num = 0;
-       u64 size;
-       int i;
+       int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
 
        memset(&nodes, 0, sizeof(nodes));
        /*
@@ -396,8 +405,9 @@ static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
-               num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
-                                               simple_strtol(cmdline, NULL, 0));
+               long n = simple_strtol(cmdline, NULL, 0);
+
+               num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
@@ -512,7 +522,8 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_K8_NUMA
-       if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
+       if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
+                                       end_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
 #endif
@@ -549,18 +560,20 @@ void __cpuinit numa_set_node(int cpu, int node)
 
 unsigned long __init numa_free_all_bootmem(void)
 {
-       int i;
        unsigned long pages = 0;
-       for_each_online_node(i) {
+       int i;
+
+       for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));
-       }
+
        return pages;
 }
 
 void __init paging_init(void)
 {
-       int i;
        unsigned long max_zone_pfns[MAX_NR_ZONES];
+       int i;
+
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
@@ -569,9 +582,8 @@ void __init paging_init(void)
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
 
-       for_each_online_node(i) {
+       for_each_online_node(i)
                setup_node_zones(i);
-       }
 
        free_area_init_nodes(max_zone_pfns);
 }
@@ -594,7 +606,6 @@ static __init int numa_setup(char *opt)
 #endif
        return 0;
 }
-
 early_param("numa", numa_setup);
 
 /*
@@ -612,8 +623,10 @@ early_param("numa", numa_setup);
 void __init init_cpu_to_node(void)
 {
        int i;
+
        for (i = 0; i < NR_CPUS; i++) {
                u8 apicid = x86_cpu_to_apicid_init[i];
+
                if (apicid == BAD_APICID)
                        continue;
                if (apicid_to_node[apicid] == NUMA_NO_NODE)
@@ -622,10 +635,6 @@ void __init init_cpu_to_node(void)
        }
 }
 
-EXPORT_SYMBOL(cpu_to_node);
-EXPORT_SYMBOL(node_to_cpumask);
-EXPORT_SYMBOL(node_data);
-
 #ifdef CONFIG_DISCONTIGMEM
 /*
  * Functions to convert PFNs from/to per node page addresses.