Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca764aaf authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'x86-mm' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into x86/mm

parents d04c579f 078a1989
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -54,8 +54,6 @@ static inline phys_addr_t get_max_mapped(void)
extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);

void init_memory_mapping_high(void);

extern void initmem_init(void);
extern void free_initmem(void);

+8 −0
Original line number Diff line number Diff line
@@ -963,6 +963,14 @@ void __init setup_arch(char **cmdline_p)
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preseve max_low_pfn ?*/
		max_low_pfn = max_pfn;
	}
#endif
	memblock.current_limit = get_max_mapped();

	/*
+0 −54
Original line number Diff line number Diff line
@@ -606,63 +606,9 @@ kernel_physical_mapping_init(unsigned long start,
/*
 * Non-NUMA boot-time memory setup: register all of physical memory
 * [0, max_pfn) as one active range, then direct-map the >4GB regions.
 * NOTE(review): shown inside a diff hunk — the #ifdef that the
 * following #endif closes is outside this view.
 */
void __init initmem_init(void)
{
	memblock_x86_register_active_regions(0, 0, max_pfn);
	init_memory_mapping_high();
}
#endif

/*
 * Parameters and result for mapping_work_fn(): the [start, end)
 * physical-address window to map, plus the highest pfn actually
 * mapped so far, accumulated across all active regions.
 */
struct mapping_work_data {
	unsigned long start;		/* window start, physical address */
	unsigned long end;		/* window end, physical address (exclusive) */
	unsigned long pfn_mapped;	/* max init_memory_mapping() result seen */
};

/*
 * Per-region callback for work_with_active_regions(): clip the active
 * region [start_pfn, end_pfn) against the physical-address window held
 * in @datax and direct-map the intersection.
 *
 * Always returns 0 so the iteration continues over every region.
 */
static int __init_refok
mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
{
	struct mapping_work_data *data = datax;
	unsigned long pfn_mapped;
	unsigned long final_start, final_end;

	/* convert the region's pfn range to bytes and clip it to the window */
	final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start);
	final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end);

	if (final_end <= final_start)
		return 0;	/* region does not overlap the window */

	pfn_mapped = init_memory_mapping(final_start, final_end);

	/* track the highest pfn mapped across all visited regions */
	if (pfn_mapped > data->pfn_mapped)
		data->pfn_mapped = pfn_mapped;

	return 0;
}

/*
 * Direct-map only the RAM (active-region) chunks that intersect
 * [start, end), skipping holes between regions.
 *
 * Returns the highest pfn mapped, or 0 if no region overlapped
 * the window.
 */
static unsigned long __init_refok
init_memory_mapping_active_regions(unsigned long start, unsigned long end)
{
	struct mapping_work_data data;

	data.start = start;
	data.end = end;
	data.pfn_mapped = 0;

	/* MAX_NUMNODES: walk the active regions of every node */
	work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);

	return data.pfn_mapped;
}

/*
 * Map physical memory above 4GB (present only when max_pfn exceeds
 * max_low_pfn) and raise the memblock allocation limit to cover the
 * newly mapped range.
 */
void __init_refok init_memory_mapping_high(void)
{
	if (max_pfn > max_low_pfn) {
		/* map [4GB, max_pfn) region by region, skipping holes */
		max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32,
							 max_pfn<<PAGE_SHIFT);
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;

		/* allow memblock to allocate from the now-mapped range */
		memblock.current_limit = get_max_mapped();
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
+55 −41
Original line number Diff line number Diff line
@@ -543,8 +543,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	init_memory_mapping_high();

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = (u64)max_pfn << PAGE_SHIFT;
@@ -564,6 +562,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
@@ -577,26 +584,13 @@ static int __init dummy_numa_init(void)
	return 0;
}

void __init initmem_init(void)
static int __init numa_init(int (*init_func)(void))
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };
	int i, j;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
		numa_init[1] = amd_numa_init;
#endif
	}

	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		if (!numa_init[i])
			continue;
	int i;
	int ret;

		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
@@ -605,29 +599,49 @@ void __init initmem_init(void)
	remove_all_active_ranges();
	numa_reset_distance();

		if (numa_init[i]() < 0)
			continue;

		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
			continue;
	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

		if (numa_register_memblks(&numa_meminfo) < 0)
			continue;
	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);
	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
				numa_clear_node(j);
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}

void __init initmem_init(void)
{
	int ret;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		ret = numa_init(x86_acpi_numa_init);
		if (!ret)
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		ret = numa_init(amd_numa_init);
		if (!ret)
			return;
#endif
	}
	BUG();

	numa_init(dummy_numa_init);
}

unsigned long __init numa_free_all_bootmem(void)
+16 −4
Original line number Diff line number Diff line
@@ -301,6 +301,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
	const u64 max_addr = max_pfn << PAGE_SHIFT;
	u8 *phys_dist = NULL;
	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
	int dfl_phys_nid;
	int i, j, ret;

	if (!emu_cmdline)
@@ -357,6 +358,19 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
					node_distance(i, j);
	}

	/* determine the default phys nid to use for unmapped nodes */
	dfl_phys_nid = NUMA_NO_NODE;
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
		if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
			dfl_phys_nid = emu_nid_to_phys[i];
			break;
		}
	}
	if (dfl_phys_nid == NUMA_NO_NODE) {
		pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
		goto no_emu;
	}

	/* commit */
	*numa_meminfo = ei;

@@ -377,7 +391,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = 0;
			emu_nid_to_phys[i] = dfl_phys_nid;

	/*
	 * Transform distance table.  numa_set_distance() ignores all
@@ -417,8 +431,6 @@ void __cpuinit numa_add_cpu(int cpu)
{
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
	nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));