Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f361a450 authored by Yinghai Lu, committed by Ingo Molnar
Browse files

x86: introduce max_low_pfn_mapped for 64-bit



when more than 4g memory is installed, don't map the big hole below 4g.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f302a5bb
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -130,7 +130,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
	if (!phys || !size)
	if (!phys || !size)
		return NULL;
		return NULL;


	if (phys+size <= (max_pfn_mapped << PAGE_SHIFT))
	if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
		return __va(phys);
		return __va(phys);


	offset = phys & (PAGE_SIZE - 1);
	offset = phys & (PAGE_SIZE - 1);
+7 −3
Original line number Original line Diff line number Diff line
@@ -199,12 +199,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
		 * Don't do it for gbpages because there seems very little
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 * benefit in doing so.
		 */
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		    (tseg >> PMD_SHIFT) <
		    if ((tseg>>PMD_SHIFT) <
			(max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
			((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
			 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
			set_memory_4k((unsigned long)__va(tseg), 1);
			set_memory_4k((unsigned long)__va(tseg), 1);
		}
		}
	}
	}
}


static struct cpu_dev amd_cpu_dev __cpuinitdata = {
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_vendor	= "AMD",
+20 −3
Original line number Original line Diff line number Diff line
@@ -1056,7 +1056,7 @@ unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
/*
/*
 * Find the highest page frame number we have available
 * Find the highest page frame number we have available
 */
 */
unsigned long __init e820_end(void)
static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
{
	int i;
	int i;
	unsigned long last_pfn = 0;
	unsigned long last_pfn = 0;
@@ -1064,12 +1064,21 @@ unsigned long __init e820_end(void)


	for (i = 0; i < e820.nr_map; i++) {
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		struct e820entry *ei = &e820.map[i];
		unsigned long start_pfn;
		unsigned long end_pfn;
		unsigned long end_pfn;


		if (ei->type != E820_RAM)
		if (ei->type != type)
			continue;
			continue;


		start_pfn = ei->addr >> PAGE_SHIFT;
		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;

		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn) {
			last_pfn = limit_pfn;
			break;
		}
		if (end_pfn > last_pfn)
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
			last_pfn = end_pfn;
	}
	}
@@ -1083,7 +1092,15 @@ unsigned long __init e820_end(void)
			 last_pfn, max_arch_pfn);
			 last_pfn, max_arch_pfn);
	return last_pfn;
	return last_pfn;
}
}
unsigned long __init e820_end_of_ram_pfn(void)
{
	return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
}


unsigned long __init e820_end_of_low_ram_pfn(void)
{
	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
/*
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
@@ -1206,7 +1223,7 @@ static int __init parse_memmap_opt(char *p)
		 * the real mem size before original memory map is
		 * the real mem size before original memory map is
		 * reset.
		 * reset.
		 */
		 */
		saved_max_pfn = e820_end();
		saved_max_pfn = e820_end_of_ram_pfn();
#endif
#endif
		e820.nr_map = 0;
		e820.nr_map = 0;
		userdef = 1;
		userdef = 1;
+1 −1
Original line number Original line Diff line number Diff line
@@ -473,7 +473,7 @@ void __init efi_enter_virtual_mode(void)
		size = md->num_pages << EFI_PAGE_SHIFT;
		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		end = md->phys_addr + size;


		if (PFN_UP(end) <= max_pfn_mapped)
		if (PFN_UP(end) <= max_low_pfn_mapped)
			va = __va(md->phys_addr);
			va = __va(md->phys_addr);
		else
		else
			va = efi_ioremap(md->phys_addr, size);
			va = efi_ioremap(md->phys_addr, size);
+18 −4
Original line number Original line Diff line number Diff line
@@ -713,14 +713,14 @@ void __init setup_arch(char **cmdline_p)
	 * partially used pages are not usable - thus
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 * we are rounding upwards:
	 */
	 */
	max_pfn = e820_end();
	max_pfn = e820_end_of_ram_pfn();


	/* preallocate 4k for mptable mpc */
	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();
	early_reserve_e820_mpc_new();
	/* update e820 for memory not covered by WB MTRRs */
	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end();
		max_pfn = e820_end_of_ram_pfn();


#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	/* max_low_pfn get updated here */
@@ -732,12 +732,26 @@ void __init setup_arch(char **cmdline_p)


	/* How many end-of-memory variables you have, grandma! */
	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
#endif


	/* max_pfn_mapped is updated here */
	/* max_pfn_mapped is updated here */
	max_pfn_mapped = init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preseve max_low_pfn ?*/
		max_low_pfn = max_pfn;
	}
#endif


	/*
	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
Loading