
Commit 06d4a22b authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: completely disable NOPL on 32 bits
  x86/paravirt: Remove duplicate paravirt_pagetable_setup_{start, done}()
  xen: fix for xen guest with mem > 3.7G
  x86: fix possible x86_64 and EFI regression
  arch/x86/kernel/kdebugfs.c: introduce missing kfree
parents b9719635 ba0593bf
+4 −20
@@ -344,31 +344,15 @@ static void __init early_cpu_detect(void)

 /*
  * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
+ * family >= 6; unfortunately, that's not true in practice because
  * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect.  Hence, probe for it based on first
- * principles.
+ * are not easy to detect.  In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work.  Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-	const u32 nopl_signature = 0x888c53b1; /* Random number */
-	u32 has_nopl = nopl_signature;
-
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
-	if (c->x86 >= 6) {
-		asm volatile("\n"
-			     "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-			     "2:\n"
-			     "        .section .fixup,\"ax\"\n"
-			     "3:      xor %0,%0\n"
-			     "        jmp 2b\n"
-			     "        .previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : "+a" (has_nopl));
-
-		if (has_nopl == nopl_signature)
-			set_cpu_cap(c, X86_FEATURE_NOPL);
-	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
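With the exception-table probe removed, the 32-bit detect_nopl() reduces to the following (reconstructed from the removed lines in the hunk above, not a quote of the resulting file): the capability bit is simply cleared unconditionally until a reliable way to detect the broken cases exists.

static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	/* NOPL stays disabled on 32 bits; no probing, per the comment above. */
	clear_cpu_cap(c, X86_FEATURE_NOPL);
}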
+1 −0
@@ -139,6 +139,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
 		if (PageHighMem(pg)) {
 			data = ioremap_cache(pa_data, sizeof(*data));
 			if (!data) {
+				kfree(node);
 				error = -ENXIO;
 				goto err_dir;
 			}
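The added kfree(node) plugs a leak on this error path: when ioremap_cache() fails for a highmem setup_data page, the function bails out to err_dir and the node allocated earlier in the loop was never freed. A minimal sketch of the pattern, assuming the usual kmalloc() of node earlier in create_setup_data_nodes() (that allocation is not shown in the hunk):

	node = kmalloc(sizeof(*node), GFP_KERNEL);	/* assumed earlier in the loop */
	if (!node) {
		error = -ENOMEM;
		goto err_dir;
	}
	...
	if (PageHighMem(pg)) {
		data = ioremap_cache(pa_data, sizeof(*data));
		if (!data) {
			kfree(node);	/* the fix: don't leak node on this path */
			error = -ENXIO;
			goto err_dir;
		}
	}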
+4 −1
@@ -670,6 +670,10 @@ void __init setup_arch(char **cmdline_p)

 	parse_early_param();
 
+#ifdef CONFIG_X86_64
+	check_efer();
+#endif
+
 #if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
 	/*
 	 * Must be before kernel pagetables are setup
@@ -738,7 +742,6 @@ void __init setup_arch(char **cmdline_p)
 #else
 	num_physpages = max_pfn;
 
-	check_efer();
 
 	/* How many end-of-memory variables you have, grandma! */
 	/* need this before calling reserve_initrd */
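Taken together, the two hunks above just move the call: check_efer() now runs on x86_64 immediately after early parameters are parsed, before the VMI and pagetable setup, instead of later in the 32-bit num_physpages path. The resulting ordering (a reconstruction, not a quote of the full setup_arch()):

	parse_early_param();

#ifdef CONFIG_X86_64
	check_efer();			/* moved up so it runs before the setup that follows */
#endif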
+0 −4
@@ -458,11 +458,7 @@ static void __init pagetable_init(void)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
 
-	paravirt_pagetable_setup_start(pgd_base);
-
 	permanent_kmaps_init(pgd_base);
-
-	paravirt_pagetable_setup_done(pgd_base);
 }
 
 #ifdef CONFIG_ACPI_SLEEP
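With the duplicated hook calls gone (per the commit subject, paravirt_pagetable_setup_{start,done}() were being invoked redundantly here), the 32-bit pagetable_init() reduces to this reconstruction of the hunk above:

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}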
+1 −1
@@ -42,7 +42,7 @@ char * __init xen_memory_setup(void)

 	e820.nr_map = 0;
 
-	e820_add_region(0, PFN_PHYS(max_pfn), E820_RAM);
+	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
 
 	/*
 	 * Even though this is normal, usable memory under Xen, reserve
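The (u64) cast is the whole fix: max_pfn is an unsigned long, which is 32 bits wide in a 32-bit Xen guest, so the left shift done by PFN_PHYS() wraps once the end-of-memory address no longer fits in 32 bits and the registered E820 RAM region comes out truncated. A standalone illustration (plain C, not kernel code; PAGE_SHIFT of 12 is the usual x86 value, and uint32_t stands in for the guest's 32-bit unsigned long):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t max_pfn = 0x100000;	/* roughly 4 GB worth of 4 KB pages */

	uint32_t wrapped = max_pfn << PAGE_SHIFT;		/* shift done in 32 bits: wraps to 0 */
	uint64_t correct = (uint64_t)max_pfn << PAGE_SHIFT;	/* what the (u64) cast achieves */

	printf("without cast: %#x\n", (unsigned int)wrapped);
	printf("with cast:    %#llx\n", (unsigned long long)correct);
	return 0;
}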