Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c17e486 authored by Linus Torvalds
Browse files
Pull x86 fixes from Peter Anvin.

This includes the resume-time FPU corruption fix from the chromeos guys,
marked for stable.

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Avoid FPU lazy restore after suspend
  x86-32: Unbreak booting on some 486 clones
  x86, kvm: Remove incorrect redundant assembly constraint
parents 8fdd78ee 644c1541
Loading
Loading
Loading
Loading
+9 −6
Original line number Diff line number Diff line
@@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk)
/* Result of switch_fpu_prepare(): presumably non-zero when the incoming
 * task's FPU state should be loaded — NOTE(review): confirm against
 * switch_fpu_prepare()/switch_fpu_finish(), which are outside this view. */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state
 * on this CPU: with fpu_owner_task cleared, a subsequent
 * fpu_lazy_restore() check for this CPU cannot match any task, so the
 * state will be reloaded from memory instead of being assumed live in
 * the registers.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
+7 −2
Original line number Diff line number Diff line
@@ -292,8 +292,8 @@ default_entry:
 *	be using the global pages. 
 *
 *	NOTE! If we are on a 486 we may have no cr4 at all!
 *	Specifically, cr4 exists if and only if CPUID exists
 *	and has flags other than the FPU flag set.
 */
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
@@ -308,6 +308,11 @@ default_entry:
	testl %ecx,%eax
	jz 6f			# No ID flag = no CPUID = no CR4

	movl $1,%eax
	cpuid
	andl $~1,%edx		# Ignore CPUID.FPU
	jz 6f			# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

+5 −0
Original line number Diff line number Diff line
@@ -68,6 +68,8 @@
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* the FPU context is blank, nobody can own it */
	__cpu_disable_lazy_restore(cpu);

	err = do_boot_cpu(apicid, cpu, tidle);
	if (err) {
		pr_debug("do_boot_cpu failed %d\n", err);
+1 −2
Original line number Diff line number Diff line
@@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val));	\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */