Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da51da18 authored by Andy Lutomirski's avatar Andy Lutomirski Committed by Ingo Molnar
Browse files

x86/entry/64: Pass SP0 directly to load_sp0()



load_sp0() had an odd signature:

  void load_sp0(struct tss_struct *tss, struct thread_struct *thread);

Simplify it to:

  void load_sp0(unsigned long sp0);

Also simplify a few get_cpu()/put_cpu() sequences to
preempt_disable()/preempt_enable().

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/2655d8b42ed940aa384fe18ee1129bbbcf730a08.1509609304.git.luto@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bd7dc5a6
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -15,10 +15,9 @@
#include <linux/cpumask.h>
#include <asm/frame.h>

/* NOTE(review): diff hunk rendered without +/- markers — the OLD (removed)
 * and NEW (added) lines appear together below.  OLD signature, removed by
 * this commit: took the TSS and thread_struct and derived sp0 itself. */
static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
/* NEW signature: the caller passes the new stack pointer (sp0) directly. */
static inline void load_sp0(unsigned long sp0)
{
	/* OLD body (removed): 2-argument paravirt call. */
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
	/* NEW body: forward sp0 straight to the pv_cpu_ops.load_sp0 hook. */
	PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
+1 −1
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ struct pv_cpu_ops {
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
	void (*load_sp0)(unsigned long sp0);

	void (*set_iopl_mask)(unsigned mask);

+4 −5
Original line number Diff line number Diff line
@@ -517,9 +517,9 @@ static inline void native_set_iopl_mask(unsigned mask)
}

/* NOTE(review): diff hunk with OLD and NEW lines interleaved by the viewer.
 * OLD version (removed): copied thread->sp0 into the passed-in TSS. */
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
/* NEW version: takes sp0 directly... */
native_load_sp0(unsigned long sp0)
{
	/* OLD body (removed): wrote through the tss pointer argument. */
	tss->x86_tss.sp0 = thread->sp0;
	/* NEW body: ...and writes it into this CPU's TSS via a per-CPU
	 * accessor, so no tss/thread pointers need to be passed around. */
	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
@@ -544,10 +544,9 @@ static inline unsigned long current_top_of_stack(void)
#else
#define __cpuid			native_cpuid

/* NOTE(review): diff hunk, OLD and NEW lines shown together.  This is the
 * !CONFIG_PARAVIRT wrapper; OLD 2-argument signature removed by the commit. */
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
/* NEW signature: mirrors the simplified native_load_sp0(unsigned long). */
static inline void load_sp0(unsigned long sp0)
{
	/* OLD body (removed). */
	native_load_sp0(tss, thread);
	/* NEW body: pass sp0 straight through to the native implementation. */
	native_load_sp0(sp0);
}

#define set_iopl_mask native_set_iopl_mask
+2 −2
Original line number Diff line number Diff line
@@ -1570,7 +1570,7 @@ void cpu_init(void)
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	load_sp0(current->thread.sp0);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);
@@ -1625,7 +1625,7 @@ void cpu_init(void)
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	load_sp0(thread->sp0);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);
+1 −1
Original line number Diff line number Diff line
@@ -287,7 +287,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * current_thread_info().  Refresh the SYSENTER configuration in
	 * case prev or next is vm86.
	 */
	load_sp0(tss, next);
	load_sp0(next->sp0);
	refresh_sysenter_cs(next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
Loading