Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 13bba6fd authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix performance regression caused by paravirt_ops on native kernels
  xen: use header for EXPORT_SYMBOL_GPL
  x86, 32-bit: fix kernel_trap_sp()
  x86: fix percpu_{to,from}_op()
  x86: mtrr: Fix high_width computation when phys-addr is >= 44bit
  x86: Fix false positive section mismatch warnings in the apic code
parents 0130b2d7 b4ecc126
Loading
Loading
Loading
Loading
+13 −0
Original line number Diff line number Diff line
@@ -498,6 +498,19 @@ config PARAVIRT
	  over full virtualization.  However, when run without a hypervisor
	  the kernel is theoretically slower and slightly larger.

config PARAVIRT_SPINLOCKS
	bool "Paravirtualization layer for spinlocks"
	depends on PARAVIRT && SMP && EXPERIMENTAL
	---help---
	  Paravirtualized spinlocks allow a pvops backend to replace the
	  spinlock implementation with something virtualization-friendly
	  (for example, block the virtual CPU rather than spinning).

	  Unfortunately the downside is an up to 5% performance hit on
	  native kernels, with various workloads.

	  If you are unsure how to answer this question, answer N.

config PARAVIRT_CLOCK
	bool
	default n
+1 −1
Original line number Diff line number Diff line
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
+5 −5
Original line number Diff line number Diff line
@@ -82,22 +82,22 @@ do { \
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		    : "qi" ((T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((T__)val));			\
		    : "re" ((T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
@@ -109,7 +109,7 @@ do { \
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "=q" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
+4 −3
Original line number Diff line number Diff line
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  So regs will be the current sp.
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)regs;
	return (unsigned long)(&regs->sp);
#else
	return regs->sp;
#endif
+2 −2
Original line number Diff line number Diff line
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifndef CONFIG_PARAVIRT
#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
	__raw_spin_lock(lock);
}

#endif
#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
Loading