
Commit e335e3eb authored by Raghavendra K T, committed by Ingo Molnar

locking/kconfig: Simplify INLINE_SPIN_UNLOCK usage

Get rid of INLINE_SPIN_UNLOCK entirely, replacing it with
UNINLINE_SPIN_UNLOCK, which has the reverse meaning.

Whoever wants to change the default spinlock inlining
behavior and uninline the spinlocks for some weird reason,
such as spinlock debugging, paravirt etc., can now just
select UNINLINE_SPIN_UNLOCK.
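
For illustration, a minimal Kconfig sketch of the new pattern
(MY_PV_FEATURE is a made-up option name, not something this patch
adds):

  config MY_PV_FEATURE
  	bool "Hypothetical paravirt spinlock feature"
  	# Force the out-of-line unlock slowpath. Under the old
  	# INLINE_SPIN_UNLOCK semantics, this required editing the
  	# def_bool expression in kernel/Kconfig.locks instead.
  	select UNINLINE_SPIN_UNLOCK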

Original discussion at: https://lkml.org/lkml/2012/3/21/357

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20120322095502.30866.75756.sendpatchset@codeblue

[ tidied up the changelog a bit ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 280fb016
+1 −1
@@ -133,7 +133,7 @@ CONFIG_BLK_DEV_BSG=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 CONFIG_INLINE_READ_UNLOCK=y
 CONFIG_INLINE_READ_UNLOCK_IRQ=y
+1 −1
@@ -113,7 +113,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
 # CONFIG_INLINE_SPIN_LOCK_BH is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQ is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 # CONFIG_INLINE_SPIN_UNLOCK_BH is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+1 −1
@@ -67,7 +67,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
-#ifdef CONFIG_INLINE_SPIN_UNLOCK
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
 #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
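The effect of the inverted guard, as a simplified C sketch (the
declaration is paraphrased from spinlock_api_smp.h rather than
quoted verbatim):

  /* Out-of-line version, used when CONFIG_UNINLINE_SPIN_UNLOCK is
   * selected, e.g. by spinlock debugging or by PREEMPT on an
   * architecture that does not inline its unlocks. */
  void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock);

  #ifndef CONFIG_UNINLINE_SPIN_UNLOCK
  /* Default case: map the call straight onto the inline fastpath. */
  #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
  #endif
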
+2 −2
@@ -124,8 +124,8 @@ config INLINE_SPIN_LOCK_IRQSAVE
 	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
 		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
-config INLINE_SPIN_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+config UNINLINE_SPIN_UNLOCK
+	bool
 
 config INLINE_SPIN_UNLOCK_BH
 	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
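
Note the shift in who decides the value here: the old def_bool
computed it from an expression in place, while the new bare bool
defaults to off and is enabled only when some other option selects
it. Schematically (the EXAMPLE_* names are illustrative, not real
options):

  # Old style: the option evaluates its own expression.
  config EXAMPLE_OLD
  	def_bool !DEBUG_SPINLOCK && !PREEMPT

  # New style: off unless another option pulls it in.
  config EXAMPLE_NEW
  	bool

  config EXAMPLE_FEATURE
  	bool "Some feature that needs EXAMPLE_NEW"
  	select EXAMPLE_NEW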
+1 −0
@@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY
 config PREEMPT
 	bool "Preemptible Kernel (Low-Latency Desktop)"
 	select PREEMPT_COUNT
+	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
 	help
 	  This option reduces the latency of the kernel by making
 	  all kernel code (that is not executing in a critical section)