
Commit 4866cde0 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sched: cleanup context switch locking



Instead of requiring architecture code to interact with the scheduler's
locking implementation, provide a couple of defines that the architecture
can use to request runqueue-unlocked context switches, and to ask for
interrupts to be enabled over the context switch.
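
For context, the scheduler core (the kernel/sched.c hunk is not shown on
this page) keys its locking and interrupt handling off these defines.  For
an architecture that defines __ARCH_WANT_UNLOCKED_CTXSW on SMP, the helpers
look roughly like the sketch below (runqueue_t and task_t are the scheduler
typedefs of this era; treat this as illustrative, not the verbatim patch):

static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
{
	/* next is about to run on this CPU (see the oncpu sketch below). */
	next->oncpu = 1;
	/* Drop the runqueue lock before switch_to() ... */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	spin_unlock_irq(&rq->lock);	/* ... with interrupts re-enabled */
#else
	spin_unlock(&rq->lock);		/* ... keeping interrupts off */
#endif
}

static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
{
	/*
	 * Once oncpu is cleared, prev may be migrated to another CPU,
	 * so the stores done during the switch must be visible first.
	 */
	smp_wmb();
	prev->oncpu = 0;
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}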

Also replace the "switch_lock" used by these architectures with an "oncpu"
flag (note: a plain word, not a potentially slow bus-locked bitflag).  This
eliminates one bus-locked memory operation when context switching, and
simplifies the task_running() function.
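
On SMP this turns the task_running() check into a plain word read instead
of a spinlock probe; schematically (again a sketch, not the verbatim
kernel/sched.c change):

static inline int task_running(runqueue_t *rq, task_t *p)
{
	/*
	 * Replaces ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)).
	 * p->oncpu is an ordinary int set and cleared with plain stores,
	 * so no bus-locked bit operation is needed on this path.
	 */
	return p->oncpu;
}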

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 48c08d3f
+4 −26
@@ -145,34 +145,12 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#ifdef CONFIG_SMP
 /*
- * Define our own context switch locking.  This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency.  The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
  */
-#define prepare_arch_switch(rq,next)					\
-do {									\
-	spin_lock(&(next)->switch_lock);				\
-	spin_unlock_irq(&(rq)->lock);					\
-} while (0)
-
-#define finish_arch_switch(rq,prev)					\
-	spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p)						\
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented.  This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next)	local_irq_enable()
-#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
-#define task_running(rq,p)		((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
+1 −9
@@ -183,8 +183,6 @@ do { \
 
 #ifdef __KERNEL__
 
-#define prepare_to_switch()    do { } while(0)
-
 #ifdef CONFIG_IA32_SUPPORT
 # define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
 #else
@@ -274,13 +272,7 @@ extern void ia64_load_extra (struct task_struct *task);
  * of that CPU which will not be released, because there we wait for the
  * tasklist_lock to become available.
  */
-#define prepare_arch_switch(rq, next)		\
-do {						\
-	spin_lock(&(next)->switch_lock);	\
-	spin_unlock(&(rq)->lock);		\
-} while (0)
-#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) 		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
 
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
+2 −8
@@ -422,16 +422,10 @@ extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
 extern int stop_a_enabled;
 
 /*
- * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * See include/asm-ia64/system.h; prevents deadlock on SMP
  * systems.
  */
-#define prepare_arch_switch(rq, next)		\
-do {						\
-	spin_lock(&(next)->switch_lock);	\
-	spin_unlock(&(rq)->lock);		\
-} while (0)
-#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) 		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
 
 #define arch_align_stack(x) (x)
 
+3 −14
@@ -104,29 +104,18 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					     \
 } while (0)
 
-#define prepare_arch_switch(rq, next)	do { } while(0)
-#define task_running(rq, p)		((rq)->curr == (p))
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do {				     \
-	set_fs(current->thread.mm_segment);				     \
-	spin_unlock(&(rq)->lock);					     \
-	account_system_vtime(prev);					     \
-	local_irq_enable();						     \
-} while (0)
-
 #else
+#define account_system_vtime(prev) do { } while (0)
+#endif
 
-#define finish_arch_switch(rq, prev) do {				     \
+#define finish_arch_switch(prev) do {					     \
 	set_fs(current->thread.mm_segment);				     \
-	spin_unlock_irq(&(rq)->lock);					     \
+	account_system_vtime(prev);					     \
 } while (0)
-
-#endif
 
 #define nop() __asm__ __volatile__ ("nop")
 
 #define xchg(ptr,x) \
+1 −3
@@ -101,7 +101,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
  * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
  * XXX WTF is the above comment? Found in late teen 2.4.x.
  */
-#define prepare_arch_switch(rq, next) do { \
+#define prepare_arch_switch(next) do { \
 	__asm__ __volatile__( \
 	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
 	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
@@ -109,8 +109,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	"save %sp, -0x40, %sp\n\t" \
 	"restore; restore; restore; restore; restore; restore; restore"); \
 } while(0)
-#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
-#define task_running(rq, p)		((rq)->curr == (p))
 
 	/* Much care has gone into this code, do not touch it.
 	 *