Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8efb90cf authored by Linus Torvalds
Browse files

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - big rtmutex and futex cleanup and robustification from Thomas
     Gleixner
   - mutex optimizations and refinements from Jason Low
   - arch_mutex_cpu_relax() removal and related cleanups
   - smaller lockdep tweaks"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  arch, locking: Ciao arch_mutex_cpu_relax()
  locking/lockdep: Only ask for /proc/lock_stat output when available
  locking/mutexes: Optimize mutex trylock slowpath
  locking/mutexes: Try to acquire mutex only if it is unlocked
  locking/mutexes: Delete the MUTEX_SHOW_NO_WAITER macro
  locking/mutexes: Correct documentation on mutex optimistic spinning
  rtmutex: Make the rtmutex tester depend on BROKEN
  futex: Simplify futex_lock_pi_atomic() and make it more robust
  futex: Split out the first waiter attachment from lookup_pi_state()
  futex: Split out the waiter check from lookup_pi_state()
  futex: Use futex_top_waiter() in lookup_pi_state()
  futex: Make unlock_pi more robust
  rtmutex: Avoid pointless requeueing in the deadlock detection chain walk
  rtmutex: Cleanup deadlock detector debug logic
  rtmutex: Confine deadlock logic to futex
  rtmutex: Simplify remove_waiter()
  rtmutex: Document pi chain walk
  rtmutex: Clarify the boost/deboost part
  rtmutex: No need to keep task ref for lock owner check
  rtmutex: Simplify and document try_to_take_rtmutex()
  ...
parents 5bda4f63 3a6bfbc9
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -57,6 +57,7 @@ unsigned long get_wchan(struct task_struct *p);
  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)


#define cpu_relax()	barrier()
#define cpu_relax()	barrier()
#define cpu_relax_lowlatency() cpu_relax()


#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_PREFETCHW
+2 −0
Original line number Original line Diff line number Diff line
@@ -62,6 +62,8 @@ unsigned long thread_saved_pc(struct task_struct *t);
#define cpu_relax()	do { } while (0)
#define cpu_relax()	do { } while (0)
#endif
#endif


#define cpu_relax_lowlatency() cpu_relax()

#define copy_segments(tsk, mm)      do { } while (0)
#define copy_segments(tsk, mm)      do { } while (0)
#define release_segments(mm)        do { } while (0)
#define release_segments(mm)        do { } while (0)


+2 −0
Original line number Original line Diff line number Diff line
@@ -82,6 +82,8 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax()			barrier()
#define cpu_relax()			barrier()
#endif
#endif


#define cpu_relax_lowlatency()                cpu_relax()

#define task_pt_regs(p) \
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)


+1 −0
Original line number Original line Diff line number Diff line
@@ -129,6 +129,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
unsigned long get_wchan(struct task_struct *p);


#define cpu_relax()			barrier()
#define cpu_relax()			barrier()
#define cpu_relax_lowlatency()                cpu_relax()


/* Thread switching */
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
+1 −0
Original line number Original line Diff line number Diff line
@@ -92,6 +92,7 @@ extern struct avr32_cpuinfo boot_cpu_data;
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))


#define cpu_relax()		barrier()
#define cpu_relax()		barrier()
#define cpu_relax_lowlatency()        cpu_relax()
#define cpu_sync_pipeline()	asm volatile("sub pc, -2" : : : "memory")
#define cpu_sync_pipeline()	asm volatile("sub pc, -2" : : : "memory")


struct cpu_context {
struct cpu_context {
Loading