Loading include/linux/mutex.h +6 −0 Original line number Diff line number Diff line Loading @@ -7,6 +7,11 @@ * * This file contains the main data structure and API definitions. */ /* * NOTE: This file has been modified by Sony Mobile Communications Inc. * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, * and licensed under the license of the file. */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H Loading Loading @@ -171,6 +176,7 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); * Returns 1 if the mutex has been acquired successfully, and 0 on contention. */ extern int mutex_trylock(struct mutex *lock); extern int mutex_trylock_spin(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); Loading kernel/locking/mutex.c +26 −12 Original line number Diff line number Diff line Loading @@ -17,6 +17,11 @@ * * Also see Documentation/locking/mutex-design.txt. */ /* * NOTE: This file has been modified by Sony Mobile Communications Inc. * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, * and licensed under the license of the file. */ #include <linux/mutex.h> #include <linux/ww_mutex.h> #include <linux/sched.h> Loading @@ -26,7 +31,6 @@ #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/osq_lock.h> #include <linux/delay.h> /* * In the DEBUG case we are using the "NULL fastpath" for mutexes, Loading Loading @@ -379,17 +383,6 @@ static bool mutex_optimistic_spin(struct mutex *lock, * values at the cost of a few extra spins. */ cpu_relax_lowlatency(); /* * On arm systems, we must slow down the waiter's repeated * acquisition of spin_mlock and atomics on the lock count, or * we risk starving out a thread attempting to release the * mutex. The mutex slowpath release must take spin lock * wait_lock. This spin lock can share a monitor with the * other waiter atomics in the mutex data structure, so must * take care to rate limit the waiters. */ udelay(1); } osq_unlock(&lock->osq); Loading Loading @@ -925,6 +918,27 @@ int __sched mutex_trylock(struct mutex *lock) } EXPORT_SYMBOL(mutex_trylock); int __sched mutex_trylock_spin(struct mutex *lock) { int ret; ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); if (!ret) { preempt_disable(); ret = mutex_optimistic_spin(lock, NULL, 0); if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); preempt_enable(); } if (ret) mutex_set_owner(lock); return ret; } EXPORT_SYMBOL(mutex_trylock_spin); #ifndef CONFIG_DEBUG_LOCK_ALLOC int __sched __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) Loading Loading
include/linux/mutex.h +6 −0 Original line number Diff line number Diff line Loading @@ -7,6 +7,11 @@ * * This file contains the main data structure and API definitions. */ /* * NOTE: This file has been modified by Sony Mobile Communications Inc. * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, * and licensed under the license of the file. */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H Loading Loading @@ -171,6 +176,7 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); * Returns 1 if the mutex has been acquired successfully, and 0 on contention. */ extern int mutex_trylock(struct mutex *lock); extern int mutex_trylock_spin(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); Loading
kernel/locking/mutex.c +26 −12 Original line number Diff line number Diff line Loading @@ -17,6 +17,11 @@ * * Also see Documentation/locking/mutex-design.txt. */ /* * NOTE: This file has been modified by Sony Mobile Communications Inc. * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, * and licensed under the license of the file. */ #include <linux/mutex.h> #include <linux/ww_mutex.h> #include <linux/sched.h> Loading @@ -26,7 +31,6 @@ #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/osq_lock.h> #include <linux/delay.h> /* * In the DEBUG case we are using the "NULL fastpath" for mutexes, Loading Loading @@ -379,17 +383,6 @@ static bool mutex_optimistic_spin(struct mutex *lock, * values at the cost of a few extra spins. */ cpu_relax_lowlatency(); /* * On arm systems, we must slow down the waiter's repeated * acquisition of spin_mlock and atomics on the lock count, or * we risk starving out a thread attempting to release the * mutex. The mutex slowpath release must take spin lock * wait_lock. This spin lock can share a monitor with the * other waiter atomics in the mutex data structure, so must * take care to rate limit the waiters. */ udelay(1); } osq_unlock(&lock->osq); Loading Loading @@ -925,6 +918,27 @@ int __sched mutex_trylock(struct mutex *lock) } EXPORT_SYMBOL(mutex_trylock); int __sched mutex_trylock_spin(struct mutex *lock) { int ret; ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); if (!ret) { preempt_disable(); ret = mutex_optimistic_spin(lock, NULL, 0); if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); preempt_enable(); } if (ret) mutex_set_owner(lock); return ret; } EXPORT_SYMBOL(mutex_trylock_spin); #ifndef CONFIG_DEBUG_LOCK_ALLOC int __sched __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) Loading