kernel/locking/mcs_spinlock.c (+8 −1)

@@ -1,5 +1,6 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include "mcs_spinlock.h"
+#include <linux/sched/rt.h>

 #ifdef CONFIG_SMP
@@ -87,6 +88,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
+	struct task_struct *task = current;
 	int curr = encode_cpu(smp_processor_id());
 	int old;

@@ -114,8 +116,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	while (!smp_load_acquire(&node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
+		 * If a task spins on owner on a CPU after acquiring
+		 * osq_lock while a RT task spins on another CPU to
+		 * acquire osq_lock, it will starve the owner from
+		 * completing if owner is to be scheduled on the same CPU.
+		 * It will be a live lock.
 		 */
-		if (need_resched())
+		if (need_resched() || rt_task(task))
			goto unqueue;

		cpu_relax_lowlatency();