kernel/sched.c +2 −2

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
@@ -4433,7 +4433,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
 		__schedule_bug(prev);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
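For readers outside the kernel tree: `unlikely()` wraps GCC's `__builtin_expect`, which tells the compiler which way a branch usually goes so it can keep the expected path as the straight-line fall-through. The point of both hunks is that `unlikely(a) || unlikely(b)` hints each comparison separately while leaving the short-circuit `||` itself unhinted, whereas `unlikely(a || b)` states the real intent once: the whole condition is the cold path. Below is a minimal standalone sketch of the `rt_policy()` change; the `unlikely()` macro matches the kernel's definition in include/linux/compiler.h, but the policy constants and the `rt_policy_before`/`rt_policy_after` helpers are simplified stand-ins for illustration, not kernel code.

```c
#include <stdio.h>

/* Mirror of the kernel's branch-prediction hint (include/linux/compiler.h):
 * __builtin_expect(expr, c) tells GCC that expr usually evaluates to c. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Simplified stand-ins for the scheduler policy constants. */
#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2

/* Before the patch: each comparison carries its own hint, and the
 * "||" joining the two unlikely() calls is left unhinted. */
static inline int rt_policy_before(int policy)
{
	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
		return 1;
	return 0;
}

/* After the patch: one hint covers the whole condition, matching what
 * the code means -- "an RT policy is the uncommon case". */
static inline int rt_policy_after(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

int main(void)
{
	/* Both forms compute the same result; only the hint differs. */
	for (int p = SCHED_NORMAL; p <= SCHED_RR; p++)
		printf("policy %d: before=%d after=%d\n",
		       p, rt_policy_before(p), rt_policy_after(p));
	return 0;
}
```

Both forms return the same value for every input, so the patch is purely a hint-level cleanup; the `schedule_debug()` hunk applies the identical transformation to its debug check.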