kernel/sched.c (+25 −19)

@@ -1007,8 +1007,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group,
-			struct task_struct *p, int this_cpu)
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
@@ -1766,7 +1766,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  */
 static inline int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-		struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+		     struct sched_domain *sd, enum idle_type idle,
+		     int *all_pinned)
 {
 	/*
 	 * We do not migrate tasks that are:
@@ -3058,7 +3059,8 @@ asmlinkage void __sched preempt_schedule_irq(void)
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+			  void *key)
 {
 	task_t *p = curr->private;
 	return try_to_wake_up(p, mode, sync);
@@ -3132,7 +3134,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive)
 {
 	unsigned long flags;
 	int sync = 1;
@@ -3323,7 +3326,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
 
@@ -3542,7 +3546,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
+int sched_setscheduler(struct task_struct *p, int policy,
+		       struct sched_param *param)
 {
 	int retval;
 	int oldprio, oldpolicy = -1;
@@ -3625,7 +3630,8 @@ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *pa
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
-static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	int retval;
 	struct sched_param lparam;
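
For context, sched_setscheduler() (the last two hunks) is the in-kernel entry point behind the sched_setscheduler(2) syscall: do_sched_setscheduler() copies the sched_param from userspace and forwards to it, and the EXPORT_SYMBOL_GPL visible above makes it callable from modules. Below is a minimal sketch of an in-kernel caller of this era; make_task_rt() is a hypothetical name for illustration, not part of this patch.

/*
 * Illustrative sketch only, not part of this diff: move a task into
 * the SCHED_FIFO class via the sched_setscheduler() signature shown
 * in the hunk above.
 */
#include <linux/kernel.h>
#include <linux/sched.h>

static void make_task_rt(struct task_struct *p)
{
	/* Valid RT priorities are 1..MAX_RT_PRIO-1; take the highest. */
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	int ret = sched_setscheduler(p, SCHED_FIFO, &param);

	if (ret)
		printk(KERN_WARNING "sched_setscheduler failed: %d\n", ret);
}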
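Likewise, default_wake_function() (third hunk) is the callback that DECLARE_WAITQUEUE() installs in a wait_queue_t, which is why it fetches the sleeping task through curr->private before calling try_to_wake_up(). A sketch of the classic sleeper it serves, under the 2.6-era wait-queue API; my_wq, my_condition and wait_for_event() are hypothetical names for illustration.

/*
 * Illustrative sketch only, not part of this diff: a plain wait-queue
 * sleeper whose entry is woken through default_wake_function().
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static void wait_for_event(void)
{
	/* The macro initializes .func to default_wake_function. */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&my_wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();	/* a waker's wake_up(&my_wq) ends the sleep */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_wq, &wait);
}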