kernel/sched.c (+1 −2)

@@ -7697,7 +7697,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8420,7 +8419,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {

kernel/sched_rt.c (+39 −28)

@@ -571,14 +571,20 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if its throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * get throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	if (rt_se->nr_cpus_allowed == 1)
@@ -591,7 +597,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -607,11 +613,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -619,7 +624,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
 	}
 }
 
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
+	}
+}
@@ -633,32 +657,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -669,9 +676,13 @@ static void requeue_rt_entity(struct rt_rq *rt_rq,
 			      struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_del_init(&rt_se->run_list);
-	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se)) {
+		list_del_init(&rt_se->run_list);
+		list_add_tail(&rt_se->run_list, queue);
+	}
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
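Taken together, the kernel/sched_rt.c changes replace the two open-coded hierarchy walks in enqueue_task_rt() and dequeue_task_rt() with a layered API: __enqueue_rt_entity() and __dequeue_rt_entity() act on a single level, dequeue_rt_stack() now takes a sched_rt_entity instead of a task_struct, and the new enqueue_rt_entity()/dequeue_rt_entity() wrappers walk the whole group hierarchy. The sketch below is a toy model of that core pattern: record back-pointers while walking bottom-up, dequeue top-down, then enqueue bottom-up. It is a standalone C program with invented names (entity, dequeue_stack, for_each_entity); none of it is kernel code.

/* Toy model of the dequeue_rt_stack() pattern; illustrative only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entity {
	const char *name;
	struct entity *parent;	/* up the hierarchy: task -> group -> root */
	struct entity *back;	/* scratch link recorded on the way up     */
	bool queued;
};

/* Walk bottom-up, like for_each_sched_rt_entity(). */
#define for_each_entity(e) for (; (e); (e) = (e)->parent)

/*
 * Record back-pointers bottom-up, then dequeue top-down: the state of
 * an upper entry depends on the entries below it, so removal must start
 * at the top of the hierarchy, exactly as dequeue_rt_stack() does.
 */
static void dequeue_stack(struct entity *e)
{
	struct entity *back = NULL;

	for_each_entity(e) {
		e->back = back;
		back = e;	/* 'back' ends up pointing at the topmost entity */
	}

	for (e = back; e; e = e->back) {
		if (e->queued) {
			e->queued = false;
			printf("dequeue %s\n", e->name);
		}
	}
}

/* Enqueue bottom-up, as the patched enqueue_rt_entity() does. */
static void enqueue(struct entity *e)
{
	dequeue_stack(e);
	for_each_entity(e) {
		e->queued = true;
		printf("enqueue %s\n", e->name);
	}
}

int main(void)
{
	struct entity root  = { .name = "root" };
	struct entity group = { .name = "group", .parent = &root };
	struct entity task  = { .name = "task",  .parent = &group };

	enqueue(&task);	/* prints: enqueue task, group, root (bottom-up) */
	printf("--\n");
	enqueue(&task);	/* dequeues root, group, task first, then re-enqueues */
	return 0;
}

On the second call this prints the top-down dequeue (root, group, task) followed by the bottom-up re-enqueue, which is the same ordering constraint the kernel comment describes: because the prio of an upper entry depends on the lower entries, entries must be removed top-down.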
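Beyond the refactor, the one behavioural change on the enqueue path is the widened guard in __enqueue_rt_entity(): a group entity is now skipped when its runqueue is empty, not only when it is throttled, because a child group that gets throttled dequeues itself and can leave its parent with rt_nr_running == 0. Below is a condensed restatement of that condition, using an invented stand-in type (toy_rt_rq is not the kernel's struct rt_rq):

#include <stdbool.h>

/* Stand-in for struct rt_rq with only the two fields the guard reads. */
struct toy_rt_rq {
	bool throttled;			/* models rt_rq_throttled(group_rq) */
	unsigned int rt_nr_running;	/* models group_rq->rt_nr_running   */
};

/*
 * Mirrors the patched check in __enqueue_rt_entity():
 *
 *	if (group_rq && (rt_rq_throttled(group_rq) ||
 *			 !group_rq->rt_nr_running))
 *		return;
 *
 * A NULL group_rq means the entity is a plain task, which is always
 * eligible for enqueueing.
 */
static bool skip_group_enqueue(const struct toy_rt_rq *group_rq)
{
	return group_rq &&
	       (group_rq->throttled || !group_rq->rt_nr_running);
}

int main(void)
{
	struct toy_rt_rq empty = { .throttled = false, .rt_nr_running = 0 };

	return skip_group_enqueue(&empty) ? 0 : 1;	/* exits 0: skipped */
}

Keeping throttled or empty groups off the priority array is presumably also why the new dequeue_rt_entity() applies the matching rt_nr_running test before re-enqueueing ancestors: only groups that still have runnable members go back on the queue.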