
Commit f4ebcbc0 authored by Kirill Tkhai, committed by Ingo Molnar

sched/rt: Subtract number of tasks in throttled queues from rq->nr_running



Now rq->rt can be in a dequeued or an enqueued state. We add a new
member, rt_rq->rt_queued, to indicate this. The member is used only
for the top queue, rq->rt.

The goal is to fit the generic scheme used in the deadline and fair
classes, i.e. a throttled rt_rq's rt_nr_running is subtracted from
rq->nr_running.
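For illustration, the accounting can be distilled into a standalone
sketch. The helper names mirror the patch, but the reduced structs,
assert() checks, and main() driver are stand-ins for the kernel's
struct rq / struct rt_rq and BUG_ON(), not the real API:

/* Minimal model of the rt_queued accounting; illustrative types only. */
#include <assert.h>
#include <stdio.h>

struct rt_rq { int rt_nr_running; int rt_throttled; int rt_queued; };
struct rq { int nr_running; struct rt_rq rt; };

/* Stop counting the top rt_rq's tasks in rq->nr_running. */
static void dequeue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (!rt_rq->rt_queued)
		return;
	assert(rq->nr_running >= rt_rq->rt_nr_running);
	rq->nr_running -= rt_rq->rt_nr_running;
	rt_rq->rt_queued = 0;
}

/* Count them again, unless the queue is throttled or empty. */
static void enqueue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (rt_rq->rt_queued)
		return;
	if (rt_rq->rt_throttled || !rt_rq->rt_nr_running)
		return;
	rq->nr_running += rt_rq->rt_nr_running;
	rt_rq->rt_queued = 1;
}

int main(void)
{
	struct rq rq = { 0, { 0, 0, 0 } };

	rq.rt.rt_nr_running = 3;	/* three RT tasks become runnable */
	enqueue_top_rt_rq(&rq);
	printf("nr_running = %d\n", rq.nr_running);	/* 3 */

	rq.rt.rt_throttled = 1;		/* runtime budget exhausted */
	dequeue_top_rt_rq(&rq);
	printf("nr_running = %d\n", rq.nr_running);	/* 0 */
	return 0;
}

Once the top queue is throttled, its tasks vanish from rq->nr_running
until enqueue_top_rt_rq() is called again, matching what the deadline
and fair classes already do.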

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835300.18748.33.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 653d07a6
kernel/sched/rt.c +61 −12
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif
+	/* We start in dequeued state, because no RT tasks are queued */
+	rt_rq->rt_queued = 0;
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
@@ -404,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -465,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
-		if (rt_se && !on_rt_rq(rt_se))
+		if (!rt_se)
+			enqueue_top_rt_rq(rt_rq);
+		else if (!on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
+
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -479,7 +487,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (rt_se && on_rt_rq(rt_se))
+	if (!rt_se)
+		dequeue_top_rt_rq(rt_rq);
+	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
 }
 
@@ -545,12 +555,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (!rt_rq->rt_nr_running)
+		return;
+
+	enqueue_top_rt_rq(rt_rq);
+	resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+	dequeue_top_rt_rq(rt_rq);
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -935,6 +951,38 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (!rt_rq->rt_queued)
+		return;
+
+	BUG_ON(!rq->nr_running);
+
+	rq->nr_running -= rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (rt_rq->rt_queued)
+		return;
+	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+		return;
+
+	rq->nr_running += rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
@@ -1143,6 +1191,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 		back = rt_se;
 	}
 
+	dequeue_top_rt_rq(rt_rq_of_se(back));
+
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se);
@@ -1151,13 +1201,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
 		__enqueue_rt_entity(rt_se, head);
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 
 	for_each_sched_rt_entity(rt_se) {
@@ -1166,6 +1221,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		if (rt_rq && rt_rq->rt_nr_running)
 			__enqueue_rt_entity(rt_se, false);
 	}
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1183,8 +1239,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1195,8 +1249,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_nr_running(rq);
 }
 
 /*
@@ -1401,10 +1453,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (prev->sched_class == &rt_sched_class)
 		update_curr_rt(rq);
 
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
+	if (!rt_rq->rt_queued)
 		return NULL;
 
 	put_prev_task(rq, prev);
kernel/sched/sched.h +2 −0
@@ -409,6 +409,8 @@ struct rt_rq {
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
+	int rt_queued;
+
 	int rt_throttled;
 	u64 rt_time;
 	u64 rt_runtime;