
Commit fa717060 authored by Peter Zijlstra, committed by Ingo Molnar

sched: sched_rt_entity



Move the task_struct members specific to rt scheduling together.
A future optimization could be to put sched_entity and sched_rt_entity
into a union.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8eb703e4
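
The commit message leaves the union idea as a future optimization. Below is a minimal, compilable userspace sketch of that idea, using simplified stand-in types rather than the kernel's real sched_entity/sched_rt_entity definitions; it only illustrates how an anonymous union would let the two per-class entities share storage while preserving the p->se and p->rt member names used in the diffs below.

/*
 * Illustrative userspace sketch only -- not part of this commit.  The
 * structs are simplified stand-ins for the kernel's sched_entity and
 * sched_rt_entity.  The anonymous union is the "future optimization"
 * mentioned above: at any given time a task is run by one scheduling
 * class, so the CFS and RT entities could in principle share storage.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct sched_entity {			/* stand-in for the CFS entity */
	struct list_head group_node;
	unsigned long long vruntime;
};

struct sched_rt_entity {		/* as introduced by this commit */
	struct list_head run_list;
	unsigned int time_slice;
};

struct task_sketch {			/* hypothetical layout, not the kernel's */
	int prio;
	union {				/* se and rt would overlap in memory */
		struct sched_entity se;
		struct sched_rt_entity rt;
	};
};

int main(void)
{
	struct task_sketch t = { .prio = 120 };

	t.rt.time_slice = 100;		/* accessed exactly as p->rt.time_slice */
	printf("se: %zu bytes, rt: %zu bytes, task_sketch: %zu bytes\n",
	       sizeof(struct sched_entity), sizeof(struct sched_rt_entity),
	       sizeof(struct task_sketch));
	return 0;
}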
include/linux/init_task.h +3 −2
@@ -133,9 +133,10 @@ extern struct group_info init_groups;
	.nr_cpus_allowed = NR_CPUS,					\
	.mm		= NULL,						\
	.active_mm	= &init_mm,					\
-	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
+	.rt		= {						\
+		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
+		.time_slice	= HZ, },				\
	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
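
For readers less familiar with the initializer shape above: the INIT_TASK change replaces two flat fields with a C99 nested designated initializer that fills the embedded sched_rt_entity in place. A small standalone illustration, with simplified stand-in types and an arbitrary HZ value rather than the kernel's:

#include <stdio.h>

#define HZ 250				/* arbitrary value for the sketch */

struct list_head { struct list_head *next, *prev; };

struct sched_rt_entity {
	struct list_head run_list;
	unsigned int time_slice;
};

struct task_sketch {
	int ioprio;
	struct sched_rt_entity rt;
};

/* Nested designated initializer, same shape as the .rt = { ... } above;
 * the self-referencing pointers mirror what LIST_HEAD_INIT expands to. */
static struct task_sketch init_task_sketch = {
	.ioprio	= 0,
	.rt	= {
		.run_list	= { &init_task_sketch.rt.run_list,
				    &init_task_sketch.rt.run_list },
		.time_slice	= HZ,
	},
};

int main(void)
{
	printf("initial time_slice = %u\n", init_task_sketch.rt.time_slice);
	return 0;
}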
include/linux/sched.h +6 −2
@@ -929,6 +929,11 @@ struct sched_entity {
#endif
};

+struct sched_rt_entity {
+	struct list_head run_list;
+	unsigned int time_slice;
+};
+
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
@@ -945,9 +950,9 @@ struct task_struct {
#endif

	int prio, static_prio, normal_prio;
-	struct list_head run_list;
	const struct sched_class *sched_class;
	struct sched_entity se;
+	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
@@ -972,7 +977,6 @@ struct task_struct {
	unsigned int policy;
	cpumask_t cpus_allowed;
	int nr_cpus_allowed;
-	unsigned int time_slice;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
kernel/sched.c +1 −1
@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_struct *p)
	p->se.wait_max			= 0;
#endif

-	INIT_LIST_HEAD(&p->run_list);
+	INIT_LIST_HEAD(&p->rt.run_list);
	p->se.on_rq = 0;

#ifdef CONFIG_PREEMPT_NOTIFIERS
kernel/sched_rt.c +10 −10
@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

-	list_add_tail(&p->run_list, array->queue + p->prio);
+	list_add_tail(&p->rt.run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)

	update_curr_rt(rq);

-	list_del(&p->run_list);
+	list_del(&p->rt.run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);
@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

-	list_move_tail(&p->run_list, array->queue + p->prio);
+	list_move_tail(&p->rt.run_list, array->queue + p->prio);
}

static void
@@ -212,7 +212,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
		return NULL;

	queue = array->queue + idx;
-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);

	next->se.exec_start = rq->clock;

@@ -261,14 +261,14 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct,
-				  run_list);
+				  rt.run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}
@@ -282,7 +282,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

-	list_for_each_entry(next, queue, run_list) {
+	list_for_each_entry(next, queue, rt.run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}
@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
	if (p->policy != SCHED_RR)
		return;

-	if (--p->time_slice)
+	if (--p->rt.time_slice)
		return;

-	p->time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
-	if (p->run_list.prev != p->run_list.next) {
+	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
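
One reason the mechanical s/run_list/rt.run_list/ conversion above is safe: list_entry() is container_of(), and offsetof() accepts a nested member designator, so list_entry(queue->next, struct task_struct, rt.run_list) still recovers the enclosing task from the embedded list node. A compilable userspace sketch with a simplified list_entry and stand-in types (not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified equivalent of the kernel's list_entry()/container_of(). */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_rt_entity {
	struct list_head run_list;
	unsigned int time_slice;
};

struct task_sketch {
	int prio;
	struct sched_rt_entity rt;
};

int main(void)
{
	struct task_sketch t = { .prio = 10 };
	struct list_head *node = &t.rt.run_list;	/* as queued on a runqueue */

	/* offsetof(struct task_sketch, rt.run_list) handles the nested path. */
	struct task_sketch *p = list_entry(node, struct task_sketch, rt.run_list);

	printf("prio = %d\n", p->prio);			/* prints 10 */
	return 0;
}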
mm/oom_kill.c +1 −1
@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
-	p->time_slice = HZ;
+	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);