Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8875125e authored by Kirill Tkhai, committed by Ingo Molnar
Browse files

sched: Transform resched_task() into resched_curr()



We always use resched_task() with rq->curr argument.
It's not possible to reschedule any task but rq's current.

The patch introduces resched_curr(struct rq *) to
replace all of the repeating patterns. The main aim
is cleanup, but there is a little size profit too:

  (before)
	$ size kernel/sched/built-in.o
	   text	   data	    bss	    dec	    hex	filename
	155274	  16445	   7042	 178761	  2ba49	kernel/sched/built-in.o

	$ size vmlinux
	   text	   data	    bss	    dec	    hex	filename
	7411490	1178376	 991232	9581098	 92322a	vmlinux

  (after)
	$ size kernel/sched/built-in.o
	   text	   data	    bss	    dec	    hex	filename
	155130	  16445	   7042	 178617	  2b9b9	kernel/sched/built-in.o

	$ size vmlinux
	   text	   data	    bss	    dec	    hex	filename
	7411362	1178376	 991232	9580970	 9231aa	vmlinux

	I was choosing between resched_curr() and resched_rq(),
	and the first name looks better to me.

A little lie in Documentation/trace/ftrace.txt. I have not
actually collected the tracing again. With a hope the patch
won't make execution times much worse :)

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140628200219.1778.18735.stgit@localhost


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 466af29b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1515,7 +1515,7 @@ Doing the same with chrt -r 5 and function-trace set.
  <idle>-0       3d.h4    1us+:      0:120:R   + [003]  2448: 94:R sleep
  <idle>-0       3d.h4    2us : ttwu_do_activate.constprop.87 <-try_to_wake_up
  <idle>-0       3d.h3    3us : check_preempt_curr <-ttwu_do_wakeup
  <idle>-0       3d.h3    3us : resched_task <-check_preempt_curr
  <idle>-0       3d.h3    3us : resched_curr <-check_preempt_curr
  <idle>-0       3dNh3    4us : task_woken_rt <-ttwu_do_wakeup
  <idle>-0       3dNh3    4us : _raw_spin_unlock <-try_to_wake_up
  <idle>-0       3dNh3    4us : sub_preempt_count <-_raw_spin_unlock
+3 −3
Original line number Diff line number Diff line
@@ -2786,7 +2786,7 @@ static inline bool __must_check current_set_polling_and_test(void)

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

@@ -2804,7 +2804,7 @@ static inline bool __must_check current_clr_polling_and_test(void)

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

@@ -2836,7 +2836,7 @@ static inline void current_clr_polling(void)
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_task() */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
+13 −12
Original line number Diff line number Diff line
@@ -589,30 +589,31 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif

/*
 * resched_task - mark a task 'to be rescheduled now'.
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_task(struct task_struct *p)
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&task_rq(p)->lock);
	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(p))
	if (test_tsk_need_resched(curr))
		return;

	cpu = task_cpu(p);
	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(p);
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(p))
	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
@@ -625,7 +626,7 @@ void resched_cpu(int cpu)

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

@@ -1027,7 +1028,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				resched_curr(rq);
				break;
			}
		}
@@ -3073,7 +3074,7 @@ void set_user_nice(struct task_struct *p, long nice)
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
			resched_curr(rq);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
@@ -4299,7 +4300,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
			resched_curr(p_rq);
	}

out_unlock:
@@ -7106,7 +7107,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
	__setscheduler(rq, p, &attr);
	if (on_rq) {
		enqueue_task(rq, p, 0);
		resched_task(rq->curr);
		resched_curr(rq);
	}

	check_class_changed(rq, p, prev_class, old_prio);
+8 −8
Original line number Diff line number Diff line
@@ -535,7 +535,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
			resched_curr(rq);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
@@ -634,7 +634,7 @@ static void update_curr_dl(struct rq *rq)
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
			resched_curr(rq);
	}

	/*
@@ -964,7 +964,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
	resched_curr(rq);
}

static int pull_dl_task(struct rq *this_rq);
@@ -979,7 +979,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		resched_curr(rq);
		return;
	}

@@ -1333,7 +1333,7 @@ static int push_dl_task(struct rq *rq)
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		resched_curr(rq);
		return 0;
	}

@@ -1373,7 +1373,7 @@ static int push_dl_task(struct rq *rq)
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);
	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

@@ -1632,14 +1632,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has a earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
		resched_curr(rq);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
+10 −10
Original line number Diff line number Diff line
@@ -2923,7 +2923,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		resched_curr(rq_of(cfs_rq));
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
@@ -2947,7 +2947,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
		return;

	if (delta > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
		resched_curr(rq_of(cfs_rq));
}

static void
@@ -3087,7 +3087,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		resched_curr(rq_of(cfs_rq));
		return;
	}
	/*
@@ -3278,7 +3278,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
	 * hierarchy can be throttled
	 */
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
		resched_task(rq_of(cfs_rq)->curr);
		resched_curr(rq_of(cfs_rq));
}

static __always_inline
@@ -3438,7 +3438,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

	/* determine whether we need to wake up potentially idle cpu */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_task(rq->curr);
		resched_curr(rq);
}

static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -3897,7 +3897,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
				resched_curr(rq);
			return;
		}

@@ -4766,7 +4766,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
	return;

preempt:
	resched_task(curr);
	resched_curr(rq);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
@@ -7457,7 +7457,7 @@ static void task_fork_fair(struct task_struct *p)
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
		resched_curr(rq);
	}

	se->vruntime -= cfs_rq->min_vruntime;
@@ -7482,7 +7482,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
			resched_curr(rq);
	} else
		check_preempt_curr(rq, p, 0);
}
@@ -7545,7 +7545,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
		resched_curr(rq);
	else
		check_preempt_curr(rq, p, 0);
}
Loading