
Commit 1ef9206d authored by Srivatsa Vaddagiri, committed by Steve Muckle

sched: code cleanup



Avoid the long if() block of code in set_task_cpu(): move that code into its own function, fixup_busy_time().

Change-Id: Ia80a99867ff9c23a614635e366777759abaccee4
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
parent cf0d1f54
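The patch follows a common kernel cleanup pattern: a long, config-guarded block inside a hot function is hoisted into a named helper under the same #if, and an empty static inline stub is supplied in the #else branch so the call site compiles unchanged either way. A minimal userspace sketch of that shape (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define CONFIG_FEATURE 1	/* stands in for CONFIG_SCHED_FREQ_INPUT/CONFIG_SCHED_HMP */

#if defined(CONFIG_FEATURE)
/* The formerly-inline block becomes a named function under the same guard. */
static void fixup_stats(int task, int new_cpu)
{
	printf("fix up busy-time stats for task %d on cpu %d\n", task, new_cpu);
}
#else
/* Empty stub: the call site compiles unchanged when the feature is off,
 * and the compiler folds the call away entirely. */
static inline void fixup_stats(int task, int new_cpu) { }
#endif

static void set_cpu(int task, int new_cpu)
{
	/* The caller shrinks to one unconditional call; no #if remains here. */
	fixup_stats(task, new_cpu);
}

int main(void)
{
	set_cpu(1, 3);
	return 0;
}

The payoff is visible in the diff below: set_task_cpu() loses its #if defined(...) block entirely, and the new fixup_busy_time() stub in the #else branch keeps the non-HMP build working.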
+87 −89
@@ -1673,74 +1673,8 @@ static int register_sched_callback(void)
  */
 core_initcall(register_sched_callback);
 
-#else	/* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
-
-static inline void
-update_task_ravg(struct task_struct *p, struct rq *rq,
-			 int event, u64 wallclock, int *long_sleep)
-{
-}
-
-static inline int rq_freq_margin(struct rq *rq)
+static void fixup_busy_time(struct task_struct *p, int new_cpu)
 {
-	return INT_MAX;
-}
-
-static inline void init_cpu_efficiency(void) {}
-
-static inline void mark_task_starting(struct task_struct *p) {}
-
-static inline void set_window_start(struct rq *rq) {}
-
-static inline void migrate_sync_cpu(int cpu) {}
-
-#endif	/* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
-
-#ifdef CONFIG_SMP
-void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-{
-#ifdef CONFIG_SCHED_DEBUG
-	/*
-	 * We should never call set_task_cpu() on a blocked task,
-	 * ttwu() will sort out the placement.
-	 */
-	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
-
-#ifdef CONFIG_LOCKDEP
-	/*
-	 * The caller should hold either p->pi_lock or rq->lock, when changing
-	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-	 *
-	 * sched_move_task() holds both and thus holding either pins the cgroup,
-	 * see task_group().
-	 *
-	 * Furthermore, all task_rq users should acquire both locks, see
-	 * task_rq_lock().
-	 */
-	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-				      lockdep_is_held(&task_rq(p)->lock)));
-#endif
-#endif
-
-	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
-
-	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
-		if (p->sched_class->migrate_task_rq)
-			p->sched_class->migrate_task_rq(p, new_cpu);
-		p->se.nr_migrations++;
-		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
-
-#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
-		if (p->on_rq || p->state == TASK_WAKING) {
-			struct rq *src_rq = task_rq(p);
-			struct rq *dest_rq = cpu_rq(new_cpu);
-			u64 wallclock;
+	struct rq *src_rq = task_rq(p);
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	u64 wallclock;
@@ -1784,10 +1718,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	}
 
 	if (p->ravg.sum) {
-				src_rq->curr_runnable_sum -=
-					p->ravg.partial_demand;
-				dest_rq->curr_runnable_sum +=
-					p->ravg.partial_demand;
+		src_rq->curr_runnable_sum -= p->ravg.partial_demand;
+		dest_rq->curr_runnable_sum += p->ravg.partial_demand;
 	}
 
 	if (p->ravg.prev_window) {
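The second hunk re-wraps the busy-time transfer now that it sits at a shallower nesting depth: the task's partial demand is subtracted from the source runqueue's current runnable sum and added to the destination's. The arithmetic in miniature (a standalone sketch; struct and field names are invented here, not the kernel's):

#include <stdio.h>

struct rq_stats { long runnable_sum; };

/* Move a task's contribution from one runqueue's sum to another's,
 * mirroring the src_rq/dest_rq adjustments in the hunk above. */
static void transfer(struct rq_stats *src, struct rq_stats *dst, long demand)
{
	src->runnable_sum -= demand;
	dst->runnable_sum += demand;
}

int main(void)
{
	struct rq_stats a = { .runnable_sum = 100 }, b = { .runnable_sum = 40 };
	transfer(&a, &b, 25);
	printf("src=%ld dst=%ld\n", a.runnable_sum, b.runnable_sum);
	return 0;
}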
@@ -1806,7 +1738,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	if (cpumask_test_cpu(new_cpu,
 			     &src_rq->freq_domain_cpumask))
-				goto done;
+		return;
 
 	/* Evaluate possible frequency notifications for
 	 * source and destination CPUs in different frequency
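The third hunk shows a secondary benefit of the extraction: inside the old set_task_cpu(), bailing out of the guarded block early meant jumping forward with goto to a done: label, whereas the same exit from a dedicated function is a plain return. A standalone sketch of that before/after (illustrative names, not the kernel code):

#include <stdio.h>

/* Before: the logic is inlined, so an early exit must jump past the
 * remainder of the block to a label. */
static void caller_before(int same_domain)
{
	if (same_domain)
		goto done;
	printf("notify governors about the load shift\n");
done:
	printf("remainder of the caller\n");
}

/* After: the logic is its own function, so the early exit is a return. */
static void notify_if_needed(int same_domain)
{
	if (same_domain)
		return;		/* was: goto done */
	printf("notify governors about the load shift\n");
}

static void caller_after(int same_domain)
{
	notify_if_needed(same_domain);
	printf("remainder of the caller\n");
}

int main(void)
{
	caller_before(1);
	caller_after(1);
	return 0;
}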
@@ -1822,14 +1754,80 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		atomic_notifier_call_chain(
 			&load_alert_notifier_head, 0,
 			(void *)(long)task_cpu(p));
-	}
-#endif
-
-#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
-done:
-	;
-#endif
+}
+
+#else	/* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
+
+static inline void
+update_task_ravg(struct task_struct *p, struct rq *rq,
+			 int event, u64 wallclock, int *long_sleep)
+{
+}
+
+static inline int rq_freq_margin(struct rq *rq)
+{
+	return INT_MAX;
+}
+
+static inline void init_cpu_efficiency(void) {}
+
+static inline void mark_task_starting(struct task_struct *p) {}
+
+static inline void set_window_start(struct rq *rq) {}
+
+static inline void migrate_sync_cpu(int cpu) {}
+
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) {}
+
+#endif	/* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
+
+#ifdef CONFIG_SMP
+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	/*
+	 * We should never call set_task_cpu() on a blocked task,
+	 * ttwu() will sort out the placement.
+	 */
+	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock, when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * sched_move_task() holds both and thus holding either pins the cgroup,
+	 * see task_group().
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+				      lockdep_is_held(&task_rq(p)->lock)));
+#endif
+#endif
+
+	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
+
+	if (task_cpu(p) != new_cpu) {
+		struct task_migration_notifier tmn;
+
+		if (p->sched_class->migrate_task_rq)
+			p->sched_class->migrate_task_rq(p, new_cpu);
+		p->se.nr_migrations++;
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+		tmn.task = p;
+		tmn.from_cpu = task_cpu(p);
+		tmn.to_cpu = new_cpu;
+
+		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
+
+		if (p->on_rq || p->state == TASK_WAKING)
+			fixup_busy_time(p, new_cpu);
 	}
 
 	__set_task_cpu(p, new_cpu);
 }
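One design point worth noting in the final form: the cheap p->on_rq || p->state == TASK_WAKING test stays in set_task_cpu() rather than moving into fixup_busy_time(), so the helper is only entered for tasks whose window statistics actually need migrating, and its body can assume a runnable or waking task throughout. A small sketch of that division of labor (types and names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct task { bool on_rq; bool waking; };

/* Assumes the caller has already established on_rq || waking. */
static void fixup(struct task *t, int new_cpu)
{
	printf("move accounting for task (on_rq=%d) to cpu %d\n",
	       t->on_rq, new_cpu);
}

static void set_cpu(struct task *t, int new_cpu)
{
	/* Cheap filter at the call site, mirroring
	 * p->on_rq || p->state == TASK_WAKING in the patch. */
	if (t->on_rq || t->waking)
		fixup(t, new_cpu);
}

int main(void)
{
	struct task t = { .on_rq = true, .waking = false };
	set_cpu(&t, 2);
	return 0;
}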