Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 25d5c94d authored by Srivatsa Vaddagiri, committed by Syed Rameez Mustafa
Browse files

sched: window-stats: Code cleanup



add_task_demand() and the 'long_sleep' calculation in it are not strictly
required. rq_freq_margin() checks whether a frequency change is needed,
which removes the need for the long_sleep calculation. Once that is
removed, the need for add_task_demand() vanishes as well.

Change-Id: I936540c06072eb8238fc18754aba88789ee3c9f5
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
parent e1ea811d
Loading
Loading
Loading
Loading
+32 −48
Original line number Diff line number Diff line
@@ -1391,29 +1391,8 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
	return delta;
}

/*
 * We depend on task's partial_demand to be always represented in
 * rq->curr_runnable_sum and its demand to be represented in
 * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to run
 * (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with sched_account_wait_time ==
 * 0, ensure this dependency is met.
 *
 * Returns 0 when the task already contributes to both the current and
 * previous window sums (nothing for the caller to add); returns 1 otherwise.
 * As a side effect, when the caller passes a non-NULL long_sleep pointer and
 * the task's last mark_start is more than one full window behind
 * rq->window_start, *long_sleep is set to 1 (it is never cleared here —
 * callers are expected to pass in a zero-initialized flag).
 *
 * NOTE(review): the 'event' parameter is unused in this body — presumably
 * kept for signature symmetry with other accounting helpers; confirm before
 * relying on it.
 */
static inline int add_task_demand(int event, struct task_struct *p,
		 struct rq *rq, int *long_sleep)
{
	/* Both window contributions already accounted — caller need not add. */
	if ((p->ravg.flags & CURR_WINDOW_CONTRIB) &&
		(p->ravg.flags & PREV_WINDOW_CONTRIB))
			return 0;

	/*
	 * Task slept past at least one whole window (mark_start lags
	 * window_start by more than sched_ravg_window) — flag it so the
	 * caller can treat its stale demand accordingly.
	 */
	if (long_sleep && (rq->window_start > p->ravg.mark_start &&
		rq->window_start - p->ravg.mark_start > sched_ravg_window))
			*long_sleep = 1;

	return 1;
}

static void update_task_ravg(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock, int *long_sleep, u64 irqtime)
	     int event, u64 wallclock, u64 irqtime)
{
	u32 window_size = sched_ravg_window;
	int update_sum, new_window;
@@ -1528,7 +1507,14 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
		mark_start = window_start;
	} while (new_window);

	if (add_task_demand(event, p, rq, long_sleep)) {
	/*
	 * We depend on task's partial_demand to be always represented in
	 * rq->curr_runnable_sum and its demand to be represented in
	 * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to
	 * run (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with
	 * sched_account_wait_time == 0, ensure this dependency is met.
	 */

	if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
		rq->curr_runnable_sum += p->ravg.partial_demand;
		p->ravg.flags |= CURR_WINDOW_CONTRIB;
@@ -1538,7 +1524,6 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
		rq->prev_runnable_sum += p->ravg.demand;
		p->ravg.flags |= PREV_WINDOW_CONTRIB;
	}
	}

done:
	trace_sched_update_task_ravg(p, rq, event, wallclock);
@@ -1556,7 +1541,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, NULL, delta);
	update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, delta);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

@@ -1601,7 +1586,7 @@ static inline void mark_task_starting(struct task_struct *p)
		return;
	}

	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	p->ravg.mark_start = wallclock;
	rq->prev_runnable_sum += p->ravg.demand;
	rq->curr_runnable_sum += p->ravg.partial_demand;
@@ -1654,7 +1639,7 @@ unsigned long sched_get_busy(int cpu)
	 * that the window stats are current by doing an update.
	 */
	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
@@ -1917,7 +1902,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
	BUG_ON(!new_freq);

	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	cpu_rq(cpu)->cur_freq = new_freq;
	raw_spin_unlock_irqrestore(&rq->lock, flags);

@@ -1970,9 +1955,9 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)

	update_task_ravg(task_rq(p)->curr, task_rq(p),
			 TASK_UPDATE,
			 wallclock, NULL, 0);
			 wallclock, 0);
	update_task_ravg(dest_rq->curr, dest_rq,
			 TASK_UPDATE, wallclock, NULL, 0);
			 TASK_UPDATE, wallclock, 0);

	/*
	 * In case of migration of task on runqueue, on_rq =1,
@@ -1989,7 +1974,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
	}

	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
			 wallclock, NULL, 0);
			 wallclock, 0);

	/*
	 * Remove task's load from rq as its now migrating to
@@ -2044,7 +2029,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)

static inline void
update_task_ravg(struct task_struct *p, struct rq *rq,
			 int event, u64 wallclock, int *long_sleep, u64 irqtime)
			 int event, u64 wallclock, u64 irqtime)
{
}

@@ -2566,6 +2551,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
}

__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
@@ -2589,7 +2575,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	unsigned long src_cpu;
#ifdef CONFIG_SMP
	struct rq *rq;
	int long_sleep = 0;
	u64 wallclock;
#endif

@@ -2627,8 +2612,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)

	raw_spin_lock(&rq->lock);
	wallclock = sched_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock,  &long_sleep, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
@@ -2695,7 +2680,6 @@ out:
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);
	int long_sleep = 0;

	if (rq != this_rq() || p == current) {
		printk_sched("%s: Failed to wakeup task %d (%s), rq = %p, this_rq = %p, p = %p, current = %p\n",
@@ -2718,8 +2702,8 @@ static void try_to_wake_up_local(struct task_struct *p)
	if (!p->on_rq) {
		u64 wallclock = sched_clock();

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
	}

@@ -3915,7 +3899,7 @@ void scheduler_tick(void)
	update_rq_clock(rq);
	update_cpu_load_active(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();
@@ -4184,8 +4168,8 @@ need_resched:
	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	wallclock = sched_clock();
	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL, 0);
	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL, 0);
	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;