Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4dd4264e authored by Linux Build Service Account; committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: window-stats: Account interrupt handling time as busy time"

parents b08ab899 4da7e167
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -274,6 +274,7 @@ enum task_event {
	TASK_WAKE       = 2,
	TASK_MIGRATE    = 3,
	TASK_UPDATE     = 4,
	IRQ_UPDATE	= 5,
};

#include <linux/spinlock.h>
+36 −17
Original line number Diff line number Diff line
@@ -91,7 +91,8 @@
#include <trace/events/sched.h>

/*
 * Human-readable names for enum task_event, indexed by event value, so the
 * array order must stay in sync with the enum.  This change appends
 * "IRQ_UPDATE" for the new IRQ_UPDATE event.
 * NOTE(review): diff view — the first continuation line below is the old
 * version; the following two lines are its replacement.
 */
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
				  "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE"};
				  "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
				"IRQ_UPDATE"};

/* Notifier chains; presumably consumed by migration/load-alert listeners
 * elsewhere in the tree — registrations are not visible in this hunk. */
ATOMIC_NOTIFIER_HEAD(migration_notifier_head);
ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
@@ -1375,7 +1376,7 @@ static inline int add_task_demand(int event, struct task_struct *p,
}

static void update_task_ravg(struct task_struct *p, struct rq *rq,
			     int event, u64 wallclock, int *long_sleep)
	     int event, u64 wallclock, int *long_sleep, u64 irqtime)
{
	u32 window_size = sched_ravg_window;
	int update_sum, new_window;
@@ -1396,9 +1397,13 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
	window_start = rq->window_start;

	if (is_idle_task(p)) {
		if (!(event == PUT_PREV_TASK && cpu_is_waiting_on_io(rq)))
		if (!irqtime && !(event == PUT_PREV_TASK &&
					cpu_is_waiting_on_io(rq)))
			goto done;

		if (irqtime && !cpu_is_waiting_on_io(rq))
			mark_start = wallclock - irqtime;

		if (window_start > mark_start) {
			delta = window_start - mark_start;
			if (delta > window_size) {
@@ -1504,6 +1509,20 @@ done:
	p->ravg.mark_start = wallclock;
}

/*
 * Fold @delta nanoseconds of interrupt-handling time into the per-window
 * busy-time statistics of @cpu, by feeding it through update_task_ravg()
 * as an IRQ_UPDATE event at @wallclock.
 *
 * Only interrupt time that hit the idle task is accounted here; for any
 * other current task the call is a no-op (presumably that time is already
 * captured by the regular update_task_ravg() paths — the caller in
 * irqtime_account_irq() is not fully visible in this diff).
 */
void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!is_idle_task(curr))
		return;

	/* rq->lock serializes window-statistics updates for this runqueue. */
	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, NULL, delta);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
	return SCHED_LOAD_SCALE;
@@ -1546,7 +1565,7 @@ static inline void mark_task_starting(struct task_struct *p)
		return;
	}

	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
	p->ravg.mark_start = wallclock;
	rq->prev_runnable_sum += p->ravg.demand;
	rq->curr_runnable_sum += p->ravg.partial_demand;
@@ -1595,7 +1614,7 @@ unsigned long sched_get_busy(int cpu)
	 * that the window stats are current by doing an update.
	 */
	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
@@ -1850,7 +1869,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
	BUG_ON(!new_freq);

	raw_spin_lock_irqsave(&rq->lock, flags);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	cpu_rq(cpu)->cur_freq = new_freq;
	raw_spin_unlock_irqrestore(&rq->lock, flags);

@@ -1903,9 +1922,9 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)

	update_task_ravg(task_rq(p)->curr, task_rq(p),
			 TASK_UPDATE,
			 wallclock, NULL);
			 wallclock, NULL, 0);
	update_task_ravg(dest_rq->curr, dest_rq,
			 TASK_UPDATE, wallclock, NULL);
			 TASK_UPDATE, wallclock, NULL, 0);

	/*
	 * In case of migration of task on runqueue, on_rq =1,
@@ -1922,7 +1941,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
	}

	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
			 wallclock, NULL);
			 wallclock, NULL, 0);

	/*
	 * Remove task's load from rq as its now migrating to
@@ -1977,7 +1996,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)

/*
 * No-op stub of update_task_ravg() for configurations where window-based
 * task load tracking is compiled out.
 * NOTE(review): diff view — the hunk shows both the old parameter list and
 * its replacement, which adds the u64 irqtime argument.
 */
static inline void
update_task_ravg(struct task_struct *p, struct rq *rq,
			 int event, u64 wallclock, int *long_sleep)
			 int event, u64 wallclock, int *long_sleep, u64 irqtime)
{
}

@@ -2560,8 +2579,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)

	raw_spin_lock(&rq->lock);
	wallclock = sched_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
	update_task_ravg(p, rq, TASK_WAKE, wallclock,  &long_sleep);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock,  &long_sleep, 0);
	raw_spin_unlock(&rq->lock);

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
@@ -2651,8 +2670,8 @@ static void try_to_wake_up_local(struct task_struct *p)
	if (!p->on_rq) {
		u64 wallclock = sched_clock();

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep);
		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
	}

@@ -3848,7 +3867,7 @@ void scheduler_tick(void)
	update_rq_clock(rq);
	update_cpu_load_active(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();
@@ -4117,8 +4136,8 @@ need_resched:
	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	wallclock = sched_clock();
	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL);
	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL);
	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL, 0);
	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL, 0);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

+9 −3
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@ void irqtime_account_irq(struct task_struct *curr)
	unsigned long flags;
	s64 delta;
	int cpu;
	u64 wallclock;

	if (!sched_clock_irqtime)
		return;
@@ -56,7 +57,8 @@ void irqtime_account_irq(struct task_struct *curr)
	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	wallclock = sched_clock_cpu(cpu);
	delta = wallclock - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
@@ -66,10 +68,14 @@ void irqtime_account_irq(struct task_struct *curr)
	 * in that case, so as not to confuse scheduler with a special task
	 * that do not consume any time, but still wants to run.
	 */
	if (hardirq_count())
	if (hardirq_count()) {
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		sched_account_irqtime(cpu, curr, delta, wallclock);
	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
		__this_cpu_add(cpu_softirq_time, delta);
		sched_account_irqtime(cpu, curr, delta, wallclock);
	}


	irq_time_write_end();
	local_irq_restore(flags);
+7 −0
Original line number Diff line number Diff line
@@ -704,6 +704,8 @@ extern void fixup_nr_big_small_task(int cpu);

u64 scale_load_to_cpu(u64 load, int cpu);
unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock);

static inline void
inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
@@ -750,6 +752,11 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
	return SCHED_LOAD_SCALE;
}

/*
 * No-op stub: interrupt time is not folded into window busy-time
 * statistics in this configuration.  NOTE(review): the exact config guard
 * is only partly visible in this hunk; the nearby #endif suggests
 * !CONFIG_SCHED_FREQ_INPUT && !CONFIG_SCHED_HMP — confirm in full source.
 */
static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock)
{
}

#endif	/* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_HMP