Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ff616f72 authored by Joonwoo Park's avatar Joonwoo Park
Browse files

sched: fix potential deflated frequency estimation during IRQ handling



The time between the idle task's mark_start and the IRQ handler entry is a
period during which the CPU cycle counter is stalled.  It is therefore
inappropriate to include this duration in the sample period used for
frequency estimation.

Fix this by replenishing the idle task's CPU cycle counter upon IRQ entry
and by using irqtime as the time delta.

Change-Id: I274d5047a50565cfaaa2fb821ece21c8cf4c991d
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent a87828a7
Loading
Loading
Loading
Loading
+25 −1
Original line number Diff line number Diff line
@@ -2744,6 +2744,19 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
		else
			rq->cc.cycles = cur_cycles - p->cpu_cycles;
		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;

		if (event == IRQ_UPDATE && is_idle_task(p))
			/*
			 * Time between mark_start of idle task and IRQ handler
			 * entry time is CPU cycle counter stall period.
			 * Upon IRQ handler entry sched_account_irqstart()
			 * replenishes idle task's cpu cycle counter so
			 * rq->cc.cycles now represents increased cycles during
			 * IRQ handler rather than time between idle entry and
			 * IRQ exit.  Thus use irqtime as time delta.
			 */
			rq->cc.time = irqtime;
		else
			rq->cc.time = wallclock - p->ravg.mark_start;
		BUG_ON((s64)rq->cc.time < 0);
	}
@@ -3017,6 +3030,17 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Called on IRQ entry.  If the interrupted task is the idle task,
 * resync its CPU cycle counter so the counter-stall period spent in
 * idle is not folded into the next frequency-estimation sample.
 * @wallclock is currently unused here.
 */
void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);

	/* Window-based stats must be active before touching counters. */
	if (!rq->window_start)
		return;
	if (sched_disable_window_stats)
		return;

	if (use_cycle_counter && is_idle_task(curr))
		update_task_cpu_cycles(curr, cpu);
}

static void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;
+2 −0
Original line number Diff line number Diff line
@@ -81,6 +81,8 @@ void irqtime_account_irq(struct task_struct *curr)

	if (account)
		sched_account_irqtime(cpu, curr, delta, wallclock);
	else if (curr != this_cpu_ksoftirqd())
		sched_account_irqstart(cpu, curr, wallclock);

	local_irq_restore(flags);
}
+8 −0
Original line number Diff line number Diff line
@@ -988,6 +988,9 @@ extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock);
extern void sched_account_irqstart(int cpu, struct task_struct *curr,
				   u64 wallclock);

unsigned int cpu_temp(int cpu);
int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int nr_eligible_big_tasks(int cpu);
@@ -1232,6 +1235,11 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
{
}

/*
 * No-op stub of sched_account_irqstart() — presumably compiled in when the
 * HMP/window-stats scheduler support is configured out (the guarding #ifdef
 * is outside this hunk; confirm against the full header).
 */
static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
					  u64 wallclock)
{
}

/* Stub: without window stats, no CPU is ever considered IRQ-loaded. */
static inline int sched_cpu_high_irqload(int cpu)
{
	return 0;
}

/* Stub: preferred-cluster tracking is unavailable in this configuration. */
static inline void set_preferred_cluster(struct related_thread_group *grp)
{
}