Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 51f0d766 authored by Steve Muckle
Browse files

sched: avoid CPUs with high irq activity



CPUs with significant IRQ activity will not be able to serve tasks
quickly. Avoid them if possible by disqualifying such CPUs from
being recognized as mostly idle.

Change-Id: I2c09272a4f259f0283b272455147d288fce11982
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
parent a14e0110
Loading
Loading
Loading
Loading
+5 −7
Original line number Diff line number Diff line
@@ -1495,12 +1495,10 @@ spill_threshold_crossed(struct task_struct *p, struct rq *rq, int cpu)
/*
 * Return nonzero if @cpu is "mostly idle": its tracked load and runnable
 * task count are both at or below the per-rq thresholds, AND it is not
 * spending significant time servicing interrupts.  A CPU with high IRQ
 * activity cannot serve tasks quickly, so it is disqualified here even
 * if its task load looks light.
 */
int mostly_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return cpu_load(cpu) <= rq->mostly_idle_load
		&& rq->nr_running <= rq->mostly_idle_nr_run
		&& !sched_cpu_high_irqload(cpu);
}

static int boost_refcount;
@@ -1752,7 +1750,7 @@ static int best_small_task_cpu(struct task_struct *p)
			continue;
		}

		if (idle_cpu(i) && cstate) {
		if (idle_cpu(i) && cstate && !sched_cpu_high_irqload(i)) {
			if (cstate < min_cstate) {
				min_cstate_cpu = i;
				min_cstate = cstate;
@@ -1840,7 +1838,7 @@ static int select_packing_target(struct task_struct *p, int best_cpu)
	for_each_cpu(i, &search_cpus) {
		int cost = power_cost(p, i);

		if (cost < min_cost) {
		if (cost < min_cost && !sched_cpu_high_irqload(i)) {
			target = i;
			min_cost = cost;
		}
+1 −1
Original line number Diff line number Diff line
@@ -1560,7 +1560,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
		if (sched_boost() && capacity(rq) != max_capacity)
			continue;

		if (cpu_cost < min_cost) {
		if (cpu_cost < min_cost && !sched_cpu_high_irqload(i)) {
			min_cost = cpu_cost;
			best_cpu = i;
		}
+23 −0
Original line number Diff line number Diff line
@@ -767,6 +767,27 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
#define real_to_pct(tunable)	\
		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))

/* Jiffies after which a stale per-rq irqload sample is ignored. */
#define SCHED_HIGH_IRQ_TIMEOUT 3
static inline u64 sched_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 age = get_jiffies_64() - rq->irqload_ts;

	/* irqload_ts must never be in the future */
	BUG_ON(age < 0);

	/*
	 * Report the averaged IRQ load only while the sample is fresh;
	 * a CPU that has not updated its irqload recently is treated as
	 * having none.
	 */
	return (age < SCHED_HIGH_IRQ_TIMEOUT) ? rq->avg_irqload : 0;
}

#define SCHED_HIGH_IRQ_NS (10 * NSEC_PER_MSEC)
static inline int sched_cpu_high_irqload(int cpu)
{
	return sched_irqload(cpu) >= SCHED_HIGH_IRQ_NS;
}

#else	/* CONFIG_SCHED_HMP */

static inline int pct_task_load(struct task_struct *p) { return 0; }
@@ -801,6 +822,8 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
{
}

/* Without CONFIG_SCHED_HMP there is no irqload tracking: never high. */
static inline int sched_cpu_high_irqload(int cpu)
{
	return 0;
}

#endif	/* CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_FREQ_INPUT