Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6d973a43 authored by Olav Haugan's avatar Olav Haugan Committed by Gerrit - the friendly Code Review server
Browse files

sched/fair: Add irq load awareness to the tick CPU selection logic



IRQ load is not taken into account when determining whether a task
should be migrated to a different CPU. A task that runs for a long time
could get stuck on a CPU with high IRQ load, causing degraded performance.

Add irq load awareness to the tick CPU selection logic.

CRs-fixed: 809119
Change-Id: I7969f7dd947fb5d66fce0bedbc212bfb2d42c8c1
Signed-off-by: default avatarOlav Haugan <ohaugan@codeaurora.org>
parent 13e54426
Loading
Loading
Loading
Loading
+18 −10
Original line number Diff line number Diff line
@@ -1906,9 +1906,10 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
	return best_fallback_cpu;
}

#define MOVE_TO_BIG_CPU			1
#define MOVE_TO_LITTLE_CPU		2
#define MOVE_TO_POWER_EFFICIENT_CPU	3
#define UP_MIGRATION		1
#define DOWN_MIGRATION		2
#define EA_MIGRATION		3
#define IRQLOAD_MIGRATION	4

static int skip_cpu(struct task_struct *p, int cpu, int reason)
{
@@ -1923,18 +1924,22 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
		return 1;

	switch (reason) {
	case MOVE_TO_BIG_CPU:
	case UP_MIGRATION:
		skip = (rq->capacity <= task_rq->capacity);
		break;

	case MOVE_TO_LITTLE_CPU:
	case DOWN_MIGRATION:
		skip = (rq->capacity >= task_rq->capacity);
		break;

	case MOVE_TO_POWER_EFFICIENT_CPU:
	case EA_MIGRATION:
		skip = rq->capacity < task_rq->capacity  ||
			power_cost(p, cpu) >  power_cost(p,  task_cpu(p));
		break;

	case IRQLOAD_MIGRATION:
		/* Purposely fall through */

	default:
		skip = (cpu == task_cpu(p));
		break;
@@ -2699,7 +2704,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)

	if (sched_boost()) {
		if (rq->capacity != max_capacity)
			return MOVE_TO_BIG_CPU;
			return UP_MIGRATION;

		return 0;
	}
@@ -2707,18 +2712,21 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
	if (is_small_task(p))
		return 0;

	if (sched_cpu_high_irqload(cpu_of(rq)))
		return IRQLOAD_MIGRATION;

	if ((nice > sched_upmigrate_min_nice || upmigrate_discouraged(p)) &&
			 rq->capacity > min_capacity)
		return MOVE_TO_LITTLE_CPU;
		return DOWN_MIGRATION;

	if (!task_will_fit(p, cpu_of(rq)))
		return MOVE_TO_BIG_CPU;
		return UP_MIGRATION;

	if (sysctl_sched_enable_power_aware &&
	    !is_task_migration_throttled(p) &&
	    is_cpu_throttling_imminent(cpu_of(rq)) &&
	    lower_power_cpu_available(p, cpu_of(rq)))
		return MOVE_TO_POWER_EFFICIENT_CPU;
		return EA_MIGRATION;

	return 0;
}