kernel/sched/fair.c (+18 −10)

@@ -1906,9 +1906,10 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
         return best_fallback_cpu;
 }
 
-#define MOVE_TO_BIG_CPU                 1
-#define MOVE_TO_LITTLE_CPU              2
-#define MOVE_TO_POWER_EFFICIENT_CPU     3
+#define UP_MIGRATION            1
+#define DOWN_MIGRATION          2
+#define EA_MIGRATION            3
+#define IRQLOAD_MIGRATION       4
 
 static int skip_cpu(struct task_struct *p, int cpu, int reason)
 {
@@ -1923,18 +1924,22 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
                 return 1;
 
         switch (reason) {
-        case MOVE_TO_BIG_CPU:
+        case UP_MIGRATION:
                 skip = (rq->capacity <= task_rq->capacity);
                 break;
 
-        case MOVE_TO_LITTLE_CPU:
+        case DOWN_MIGRATION:
                 skip = (rq->capacity >= task_rq->capacity);
                 break;
 
-        case MOVE_TO_POWER_EFFICIENT_CPU:
+        case EA_MIGRATION:
                 skip = rq->capacity < task_rq->capacity ||
                        power_cost(p, cpu) > power_cost(p, task_cpu(p));
                 break;
+
+        case IRQLOAD_MIGRATION:
+                /* Purposely fall through */
+
         default:
                 skip = (cpu == task_cpu(p));
                 break;
@@ -2699,7 +2704,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
         if (sched_boost()) {
                 if (rq->capacity != max_capacity)
-                        return MOVE_TO_BIG_CPU;
+                        return UP_MIGRATION;
 
                 return 0;
         }
@@ -2707,18 +2712,21 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
         if (is_small_task(p))
                 return 0;
 
+        if (sched_cpu_high_irqload(cpu_of(rq)))
+                return IRQLOAD_MIGRATION;
+
         if ((nice > sched_upmigrate_min_nice || upmigrate_discouraged(p)) &&
                         rq->capacity > min_capacity)
-                return MOVE_TO_LITTLE_CPU;
+                return DOWN_MIGRATION;
 
         if (!task_will_fit(p, cpu_of(rq)))
-                return MOVE_TO_BIG_CPU;
+                return UP_MIGRATION;
 
         if (sysctl_sched_enable_power_aware &&
             !is_task_migration_throttled(p) &&
             is_cpu_throttling_imminent(cpu_of(rq)) &&
             lower_power_cpu_available(p, cpu_of(rq)))
-                return MOVE_TO_POWER_EFFICIENT_CPU;
+                return EA_MIGRATION;
 
         return 0;
 }
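In short, this change renames the HMP migration reason codes (MOVE_TO_BIG_CPU to UP_MIGRATION, MOVE_TO_LITTLE_CPU to DOWN_MIGRATION, MOVE_TO_POWER_EFFICIENT_CPU to EA_MIGRATION) and introduces a fourth reason, IRQLOAD_MIGRATION: migration_needed() now requests a move when the task's current CPU reports high IRQ load via sched_cpu_high_irqload(), and skip_cpu() handles that reason by falling through to the default case, so every CPU other than the task's current one is an acceptable target. The standalone sketch below models that decision flow in plain userspace C so it can be compiled and stepped through outside the kernel. The cpu_info struct, the HIGH_IRQLOAD_PCT threshold, and the helper bodies are illustrative stand-ins, not the kernel's data structures or heuristics; only the reason codes and the comparisons mirror the patch.

/*
 * Standalone sketch of the migration-reason flow in this patch.
 * The struct and helpers below are illustrative stand-ins; only the
 * reason codes and the capacity/power/irqload comparisons mirror the
 * kernel change.
 */
#include <stdio.h>

#define UP_MIGRATION            1
#define DOWN_MIGRATION          2
#define EA_MIGRATION            3
#define IRQLOAD_MIGRATION       4

struct cpu_info {
        int capacity;   /* relative CPU capacity */
        int power_cost; /* cost of running the task here */
        int irq_pct;    /* recent time spent servicing IRQs, percent */
};

/* Hypothetical threshold; the kernel decides via sched_cpu_high_irqload() */
#define HIGH_IRQLOAD_PCT        30

static int high_irqload(const struct cpu_info *c)
{
        return c->irq_pct >= HIGH_IRQLOAD_PCT;
}

/* Mirrors the ordering of checks in migration_needed() for a non-small task */
static int migration_needed(const struct cpu_info *cur, int task_fits)
{
        if (high_irqload(cur))
                return IRQLOAD_MIGRATION;
        if (!task_fits)
                return UP_MIGRATION;
        return 0;
}

/* Mirrors skip_cpu(): decide whether candidate 'cand' is rejected for 'reason' */
static int skip_cpu(const struct cpu_info *cand, const struct cpu_info *cur,
                    int cand_is_task_cpu, int reason)
{
        switch (reason) {
        case UP_MIGRATION:
                return cand->capacity <= cur->capacity;
        case DOWN_MIGRATION:
                return cand->capacity >= cur->capacity;
        case EA_MIGRATION:
                return cand->capacity < cur->capacity ||
                       cand->power_cost > cur->power_cost;
        case IRQLOAD_MIGRATION:
                /* Purposely fall through: only the task's own CPU is rejected */
        default:
                return cand_is_task_cpu;
        }
}

int main(void)
{
        struct cpu_info cpus[] = {
                { .capacity = 1024, .power_cost = 50, .irq_pct = 45 }, /* cpu0: IRQ-heavy */
                { .capacity = 1024, .power_cost = 50, .irq_pct = 5  }, /* cpu1 */
                { .capacity = 512,  .power_cost = 20, .irq_pct = 0  }, /* cpu2: little core */
        };
        int task_cpu = 0;
        int reason = migration_needed(&cpus[task_cpu], 1);

        printf("reason = %d\n", reason); /* 4: IRQLOAD_MIGRATION */
        for (int cpu = 0; cpu < 3; cpu++)
                printf("cpu%d: %s\n", cpu,
                       skip_cpu(&cpus[cpu], &cpus[task_cpu],
                                cpu == task_cpu, reason) ? "skip" : "candidate");
        return 0;
}

Note that, unlike UP_MIGRATION or EA_MIGRATION, the IRQLOAD_MIGRATION case places no capacity or power constraint on the target in skip_cpu(): the only CPU it rejects is the one the task is already running on, so even a smaller core is acceptable as long as it is not the IRQ-loaded one.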