kernel/sched/core.c  +0 −6

@@ -5958,12 +5958,6 @@ int do_isolation_work_cpu_stop(void *data)
 	set_rq_online(rq);
 	raw_spin_unlock(&rq->lock);
 
-	/*
-	 * We might have been in tickless state. Clear NOHZ flags to avoid
-	 * us being kicked for helping out with balancing
-	 */
-	nohz_balance_clear_nohz_mask(cpu);
-
 	clear_walt_request(cpu);
 	local_irq_enable();
 	return 0;
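Taken together with the fair.c changes below, this hunk reverses the earlier strategy: instead of eagerly clearing an isolated CPU out of nohz.idle_cpus_mask from the isolation stopper, the mask is left alone and every reader filters it against cpu_isolated_mask at the point of use. A minimal standalone sketch of that filter-at-use idea, using plain 64-bit bitmasks in place of the kernel's struct cpumask (the demo_* names are hypothetical, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for nohz.idle_cpus_mask and cpu_isolated_mask. */
static uint64_t demo_idle_mask;     /* CPUs sitting in tickless idle */
static uint64_t demo_isolated_mask; /* CPUs pulled out by isolation  */

/*
 * Pick an idle-load-balance CPU while skipping isolated ones,
 * mirroring the cpumask_andnot() + cpumask_first() pattern the
 * patch adds to find_new_ilb().
 */
static int demo_find_ilb(void)
{
	uint64_t usable = demo_idle_mask & ~demo_isolated_mask;

	if (!usable)
		return -1; /* no eligible CPU, akin to ilb >= nr_cpu_ids */
	return __builtin_ctzll(usable); /* lowest set bit = first CPU */
}

int main(void)
{
	demo_idle_mask     = (1ULL << 1) | (1ULL << 2); /* CPUs 1 and 2 idle */
	demo_isolated_mask = (1ULL << 1);               /* CPU 1 isolated    */

	/* CPU 1 stays in the idle mask, yet CPU 2 gets picked. */
	printf("ilb = %d\n", demo_find_ilb());
	return 0;
}

The trade is an extra cpumask_andnot() on each lookup in exchange for never having to keep nohz.idle_cpus_mask coherent from the isolation path itself.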
kernel/sched/fair.c  +22 −14

@@ -9888,6 +9888,8 @@ static inline int find_new_ilb(int type)
 	if (sd) {
 		cpumask_and(&cpumask, nohz.idle_cpus_mask,
 			    sched_domain_span(sd));
+		cpumask_andnot(&cpumask, &cpumask,
+			       cpu_isolated_mask);
 		ilb = cpumask_first(&cpumask);
 	}
 	rcu_read_unlock();

@@ -9896,8 +9898,11 @@ static inline int find_new_ilb(int type)
 		if (!energy_aware() ||
 		    (capacity_orig_of(cpu) == cpu_rq(cpu)->rd->max_cpu_capacity.val ||
-		     cpu_overutilized(cpu)))
-			ilb = cpumask_first(nohz.idle_cpus_mask);
+		     cpu_overutilized(cpu))) {
+			cpumask_andnot(&cpumask, nohz.idle_cpus_mask,
+				       cpu_isolated_mask);
+			ilb = cpumask_first(&cpumask);
+		}
 	}
 
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))

@@ -9934,21 +9939,16 @@ static void nohz_balancer_kick(int type)
 	return;
 }
 
-void nohz_balance_clear_nohz_mask(int cpu)
-{
-	if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-	}
-}
-
 void nohz_balance_exit_idle(unsigned int cpu)
 {
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		/*
-		 * Completely isolated CPUs don't ever set, so we must test.
-		 */
-		nohz_balance_clear_nohz_mask(cpu);
+		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+			atomic_dec(&nohz.nr_cpus);
+		}
 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }

@@ -10223,6 +10223,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
 	int nr_busy;
 	int cpu = rq->cpu;
 	bool kick = false;
+	cpumask_t cpumask;
 
 	if (unlikely(rq->idle_balance))
 		return false;

@@ -10238,7 +10239,8 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
 	 * None are in tickless mode and hence no need for NOHZ idle load
 	 * balancing.
 	 */
-	if (likely(!atomic_read(&nohz.nr_cpus)))
+	cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
+	if (cpumask_empty(&cpumask))
 		return false;
 
 	if (time_before(now, nohz.next_balance))

@@ -10272,8 +10274,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
 	}
 
 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
-	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-				     sched_domain_span(sd)) < cpu)) {
+	if (sd && (cpumask_first_and(&cpumask, sched_domain_span(sd)) < cpu)) {
 		kick = true;
 		goto unlock;
 	}

@@ -10296,6 +10297,13 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
 	enum cpu_idle_type idle = this_rq->idle_balance ?
 						CPU_IDLE : CPU_NOT_IDLE;
 
+	/*
+	 * Since core isolation doesn't update nohz.idle_cpus_mask, there
+	 * is a possibility this nohz kicked cpu could be isolated. Hence
+	 * return if the cpu is isolated.
+	 */
+	if (cpu_isolated(this_rq->cpu))
+		return;
+
 	/*
 	 * If this cpu has a pending nohz_balance_kick, then do the
 	 * balancing on behalf of the other idle cpus whose ticks are
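The nohz_kick_needed() hunks apply the same filter before deciding whether a kick is worth it: the old fast path tested the nohz.nr_cpus counter, which still counts isolated CPUs, so it is replaced by an emptiness check on the filtered mask. Continuing the hypothetical demo_* bitmask convention from the sketch above, roughly:

#include <stdbool.h>
#include <stdint.h>

/*
 * Old fast path: bail out only when the nohz CPU counter is zero.
 * The counter still includes isolated CPUs, so it can report "work
 * to do" even when every tickless CPU is isolated.
 */
static bool demo_kick_needed_old(int nr_nohz_cpus)
{
	return nr_nohz_cpus != 0;
}

/*
 * New fast path, mirroring cpumask_andnot() + cpumask_empty(): only
 * tickless CPUs that are not isolated justify kicking the ilb.
 */
static bool demo_kick_needed_new(uint64_t idle_mask, uint64_t isolated_mask)
{
	return (idle_mask & ~isolated_mask) != 0;
}

With CPU 1 both tickless and isolated, demo_kick_needed_old(1) still asks for a kick, while demo_kick_needed_new(1ULL << 1, 1ULL << 1) correctly declines; the early return added to run_rebalance_domains() guards against the same pointless kick from the receiving side.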
kernel/sched/sched.h  +0 −1

@@ -1444,7 +1444,6 @@
 extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq);
-extern void nohz_balance_clear_nohz_mask(int cpu);
 
 extern void set_cpus_allowed_common(struct task_struct *p,
 				const struct cpumask *new_mask);