include/linux/sched/sysctl.h +0 −1

@@ -34,7 +34,6 @@
 extern unsigned int __weak sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int __weak sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 extern unsigned int __weak sysctl_sched_user_hint;
 extern const int __weak sched_user_hint_max;
-extern unsigned int __weak sysctl_sched_cpu_high_irqload;
 extern unsigned int __weak sysctl_sched_boost;
 extern unsigned int __weak sysctl_sched_group_upmigrate_pct;
 extern unsigned int __weak sysctl_sched_group_downmigrate_pct;
kernel/sched/core.c +0 −42

@@ -8411,46 +8411,4 @@ int set_task_boost(int boost, u64 period)
 	}
 	return 0;
 }
-
-void sched_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
-			   u64 wallclock)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long flags, nr_ticks;
-	u64 cur_jiffies_ts;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-
-	/*
-	 * cputime (wallclock) uses sched_clock so use the same here for
-	 * consistency.
-	 */
-	delta += sched_clock() - wallclock;
-	cur_jiffies_ts = get_jiffies_64();
-
-	if (is_idle_task(curr))
-		walt_update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
-				      delta);
-
-	nr_ticks = cur_jiffies_ts - rq->wrq.irqload_ts;
-
-	if (nr_ticks) {
-		if (nr_ticks < 10) {
-			/* Decay CPU's irqload by 3/4 for each window. */
-			rq->wrq.avg_irqload *= (3 * nr_ticks);
-			rq->wrq.avg_irqload = div64_u64(rq->wrq.avg_irqload,
-							4 * nr_ticks);
-		} else {
-			rq->wrq.avg_irqload = 0;
-		}
-		rq->wrq.avg_irqload += rq->wrq.cur_irqload;
-		rq->wrq.high_irqload = (rq->wrq.avg_irqload >=
-					sysctl_sched_cpu_high_irqload);
-		rq->wrq.cur_irqload = 0;
-	}
-
-	rq->wrq.cur_irqload += delta;
-	rq->wrq.irqload_ts = cur_jiffies_ts;
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
 #endif
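A note on the decay arithmetic in the removed function: because nr_ticks appears in both the multiplier and the divisor, 3 * nr_ticks / (4 * nr_ticks) reduces to a flat 3/4, so avg_irqload decayed by 3/4 exactly once per update regardless of how many jiffies had elapsed, despite what the in-code comment claims. A minimal user-space sketch of that arithmetic (decay_avg_irqload is an illustrative name, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the removed decay step: multiplying by 3 * nr_ticks and
 * dividing by 4 * nr_ticks cancels nr_ticks, so the average decays by
 * exactly 3/4 per update, however many jiffies passed (as long as
 * nr_ticks < 10; at 10 or more, the kernel code reset it to 0).
 */
static uint64_t decay_avg_irqload(uint64_t avg, uint64_t nr_ticks)
{
	if (nr_ticks >= 10)
		return 0;
	avg *= 3 * nr_ticks;
	return avg / (4 * nr_ticks);	/* == avg * 3 / 4 for any nr_ticks */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)decay_avg_irqload(1000, 1)); /* 750 */
	printf("%llu\n", (unsigned long long)decay_avg_irqload(1000, 9)); /* 750 */
	return 0;
}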
kernel/sched/cputime.c +8 −14

@@ -54,18 +54,11 @@ void irqtime_account_irq(struct task_struct *curr)
 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 	s64 delta;
 	int cpu;
-#ifdef CONFIG_SCHED_WALT
-	u64 wallclock;
-	bool account = true;
-#endif
 
 	if (!sched_clock_irqtime)
 		return;
 
 	cpu = smp_processor_id();
-#ifdef CONFIG_SCHED_WALT
-	wallclock = sched_clock_cpu(cpu);
-#endif
 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
 	irqtime->irq_start_time += delta;
 

@@ -79,14 +72,15 @@ void irqtime_account_irq(struct task_struct *curr)
 		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 #ifdef CONFIG_SCHED_WALT
-	else
-		account = false;
-
-	if (account)
-		sched_account_irqtime(cpu, curr, delta, wallclock);
-	else if (curr != this_cpu_ksoftirqd())
-		sched_account_irqstart(cpu, curr, wallclock);
+	if (is_idle_task(curr)) {
+		if (hardirq_count() || in_serving_softirq())
+			walt_sched_account_irqend(cpu, curr, delta);
+		else
+			walt_sched_account_irqstart(cpu, curr);
+	}
+
+	cpu_rq(cpu)->wrq.last_irq_window = cpu_rq(cpu)->wrq.window_start;
 #endif
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
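With sched_account_irqtime gone, the WALT hooks now fire only when the idle task is interrupted: walt_sched_account_irqend when the CPU is still in hardirq/softirq context (so delta is genuine interrupt time), walt_sched_account_irqstart otherwise, and each pass stamps last_irq_window with the current window start. A plausible consumer of that stamp, sketched under the assumption that "high irqload" now means "this CPU saw interrupt activity within the last few scheduler windows"; walt_cpu_high_irqload is a hypothetical helper, not code from this patch:

#include "sched.h"

/*
 * Hypothetical helper, for illustration only: with avg_irqload and
 * sysctl_sched_cpu_high_irqload removed, a window-based check can ask
 * whether interrupts hit this CPU within the last nr_windows windows,
 * using only fields this patch keeps or adds.
 */
static inline bool walt_cpu_high_irqload(int cpu, unsigned int nr_windows)
{
	struct rq *rq = cpu_rq(cpu);
	u64 horizon = (u64)nr_windows * rq->wrq.prev_window_size;

	/* last_irq_window is stamped in irqtime_account_irq() above. */
	return rq->wrq.window_start - rq->wrq.last_irq_window <= horizon;
}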
kernel/sched/sched.h +3 −3

@@ -159,10 +159,9 @@ struct walt_rq {
 	u32 prev_window_size;
 	unsigned long walt_flags;
 
-	u64 cur_irqload;
-	u64 avg_irqload;
-	u64 irqload_ts;
-	bool high_irqload;
+	u64 last_irq_window;
+	u64 prev_irq_time;
 	struct task_struct *ed_task;
 	u64 task_exec_scale;
 	u64 old_busy_time;

@@ -181,6 +180,7 @@ struct walt_rq {
 	int prev_top;
 	int curr_top;
 	bool notif_pending;
+	bool high_irqload;
 	u64 last_cc_update;
 	u64 cycles;
 };
kernel/sched/walt.c +5 −2

@@ -199,8 +199,11 @@
 void __weak set_window_start(struct rq *rq) { }
 
 bool __weak do_pl_notif(struct rq *rq) { return false; }
 
-void __weak sched_account_irqstart(int cpu, struct task_struct *curr,
-				   u64 wallclock) { }
+void __weak walt_sched_account_irqstart(int cpu,
+					struct task_struct *curr) { }
+
+void __weak walt_sched_account_irqend(int cpu, struct task_struct *curr,
+				      u64 delta) { }
 
 void __weak update_cluster_topology(void) { }
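These stubs rely on the weak-symbol attribute so the scheduler core links and runs even when no WALT implementation is present; a strong definition, if one is linked in, silently overrides the stub. A minimal freestanding illustration of that linkage rule (generic C, not kernel code; walt_hook is an invented name):

/* stub.c - default (weak) definition, analogous to the walt.c stubs */
#include <stdio.h>

void __attribute__((weak)) walt_hook(void)
{
	/* Does nothing, like the __weak stubs above. */
	puts("weak stub");
}

int main(void)
{
	walt_hook();	/* resolves to the strong version if one is linked */
	return 0;
}

/* strong.c - compiling and linking this file in replaces the weak stub:
 *
 * #include <stdio.h>
 * void walt_hook(void) { puts("strong override"); }
 */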