kernel/sched/hmp.c  (+39 −7)

@@ -2521,10 +2521,42 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
 	return pred_demand;
 }
 
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC)
+
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+	u64 delta;
+
+	if (unlikely(!cluster))
+		return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * Why don't we need locking here? Let's say that delta is negative
+	 * because some other CPU happened to update last_cc_update with a
+	 * more recent timestamp. We simply read the counter again in that case
+	 * with no harmful side effects. This can happen if there is an FIQ
+	 * between when we read the wallclock and when we use it here.
+	 */
+	delta = wallclock - atomic64_read(&cluster->last_cc_update);
+	if (delta > THRESH_CC_UPDATE) {
+		atomic64_set(&cluster->cycles,
+			     cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu));
+		atomic64_set(&cluster->last_cc_update, wallclock);
+	}
+
+	return atomic64_read(&cluster->cycles);
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+				   u64 wallclock)
 {
 	if (use_cycle_counter)
-		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
 }
 
 static void
@@ -2542,7 +2574,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 		return;
 	}
 
-	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+	cur_cycles = read_cycle_counter(cpu, wallclock);
 
 	/*
 	 * If current task is idle task and irqtime == 0 CPU was
@@ -2834,7 +2866,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 	update_window_start(rq, wallclock);
 
 	if (!p->ravg.mark_start) {
-		update_task_cpu_cycles(p, cpu_of(rq));
+		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 		goto done;
 	}
 
@@ -2902,7 +2934,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu);
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -2947,7 +2979,7 @@ void mark_task_starting(struct task_struct *p)
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_cpu_selected_ts = wallclock;
 	p->last_switch_out_ts = 0;
-	update_task_cpu_cycles(p, cpu_of(rq));
+	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 }
 
 void set_window_start(struct rq *rq)
@@ -3560,7 +3592,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
 			 wallclock, 0);
 
-	update_task_cpu_cycles(p, new_cpu);
+	update_task_cpu_cycles(p, new_cpu, wallclock);
 
 	new_task = is_new_task(p);
 
 	/* Protected by rq_lock */
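What the new code amounts to is a lockless, rate-limited cache: the expensive get_cpu_cycle_counter() callback is invoked at most once per THRESH_CC_UPDATE (2 µs of wallclock time) per cluster, and every other caller inside that window reuses the value cached in cluster->cycles. The snippet below is a minimal user-space analogue of that pattern, assuming C11 atomics; expensive_read() and now_ns() are illustrative placeholders, not kernel APIs or anything taken from this patch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* User-space stand-in for THRESH_CC_UPDATE (2 us in the patch). */
#define THRESH_NS 2000ULL

static _Atomic uint64_t cached_value;
static _Atomic uint64_t last_update_ns;

/* Placeholder for the expensive source (the per-CPU cycle-counter callback). */
static uint64_t expensive_read(void)
{
	static uint64_t v;
	return ++v;
}

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/*
 * Rate-limited, lockless read: refresh the cache only when the caller's
 * timestamp is more than THRESH_NS newer than the last refresh.  If another
 * thread has already stored a newer timestamp, the unsigned subtraction
 * wraps to a huge value and the source is simply read again; that is
 * harmless, which is the same argument the patch makes for skipping locking.
 */
static uint64_t cached_read(uint64_t wallclock)
{
	uint64_t delta = wallclock - atomic_load(&last_update_ns);

	if (delta > THRESH_NS) {
		atomic_store(&cached_value, expensive_read());
		atomic_store(&last_update_ns, wallclock);
	}

	return atomic_load(&cached_value);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("read %d -> %llu\n", i,
		       (unsigned long long)cached_read(now_ns()));

	return 0;
}

The trade-off is bounded staleness: any reader can observe a value up to one threshold interval old, which the patch accepts in exchange for avoiding a hardware counter read on every scheduler event.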
kernel/sched/sched.h  (+2 −0)

@@ -397,6 +397,8 @@ struct sched_cluster {
 	unsigned int static_cluster_pwr_cost;
 	int notifier_sent;
 	bool wake_up_idle;
+	atomic64_t last_cc_update;
+	atomic64_t cycles;
 };
 
 extern unsigned long all_cluster_ids[];
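The diff adds the two per-cluster atomics but does not show where they are initialized. For read_cycle_counter() to populate the cache on its first call, both fields only need to start at zero so that the computed delta exceeds THRESH_CC_UPDATE. The sketch below is a hypothetical helper for the cluster-setup path; the function name and call site are assumptions, not part of this change.

/*
 * Hypothetical helper; not part of this diff.  Zeroed fields guarantee that
 * the first read_cycle_counter() call sees delta > THRESH_CC_UPDATE and
 * refreshes the cache from cpu_cycle_counter_cb.get_cpu_cycle_counter().
 */
static void init_cluster_cycle_cache(struct sched_cluster *cluster)
{
	atomic64_set(&cluster->cycles, 0);
	atomic64_set(&cluster->last_cc_update, 0);
}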