kernel/sched/walt.c +8 −2

@@ -3168,13 +3168,19 @@ void walt_irq_work(struct irq_work *irq_work)
 	u64 wc;
 	bool is_migration = false;
 	u64 total_grp_load = 0;
+	int level = 0;
 
 	/* Am I the window rollover work or the migration work? */
 	if (irq_work == &walt_migration_irq_work)
 		is_migration = true;
 
-	for_each_cpu(cpu, cpu_possible_mask)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
+	for_each_cpu(cpu, cpu_possible_mask) {
+		if (level == 0)
+			raw_spin_lock(&cpu_rq(cpu)->lock);
+		else
+			raw_spin_lock_nested(&cpu_rq(cpu)->lock, level);
+		level++;
+	}
 
 	wc = sched_ktime_clock();
 	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
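Why the change: walt_irq_work() acquires every CPU's runqueue lock, and all rq->lock instances belong to the same lockdep class, so the second plain raw_spin_lock() in the loop would trip lockdep's recursive-locking detector even though the acquisition order is fixed and deadlock-free. Annotating each subsequent acquisition with raw_spin_lock_nested() and an increasing subclass tells lockdep the locks sit at distinct nesting levels. Below is a minimal sketch of the same annotation pattern outside the scheduler; the names (my_locks, NR_MY_LOCKS and the helper functions) are hypothetical, not part of the patch:

#include <linux/spinlock.h>

#define NR_MY_LOCKS 4

static raw_spinlock_t my_locks[NR_MY_LOCKS];

static void my_locks_init(void)
{
	int i;

	/* One init call site => all locks share a single lockdep class. */
	for (i = 0; i < NR_MY_LOCKS; i++)
		raw_spin_lock_init(&my_locks[i]);
}

/*
 * Acquire every lock in a fixed order. Without the _nested()
 * annotation, lockdep would flag the second acquisition as possible
 * recursive locking, since all locks are in the same class. The
 * subclass argument marks each acquisition as a distinct level.
 */
static void my_locks_lock_all(void)
{
	int i;

	for (i = 0; i < NR_MY_LOCKS; i++) {
		if (i == 0)
			raw_spin_lock(&my_locks[i]);
		else
			raw_spin_lock_nested(&my_locks[i], i);
	}
}

/* Release in reverse order of acquisition. */
static void my_locks_unlock_all(void)
{
	int i;

	for (i = NR_MY_LOCKS - 1; i >= 0; i--)
		raw_spin_unlock(&my_locks[i]);
}

One caveat, assuming mainline lockdep behavior: subclasses are capped at MAX_LOCKDEP_SUBCLASSES (8), so an increasing per-CPU subclass like the one in this patch only stays in range on systems with at most eight possible CPUs, which typical mobile SoCs running WALT satisfy.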