kernel/sched/walt.c +8 −2

--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -3170,13 +3170,19 @@ void walt_irq_work(struct irq_work *irq_work)
 	u64 wc;
 	int flag = SCHED_CPUFREQ_WALT;
 	bool is_migration = false;
+	int level = 0;
 
 	/* Am I the window rollover work or the migration work? */
 	if (irq_work == &walt_migration_irq_work)
 		is_migration = true;
 
-	for_each_cpu(cpu, cpu_possible_mask)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
+	for_each_cpu(cpu, cpu_possible_mask) {
+		if (level == 0)
+			raw_spin_lock(&cpu_rq(cpu)->lock);
+		else
+			raw_spin_lock_nested(&cpu_rq(cpu)->lock, level);
+		level++;
+	}
 
 	wc = sched_ktime_clock();
 	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
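walt_irq_work() takes every CPU's runqueue lock before rolling the window over. All rq->lock instances belong to a single lockdep class, so lockdep would flag the second acquisition in the loop as recursive locking; annotating each subsequent acquisition with a distinct subclass via raw_spin_lock_nested() marks the nesting as deliberate. A minimal sketch of the same pattern, assuming a hypothetical demo_lock array (NR_LOCKS and the demo_* helpers are illustrative, not from the patch):

#include <linux/spinlock.h>

#define NR_LOCKS 4	/* keep below MAX_LOCKDEP_SUBCLASSES (8) */

static raw_spinlock_t demo_lock[NR_LOCKS];	/* hypothetical lock array */

static void demo_init(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		raw_spin_lock_init(&demo_lock[i]);
}

static void demo_lock_all(void)
{
	int level;

	/*
	 * All NR_LOCKS locks share one lockdep class; each acquisition
	 * after the first passes a distinct subclass so lockdep treats
	 * the nesting as intentional rather than recursive.
	 */
	for (level = 0; level < NR_LOCKS; level++) {
		if (level == 0)
			raw_spin_lock(&demo_lock[level]);
		else
			raw_spin_lock_nested(&demo_lock[level], level);
	}
}

static void demo_unlock_all(void)
{
	int level;

	/* Release in the reverse order of acquisition. */
	for (level = NR_LOCKS - 1; level >= 0; level--)
		raw_spin_unlock(&demo_lock[level]);
}

One caveat worth noting: lockdep tracks at most MAX_LOCKDEP_SUBCLASSES (8) subclasses per class, so a per-CPU level counter like the one in the patch only stays in range on systems with eight or fewer possible CPUs while lockdep is enabled.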
kernel/trace/trace_irqsoff.c +1 −2

--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -804,10 +804,9 @@ static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
-
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
 void trace_hardirqs_on(void)
 {
 	if (!this_cpu_read(tracing_irq_cpu))
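This hunk hoists the tracing_irq_cpu definition above the CONFIG_TRACE_IRQFLAGS/!CONFIG_PROVE_LOCKING guard, so the per-cpu flag stays defined when CONFIG_PROVE_LOCKING is enabled (presumably to allow building with lockdep while chasing the rq->lock splat addressed above). The flag itself is a per-cpu re-entry guard that suppresses redundant hardirq on/off events. A sketch of that pattern, with the actual tracer and tracepoint calls elided as comments (the real functions also record caller addresses):

#include <linux/percpu.h>

/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

void trace_hardirqs_on(void)
{
	/* IRQs were never traced as off on this CPU: nothing to report. */
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	/* ... emit the irqs-on event to the tracer here (elided) ... */

	this_cpu_write(tracing_irq_cpu, 0);
}

void trace_hardirqs_off(void)
{
	/* Already traced as off: skip the duplicate event. */
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	/* ... emit the irqs-off event to the tracer here (elided) ... */
}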