include/linux/perf_event.h +1 −1

@@ -266,7 +266,7 @@ struct pmu {
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
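The one-line header change is an annotation fix: under sparse, the __percpu qualifier binds to what the marked type refers to, so writing it after the * marks the pointer variable itself as per-cpu rather than the data it points at. Since pmu_cpu_context is assigned from alloc_percpu() and accessed through per_cpu_ptr(), the qualifier belongs on the pointee. A minimal sketch of the two placements, using a hypothetical example_ctx type that is not part of this patch:

#include <linux/percpu.h>

struct example_ctx { int val; };

/* Old placement: the qualifier binds to the pointer itself, declaring
 * a per-cpu pointer variable -- not what alloc_percpu() hands back. */
struct example_ctx * __percpu wrong;

/* New placement: a plain pointer to per-cpu storage, matching both
 * alloc_percpu() and the per_cpu_ptr(ptr, cpu) accessor. */
struct example_ctx __percpu *right;

void example_init(void)
{
	right = alloc_percpu(struct example_ctx);  /* typed __percpu alloc */
	per_cpu_ptr(right, 0)->val = 1;            /* no sparse warning   */
}

With the old placement, sparse warns on every per_cpu_ptr(pmu->pmu_cpu_context, cpu) user; the new placement silences those warnings without changing generated code, since __percpu expands to nothing outside checker builds.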
kernel/events/core.c +13 −1

@@ -11212,13 +11212,25 @@ static void __perf_event_exit_context(void *__info)
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
+		}
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
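For context on the core.c hunk: the multiplexing timer cancelled here is (re)armed by perf_mux_hrtimer_restart(), which only starts the hrtimer when cpuctx->hrtimer_active is clear. hrtimer_cancel() removes the pending timer so the hotplug core cannot migrate it to another CPU, and clearing hrtimer_active under hrtimer_lock lets the restart path re-arm it locally when the CPU comes back online. Below is a lightly simplified sketch of the mainline helper from kernel/events/core.c of this vintage, shown only to make the pairing visible; it is not part of this patch:

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	unsigned long flags;

	/* Software PMUs do not multiplex, so they have no mux timer. */
	if (cpuctx->ctx.pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		/* ABS_PINNED keeps the timer on this CPU across re-arms. */
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

Note the same perf_sw_context check appears in the new hunk, keeping the cancel path symmetric with the restart path it undoes.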