arch/arm64/kernel/perf_event.c  +2 −2

@@ -779,8 +779,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		/* Ignore if we don't have an event or if it's a zombie event */
-		if (!event || event->state == PERF_EVENT_STATE_ZOMBIE)
+		/* Ignore if we don't have an event */
+		if (!event || event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;

 		/*
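The new test is deliberately broader than the one it replaces: instead of special-casing the removed ZOMBIE state, the IRQ handler now skips any counter slot whose event is not actively scheduled, which also covers OFF, ERROR, EXIT, DEAD, and the new DORMANT state. An equivalent spelling of the check as a helper predicate (illustrative only, not part of the patch):

	/* Illustrative only: only ACTIVE events own live hardware
	 * counters; every other state (OFF, ERROR, EXIT, DEAD,
	 * DORMANT) must be skipped by the overflow handler.
	 */
	static inline bool event_is_counting(const struct perf_event *event)
	{
		return event && event->state == PERF_EVENT_STATE_ACTIVE;
	}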
drivers/perf/arm_pmu.c  +3 −11

@@ -752,11 +752,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 		if (!event)
 			continue;

-		/*
-		 * Check if an attempt was made to free this event during
-		 * the CPU went offline.
-		 */
-		if (event->state == PERF_EVENT_STATE_ZOMBIE)
+		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;

 		switch (cmd) {

@@ -882,10 +878,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
 		return 0;

-	data.cmd = CPU_PM_EXIT;
-	cpu_pm_pmu_common(&data);
-	if (data.ret == NOTIFY_DONE)
-		return 0;
+	if (pmu->reset)
+		pmu->reset(pmu);

 	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
 	    data.armpmu->plat_device) {

@@ -911,8 +905,6 @@ static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
 	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
 		return 0;

-	data.cmd = CPU_PM_ENTER;
-	cpu_pm_pmu_common(&data);
 	/* Disarm the PMU IRQ before disappearing. */
 	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
 	    data.armpmu->plat_device) {
include/linux/perf_event.h  +11 −3

@@ -497,8 +497,8 @@ struct perf_addr_filters_head {
  * enum perf_event_active_state - the states of a event
  */
 enum perf_event_active_state {
-	PERF_EVENT_STATE_DEAD		= -5,
-	PERF_EVENT_STATE_ZOMBIE		= -4,
+	PERF_EVENT_STATE_DORMANT	= -5,
+	PERF_EVENT_STATE_DEAD		= -4,
 	PERF_EVENT_STATE_EXIT		= -3,
 	PERF_EVENT_STATE_ERROR		= -2,
 	PERF_EVENT_STATE_OFF		= -1,

@@ -721,7 +721,13 @@ struct perf_event {
 	/* Is this event shared with other events */
 	bool				shared;

-	struct list_head		zombie_entry;
+	/*
+	 * Entry into the list that holds the events whose CPUs
+	 * are offline. These events will be installed once the
+	 * CPU wakes up and will be removed from the list after that
+	 */
+	struct list_head		dormant_event_entry;
 #endif /* CONFIG_PERF_EVENTS */
 };

@@ -1401,9 +1407,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
 #ifdef CONFIG_PERF_EVENTS
 int perf_event_init_cpu(unsigned int cpu);
 int perf_event_exit_cpu(unsigned int cpu);
+int perf_event_restart_events(unsigned int cpu);
 #else
 #define perf_event_init_cpu	NULL
 #define perf_event_exit_cpu	NULL
+#define perf_event_restart_events	NULL
 #endif

 #endif /* _LINUX_PERF_EVENT_H */
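For reference, the full state ladder after the rename looks like the sketch below. The ordering matters because core code compares states numerically; DORMANT deliberately sits at the bottom, below every torn-down state. The non-negative values are the unchanged upstream ones, shown here only to make the ordering visible:

	enum perf_event_active_state {
		PERF_EVENT_STATE_DORMANT	= -5,	/* parked while its CPU is offline */
		PERF_EVENT_STATE_DEAD		= -4,
		PERF_EVENT_STATE_EXIT		= -3,
		PERF_EVENT_STATE_ERROR		= -2,
		PERF_EVENT_STATE_OFF		= -1,
		PERF_EVENT_STATE_INACTIVE	=  0,
		PERF_EVENT_STATE_ACTIVE		=  1,
	};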
kernel/cpu.c  +1 −1

@@ -1463,7 +1463,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	},
 	[CPUHP_AP_PERF_ONLINE] = {
 		.name			= "perf:online",
-		.startup.single		= perf_event_init_cpu,
+		.startup.single		= perf_event_restart_events,
 		.teardown.single	= perf_event_exit_cpu,
 	},
 	[CPUHP_AP_WORKQUEUE_ONLINE] = {
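The static cpuhp_ap_states[] entry above is how core code hooks CPUHP_AP_PERF_ONLINE: the startup callback runs on a CPU as it comes online, the teardown callback as it goes down. Code that cannot edit this table reaches the same contract through the dynamic registration API; a minimal sketch, assuming a driver/module context (my_online, my_offline, and the state name are hypothetical):

	#include <linux/cpuhotplug.h>

	static int my_online(unsigned int cpu)
	{
		/* Runs on each CPU as it comes up (and once per already
		 * online CPU at registration, unless the _nocalls
		 * variant is used).
		 */
		return 0;
	}

	static int my_offline(unsigned int cpu)
	{
		/* Runs as each CPU goes down, before it disappears. */
		return 0;
	}

	static int __init my_init(void)
	{
		int ret;

		/* CPUHP_AP_ONLINE_DYN asks the core to allocate a free
		 * slot in the online section; a positive return value
		 * is the allocated state number.
		 */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
					my_online, my_offline);
		return ret < 0 ? ret : 0;
	}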
kernel/events/core.c  +71 −136

@@ -2315,6 +2315,23 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }

+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static LIST_HEAD(dormant_event_list);
+static DEFINE_SPINLOCK(dormant_event_list_lock);
+
+static void perf_prepare_install_in_context(struct perf_event *event)
+{
+	spin_lock(&dormant_event_list_lock);
+	if (event->state == PERF_EVENT_STATE_DORMANT)
+		goto out;
+
+	event->state = PERF_EVENT_STATE_DORMANT;
+	list_add_tail(&event->dormant_event_entry, &dormant_event_list);
+out:
+	spin_unlock(&dormant_event_list_lock);
+}
+#endif
+
 /*
  * Cross CPU call to install and enable a performance event
  *

@@ -2460,6 +2477,34 @@ perf_install_in_context(struct perf_event_context *ctx,
 	raw_spin_unlock_irq(&ctx->lock);
 }

+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static void perf_deferred_install_in_context(int cpu)
+{
+	struct perf_event *event, *tmp;
+	struct perf_event_context *ctx;
+
+	spin_lock(&dormant_event_list_lock);
+	list_for_each_entry_safe(event, tmp, &dormant_event_list,
+				 dormant_event_entry) {
+		if (cpu != event->cpu)
+			continue;
+
+		list_del(&event->dormant_event_entry);
+		event->state = PERF_EVENT_STATE_INACTIVE;
+		spin_unlock(&dormant_event_list_lock);
+
+		ctx = event->ctx;
+
+		mutex_lock(&ctx->mutex);
+		perf_install_in_context(ctx, event, cpu);
+		mutex_unlock(&ctx->mutex);
+
+		spin_lock(&dormant_event_list_lock);
+	}
+	spin_unlock(&dormant_event_list_lock);
+}
+#endif
+
 /*
  * Put a event into inactive state and update time fields.
  * Enabling the leader of a group effectively enables all

@@ -4276,14 +4321,6 @@ static void put_event(struct perf_event *event)
 	_free_event(event);
 }

-/*
- * Maintain a zombie list to collect all the zombie events
- */
-#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-static LIST_HEAD(zombie_list);
-static DEFINE_SPINLOCK(zombie_list_lock);
-#endif
-
 /*
  * Kill an event dead; while event:refcount will preserve the event
  * object, it will not preserve its functionality. Once the last 'user'

@@ -4294,23 +4331,12 @@ static int __perf_event_release_kernel(struct perf_event *event)
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *child, *tmp;

-	/*
-	 * If the cpu associated to this event is offline, set the event as a
-	 * zombie event. The cleanup of the cpu would be done if the CPU is
-	 * back online.
-	 */
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-	if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu)) {
-		if (event->state == PERF_EVENT_STATE_ZOMBIE)
-			return 0;
-		event->state = PERF_EVENT_STATE_ZOMBIE;
-
-		spin_lock(&zombie_list_lock);
-		list_add_tail(&event->zombie_entry, &zombie_list);
-		spin_unlock(&zombie_list_lock);
-
-		return 0;
+	if (event->cpu != -1) {
+		spin_lock(&dormant_event_list_lock);
+		if (event->state == PERF_EVENT_STATE_DORMANT)
+			list_del(&event->dormant_event_entry);
+		spin_unlock(&dormant_event_list_lock);
 	}
 #endif

@@ -4627,6 +4653,15 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 	struct perf_event_context *ctx;
 	int ret;

+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+	spin_lock(&dormant_event_list_lock);
+	if (event->state == PERF_EVENT_STATE_DORMANT) {
+		spin_unlock(&dormant_event_list_lock);
+		return 0;
+	}
+	spin_unlock(&dormant_event_list_lock);
+#endif
+
 	ctx = perf_event_ctx_lock(event);
 	ret = __perf_read(event, buf, count);
 	perf_event_ctx_unlock(event, ctx);

@@ -9455,13 +9490,13 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);

+	INIT_LIST_HEAD(&event->dormant_event_entry);
 	INIT_LIST_HEAD(&event->group_entry);
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
 	INIT_LIST_HEAD(&event->rb_entry);
 	INIT_LIST_HEAD(&event->active_entry);
 	INIT_LIST_HEAD(&event->addr_filters.list);
-	INIT_LIST_HEAD(&event->zombie_entry);

 	INIT_HLIST_NODE(&event->hlist_entry);

@@ -11114,111 +11149,27 @@ int perf_event_init_cpu(unsigned int cpu)
 }

 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-static void check_hotplug_start_event(struct perf_event *event)
-{
-	if (event->pmu->events_across_hotplug &&
-	    event->attr.type == PERF_TYPE_SOFTWARE &&
-	    event->pmu->start)
-		event->pmu->start(event, 0);
-}
-
-static void perf_event_zombie_cleanup(unsigned int cpu)
-{
-	struct perf_event *event, *tmp;
-
-	spin_lock(&zombie_list_lock);
-
-	list_for_each_entry_safe(event, tmp, &zombie_list, zombie_entry) {
-		if (event->cpu != cpu)
-			continue;
-
-		list_del(&event->zombie_entry);
-		spin_unlock(&zombie_list_lock);
-
-		/*
-		 * The detachment of the event with the
-		 * PMU expects it to be in an active state
-		 */
-		event->state = PERF_EVENT_STATE_ACTIVE;
-		__perf_event_release_kernel(event);
-
-		spin_lock(&zombie_list_lock);
-	}
-
-	spin_unlock(&zombie_list_lock);
-}
-
-static int perf_event_start_swevents(unsigned int cpu)
+int perf_event_restart_events(unsigned int cpu)
 {
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	struct perf_event *event;
-	int idx;
-
 	mutex_lock(&pmus_lock);
-	perf_event_zombie_cleanup(cpu);
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
-
-		mutex_lock(&ctx->mutex);
-		raw_spin_lock(&ctx->lock);
-		list_for_each_entry(event, &ctx->event_list, event_entry)
-			check_hotplug_start_event(event);
-		raw_spin_unlock(&ctx->lock);
-		mutex_unlock(&ctx->mutex);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
 	per_cpu(is_hotplugging, cpu) = false;
+	perf_deferred_install_in_context(cpu);
 	mutex_unlock(&pmus_lock);

 	return 0;
 }

-/*
- * If keeping events across hotplugging is supported, do not
- * remove the event list so event lives beyond CPU hotplug.
- * The context is exited via an fd close path when userspace
- * is done and the target CPU is online. If software clock
- * event is active, then stop hrtimer associated with it.
- * Start the timer when the CPU comes back online.
- */
-static void check_hotplug_remove_from_context(struct perf_event *event,
-					      struct perf_cpu_context *cpuctx,
-					      struct perf_event_context *ctx)
-{
-	if (event->pmu->events_across_hotplug &&
-	    event->attr.type == PERF_TYPE_SOFTWARE &&
-	    event->pmu->stop)
-		event->pmu->stop(event, PERF_EF_UPDATE);
-	else if (!event->pmu->events_across_hotplug)
-		__perf_remove_from_context(event, cpuctx,
-					   ctx, (void *)DETACH_GROUP);
-}
-
-static void __perf_event_exit_context(void *__info)
-{
-	struct perf_event_context *ctx = __info;
-	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-	struct perf_event *event;
-
-	raw_spin_lock(&ctx->lock);
-	list_for_each_entry(event, &ctx->event_list, event_entry)
-		check_hotplug_remove_from_context(event, cpuctx, ctx);
-	raw_spin_unlock(&ctx->lock);
-}
-
 static void perf_event_exit_cpu_context(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 	unsigned long flags;
+	struct perf_event *event, *event_tmp;
 	struct pmu *pmu;
 	int idx;

 	idx = srcu_read_lock(&pmus_srcu);
-	per_cpu(is_hotplugging, cpu) = true;
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		ctx = &cpuctx->ctx;

@@ -11233,7 +11184,12 @@ static void perf_event_exit_cpu_context(int cpu)
 		}

 		mutex_lock(&ctx->mutex);
-		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+		list_for_each_entry_safe(event, event_tmp, &ctx->event_list,
+					 event_entry) {
+			perf_remove_from_context(event, DETACH_GROUP);
+			if (event->pmu->events_across_hotplug)
+				perf_prepare_install_in_context(event);
+		}
 		mutex_unlock(&ctx->mutex);
 	}
 	srcu_read_unlock(&pmus_srcu, idx);

@@ -11246,8 +11202,8 @@ static void perf_event_exit_cpu_context(int cpu) { }

 int perf_event_exit_cpu(unsigned int cpu)
 {
 	mutex_lock(&pmus_lock);
+	per_cpu(is_hotplugging, cpu) = true;
 	perf_event_exit_cpu_context(cpu);
 	mutex_unlock(&pmus_lock);
 	return 0;

@@ -11292,25 +11248,6 @@ static struct notifier_block perf_event_idle_nb = {
 	.notifier_call = event_idle_notif,
 };

-#ifdef CONFIG_HOTPLUG_CPU
-static int perf_cpu_hp_init(void)
-{
-	int ret;
-
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
-					"PERF/CORE/CPUHP_AP_PERF_ONLINE",
-					perf_event_start_swevents,
-					perf_event_exit_cpu);
-	if (ret)
-		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
-		       ret);
-
-	return ret;
-}
-#else
-static int perf_cpu_hp_init(void) { return 0; }
-#endif
-
 void __init perf_event_init(void)
 {
 	int ret, cpu;

@@ -11337,8 +11274,6 @@ void __init perf_event_init(void)
 	perf_event_init_cpu(smp_processor_id());
 	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
-	ret = perf_cpu_hp_init();
-	WARN(ret, "core perf_cpu_hp_init() failed with: %d", ret);

 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
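Taken together, the core.c changes replace the zombie cleanup with a park/re-install round trip: on offline, events whose PMU sets events_across_hotplug are detached from the CPU context and parked on dormant_event_list; on online, matching entries are unparked and re-installed. A stand-alone user-space model of that round trip, compilable as-is (all names illustrative; locking and the real context machinery are deliberately elided):

	/*
	 * User-space model of the dormant-event round trip:
	 *   CPU offline: perf_event_exit_cpu() marks the CPU as hotplugging
	 *     and parks events via perf_prepare_install_in_context()
	 *     -> park() below.
	 *   CPU online:  perf_event_restart_events() clears the flag and
	 *     calls perf_deferred_install_in_context() -> unpark() below.
	 * Build: cc -std=c99 -o dormant dormant.c
	 */
	#include <stdio.h>

	enum state { ST_DORMANT = -5, ST_INACTIVE = 0, ST_ACTIVE = 1 };

	struct event {
		int cpu;
		enum state state;
		struct event *next;	/* link on the dormant list */
	};

	static struct event *dormant_list;

	/* Offline path: detach the event and park it (idempotent). */
	static void park(struct event *ev)
	{
		if (ev->state == ST_DORMANT)
			return;
		ev->state = ST_DORMANT;
		ev->next = dormant_list;
		dormant_list = ev;
	}

	/* Online path: unpark every event bound to this CPU. */
	static void unpark(int cpu)
	{
		struct event **pp = &dormant_list;

		while (*pp) {
			struct event *ev = *pp;

			if (ev->cpu == cpu) {
				*pp = ev->next;		 /* list_del()   */
				ev->state = ST_INACTIVE; /* then install */
				printf("re-installed event on cpu%d\n",
				       ev->cpu);
			} else {
				pp = &ev->next;
			}
		}
	}

	int main(void)
	{
		struct event a = { .cpu = 1, .state = ST_ACTIVE };
		struct event b = { .cpu = 2, .state = ST_ACTIVE };

		park(&a);	/* cpu1 goes offline */
		park(&b);	/* cpu2 goes offline */
		unpark(1);	/* cpu1 returns: only 'a' is re-installed */
		return 0;
	}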