arch/arm64/kernel/perf_debug.c (+1 −0)

@@ -36,6 +36,7 @@ static char *descriptions =
 	"11 Perf: arm64: Refine disable/enable in tracecounters\n"
 	"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
 	"13 Perf: arm64: restore registers after reset\n"
+	"14 Perf: arm64: stop counters when going into hotplug\n"
 ;
 
 static ssize_t desc_read(struct file *fp, char __user *buf,

arch/arm64/kernel/perf_event.c (+51 −2)

@@ -52,6 +52,7 @@ static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_m
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 static DEFINE_PER_CPU(u32, from_idle);
 static DEFINE_PER_CPU(u32, armv8_pm_pmuserenr);
+static DEFINE_PER_CPU(u32, hotplug_down);
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 static struct pmu_hw_events *armpmu_get_cpu_events(void);

@@ -1462,6 +1463,48 @@ static void armpmu_update_counters(void *x)
 	}
 }
 
+static void armpmu_hotplug_enable(void *parm_pmu)
+{
+	struct arm_pmu *armpmu = parm_pmu;
+	struct pmu *pmu = &(armpmu->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int idx;
+
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = hw_events->events[idx];
+
+		if (!event)
+			continue;
+		event->state = event->hotplug_save_state;
+		pmu->start(event, 0);
+	}
+	per_cpu(hotplug_down, smp_processor_id()) = 0;
+}
+
+static void armpmu_hotplug_disable(void *parm_pmu)
+{
+	struct arm_pmu *armpmu = parm_pmu;
+	struct pmu *pmu = &(armpmu->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int idx;
+
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = hw_events->events[idx];
+
+		if (!event)
+			continue;
+		event->hotplug_save_state = event->state;
+		/*
+		 * Prevent timer tick handler perf callback from enabling
+		 * this event and potentially generating an interrupt
+		 * before the CPU goes down.
+		 */
+		event->state = PERF_EVENT_STATE_OFF;
+		pmu->stop(event, 0);
+	}
+	per_cpu(hotplug_down, smp_processor_id()) = 1;
+}
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are

@@ -1479,6 +1522,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	int ret = NOTIFY_DONE;
 
 	if ((masked_action != CPU_DOWN_PREPARE) &&
+	    (masked_action != CPU_DOWN_FAILED) &&
 	    (masked_action != CPU_STARTING))
 		return NOTIFY_DONE;

@@ -1496,7 +1540,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
 		if (cpu_has_active_perf(cpu))
 			smp_call_function_single(cpu,
-				armpmu_update_counters, NULL, 1);
+				armpmu_hotplug_disable, cpu_pmu, 1);
 
 		/* Disarm the PMU IRQ before disappearing. */
 		if (msm_pmu_use_irq && cpu_pmu->plat_device) {
 			irq = platform_get_irq(cpu_pmu->plat_device, 0);

@@ -1507,6 +1551,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 		break;
 
 	case CPU_STARTING:
+	case CPU_DOWN_FAILED:
 		/* Reset PMU to clear counters for ftrace buffer */
 		if (cpu_pmu->reset)
 			cpu_pmu->reset(NULL);

@@ -1519,7 +1564,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 			armpmu_enable_percpu_irq(&irq);
 		}
 		if (cpu_has_active_perf(cpu)) {
-			__get_cpu_var(from_idle) = 1;
+			armpmu_hotplug_enable(cpu_pmu);
 			pmu = &cpu_pmu->pmu;
 			pmu->pmu_enable(pmu);
 		}

@@ -1543,6 +1588,10 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	if (!cpu_pmu)
 		return NOTIFY_OK;
 
+	/* If the cpu is going down, don't do anything here */
+	if (per_cpu(hotplug_down, cpu))
+		return NOTIFY_OK;
+
 	switch (cmd) {
 	case CPU_PM_ENTER:
 		if (cpu_pmu->save_pm_registers)
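Taken together, the patch works as a save/stop/restore handshake: on CPU_DOWN_PREPARE each installed event's state is stashed in hotplug_save_state and forced to PERF_EVENT_STATE_OFF so the tick handler cannot re-enable it, the counter is stopped, and the per-CPU hotplug_down flag is raised; on CPU_STARTING (or CPU_DOWN_FAILED) the saved states are restored and the counters restarted. The sketch below models that ordering in plain userspace C so it can be compiled and stepped through in isolation. It is illustrative only: the model_* helpers, the ev struct, and the single hotplug_down flag are simplified stand-ins for the kernel types above, not the real perf API.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's perf event state values. */
enum ev_state { EV_OFF, EV_ACTIVE };

struct ev {
	enum ev_state state;
	enum ev_state hotplug_save_state; /* stashed across the down/up cycle */
};

#define NUM_EVENTS 2

static struct ev events[NUM_EVENTS];
static bool hotplug_down; /* per-CPU u32 in the kernel; one flag here */

/* Models armpmu_hotplug_disable(): runs on the dying CPU at CPU_DOWN_PREPARE. */
static void model_hotplug_disable(void)
{
	for (int i = 0; i < NUM_EVENTS; i++) {
		events[i].hotplug_save_state = events[i].state;
		/* Forcing OFF keeps the tick path from re-enabling the event. */
		events[i].state = EV_OFF;
		/* pmu->stop(event, 0) would stop the hardware counter here. */
	}
	hotplug_down = true;
}

/* Models armpmu_hotplug_enable(): runs at CPU_STARTING or CPU_DOWN_FAILED. */
static void model_hotplug_enable(void)
{
	for (int i = 0; i < NUM_EVENTS; i++) {
		events[i].state = events[i].hotplug_save_state;
		/* pmu->start(event, 0) would restart the hardware counter. */
	}
	hotplug_down = false;
}

/* Models the perf_cpu_pm_notifier() guard: idle entry racing with hotplug. */
static void model_pm_enter(void)
{
	if (hotplug_down)
		return; /* the hotplug path already owns the PMU state */
	/* ... save_pm_registers() would run here ... */
}

int main(void)
{
	events[0].state = EV_ACTIVE;
	events[1].state = EV_OFF;

	model_hotplug_disable(); /* CPU_DOWN_PREPARE */
	model_pm_enter();        /* CPU_PM_ENTER during the offline: no-op */
	model_hotplug_enable();  /* CPU_DOWN_FAILED: offline aborted */

	printf("event0=%s event1=%s\n",
	       events[0].state == EV_ACTIVE ? "ACTIVE" : "OFF",
	       events[1].state == EV_ACTIVE ? "ACTIVE" : "OFF");
	return 0;
}

Running the model prints "event0=ACTIVE event1=OFF": each event comes back in exactly the state it held before the offline attempt, which is the invariant the kernel change is after.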
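Two details of the notifier wiring are worth noting. First, CPU_DOWN_FAILED is added both to the action mask and as a case label falling through the CPU_STARTING path, so an aborted offline re-runs the same recovery as a real hotplug-in: the PMU is reset, the IRQ re-armed, and the saved event states restored. Second, the hotplug_down check in perf_cpu_pm_notifier() makes the CPU-PM (idle) callbacks a no-op while the hotplug path is in flight; the comment in the diff suggests this is to keep the idle save/restore from touching PMU state that the hotplug handlers have deliberately stopped.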