arch/arm/include/asm/pmu.h (+2 −0)
@@ -109,6 +109,8 @@ struct arm_pmu {
 	struct pmu_hw_events	*(*get_hw_events)(void);
 	int		(*test_set_event_constraints)(struct perf_event *event);
 	int		(*clear_event_constraints)(struct perf_event *event);
+	void		(*save_pm_registers)(void *hcpu);
+	void		(*restore_pm_registers)(void *hcpu);
 };

 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

arch/arm/kernel/perf_event_cpu.c (+20 −0)
@@ -217,6 +217,21 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 {
 	int irq;
 	struct pmu *pmu;
+	int cpu = (int)hcpu;
+
+	switch ((action & ~CPU_TASKS_FROZEN)) {
+	case CPU_DOWN_PREPARE:
+		if (cpu_pmu && cpu_pmu->save_pm_registers)
+			smp_call_function_single(cpu,
+						 cpu_pmu->save_pm_registers,
+						 hcpu, 1);
+		break;
+	case CPU_STARTING:
+		if (cpu_pmu && cpu_pmu->restore_pm_registers)
+			smp_call_function_single(cpu,
+						 cpu_pmu->restore_pm_registers,
+						 hcpu, 1);
+	}

 	if (cpu_has_active_perf((int)hcpu)) {
 		switch ((action & ~CPU_TASKS_FROZEN)) {
@@ -277,6 +292,8 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	struct pmu *pmu;

 	switch (cmd) {
 	case CPU_PM_ENTER:
+		if (cpu_pmu && cpu_pmu->save_pm_registers)
+			cpu_pmu->save_pm_registers((void *)smp_processor_id());
 		if (cpu_has_active_perf((int)v)) {
 			armpmu_update_counters();
 			pmu = &cpu_pmu->pmu;
@@ -286,6 +303,9 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	case CPU_PM_ENTER_FAILED:
 	case CPU_PM_EXIT:
+		if (cpu_pmu && cpu_pmu->restore_pm_registers)
+			cpu_pmu->restore_pm_registers(
+				(void *)smp_processor_id());
 		if (cpu_has_active_perf((int)v) && cpu_pmu->reset) {
 			/*
 			 * Flip this bit so armpmu_enable knows it needs

arch/arm/kernel/perf_event_msm_krait.c (+29 −0)
@@ -564,6 +564,33 @@ static int msm_clear_ev_constraint(struct perf_event *event)
 	return 1;
 }

+static DEFINE_PER_CPU(u32, krait_pm_pmactlr);
+
+static void krait_save_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	/* Read PMACTLR */
+	asm volatile("mrc p15, 0, %0, c9, c15, 5" : "=r" (val));
+	per_cpu(krait_pm_pmactlr, cpu) = val;
+
+	armv7pmu_save_pm_registers(hcpu);
+}
+
+static void krait_restore_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	val = per_cpu(krait_pm_pmactlr, cpu);
+	if (val != 0)
+		/* Restore PMACTLR */
+		asm volatile("mcr p15, 0, %0, c9, c15, 5" : : "r" (val));
+
+	armv7pmu_restore_pm_registers(hcpu);
+}
+
 /* NRCCG format for perf RAW codes. */
 PMU_FORMAT_ATTR(prefix, "config:16-19");
 PMU_FORMAT_ATTR(reg, "config:12-15");
@@ -605,6 +632,8 @@ static int armv7_krait_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->reset = krait_pmu_reset;
 	cpu_pmu->test_set_event_constraints = msm_test_set_ev_constraint;
 	cpu_pmu->clear_event_constraints = msm_clear_ev_constraint;
+	cpu_pmu->save_pm_registers = krait_save_pm_registers;
+	cpu_pmu->restore_pm_registers = krait_restore_pm_registers;
 	cpu_pmu->max_period = (1LLU << 32) - 1;
 	cpu_pmu->name = "cpu";
 	cpu_pmu->map_event = krait_8960_map_event;

arch/arm/kernel/perf_event_v7.c (+25 −0)
@@ -1216,6 +1216,29 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }

+static DEFINE_PER_CPU(u32, armv7_pm_pmuserenr);
+
+static void armv7pmu_save_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	/* Read PMUSERENR */
+	asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (val));
+	per_cpu(armv7_pm_pmuserenr, cpu) = val;
+}
+
+static void armv7pmu_restore_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	val = per_cpu(armv7_pm_pmuserenr, cpu);
+	if (val != 0)
+		/* Restore PMUSERENR */
+		asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (val));
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1228,6 +1251,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->stop		= armv7pmu_stop;
 	cpu_pmu->reset		= armv7pmu_reset;
 	cpu_pmu->max_period	= (1LLU << 32) - 1;
+	cpu_pmu->save_pm_registers = armv7pmu_save_pm_registers;
+	cpu_pmu->restore_pm_registers = armv7pmu_restore_pm_registers;
 };

 static u32 armv7_read_num_pmnc_events(void)

arch/arm/mach-msm/perf_debug.c (+1 −0)
@@ -36,6 +36,7 @@ static char *descriptions =
 	"11 ARM: dts: msm: add perf-events support for msmsamarium\n"
 	"12 Perf: Make per-process counters cumulative\n"
 	"13 Perf: Fix PID for tracepoints\n"
+	"14 Perf: preserve registers across hotplug\n"
 ;

 static ssize_t desc_read(struct file *fp, char __user *buf,
arch/arm/include/asm/pmu.h (+2 −0)
@@ -109,6 +109,8 @@ struct arm_pmu {
 	struct pmu_hw_events	*(*get_hw_events)(void);
 	int		(*test_set_event_constraints)(struct perf_event *event);
 	int		(*clear_event_constraints)(struct perf_event *event);
+	void		(*save_pm_registers)(void *hcpu);
+	void		(*restore_pm_registers)(void *hcpu);
 };

 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
arch/arm/kernel/perf_event_cpu.c (+20 −0)
@@ -217,6 +217,21 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 {
 	int irq;
 	struct pmu *pmu;
+	int cpu = (int)hcpu;
+
+	switch ((action & ~CPU_TASKS_FROZEN)) {
+	case CPU_DOWN_PREPARE:
+		if (cpu_pmu && cpu_pmu->save_pm_registers)
+			smp_call_function_single(cpu,
+						 cpu_pmu->save_pm_registers,
+						 hcpu, 1);
+		break;
+	case CPU_STARTING:
+		if (cpu_pmu && cpu_pmu->restore_pm_registers)
+			smp_call_function_single(cpu,
+						 cpu_pmu->restore_pm_registers,
+						 hcpu, 1);
+	}

 	if (cpu_has_active_perf((int)hcpu)) {
 		switch ((action & ~CPU_TASKS_FROZEN)) {
@@ -277,6 +292,8 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	struct pmu *pmu;

 	switch (cmd) {
 	case CPU_PM_ENTER:
+		if (cpu_pmu && cpu_pmu->save_pm_registers)
+			cpu_pmu->save_pm_registers((void *)smp_processor_id());
 		if (cpu_has_active_perf((int)v)) {
 			armpmu_update_counters();
 			pmu = &cpu_pmu->pmu;
@@ -286,6 +303,9 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	case CPU_PM_ENTER_FAILED:
 	case CPU_PM_EXIT:
+		if (cpu_pmu && cpu_pmu->restore_pm_registers)
+			cpu_pmu->restore_pm_registers(
+				(void *)smp_processor_id());
 		if (cpu_has_active_perf((int)v) && cpu_pmu->reset) {
 			/*
 			 * Flip this bit so armpmu_enable knows it needs
arch/arm/kernel/perf_event_msm_krait.c (+29 −0)
@@ -564,6 +564,33 @@ static int msm_clear_ev_constraint(struct perf_event *event)
 	return 1;
 }

+static DEFINE_PER_CPU(u32, krait_pm_pmactlr);
+
+static void krait_save_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	/* Read PMACTLR */
+	asm volatile("mrc p15, 0, %0, c9, c15, 5" : "=r" (val));
+	per_cpu(krait_pm_pmactlr, cpu) = val;
+
+	armv7pmu_save_pm_registers(hcpu);
+}
+
+static void krait_restore_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	val = per_cpu(krait_pm_pmactlr, cpu);
+	if (val != 0)
+		/* Restore PMACTLR */
+		asm volatile("mcr p15, 0, %0, c9, c15, 5" : : "r" (val));
+
+	armv7pmu_restore_pm_registers(hcpu);
+}
+
 /* NRCCG format for perf RAW codes. */
 PMU_FORMAT_ATTR(prefix, "config:16-19");
 PMU_FORMAT_ATTR(reg, "config:12-15");
@@ -605,6 +632,8 @@ static int armv7_krait_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->reset = krait_pmu_reset;
 	cpu_pmu->test_set_event_constraints = msm_test_set_ev_constraint;
 	cpu_pmu->clear_event_constraints = msm_clear_ev_constraint;
+	cpu_pmu->save_pm_registers = krait_save_pm_registers;
+	cpu_pmu->restore_pm_registers = krait_restore_pm_registers;
 	cpu_pmu->max_period = (1LLU << 32) - 1;
 	cpu_pmu->name = "cpu";
 	cpu_pmu->map_event = krait_8960_map_event;
arch/arm/kernel/perf_event_v7.c (+25 −0)
@@ -1216,6 +1216,29 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }

+static DEFINE_PER_CPU(u32, armv7_pm_pmuserenr);
+
+static void armv7pmu_save_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	/* Read PMUSERENR */
+	asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (val));
+	per_cpu(armv7_pm_pmuserenr, cpu) = val;
+}
+
+static void armv7pmu_restore_pm_registers(void *hcpu)
+{
+	u32 val;
+	u32 cpu = (int)hcpu;
+
+	val = per_cpu(armv7_pm_pmuserenr, cpu);
+	if (val != 0)
+		/* Restore PMUSERENR */
+		asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (val));
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1228,6 +1251,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->stop		= armv7pmu_stop;
 	cpu_pmu->reset		= armv7pmu_reset;
 	cpu_pmu->max_period	= (1LLU << 32) - 1;
+	cpu_pmu->save_pm_registers = armv7pmu_save_pm_registers;
+	cpu_pmu->restore_pm_registers = armv7pmu_restore_pm_registers;
 };

 static u32 armv7_read_num_pmnc_events(void)
arch/arm/mach-msm/perf_debug.c (+1 −0)
@@ -36,6 +36,7 @@ static char *descriptions =
 	"11 ARM: dts: msm: add perf-events support for msmsamarium\n"
 	"12 Perf: Make per-process counters cumulative\n"
 	"13 Perf: Fix PID for tracepoints\n"
+	"14 Perf: preserve registers across hotplug\n"
 ;

 static ssize_t desc_read(struct file *fp, char __user *buf,