
Commit 3fe2f12b authored by Neil Leeder

Perf: stop counters when going into hotplug



Hotplug disables the PMU irq, but if counters are running
in the window before the CPU is hotplugged off, they can
overflow and generate an interrupt. Because the irq is
disabled, the interrupt is never serviced; this prevents
the CPU from going down and leaves cpu_down in a spin loop
that consumes 100% CPU.

Events are stopped during hotplug processing. However,
perf is hooked into the timer tick and restarts enabled
events on every tick, even if they were just stopped.
Save each event's state and change it to OFF so the tick
handler cannot restart the event before the CPU goes down;
the saved state is restored when the CPU comes back online.

The stop processing reads the counters, so the separate
armpmu_update_counters() call is no longer needed. The
start processing re-enables events, so the from_idle flag
is not needed during pmu_enable.

Change-Id: I27ded876315147b50d9f85c09487b107a9216641
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
parent 7057c2b4
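
To see why marking an event OFF is enough to keep a tick-driven restart
from re-arming it, here is a minimal, self-contained sketch of the
save / mark-OFF / restore pattern the patch uses. This is hypothetical
userspace C, not kernel code; the function names and simplified state
values are illustrative only. The real patch applies the same idea per
counter in armpmu_hotplug_disable()/armpmu_hotplug_enable() below.

#include <stdio.h>

/* Simplified stand-in for perf_event_active_state. */
enum event_state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

struct event {
	enum event_state state;
	enum event_state hotplug_save_state;
};

/* What the timer tick effectively does: restart anything not marked OFF. */
static void tick_restart(struct event *e)
{
	if (e->state != STATE_OFF)
		e->state = STATE_ACTIVE;
}

/* CPU going down: remember the state, then park the event. */
static void hotplug_disable(struct event *e)
{
	e->hotplug_save_state = e->state;
	e->state = STATE_OFF;		/* the tick can no longer restart it */
}

/* CPU back online: restore whatever the event was doing before. */
static void hotplug_enable(struct event *e)
{
	e->state = e->hotplug_save_state;
}

int main(void)
{
	struct event e = { .state = STATE_ACTIVE, .hotplug_save_state = STATE_ACTIVE };

	hotplug_disable(&e);
	tick_restart(&e);				/* no effect while OFF */
	printf("during hotplug: %d\n", e.state);	/* -1 */

	hotplug_enable(&e);
	printf("after hotplug:  %d\n", e.state);	/* 1 */
	return 0;
}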
+43 −2
@@ -211,6 +211,46 @@ static void armpmu_update_counters(void)
 	}
 }
 
+static void armpmu_hotplug_enable(void *parm_pmu)
+{
+	struct arm_pmu *armpmu = parm_pmu;
+	struct pmu *pmu = &(armpmu->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int idx;
+
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = hw_events->events[idx];
+		if (!event)
+			continue;
+
+		event->state = event->hotplug_save_state;
+		pmu->start(event, 0);
+	}
+}
+
+static void armpmu_hotplug_disable(void *parm_pmu)
+{
+	struct arm_pmu *armpmu = parm_pmu;
+	struct pmu *pmu = &(armpmu->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int idx;
+
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = hw_events->events[idx];
+		if (!event)
+			continue;
+
+		event->hotplug_save_state = event->state;
+		/*
+		 * Prevent timer tick handler perf callback from enabling
+		 * this event and potentially generating an interrupt
+		 * before the CPU goes down.
+		 */
+		event->state = PERF_EVENT_STATE_OFF;
+		pmu->stop(event, 0);
+	}
+}
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
@@ -243,7 +283,8 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 				cpu_pmu->save_pm_registers, hcpu, 1);
 		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
 			if (cpu_has_active_perf(cpu))
-				armpmu_update_counters();
+				smp_call_function_single(cpu,
+					 armpmu_hotplug_disable, cpu_pmu, 1);
 			/* Disarm the PMU IRQ before disappearing. */
 			if (cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
@@ -266,7 +307,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 				enable_irq_callback(&irq);
 			}
 			if (cpu_has_active_perf(cpu)) {
-				__get_cpu_var(from_idle) = 1;
+				armpmu_hotplug_enable(cpu_pmu);
 				pmu = &cpu_pmu->pmu;
 				pmu->pmu_enable(pmu);
 			}
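
The hunk above routes the disable work through smp_call_function_single(),
so armpmu_hotplug_disable() runs on the CPU that is about to go offline
rather than on the CPU handling the notifier. Below is a minimal,
hypothetical module sketch of that pattern; the module and callback names
are made up, and only smp_call_function_single() is the real API.

#include <linux/module.h>
#include <linux/smp.h>

/* Hypothetical callback; mirrors the shape of armpmu_hotplug_disable(). */
static void park_counters_on_cpu(void *info)
{
	/* Runs on the target CPU in IPI context, so it must not sleep. */
	pr_info("parking counters on cpu %d\n", smp_processor_id());
}

static int __init park_demo_init(void)
{
	/* wait=1: do not return until the target CPU has run the callback. */
	smp_call_function_single(0, park_counters_on_cpu, NULL, 1);
	return 0;
}

static void __exit park_demo_exit(void)
{
}

module_init(park_demo_init);
module_exit(park_demo_exit);
MODULE_LICENSE("GPL");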
+1 −0
@@ -49,6 +49,7 @@ static char *descriptions =
 	"24 Perf: Add debugfs node to clear PMU\n"
 	"25 msm: perf: reset perf cycle counter on krait\n"
 	"26 Perf: interrupt disable without bringing cpus up\n"
+	"27 Perf: stop counters when going into hotplug\n"
 ;
 
 static ssize_t desc_read(struct file *fp, char __user *buf,
+1 −0
@@ -319,6 +319,7 @@ struct perf_event {
 	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
+	enum perf_event_active_state	hotplug_save_state;
 	unsigned int			attach_state;
 	local64_t			count;
 	atomic64_t			child_count;