Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ff90fe97 authored by Neil Leeder
Browse files

Perf: arm64: stop counters when going into hotplug



Hotplug disables the pmu irq, but if counters are
running in the window before the CPU is hotplugged off
they can overflow and generate an interrupt. Because the
interrupt is disabled, this prevents the cpu from going
down.

Events are stopped during hotplug processing. However,
perf is hooked into the timer tick, and restarts enabled
events on every tick, even if they were stopped. Change
the event state to OFF to prevent this.

CPUs can still be power-collapsed while being hotplugged
off, but hotplug processing will save and restore the correct
state, so don't process power-collapse save/restore while
hotplug is in progress.

Processing for stop reads the counters, so a separate call
is no longer needed. Start processing re-enables events so
the from_idle flag is not needed during pmu_enable.

Change-Id: I6a7f5b04955ebba8c4d76547f24e2be4071d7539
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
parent d42a3dae
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@ static char *descriptions =
	"11 Perf: arm64: Refine disable/enable in tracecounters\n"
	"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
	"13 Perf: arm64: restore registers after reset\n"
	"14 Perf: arm64: stop counters when going into hotplug\n"
;

static ssize_t desc_read(struct file *fp, char __user *buf,
+51 −2
Original line number Diff line number Diff line
@@ -52,6 +52,7 @@ static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_m
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
static DEFINE_PER_CPU(u32, from_idle);
static DEFINE_PER_CPU(u32, armv8_pm_pmuserenr);
static DEFINE_PER_CPU(u32, hotplug_down);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
static struct pmu_hw_events *armpmu_get_cpu_events(void);
@@ -1462,6 +1463,48 @@ static void armpmu_update_counters(void *x)
	}
}

static void armpmu_hotplug_enable(void *parm_pmu)
{
	struct arm_pmu *armpmu = parm_pmu;
	struct pmu *pmu = &(armpmu->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int idx;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = hw_events->events[idx];
		if (!event)
			continue;

		event->state = event->hotplug_save_state;
		pmu->start(event, 0);
	}
	per_cpu(hotplug_down, smp_processor_id()) = 0;
}

static void armpmu_hotplug_disable(void *parm_pmu)
{
	struct arm_pmu *armpmu = parm_pmu;
	struct pmu *pmu = &(armpmu->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int idx;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = hw_events->events[idx];
		if (!event)
			continue;

		event->hotplug_save_state = event->state;
		/*
		 * Prevent timer tick handler perf callback from enabling
		 * this event and potentially generating an interrupt
		 * before the CPU goes down.
		 */
		event->state = PERF_EVENT_STATE_OFF;
		pmu->stop(event, 0);
	}
	per_cpu(hotplug_down, smp_processor_id()) = 1;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
@@ -1479,6 +1522,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
	int ret = NOTIFY_DONE;

	if ((masked_action != CPU_DOWN_PREPARE) &&
	    (masked_action != CPU_DOWN_FAILED) &&
	    (masked_action != CPU_STARTING))
		return NOTIFY_DONE;

@@ -1496,7 +1540,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
			if (cpu_has_active_perf(cpu))
				smp_call_function_single(cpu,
					armpmu_update_counters, NULL, 1);
					armpmu_hotplug_disable, cpu_pmu, 1);
			/* Disarm the PMU IRQ before disappearing. */
			if (msm_pmu_use_irq && cpu_pmu->plat_device) {
				irq = platform_get_irq(cpu_pmu->plat_device, 0);
@@ -1507,6 +1551,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
		break;

	case CPU_STARTING:
	case CPU_DOWN_FAILED:
		/* Reset PMU to clear counters for ftrace buffer */
		if (cpu_pmu->reset)
			cpu_pmu->reset(NULL);
@@ -1519,7 +1564,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
				armpmu_enable_percpu_irq(&irq);
			}
			if (cpu_has_active_perf(cpu)) {
				__get_cpu_var(from_idle) = 1;
				armpmu_hotplug_enable(cpu_pmu);
				pmu = &cpu_pmu->pmu;
				pmu->pmu_enable(pmu);
			}
@@ -1543,6 +1588,10 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	if (!cpu_pmu)
		return NOTIFY_OK;

	/* If the cpu is going down, don't do anything here */
	if (per_cpu(hotplug_down, cpu))
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		if (cpu_pmu->save_pm_registers)