Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7057c2b4 authored by Neil Leeder
Browse files

Perf: interrupt disable without bringing cpus up



Refactor the interrupt disabling so that interrupts
are disabled when a cpu is hotplugged out — even if
there are no perf events on that cpu — while it still
holds the PMU irq. This means that when perf shuts down
it no longer has to bring an offline cpu back online
just to disable that cpu's irq.

Change-Id: Id0e052e4386173e107abe0eb6e80cb130d92ec24
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
parent c1a5a21e
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -25,6 +25,12 @@ enum arm_pmu_type {
	ARM_NUM_PMU_DEVICES,
};

/*
 * Lifecycle state of the PMU, consulted by the cpu hotplug notifier to
 * decide whether a cpu's PMU irq should be armed/disarmed as it goes
 * up or down (see cpu_pmu_notify()).
 */
enum arm_pmu_state {
	ARM_PMU_STATE_OFF       = 0,	/* hardware released; irqs freed */
	ARM_PMU_STATE_GOING_DOWN,	/* teardown in progress: do not arm irqs */
	ARM_PMU_STATE_RUNNING,		/* hardware reserved: arm irq on cpu-up */
};

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
@@ -85,6 +91,7 @@ struct arm_pmu {
	cpumask_t	active_irqs;
	char		*name;
	int		num_events;
	int             pmu_state;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
+7 −0
Original line number Diff line number Diff line
@@ -354,8 +354,14 @@ armpmu_generic_free_irq(int irq, void *dev_id)
/*
 * Release the PMU irqs and drop the runtime-PM usage count on the
 * platform device. pmu_state steps through GOING_DOWN -> OFF so that
 * the cpu hotplug notifier does not (re)arm the PMU irq on a cpu that
 * comes online while this teardown is in flight.
 */
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	/*
	 * If a cpu comes online during this function, do not enable its irq.
	 * If a cpu goes offline, it should disable its irq.
	 */
	armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
	armpmu->free_irq(armpmu);
	/* Release runtime-PM reference on the PMU platform device;
	 * presumably balances a get in armpmu_reserve_hardware() — TODO confirm. */
	pm_runtime_put_sync(&armpmu->plat_device->dev);
	armpmu->pmu_state = ARM_PMU_STATE_OFF;
}

static int
@@ -386,6 +392,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		armpmu_release_hardware(armpmu);
		return err;
	}
	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;

	return 0;
}
+41 −73
Original line number Diff line number Diff line
@@ -223,68 +223,57 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
	int irq;
	struct pmu *pmu;
	int cpu = (int)hcpu;
	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
	int ret = NOTIFY_DONE;

	switch ((action & ~CPU_TASKS_FROZEN)) {
	case CPU_DOWN_PREPARE:
		if (cpu_pmu && cpu_pmu->save_pm_registers)
			smp_call_function_single(cpu,
						 cpu_pmu->save_pm_registers,
						 hcpu, 1);
		break;
	case CPU_STARTING:
		if (cpu_pmu && cpu_pmu->reset)
			cpu_pmu->reset(cpu_pmu);
		if (cpu_pmu && cpu_pmu->restore_pm_registers)
			smp_call_function_single(cpu,
						 cpu_pmu->restore_pm_registers,
						 hcpu, 1);
	}
	if ((masked_action != CPU_DOWN_PREPARE) &&
	    (masked_action != CPU_STARTING))
		return NOTIFY_DONE;

	if (cpu_has_active_perf((int)hcpu)) {
		switch ((action & ~CPU_TASKS_FROZEN)) {
	if (masked_action == CPU_STARTING)
		ret = NOTIFY_OK;

	if (!cpu_pmu)
		return ret;

	switch (masked_action) {
	case CPU_DOWN_PREPARE:
		if (cpu_pmu->save_pm_registers)
			smp_call_function_single(cpu,
				cpu_pmu->save_pm_registers, hcpu, 1);
		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
			if (cpu_has_active_perf(cpu))
				armpmu_update_counters();
			/*
			 * If this is on a multicore CPU, we need
			 * to disarm the PMU IRQ before disappearing.
			 */
			if (cpu_pmu &&
				cpu_pmu->plat_device->dev.platform_data) {
			/* Disarm the PMU IRQ before disappearing. */
			if (cpu_pmu->plat_device) {
				irq = platform_get_irq(cpu_pmu->plat_device, 0);
				smp_call_function_single((int)hcpu,
				smp_call_function_single(cpu,
					 disable_irq_callback, &irq, 1);
			}
			return NOTIFY_DONE;
		}
		break;

	case CPU_STARTING:
			/*
			 * If this is on a multicore CPU, we need
			 * to arm the PMU IRQ before appearing.
			 */
			if (cpu_pmu &&
				cpu_pmu->plat_device->dev.platform_data) {
		/* Reset PMU to clear counters for ftrace buffer */
		if (cpu_pmu->reset)
			cpu_pmu->reset(cpu_pmu);
		if (cpu_pmu->restore_pm_registers)
			cpu_pmu->restore_pm_registers(hcpu);
		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
			/* Arm the PMU IRQ before appearing. */
			if (cpu_pmu->plat_device) {
				irq = platform_get_irq(cpu_pmu->plat_device, 0);
				enable_irq_callback(&irq);
			}

			if (cpu_pmu) {
			if (cpu_has_active_perf(cpu)) {
				__get_cpu_var(from_idle) = 1;
				pmu = &cpu_pmu->pmu;
				pmu->pmu_enable(pmu);
				return NOTIFY_OK;
			}
		default:
			return NOTIFY_DONE;
		}
		break;
	}



	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	return NOTIFY_OK;
	return ret;
}

static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
@@ -309,16 +298,17 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,

	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (cpu_has_active_perf((int)v) && cpu_pmu->reset)
			cpu_pmu->reset(NULL);
		if (cpu_pmu && cpu_pmu->restore_pm_registers)
			cpu_pmu->restore_pm_registers(
				(void *)smp_processor_id());
		if (cpu_has_active_perf((int)v) && cpu_pmu->reset) {
		if (cpu_pmu && cpu_has_active_perf((int)v)) {
			/*
			 * Flip this bit so armpmu_enable knows it needs
			 * to re-enable active counters.
			 */
			__get_cpu_var(from_idle) = 1;
			cpu_pmu->reset(NULL);
			pmu = &cpu_pmu->pmu;
			pmu->pmu_enable(pmu);
		}
@@ -435,37 +425,15 @@ static int multicore_request_irq(int irq, irq_handler_t *handle_irq, void *dev_i
	return err;
}

#ifdef CONFIG_SMP
/*
 * Bring @cpu online if it is not already, so a caller can then operate
 * on that cpu (e.g. disable its PMU irq via smp_call_function_single).
 *
 * Returns 0 on success — including the already-online case — or the
 * error code from cpu_up(), which is also logged.
 *
 * __ref: this intentionally calls cpu_up() (init-section-referencing)
 * from non-init code without a section-mismatch warning.
 */
static __ref int armpmu_cpu_up(int cpu)
{
	int ret = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		ret = cpu_up(cpu);
		if (ret)
			pr_err("Failed to bring up CPU: %d, ret: %d\n",
			       cpu, ret);
	}
	return ret;
}
#else
/* !CONFIG_SMP: the only cpu is always online, so this is a no-op. */
static inline int armpmu_cpu_up(int cpu)
{
	return 0;
}
#endif

static void __ref multicore_free_irq(int irq, void *dev_id)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);

	if ((irq >= 0) && desc) {
		for_each_cpu(cpu, desc->percpu_enabled) {
			if (!armpmu_cpu_up(cpu))
		for_each_cpu(cpu, desc->percpu_enabled)
			smp_call_function_single(cpu,
						disable_irq_callback, &irq, 1);
		}
		free_percpu_irq(irq, &pmu_irq_cookie);
	}
}
+1 −0
Original line number Diff line number Diff line
@@ -48,6 +48,7 @@ static char *descriptions =
	"23 Perf: Add event type check in hrtimer hotplug fix\n"
	"24 Perf: Add debugfs node to clear PMU\n"
	"25 msm: perf: reset perf cycle counter on krait\n"
	"26 Perf: interrupt disable without bringing cpus up\n"
;

static ssize_t desc_read(struct file *fp, char __user *buf,