Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b390e21 authored by Will Deacon
Browse files

ARM: perf: use cpumask_t to record active IRQs



Commit 5dfc54e0 ("ARM: GIC: avoid routing interrupts to offline CPUs")
prevents the GIC from setting the affinity of an IRQ to a CPU with
id >= nr_cpu_ids. This was previously abused by perf on some platforms
where more IRQs were registered than possible CPUs.

This patch fixes the problem by using a cpumask_t to keep track of the
active (requested) interrupts in perf. The same effect could be achieved
by limiting the number of IRQs to the number of CPUs, but using a mask
instead will be useful for adding extended CPU hotplug support in the
future.

Acked-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent b0e89590
Loading
Loading
Loading
Loading
+31 −33
Original line number Diff line number Diff line
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct arm_pmu {
	enum arm_perf_pmu_ids id;
	cpumask_t	active_irqs;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
@@ -388,6 +389,25 @@ static irqreturn_t armpmu_platform_irq(int irq, void *dev)
	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

/*
 * Undo armpmu_reserve_hardware(): free every IRQ that was actually
 * requested (recorded in armpmu->active_irqs), stop the PMU and drop
 * the device reservation.
 */
static void
armpmu_release_hardware(void)
{
	int cpu, irq;
	int nr_irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (cpu = 0; cpu < nr_irqs; ++cpu) {
		/* Skip IRQs that were never successfully requested. */
		if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
			continue;

		irq = platform_get_irq(pmu_device, cpu);
		if (irq >= 0)
			free_irq(irq, NULL);
	}

	armpmu->stop();
	release_pmu(ARM_PMU_DEVICE_CPU);
}

static int
armpmu_reserve_hardware(void)
{
@@ -401,20 +421,20 @@ armpmu_reserve_hardware(void)
		return err;
	}

	irqs = pmu_device->num_resources;

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;
@@ -422,13 +442,12 @@ armpmu_reserve_hardware(void)
		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue.
		 * continue. Otherwise, continue without this interrupt.
		 */
		err = irq_set_affinity(irq, cpumask_of(i));
		if (err && irqs > 1) {
			pr_err("unable to set irq affinity (irq=%d, cpu=%u)\n",
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				    irq, i);
			break;
			continue;
		}

		err = request_irq(irq, handle_irq,
@@ -437,35 +456,14 @@ armpmu_reserve_hardware(void)
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
		release_pmu(ARM_PMU_DEVICE_CPU);
	}

			armpmu_release_hardware();
			return err;
		}

static void
armpmu_release_hardware(void)
{
	int i, irq;

	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
		cpumask_set_cpu(i, &armpmu->active_irqs);
	}
	armpmu->stop();

	release_pmu(ARM_PMU_DEVICE_CPU);
	return 0;
}

static atomic_t active_events = ATOMIC_INIT(0);