Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bbd64559 authored by Stephen Boyd, committed by Will Deacon
Browse files

ARM: perf: support percpu irqs for the CPU PMU



Some CPU PMUs are wired up with one PPI for all the CPUs instead
of with a different SPI for each CPU. Add support for these
devices.

Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 6d0abeca
Loading
Loading
Loading
Loading
+11 −3
Original line number Original line Diff line number Diff line
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>


#include <asm/irq_regs.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/pmu.h>
@@ -295,9 +297,15 @@ validate_group(struct perf_event *event)


static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct arm_pmu *armpmu;
	struct platform_device *plat_device = armpmu->plat_device;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
	struct arm_pmu_platdata *plat;

	if (irq_is_percpu(irq))
		dev = *(void **)dev;
	armpmu = dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);


	if (plat && plat->handle_irq)
	if (plat && plat->handle_irq)
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
+69 −28
Original line number Original line Diff line number Diff line
@@ -25,6 +25,8 @@
#include <linux/platform_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>


#include <asm/cputype.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/irq_regs.h>
@@ -33,6 +35,7 @@
/* Set at runtime when we know what CPU type we are. */
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
static struct arm_pmu *cpu_pmu;


static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -71,6 +74,26 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
	return this_cpu_ptr(&cpu_hw_events);
	return this_cpu_ptr(&cpu_hw_events);
}
}


/*
 * Enable the PMU's per-cpu (PPI) interrupt on the calling CPU and mark
 * that CPU in the PMU's active-IRQ mask.  Runs on every CPU via
 * on_each_cpu(); @data is the struct arm_pmu being brought up.
 */
static void cpu_pmu_enable_percpu_irq(void *data)
{
	struct arm_pmu *pmu = data;

	/* IRQ 0 is the single shared PPI resource of the platform device. */
	enable_percpu_irq(platform_get_irq(pmu->plat_device, 0), IRQ_TYPE_NONE);
	cpumask_set_cpu(smp_processor_id(), &pmu->active_irqs);
}

/*
 * Counterpart of cpu_pmu_enable_percpu_irq(): remove the calling CPU
 * from the PMU's active-IRQ mask, then disable the per-cpu interrupt
 * locally.  Runs on every CPU via on_each_cpu(); @data is the
 * struct arm_pmu being torn down.
 */
static void cpu_pmu_disable_percpu_irq(void *data)
{
	struct arm_pmu *pmu = data;
	int ppi = platform_get_irq(pmu->plat_device, 0);

	/* Clear the mask first so the CPU is no longer advertised as armed. */
	cpumask_clear_cpu(smp_processor_id(), &pmu->active_irqs);
	disable_percpu_irq(ppi);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
{
	int i, irq, irqs;
	int i, irq, irqs;
@@ -78,6 +101,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)


	irqs = min(pmu_device->num_resources, num_possible_cpus());
	irqs = min(pmu_device->num_resources, num_possible_cpus());


	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
				continue;
@@ -86,6 +114,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
				free_irq(irq, cpu_pmu);
				free_irq(irq, cpu_pmu);
		}
		}
	}
	}
}


static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
{
@@ -101,6 +130,16 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
		return -ENODEV;
		return -ENODEV;
	}
	}


	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
		for (i = 0; i < irqs; ++i) {
			err = 0;
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			irq = platform_get_irq(pmu_device, i);
@@ -129,6 +168,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)


			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
		}
	}


	return 0;
	return 0;
}
}
@@ -141,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
		events->events = per_cpu(hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		raw_spin_lock_init(&events->pmu_lock);
		per_cpu(percpu_pmu, cpu) = cpu_pmu;
	}
	}


	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;