Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7465710 authored by Rohit Vaswani, committed by Matt Wagantall
Browse files

arm64: perf: Allow other entities to safely program the pmu



The pmu registers are now per cpu and protected using a spinlock.
If other entities need to program the pmu, they need to make sure
that they are not interfering with the perf driver's configuration.
Encapsulate the locking and unlocking functions and export them.

Change-Id: I42197b22f6be7b02b63e2faf76f2872f3c571973
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
parent fdf8e420
Loading
Loading
Loading
Loading
+8 −1
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#define __ASM_PERF_EVENT_H

#include <linux/irqreturn.h>
#include <linux/spinlock_types.h>

#ifdef CONFIG_HW_PERF_EVENTS
struct pt_regs;
@@ -25,13 +26,19 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev);
void arm64_pmu_irq_handled_externally(void);
void arm64_pmu_lock(raw_spinlock_t *lock, unsigned long *flags);
void arm64_pmu_unlock(raw_spinlock_t *lock, unsigned long *flags);
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#else
/*
 * Stub used when CONFIG_HW_PERF_EVENTS is disabled: always reports the
 * interrupt as handled. Both parameters are ignored.
 */
static inline irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	return IRQ_HANDLED;
}
void arm64_pmu_irq_handled_externally(void) { }
static inline void arm64_pmu_irq_handled_externally(void) { }
/* No-op stub when CONFIG_HW_PERF_EVENTS is disabled. */
static inline void arm64_pmu_lock(raw_spinlock_t *lock, unsigned long *flags)
{ }
/* No-op stub when CONFIG_HW_PERF_EVENTS is disabled. */
static inline void arm64_pmu_unlock(raw_spinlock_t *lock, unsigned long *flags)
{ }
#endif

#endif
+33 −8
Original line number Diff line number Diff line
@@ -47,6 +47,7 @@ static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_m
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
static struct pmu_hw_events *armpmu_get_cpu_events(void);

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
@@ -57,6 +58,30 @@ void arm64_pmu_irq_handled_externally(void)
	msm_pmu_use_irq = 0;
}

/*
 * Acquire a PMU spinlock with interrupts saved and disabled.
 *
 * @lock:  the spinlock to take; if NULL, the current CPU's per-cpu
 *         pmu_lock is taken instead.
 * @flags: storage for the saved IRQ flags; pass the same pointer to
 *         the matching arm64_pmu_unlock() call.
 */
void arm64_pmu_lock(raw_spinlock_t *lock, unsigned long *flags)
{
	/* Default to this CPU's perf event lock when none was supplied. */
	if (!lock)
		lock = &armpmu_get_cpu_events()->pmu_lock;

	raw_spin_lock_irqsave(lock, *flags);
}

/*
 * Release a PMU spinlock taken by arm64_pmu_lock() and restore the
 * saved IRQ flags.
 *
 * @lock:  the spinlock to release; if NULL, the current CPU's per-cpu
 *         pmu_lock is released instead.
 * @flags: the IRQ flags previously saved by arm64_pmu_lock().
 */
void arm64_pmu_unlock(raw_spinlock_t *lock, unsigned long *flags)
{
	/* Default to this CPU's perf event lock when none was supplied. */
	if (!lock)
		lock = &armpmu_get_cpu_events()->pmu_lock;

	raw_spin_unlock_irqrestore(lock, *flags);
}

int
armpmu_get_max_events(void)
{
@@ -1034,7 +1059,7 @@ static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	arm64_pmu_lock(&events->pmu_lock, &flags);

	/*
	 * Disable counter
@@ -1056,7 +1081,7 @@ static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	arm64_pmu_unlock(&events->pmu_lock, &flags);
}

static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1067,7 +1092,7 @@ static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	arm64_pmu_lock(&events->pmu_lock, &flags);

	/*
	 * Disable counter
@@ -1079,7 +1104,7 @@ static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	arm64_pmu_unlock(&events->pmu_lock, &flags);
}

irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
@@ -1149,10 +1174,10 @@ static void armv8pmu_start(void)
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	arm64_pmu_lock(&events->pmu_lock, &flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	arm64_pmu_unlock(&events->pmu_lock, &flags);
}

static void armv8pmu_stop(void)
@@ -1160,10 +1185,10 @@ static void armv8pmu_stop(void)
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	arm64_pmu_lock(&events->pmu_lock, &flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	arm64_pmu_unlock(&events->pmu_lock, &flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,