Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fdf8e420 authored by Rohit Vaswani's avatar Rohit Vaswani Committed by Matt Wagantall
Browse files

arm64: perf_events: Allow edac to use the pmu interrupt



In the event that the edac driver needs to use the percpu
pmu interrupt, the PMU driver should relinquish
control over the interrupt. The edac driver takes the
responsibility of invoking the pmu interrupt handler
if a non-edac pmu event triggers the pmu interrupt.
This is synchronized using a global variable.

Change-Id: I21274377cb51b890e0bfb2f05d601c49dd0b9f73
Signed-off-by: default avatarRohit Vaswani <rvaswani@codeaurora.org>
parent bf1c006e
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -17,11 +17,21 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H

#include <linux/irqreturn.h>

#ifdef CONFIG_HW_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
/* PMU overflow handler; exported so the EDAC driver can chain to it. */
irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev);
/* Tell the PMU driver to relinquish the per-cpu PMU interrupt. */
void arm64_pmu_irq_handled_externally(void);
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#else
/*
 * Stubs for !CONFIG_HW_PERF_EVENTS. Both must be static inline:
 * a plain function definition in a header would be emitted in
 * every translation unit that includes it, causing multiple-
 * definition link failures.
 */
static inline irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	return IRQ_HANDLED;
}
static inline void arm64_pmu_irq_handled_externally(void) { }
#endif

#endif
+20 −3
Original line number Diff line number Diff line
@@ -50,6 +50,12 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
static int msm_pmu_use_irq = 1;

/*
 * Relinquish the PMU driver's claim on the per-cpu PMU interrupt.
 *
 * Clears the file-scope msm_pmu_use_irq flag, which gates the
 * request/free of the percpu IRQ elsewhere in this file. Per the
 * commit message, this is intended to be called by the EDAC driver
 * when it takes over the interrupt — presumably before the PMU
 * reserves hardware; NOTE(review): no synchronization is visible
 * here, so ordering vs. armpmu_reserve_hardware should be confirmed.
 */
void arm64_pmu_irq_handled_externally(void)
{
	msm_pmu_use_irq = 0;
}

int
armpmu_get_max_events(void)
@@ -386,8 +392,10 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
		return;

	if (irq_is_percpu(irq)) {
		if (msm_pmu_use_irq) {
			on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
			free_percpu_irq(irq, &cpu_hw_events);
		}
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
@@ -430,6 +438,11 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		return -ENODEV;
	}

	if (!msm_pmu_use_irq) {
		pr_info("EDAC driver requests for the PMU interrupt\n");
		goto out;
	}

	if (irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, armpmu->handle_irq,
				"arm-pmu", &cpu_hw_events);
@@ -474,6 +487,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		}
	}

out:
	return 0;
}

@@ -1068,7 +1082,7 @@ static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
@@ -1247,6 +1261,9 @@ static u32 __init armv8pmu_read_num_pmnc_events(void)
	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

#ifdef CONFIG_EDAC_CORTEX_ARM64
	nb_cnt -= 1;
#endif
	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}