Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 991b7c26 authored by Xu Yang's avatar Xu Yang Committed by Greg Kroah-Hartman
Browse files

perf/imx_ddr: don't enable counter0 if none of 4 counters are used



[ Upstream commit f4e2bd91ddf5e8543cbe7ad80b3fba3d2dc63fa3 ]

In the current driver, counter0 will be enabled after ddr_perf_pmu_enable()
is called even though none of the 4 counters are used. This causes
counter0 to continue counting until ddr_perf_pmu_disable() is called. If
the pmu is never disabled, the pmu interrupt will be asserted from time
to time because counter0 will overflow and the irq handler will clear it.
This is not the expected behavior. This patch does not enable counter0 if
none of the 4 counters are used.

Fixes: 9a66d36c ("drivers/perf: imx_ddr: Add DDR performance counter support to perf")
Signed-off-by: default avatarXu Yang <xu.yang_2@nxp.com>
Reviewed-by: default avatarFrank Li <Frank.Li@nxp.com>
Link: https://lore.kernel.org/r/20230811015438.1999307-2-xu.yang_2@nxp.com


Signed-off-by: default avatarWill Deacon <will@kernel.org>
Signed-off-by: default avatarSasha Levin <sashal@kernel.org>
parent 07415be1
Loading
Loading
Loading
Loading
+9 −15
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ struct ddr_pmu {
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
	int active_counter;
};

static ssize_t ddr_perf_cpumask_show(struct device *dev,
@@ -353,6 +354,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	if (!pmu->active_counter++)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
			EVENT_CYCLES_COUNTER, true);

	hwc->state = 0;
}

@@ -407,6 +412,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	if (!--pmu->active_counter)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
			EVENT_CYCLES_COUNTER, false);

	hwc->state |= PERF_HES_STOPPED;
}

@@ -425,25 +434,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)

/*
 * pmu::pmu_enable callback. Pre-patch behavior: unconditionally turns on
 * the dedicated cycle counter (counter0) whenever it is not already owned
 * by an explicit cycles event — even if no events are scheduled at all.
 * NOTE(review): this is exactly what the commit removes; counter0 enabling
 * is moved to ddr_perf_event_start(), gated on the active_counter count.
 */
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Enable the cycle counter if no cycles event already claims it */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      true);
}

/*
 * pmu::pmu_disable callback. Pre-patch counterpart of ddr_perf_pmu_enable():
 * stops the dedicated cycle counter (counter0) unless an explicit cycles
 * event owns it. NOTE(review): removed by this commit; counter0 disabling
 * is moved to ddr_perf_event_stop(), gated on active_counter reaching zero.
 */
static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Disable the cycle counter if no cycles event already claims it */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,