Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a737823d authored by Will Deacon, committed by Russell King
Browse files

ARM: 6835/1: perf: ensure overflows aren't missed due to IRQ latency



If a counter overflows during a perf stat profiling run it may overtake
the last known value of the counter:

    0        prev     new                0xffffffff
    |----------|-------|----------------------|

In this case, the number of events that have occurred is
(0xffffffff - prev) + new. Unfortunately, the event update code will
not realise an overflow has occurred and will instead report the event
delta as (new - prev) which may be considerably smaller than the real
count.

This patch adds an extra argument to armpmu_event_update which indicates
whether or not an overflow has occurred. If an overflow has occurred
then we use the maximum period of the counter to calculate the elapsed
events.

Acked-by: Jamie Iles <jamie@jamieiles.com>
Reported-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 574b69cb
Loading
Loading
Loading
Loading
+11 −8
Original line number Diff line number Diff line
@@ -205,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
		    int idx, int overflow)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	u64 delta;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
@@ -219,8 +217,13 @@ armpmu_event_update(struct perf_event *event,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;
	new_raw_count &= armpmu->max_period;
	prev_raw_count &= armpmu->max_period;

	if (overflow)
		delta = armpmu->max_period - prev_raw_count + new_raw_count;
	else
		delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
@@ -237,7 +240,7 @@ armpmu_read(struct perf_event *event)
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
	armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
@@ -255,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		armpmu_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
+1 −1
Original line number Diff line number Diff line
@@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;
+1 −1
Original line number Diff line number Diff line
@@ -782,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;
+2 −2
Original line number Diff line number Diff line
@@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;
@@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;