Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 71e2d282 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)



Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result
in a double disable, cure this by using x86_pmu_{start,stop} for
throttle/unthrottle and teach x86_pmu_stop() to check ->active_mask.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c08053e6
Loading
Loading
Loading
Loading
+6 −14
Original line number Diff line number Diff line
@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)

static void x86_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->events[hwc->idx] != event))
		return;

	x86_pmu.enable(event);
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	__clear_bit(idx, cpuc->active_mask);
	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu.disable(event);
			x86_pmu_stop(event);
	}

	if (handled)
+1 −1
Original line number Diff line number Diff line
@@ -774,7 +774,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(event);
			x86_pmu_stop(event);
	}

	intel_pmu_ack_status(ack);