Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2cdbfd65 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Greg Kroah-Hartman
Browse files

perf/core: Fix endless multiplex timer



commit 90c91dfb86d0ff545bd329d3ddd72c147e2ae198 upstream.

Kan and Andi reported that we fail to kill rotation when the flexible
events go empty, but the context does not.

Fixes: fd7d55172d1e ("perf/cgroups: Don't rotate events for cgroups unnecessarily")
Reported-by: Andi Kleen <ak@linux.intel.com>
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Wen Yang <wenyang@linux.alibaba.com>
Link: https://lkml.kernel.org/r/20200305123851.GX2596@hirez.programming.kicks-ass.net


Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9a2dc0e6
Loading
Loading
Loading
Loading
+14 −6
Original line number Diff line number Diff line
@@ -2086,6 +2086,7 @@ __perf_remove_from_context(struct perf_event *event,

	if (!ctx->nr_events && ctx->is_active) {
		ctx->is_active = 0;
		ctx->rotate_necessary = 0;
		if (ctx->task) {
			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
			cpuctx->task_ctx = NULL;
@@ -2952,12 +2953,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
	if (!ctx->nr_active || !(is_active & EVENT_ALL))
		return;

	/*
	 * If we had been multiplexing, no rotations are necessary, now no events
	 * are active.
	 */
	ctx->rotate_necessary = 0;

	perf_pmu_disable(ctx->pmu);
	if (is_active & EVENT_PINNED) {
		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -2967,6 +2962,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
	if (is_active & EVENT_FLEXIBLE) {
		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
			group_sched_out(event, cpuctx, ctx);

		/*
		 * Since we cleared EVENT_FLEXIBLE, also clear
		 * rotate_necessary; it will be reset by
		 * ctx_flexible_sched_in() when needed.
		 */
		ctx->rotate_necessary = 0;
	}
	perf_pmu_enable(ctx->pmu);
}
@@ -3705,6 +3707,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
				      typeof(*event), group_node);
	}

	/*
	 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
	 * finds there are unschedulable events, it will set it again.
	 */
	ctx->rotate_necessary = 0;

	return event;
}