Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db24d33e authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Change and simplify ctx::is_active semantics



Instead of tracking if a context is active or not, track which events
of the context are active. By making it a bitmask of
EVENT_PINNED|EVENT_FLEXIBLE we can simplify some of the scheduling
routines since it can avoid adding events that are already active.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.930282378@chello.nl


Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2c29ef0f
Loading
Loading
Loading
Loading
+8 −6
Original line number Diff line number Diff line
@@ -1763,8 +1763,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;
	int is_active = ctx->is_active;

	ctx->is_active = 0;
	ctx->is_active &= ~event_type;
	if (likely(!ctx->nr_events))
		return;

@@ -1774,12 +1775,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
		return;

	perf_pmu_disable(ctx->pmu);
	if (event_type & EVENT_PINNED) {
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
@@ -2058,8 +2059,9 @@ ctx_sched_in(struct perf_event_context *ctx,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active = 1;
	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

@@ -2070,11 +2072,11 @@ ctx_sched_in(struct perf_event_context *ctx,
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}