Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit afedadf2 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf_counter: Optimize sched in/out of counters



Avoid a function call for !group counters by directly calling the counter
function.

[ Impact: micro-optimize the code ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.511933670@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b986d7ec
Loading
Loading
Loading
Loading
+19 −6
Original line number Diff line number Diff line
@@ -826,9 +826,13 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
@@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
@@ -932,11 +940,16 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);