Commit 04dc2dbb authored by Peter Zijlstra, committed by Ingo Molnar

perf: Remove task_ctx_sched_in()



Make task_ctx_sched_*() imply EVENT_ALL, since anything less will not
actually have scheduled the task in/out at all.

Since there's no site that schedules all of a task in (due to the
interleave with flexible cpuctx), we can remove this function.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.817893268@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent facc4307
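
For context, here is a minimal standalone sketch (not the kernel code itself) of why task_ctx_sched_out() can hard-code EVENT_ALL: cpuctx->task_ctx only tracks a fully scheduled-in task context, so a partial sched-out (e.g. EVENT_FLEXIBLE only, as perf_rotate_context() does) must go through ctx_sched_out() directly and leave task_ctx alone. Only the enum values mirror the real perf core of this era; the struct layouts, the explicit cpuctx parameter (standing in for __get_cpu_context()), and the main() harness are invented for illustration.

#include <stdio.h>

/* Hypothetical mock; only the flag values match the real event_type_t. */
enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

struct perf_event_context {
	int pinned_active;	/* pinned groups currently scheduled in   */
	int flexible_active;	/* flexible groups currently scheduled in */
};

struct perf_cpu_context {
	/* set only while the task's context is fully scheduled in */
	struct perf_event_context *task_ctx;
};

/* Partial sched-out: callers wanting only one class use this directly. */
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	if (event_type & EVENT_PINNED)
		ctx->pinned_active = 0;
	if (event_type & EVENT_FLEXIBLE)
		ctx->flexible_active = 0;
}

/*
 * After this commit task_ctx_sched_out() takes no event_type: clearing
 * cpuctx->task_ctx is only correct once *everything* has been scheduled
 * out, so EVENT_ALL is implied.  (The real function looks up cpuctx via
 * __get_cpu_context(ctx); it is passed in here to keep the mock standalone.)
 */
static void task_ctx_sched_out(struct perf_event_context *ctx,
			       struct perf_cpu_context *cpuctx)
{
	if (cpuctx->task_ctx != ctx)
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}

int main(void)
{
	struct perf_event_context ctx = { .pinned_active = 1, .flexible_active = 1 };
	struct perf_cpu_context cpuctx = { .task_ctx = &ctx };

	/*
	 * Rotation-style partial sched-out (flexible only): pinned events
	 * stay scheduled in, so task_ctx must not be cleared -- which is
	 * why perf_rotate_context() now calls ctx_sched_out() directly.
	 */
	ctx_sched_out(&ctx, &cpuctx, EVENT_FLEXIBLE);
	printf("flexible-only sched-out: pinned=%d flexible=%d task_ctx=%s\n",
	       ctx.pinned_active, ctx.flexible_active,
	       cpuctx.task_ctx ? "set" : "NULL");

	/* Full sched-out (exec, task exit): everything goes, task_ctx drops. */
	task_ctx_sched_out(&ctx, &cpuctx);
	printf("full sched-out:          pinned=%d flexible=%d task_ctx=%s\n",
	       ctx.pinned_active, ctx.flexible_active,
	       cpuctx.task_ctx ? "set" : "NULL");

	return 0;
}
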
+6 −20
@@ -1979,8 +1979,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 		perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1990,7 +1989,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2098,19 +2097,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2363,7 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
@@ -2371,7 +2357,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
 	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
 
 done:
 	if (remove)
@@ -2435,7 +2421,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_cgroup_sched_out(current);
 
 	raw_spin_lock(&ctx->lock);
-	task_ctx_sched_out(ctx, EVENT_ALL);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -6794,7 +6780,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get