
Commit 0017960f authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Collapse common IPI pattern



Various functions implement the same pattern to send IPIs to an
event's CPU. Collapse the easy ones into a common helper function to
reduce duplication.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 42a0789b
+76 −104
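Editorial note (not part of the commit): each converted site below previously open-coded the same IPI-or-retry dance. Condensed, the per-caller shape before and after looks roughly like this, where __perf_op/___perf_op stand in for the real callbacks:

	/* Before: repeated in each caller (condensed sketch) */
	if (!task) {
		cpu_function_call(event->cpu, __perf_op, data);
		return;
	}
retry:
	if (!task_function_call(task, __perf_op, data))
		return;
	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		task = ctx->task;	/* may have changed; retry */
		goto retry;
	}
	/* task not running: do the work directly under ctx->lock */
	___perf_op(data);
	raw_spin_unlock_irq(&ctx->lock);

	/* After: one line per caller */
	event_function_call(event, __perf_op, ___perf_op, data);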
@@ -126,6 +126,37 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
	return data.ret;
}

+static void event_function_call(struct perf_event *event,
+				int (*active)(void *),
+				void (*inactive)(void *),
+				void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task = ctx->task;
+
+	if (!task) {
+		cpu_function_call(event->cpu, active, data);
+		return;
+	}
+
+again:
+	if (!task_function_call(task, active, data))
+		return;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		/*
+		 * Reload the task pointer, it might have been changed by
+		 * a concurrent perf_event_context_sched_out().
+		 */
+		task = ctx->task;
+		raw_spin_unlock_irq(&ctx->lock);
+		goto again;
+	}
+	inactive(data);
+	raw_spin_unlock_irq(&ctx->lock);
+}
+
#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
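As the new helper above shows, the contract is: the active callback runs via IPI on the event's CPU while the context is live, and the inactive callback runs locally under ctx->lock once the task is known not to be running. A hypothetical future caller (function names invented for illustration, not part of this patch) would only need to supply the pair:

static int __perf_frob(void *info)
{
	/* active path: runs on the event's CPU via IPI */
	return 0;
}

static void ___perf_frob(void *info)
{
	/* inactive path: runs locally with ctx->lock held */
}

static void perf_frob(struct perf_event *event, void *data)
{
	event_function_call(event, __perf_frob, ___perf_frob, data);
}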
@@ -1629,6 +1660,17 @@ struct remove_event {
	bool detach_group;
};

+static void ___perf_remove_from_context(void *info)
+{
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
+	struct perf_event_context *ctx = event->ctx;
+
+	if (re->detach_group)
+		perf_group_detach(event);
+	list_del_event(event, ctx);
+}
+
/*
 * Cross CPU call to remove a performance event
 *
@@ -1656,7 +1698,6 @@ static int __perf_remove_from_context(void *info)
	return 0;
}

-
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
@@ -1673,7 +1714,6 @@ static int __perf_remove_from_context(void *info)
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
@@ -1681,44 +1721,8 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group

	lockdep_assert_held(&ctx->mutex);

-	if (!task) {
-		/*
-		 * Per cpu events are removed via an smp call. The removal can
-		 * fail if the CPU is currently offline, but in that case we
-		 * already called __perf_remove_from_context from
-		 * perf_event_exit_cpu.
-		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_remove_from_context, &re))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to remove the event, us
-	 * holding the ctx->lock ensures the task won't get scheduled in.
-	 */
-	if (detach_group)
-		perf_group_detach(event);
-	list_del_event(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_remove_from_context,
+			    ___perf_remove_from_context, &re);
}

/*
@@ -2067,6 +2071,18 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

+static void ___perf_install_in_context(void *info)
+{
+	struct perf_event *event = info;
+	struct perf_event_context *ctx = event->ctx;
+
+	/*
+	 * Since the task isn't running, its safe to add the event, us holding
+	 * the ctx->lock ensures the task won't get scheduled in.
+	 */
+	add_event_to_ctx(event, ctx);
+}
+
/*
 * Cross CPU call to install and enable a performance event
 *
@@ -2143,48 +2159,14 @@ perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
-	struct task_struct *task = ctx->task;
-
	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;
	if (event->cpu != -1)
		event->cpu = cpu;

-	if (!task) {
-		/*
-		 * Per cpu events are installed via an smp call and
-		 * the install is always successful.
-		 */
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_install_in_context, event))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to add the event, us holding
-	 * the ctx->lock ensures the task won't get scheduled in.
-	 */
-	add_event_to_ctx(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_install_in_context,
+			    ___perf_install_in_context, event);
}

/*
@@ -4154,6 +4136,22 @@ struct period_event {
	u64 value;
};

+static void ___perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	u64 value = pe->value;
+
+	if (event->attr.freq) {
+		event->attr.sample_freq = value;
+	} else {
+		event->attr.sample_period = value;
+		event->hw.sample_period = value;
+	}
+
+	local64_set(&event->hw.period_left, 0);
+}
+
static int __perf_event_period(void *info)
{
	struct period_event *pe = info;
@@ -4190,8 +4188,6 @@ static int __perf_event_period(void *info)
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct period_event pe = { .event = event, };
-	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task;
	u64 value;

	if (!is_sampling_event(event))
@@ -4206,34 +4202,10 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
		return -EINVAL;

-	task = ctx->task;
	pe.value = value;

-	if (!task) {
-		cpu_function_call(event->cpu, __perf_event_period, &pe);
-		return 0;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_event_period, &pe))
-		return 0;
-
-	raw_spin_lock_irq(&ctx->lock);
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		task = ctx->task;
-		goto retry;
-	}
-
-	if (event->attr.freq) {
-		event->attr.sample_freq = value;
-	} else {
-		event->attr.sample_period = value;
-		event->hw.sample_period = value;
-	}
-
-	local64_set(&event->hw.period_left, 0);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_event_period,
+			    ___perf_event_period, &pe);

	return 0;
}
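For reference, perf_event_period() above is the backend of the PERF_EVENT_IOC_PERIOD ioctl. A minimal user-space sketch (illustration only, not part of this commit):

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Update the sample period of an already-opened perf event fd;
 * the kernel services this via perf_event_period() above. */
static int set_sample_period(int perf_fd, uint64_t period)
{
	return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
}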