
Commit c7999c6f authored by Peter Zijlstra, committed by Ingo Molnar

perf: Fix PERF_EVENT_IOC_PERIOD migration race

I ran the perf fuzzer, which triggered some WARN()s caused by trying to
stop/restart an event on the wrong CPU.

Use the normal IPI pattern to ensure we run the code on the correct CPU.
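
For context, the "normal IPI pattern" is the idiom perf uses whenever per-event
state must be changed while the event may be running on another CPU: ask the
event's CPU (or the CPU where the owning task is running) to do the update, and
fall back to a locked direct update only once the context is provably inactive.
Below is a condensed sketch of the idiom using the task_function_call() and
cpu_function_call() helpers defined in kernel/events/core.c; do_modify() and
modify_state() are illustrative placeholders, not kernel symbols (in the actual
fix the cross-CPU callback is __perf_event_period()):

/*
 * Condensed sketch of the pattern (illustrative, not the literal patch).
 * modify_state() stands in for the cross-CPU callback.
 */
static int do_modify(struct perf_event *event, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/* CPU-bound event: IPI the event's CPU. */
		cpu_function_call(event->cpu, modify_state, data);
		return 0;
	}

retry:
	/* Task-bound event: run the callback on the CPU the task is on. */
	if (!task_function_call(task, modify_state, data))
		return 0;

	/* The task was not running; re-check under ctx->lock. */
	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		/* It got scheduled back in before we took the lock: retry. */
		raw_spin_unlock_irq(&ctx->lock);
		task = ctx->task;
		goto retry;
	}

	/*
	 * The context is inactive and cannot become active while we hold
	 * ctx->lock, so the event state can safely be updated locally.
	 */
	modify_state(data);
	raw_spin_unlock_irq(&ctx->lock);

	return 0;
}

The reason the update must be routed through this pattern is that
event->pmu->stop()/->start() are only valid on the CPU where the event is
currently scheduled; the old code invoked them from the ioctl path on whatever
CPU the caller happened to be running on.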

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bad7192b ("perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ee9397a6
kernel/events/core.c: +55 −20
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
-	raw_spin_unlock_irq(&ctx->lock);
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
+	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;