
Commit cde8e884 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Sanitize the RCU logic



Simplify things and simply synchronize against the two RCU variants for
PMU unregister -- we don't care about performance; it's module unload,
if anything.

Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b0b2072d
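The "two RCU variants" in the changelog are visible in the hunks below: the task/comm/mmap notification paths switch from rcu_read_lock_sched() to plain rcu_read_lock(), while other perf paths keep traversing the same list under pmus_srcu. Here is a minimal sketch of the two reader flavours, reusing the pmus list and pmus_srcu names from the patch; the walk_* helpers are made-up illustrations, not kernel functions:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

struct pmu {
	struct list_head	entry;
	/* ... per-pmu state ... */
};

static LIST_HEAD(pmus);
static struct srcu_struct pmus_srcu;	/* init_srcu_struct()'d elsewhere */

/* Reader flavour 1: the event-notification paths, after this patch. */
static void walk_pmus_rcu(void)
{
	struct pmu *pmu;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		/* deliver the event through this pmu's contexts */
	}
	rcu_read_unlock();
}

/* Reader flavour 2: sleepable paths hold the SRCU read lock instead. */
static void walk_pmus_srcu(void)
{
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		/* e.g. pick a pmu for a newly created event */
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

An unregister path that waited for only one of these grace periods could free a pmu that the other flavour of reader is still dereferencing, which is why the last hunk makes perf_pmu_unregister() wait for both.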
+9 −8
@@ -3810,7 +3810,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
	struct pmu *pmu;
	int ctxn;

-	rcu_read_lock_sched();
+	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_task_ctx(&cpuctx->ctx, task_event);
@@ -3825,7 +3825,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
@@ -3943,7 +3943,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

-	rcu_read_lock_sched();
+	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
@@ -3956,7 +3956,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
@@ -4126,7 +4126,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

-	rcu_read_lock_sched();
+	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
@@ -4142,7 +4142,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
					vma->vm_flags & VM_EXEC);
		}
	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();

	kfree(buf);
}
@@ -5218,10 +5218,11 @@ void perf_pmu_unregister(struct pmu *pmu)
	mutex_unlock(&pmus_lock);

	/*
-	 * We use the pmu list either under SRCU or preempt_disable,
-	 * synchronize_srcu() implies synchronize_sched() so we're good.
+	 * We dereference the pmu list under both SRCU and regular RCU, so
+	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
+	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	free_pmu_context(pmu->pmu_cpu_context);
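
The old comment's reasoning was that synchronize_srcu() also covered the preempt-disabled (rcu_read_lock_sched()) readers; with the readers above now using plain rcu_read_lock(), the unregister path simply waits for both grace periods before freeing. Read back as straight code, the ordering looks roughly like this sketch (perf_pmu_unregister_sketch() is a made-up name; it reuses the declarations from the sketch above plus pmus_lock from the patch):

#include <linux/mutex.h>

static DEFINE_MUTEX(pmus_lock);		/* serializes updates to the pmus list */

void perf_pmu_unregister_sketch(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);	/* unlink: new readers stop seeing it */
	mutex_unlock(&pmus_lock);

	synchronize_srcu(&pmus_srcu);	/* wait out srcu_read_lock() readers */
	synchronize_rcu();		/* wait out rcu_read_lock() readers  */

	/* only now is it safe to free the pmu's per-cpu state */
}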