Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9cc96b0a authored by Ingo Molnar
Browse files

Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents d64fe8e6 12ca6ad2
Loading
Loading
Loading
Loading
+6 −29
Original line number | Diff line number | Diff line
@@ -3136,15 +3136,16 @@ static int event_enable_on_exec(struct perf_event *event,
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
static void perf_event_enable_on_exec(int ctxn)
{
	struct perf_event_context *clone_ctx = NULL;
	struct perf_event_context *ctx, *clone_ctx = NULL;
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	ctx = current->perf_event_ctxp[ctxn];
	if (!ctx || !ctx->nr_events)
		goto out;

@@ -3187,17 +3188,11 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)

void perf_event_exec(void)
{
	struct perf_event_context *ctx;
	int ctxn;

	rcu_read_lock();
	for_each_task_context_nr(ctxn) {
		ctx = current->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}
	for_each_task_context_nr(ctxn)
		perf_event_enable_on_exec(ctxn);
	rcu_read_unlock();
}

@@ -6465,9 +6460,6 @@ struct swevent_htable {

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];

	/* Keeps track of cpu being initialized/exited */
	bool				online;
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -6725,14 +6717,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (!head) {
		/*
		 * We can race with cpu hotplug code. Do not
		 * WARN if the cpu just got unplugged.
		 */
		WARN_ON_ONCE(swhash->online);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;
	}

	hlist_add_head_rcu(&event->hlist_entry, head);
	perf_event_update_userpage(event);
@@ -6800,7 +6786,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

@@ -9263,7 +9248,6 @@ static void perf_event_init_cpu(int cpu)
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

@@ -9305,14 +9289,7 @@ static void perf_event_exit_cpu_context(int cpu)

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }