Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 38cd0d65 authored by Raghavendra Rao Ananta's avatar Raghavendra Rao Ananta Committed by Gerrit - the friendly Code Review server
Browse files

perf: Add support for creating events for offline CPU



The perf framework APIs for registering and unregistering events
do not work when the CPU is offline. An event registered on a CPU
that is offline could otherwise start counting only when the CPU
comes up.

So introduce a new state such that events can be created on an
offline CPU, but are not usable yet. As soon as the CPU comes
back online, the event is completely created and starts counting.

Change-Id: I3e2f54f6d441bdd57942a928f9cd28ffd9b7e2c7
Signed-off-by: default avatarRaghavendra Rao Ananta <rananta@codeaurora.org>
Signed-off-by: default avatarSwetha Chikkaboraiah <schikk@codeaurora.org>
parent 85016969
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -497,6 +497,7 @@ struct perf_addr_filters_head {
 * enum perf_event_active_state - the states of a event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DORMANT	= -6,
	PERF_EVENT_STATE_DEAD		= -5,
	PERF_EVENT_STATE_ZOMBIE		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
@@ -722,6 +723,13 @@ struct perf_event {
	/* Is this event shared with other events */
	bool					shared;
	struct list_head		zombie_entry;

	/*
	 * Entry into the list that holds the events whose CPUs
	 * are offline. These events will be installed once the
	 * CPU wakes up and will be removed from the list after that
	 */
	struct list_head		dormant_event_entry;
#endif /* CONFIG_PERF_EVENTS */
};

+68 −1
Original line number Diff line number Diff line
@@ -2315,6 +2315,19 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
	perf_pmu_enable(cpuctx->ctx.pmu);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static LIST_HEAD(dormant_event_list);
static DEFINE_SPINLOCK(dormant_event_list_lock);

/*
 * Defer installation of an event whose target CPU is offline: mark the
 * event DORMANT and queue it on dormant_event_list so it can be installed
 * later from the CPU-online path (perf_deferred_install_in_context()).
 *
 * The state change and the list insertion are done together under
 * dormant_event_list_lock, because other paths (release and read) test
 * for PERF_EVENT_STATE_DORMANT under the same lock and expect the state
 * and list membership to agree.
 */
static void perf_prepare_install_in_context(struct perf_event *event)
{
	spin_lock(&dormant_event_list_lock);
	event->state = PERF_EVENT_STATE_DORMANT;
	list_add_tail(&event->dormant_event_entry, &dormant_event_list);
	spin_unlock(&dormant_event_list_lock);
}
#endif

/*
 * Cross CPU call to install and enable a performance event
 *
@@ -2391,6 +2404,13 @@ perf_install_in_context(struct perf_event_context *ctx,
	 */
	smp_store_release(&event->ctx, ctx);

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	if (!cpu_online(cpu)) {
		perf_prepare_install_in_context(event);
		return;
	}
#endif

	if (!task) {
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
@@ -2460,6 +2480,35 @@ perf_install_in_context(struct perf_event_context *ctx,
	raw_spin_unlock_irq(&ctx->lock);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
/*
 * Install the dormant events that were targeted at @cpu while it was
 * offline (see perf_prepare_install_in_context()), now that the CPU is
 * coming back online.
 *
 * dormant_event_list_lock must be dropped around the actual install,
 * since perf_install_in_context() takes ctx->mutex and may sleep.
 * While the lock is dropped, other entries on the list may be removed
 * concurrently (e.g. by __perf_event_release_kernel() dropping a
 * DORMANT event), so a "next" pointer prefetched by
 * list_for_each_entry_safe() could be dangling by the time we resume.
 * Instead, restart the scan from the list head after every install;
 * each matching event is unlinked before the lock is dropped, so the
 * loop still terminates.
 */
static void perf_deferred_install_in_context(int cpu)
{
	struct perf_event *event;
	struct perf_event_context *ctx;

	spin_lock(&dormant_event_list_lock);
restart:
	list_for_each_entry(event, &dormant_event_list, dormant_event_entry) {
		if (cpu != event->cpu)
			continue;

		/*
		 * Unlink and leave DORMANT state behind before dropping the
		 * lock, so the release/read paths stop treating this event
		 * as dormant.
		 */
		list_del_init(&event->dormant_event_entry);
		event->state = PERF_EVENT_STATE_INACTIVE;
		spin_unlock(&dormant_event_list_lock);

		ctx = event->ctx;
		perf_event__state_init(event);

		mutex_lock(&ctx->mutex);
		perf_install_in_context(ctx, event, cpu);
		mutex_unlock(&ctx->mutex);

		spin_lock(&dormant_event_list_lock);
		goto restart;
	}
	spin_unlock(&dormant_event_list_lock);
}
#endif

/*
 * Put a event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
@@ -4300,10 +4349,12 @@ static int __perf_event_release_kernel(struct perf_event *event)
	 *  back online.
	 */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu)) {
	if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu))
		if (event->state == PERF_EVENT_STATE_ZOMBIE)
			return 0;

	if (!cpu_online(event->cpu) &&
		event->state == PERF_EVENT_STATE_ACTIVE) {
		event->state = PERF_EVENT_STATE_ZOMBIE;

		spin_lock(&zombie_list_lock);
@@ -4312,6 +4363,11 @@ static int __perf_event_release_kernel(struct perf_event *event)

		return 0;
	}

	spin_lock(&dormant_event_list_lock);
	if (event->state == PERF_EVENT_STATE_DORMANT)
		list_del(&event->dormant_event_entry);
	spin_unlock(&dormant_event_list_lock);
#endif

	/*
@@ -4632,6 +4688,15 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	struct perf_event_context *ctx;
	int ret;

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	spin_lock(&dormant_event_list_lock);
	if (event->state == PERF_EVENT_STATE_DORMANT) {
		spin_unlock(&dormant_event_list_lock);
		return 0;
	}
	spin_unlock(&dormant_event_list_lock);
#endif

	ctx = perf_event_ctx_lock(event);
	ret = __perf_read(event, buf, count);
	perf_event_ctx_unlock(event, ctx);
@@ -9466,6 +9531,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->dormant_event_entry);
	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
@@ -11169,6 +11235,7 @@ static int perf_event_start_swevents(unsigned int cpu)

	mutex_lock(&pmus_lock);
	perf_event_zombie_cleanup(cpu);
	perf_deferred_install_in_context(cpu);

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {