Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5fd2edf authored by Raghavendra Rao Ananta's avatar Raghavendra Rao Ananta
Browse files

perf: Fix event cleanup across CPU hotplugs



Perf hardware events are generally tied to a particular CPU.
During the cleanup process perf_remove_from_context()
tries to execute the PMU related cleanup on the CPU that the
event is tied to. But if the CPU is offline, the cleanup is
not completed successfully at the PMU level, but the event
is freed (at the core level).

This fix collects the events that are pending release while their
CPU is down. These zombie events are cleaned up when the
CPU is hotplugged back online.

Change-Id: I9ddf8f32cfcec4e0cb1c0910fb68b6db984d7553
Signed-off-by: default avatarRaghavendra Rao Ananta <rananta@codeaurora.org>
parent d88f28ae
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -779,8 +779,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
		/* Ignore if we don't have an event or if it's a zombie event */
		if (!event || event->state == PERF_EVENT_STATE_ZOMBIE)
			continue;

		/*
+9 −0
Original line number Diff line number Diff line
@@ -743,6 +743,15 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
			continue;

		event = hw_events->events[idx];
		if (!event)
			continue;

		/*
		 * Check if an attempt was made to free this event while
		 * the CPU was offline.
		 */
		if (event->state == PERF_EVENT_STATE_ZOMBIE)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
+3 −1
Original line number Diff line number Diff line
@@ -497,7 +497,8 @@ struct perf_addr_filters_head {
 * enum perf_event_active_state - the states of a event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_DEAD		= -5,
	PERF_EVENT_STATE_ZOMBIE		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
@@ -720,6 +721,7 @@ struct perf_event {

	/* Is this event shared with other events */
	bool					shared;
	struct list_head		zombie_entry;
#endif /* CONFIG_PERF_EVENTS */
};

+57 −0
Original line number Diff line number Diff line
@@ -4262,6 +4262,14 @@ static void put_event(struct perf_event *event)
	_free_event(event);
}

/*
 * Maintain a zombie list to collect all the zombie events
 */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static LIST_HEAD(zombie_list);
static DEFINE_SPINLOCK(zombie_list_lock);
#endif

/*
 * Kill an event dead; while event:refcount will preserve the event
 * object, it will not preserve its functionality. Once the last 'user'
@@ -4272,6 +4280,26 @@ int perf_event_release_kernel(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *child, *tmp;

	/*
	 * If the cpu associated to this event is offline, set the event as a
	 *  zombie event. The cleanup of the cpu would be done if the CPU is
	 *  back online.
	 */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	if (!cpu_online(event->cpu)) {
		if (event->state == PERF_EVENT_STATE_ZOMBIE)
			return 0;

		event->state = PERF_EVENT_STATE_ZOMBIE;

		spin_lock(&zombie_list_lock);
		list_add_tail(&event->zombie_entry, &zombie_list);
		spin_unlock(&zombie_list_lock);

		return 0;
	}
#endif

	/*
	 * If we got here through err_file: fput(event_file); we will not have
	 * attached to a context yet.
@@ -9388,6 +9416,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
	INIT_LIST_HEAD(&event->rb_entry);
	INIT_LIST_HEAD(&event->active_entry);
	INIT_LIST_HEAD(&event->addr_filters.list);
	INIT_LIST_HEAD(&event->zombie_entry);
	INIT_HLIST_NODE(&event->hlist_entry);


@@ -11043,6 +11072,32 @@ check_hotplug_start_event(struct perf_event *event)
		event->pmu->start(event, 0);
}

static void perf_event_zombie_cleanup(unsigned int cpu)
{
	struct perf_event *event, *tmp;

	spin_lock(&zombie_list_lock);

	list_for_each_entry_safe(event, tmp, &zombie_list, zombie_entry) {
		if (event->cpu != cpu)
			continue;

		list_del(&event->zombie_entry);
		spin_unlock(&zombie_list_lock);

		/*
		 * The detachment of the event with the
		 * PMU expects it to be in an active state
		 */
		event->state = PERF_EVENT_STATE_ACTIVE;
		perf_event_release_kernel(event);

		spin_lock(&zombie_list_lock);
	}

	spin_unlock(&zombie_list_lock);
}

static int perf_event_start_swevents(unsigned int cpu)
{
	struct perf_event_context *ctx;
@@ -11050,6 +11105,8 @@ static int perf_event_start_swevents(unsigned int cpu)
	struct perf_event *event;
	int idx;

	perf_event_zombie_cleanup(cpu);

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;