Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cd2a161, authored by Linux Build Service Account, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "perf: Manage CPU hotplug events at core level"

parents f68bf8e8 595428ca
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -779,8 +779,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event or if it's a zombie event */
		if (!event || event->state == PERF_EVENT_STATE_ZOMBIE)
		/* Ignore if we don't have an event */
		if (!event || event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		/*
+3 −11
Original line number Diff line number Diff line
@@ -752,11 +752,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
		if (!event)
			continue;

		/*
		 * Check if an attempt was made to free this event during
		 * the CPU went offline.
		 */
		if (event->state == PERF_EVENT_STATE_ZOMBIE)
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		switch (cmd) {
@@ -882,10 +878,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd    = CPU_PM_EXIT;
	cpu_pm_pmu_common(&data);
	if (data.ret == NOTIFY_DONE)
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
		data.armpmu->plat_device) {
@@ -911,8 +905,6 @@ static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd = CPU_PM_ENTER;
	cpu_pm_pmu_common(&data);
	/* Disarm the PMU IRQ before disappearing. */
	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
		data.armpmu->plat_device) {
+4 −6
Original line number Diff line number Diff line
@@ -497,9 +497,8 @@ struct perf_addr_filters_head {
 * enum perf_event_active_state - the states of a event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DORMANT	= -6,
	PERF_EVENT_STATE_DEAD		= -5,
	PERF_EVENT_STATE_ZOMBIE		= -4,
	PERF_EVENT_STATE_DORMANT	= -5,
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
@@ -722,7 +721,6 @@ struct perf_event {

	/* Is this event shared with other events */
	bool					shared;
	struct list_head		zombie_entry;

	/*
	 * Entry into the list that holds the events whose CPUs
@@ -1409,11 +1407,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
int perf_event_start_swevents(unsigned int cpu);
int perf_event_restart_events(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#define perf_event_start_swevents NULL
#define perf_event_restart_events NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */
+1 −1
Original line number Diff line number Diff line
@@ -1556,7 +1556,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_start_swevents,
		.startup.single		= perf_event_restart_events,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+20 −131
Original line number Diff line number Diff line
@@ -2322,8 +2322,12 @@ static DEFINE_SPINLOCK(dormant_event_list_lock);
/*
 * Park @event on the dormant list so the deferred-install path can
 * re-install it when its target CPU comes back online.  Marking the
 * event DORMANT under the list lock makes the call idempotent: an
 * event that is already dormant is not queued a second time.
 */
static void perf_prepare_install_in_context(struct perf_event *event)
{
	spin_lock(&dormant_event_list_lock);
	if (event->state != PERF_EVENT_STATE_DORMANT) {
		event->state = PERF_EVENT_STATE_DORMANT;
		list_add_tail(&event->dormant_event_entry, &dormant_event_list);
	}
	spin_unlock(&dormant_event_list_lock);
}
#endif
@@ -2404,13 +2408,6 @@ perf_install_in_context(struct perf_event_context *ctx,
	 */
	smp_store_release(&event->ctx, ctx);

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	if (!cpu_online(cpu)) {
		perf_prepare_install_in_context(event);
		return;
	}
#endif

	if (!task) {
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
@@ -2497,7 +2494,6 @@ static void perf_deferred_install_in_context(int cpu)
		spin_unlock(&dormant_event_list_lock);

		ctx = event->ctx;
		perf_event__state_init(event);

		mutex_lock(&ctx->mutex);
		perf_install_in_context(ctx, event, cpu);
@@ -4325,14 +4321,6 @@ static void put_event(struct perf_event *event)
	_free_event(event);
}

/*
 * Maintain a zombie list to collect all the zombie events
 */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static LIST_HEAD(zombie_list);
static DEFINE_SPINLOCK(zombie_list_lock);
#endif

/*
 * Kill an event dead; while event:refcount will preserve the event
 * object, it will not preserve its functionality. Once the last 'user'
@@ -4343,31 +4331,13 @@ static int __perf_event_release_kernel(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *child, *tmp;

	/*
	 * If the cpu associated to this event is offline, set the event as a
	 *  zombie event. The cleanup of the cpu would be done if the CPU is
	 *  back online.
	 */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
	if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu))
		if (event->state == PERF_EVENT_STATE_ZOMBIE)
			return 0;

	if (!cpu_online(event->cpu) &&
		event->state == PERF_EVENT_STATE_ACTIVE) {
		event->state = PERF_EVENT_STATE_ZOMBIE;

		spin_lock(&zombie_list_lock);
		list_add_tail(&event->zombie_entry, &zombie_list);
		spin_unlock(&zombie_list_lock);

		return 0;
	}

	if (event->cpu != -1) {
		spin_lock(&dormant_event_list_lock);
		if (event->state == PERF_EVENT_STATE_DORMANT)
			list_del(&event->dormant_event_entry);
		spin_unlock(&dormant_event_list_lock);
	}
#endif

	/*
@@ -9538,7 +9508,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
	INIT_LIST_HEAD(&event->rb_entry);
	INIT_LIST_HEAD(&event->active_entry);
	INIT_LIST_HEAD(&event->addr_filters.list);
	INIT_LIST_HEAD(&event->zombie_entry);
	INIT_HLIST_NODE(&event->hlist_entry);


@@ -11191,112 +11160,27 @@ int perf_event_init_cpu(unsigned int cpu)
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void
check_hotplug_start_event(struct perf_event *event)
int perf_event_restart_events(unsigned int cpu)
{
	if (event->pmu->events_across_hotplug &&
	    event->attr.type == PERF_TYPE_SOFTWARE &&
	    event->pmu->start)
		event->pmu->start(event, 0);
}

/*
 * Release every zombie event that was parked for @cpu.  A zombie event
 * is one whose release was requested while its CPU was offline; the
 * real teardown is deferred to this function, which runs when the CPU
 * comes back online.
 *
 * __perf_event_release_kernel() cannot be called under
 * zombie_list_lock, so the lock is dropped for each release.  Because
 * the lock is dropped, any iterator state cached across the unlock
 * (including the saved "next" pointer of list_for_each_entry_safe())
 * would be unprotected against concurrent list mutation; rescan from
 * the list head after every release instead.
 */
static void perf_event_zombie_cleanup(unsigned int cpu)
{
	struct perf_event *event;

restart:
	spin_lock(&zombie_list_lock);

	list_for_each_entry(event, &zombie_list, zombie_entry) {
		if (event->cpu != cpu)
			continue;

		list_del(&event->zombie_entry);
		spin_unlock(&zombie_list_lock);

		/*
		 * The detachment of the event with the
		 * PMU expects it to be in an active state
		 */
		event->state = PERF_EVENT_STATE_ACTIVE;
		__perf_event_release_kernel(event);

		/* Lock was dropped: restart the scan from the head. */
		goto restart;
	}

	spin_unlock(&zombie_list_lock);
}

int perf_event_start_swevents(unsigned int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	struct perf_event *event;
	int idx;

	mutex_lock(&pmus_lock);
	perf_event_zombie_cleanup(cpu);
	perf_deferred_install_in_context(cpu);

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		mutex_lock(&ctx->mutex);
		raw_spin_lock(&ctx->lock);
		list_for_each_entry(event, &ctx->event_list, event_entry)
			check_hotplug_start_event(event);
		raw_spin_unlock(&ctx->lock);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
	per_cpu(is_hotplugging, cpu) = false;
	perf_deferred_install_in_context(cpu);
	mutex_unlock(&pmus_lock);

	return 0;
}

/*
 * If keeping events across hotplugging is supported, do not
 * remove the event list so event lives beyond CPU hotplug.
 * The context is exited via an fd close path when userspace
 * is done and the target CPU is online. If software clock
 * event is active, then stop hrtimer associated with it.
 * Start the timer when the CPU comes back online.
 */
static void
check_hotplug_remove_from_context(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx)
{
	if (event->pmu->events_across_hotplug &&
	    event->attr.type == PERF_TYPE_SOFTWARE &&
	    event->pmu->stop)
		event->pmu->stop(event, PERF_EF_UPDATE);
	else if (!event->pmu->events_across_hotplug)
		__perf_remove_from_context(event, cpuctx,
			ctx, (void *)DETACH_GROUP);
}

/*
 * Detach the events of the context passed in @__info from the CPU
 * going down.  Runs on that CPU (cross-called from the hotplug path).
 *
 * Use the _safe iterator: for PMUs that do not keep events across
 * hotplug, check_hotplug_remove_from_context() ends up in
 * __perf_remove_from_context(), which unlinks the current event from
 * ctx->event_list, so a plain list_for_each_entry() would walk through
 * a node that is no longer on the list.  (The replacement hotplug code
 * in this same change iterates this list with the _safe variant for
 * the same reason.)
 */
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event *event, *tmp;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
		check_hotplug_remove_from_context(event, cpuctx, ctx);
	raw_spin_unlock(&ctx->lock);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	unsigned long flags;
	struct perf_event *event, *event_tmp;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	per_cpu(is_hotplugging, cpu) = true;
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
@@ -11311,7 +11195,12 @@ static void perf_event_exit_cpu_context(int cpu)
		}

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		list_for_each_entry_safe(event, event_tmp, &ctx->event_list,
								event_entry) {
			perf_remove_from_context(event, DETACH_GROUP);
			if (event->pmu->events_across_hotplug)
				perf_prepare_install_in_context(event);
		}
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
@@ -11324,8 +11213,8 @@ static void perf_event_exit_cpu_context(int cpu) { }

int perf_event_exit_cpu(unsigned int cpu)
{

	mutex_lock(&pmus_lock);
	per_cpu(is_hotplugging, cpu) = true;
	perf_event_exit_cpu_context(cpu);
	mutex_unlock(&pmus_lock);
	return 0;