
Commit ce39e1ca authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge changes I3c5f1f53,Ib554c9fe into msm-next

* changes:
  perf: add hotplug support
  perf: Add support for exclude_idle attribute
parents 6df67bb2 a7db37ad
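For context: the exclude_idle bit that the second change starts honouring is requested from userspace through perf_event_attr. Below is a minimal, hypothetical sketch (not part of this commit) that counts cycles on CPU 0 with idle time excluded; before this series, armv8pmu_set_event_filter() rejected such an event with -EPERM.

/* Hypothetical userspace sketch: CPU-wide cycle counter on CPU 0 with
 * attr.exclude_idle set, the attribute this series wires up. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_idle = 1;	/* don't count cycles spent in the idle task */
	attr.disabled = 1;

	/* pid == -1, cpu == 0: measure everything on CPU 0 (needs privilege) */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles (idle excluded): %llu\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}

With the series applied, the open succeeds, and the idle notifier plus per-CPU is_idle/is_hotplugging tracking in the diffs below keep idle cycles out of the returned count.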
+128 −6
@@ -29,6 +29,8 @@
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

static DEFINE_PER_CPU(bool, is_hotplugging);

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types (some are defined in asm/perf_event.h).
@@ -794,9 +796,6 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
{
	unsigned long config_base = 0;

-	if (attr->exclude_idle)
-		return -EPERM;
-
	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
@@ -936,6 +935,55 @@ static void __armv8pmu_probe_pmu(void *info)
			     ARRAY_SIZE(pmceid));
}

static void armv8pmu_idle_update(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *hw_events;
	struct perf_event *event;
	int idx;

	if (!cpu_pmu)
		return;

	if (__this_cpu_read(is_hotplugging))
		return;

	hw_events = this_cpu_ptr(cpu_pmu->hw_events);

	if (!hw_events)
		return;

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {

		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		if (!event || !event->attr.exclude_idle ||
				event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		cpu_pmu->pmu.read(event);
	}
}

struct arm_pmu_and_idle_nb {
	struct arm_pmu *cpu_pmu;
	struct notifier_block perf_cpu_idle_nb;
};

static int perf_cpu_idle_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct arm_pmu_and_idle_nb *pmu_nb = container_of(nb,
				struct arm_pmu_and_idle_nb, perf_cpu_idle_nb);

	if (action == IDLE_START)
		armv8pmu_idle_update(pmu_nb->cpu_pmu);

	return NOTIFY_OK;
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
@@ -943,14 +991,32 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
		.present = false,
	};
	int ret;
	struct arm_pmu_and_idle_nb *pmu_idle_nb;

	pmu_idle_nb = devm_kzalloc(&cpu_pmu->plat_device->dev,
					sizeof(*pmu_idle_nb), GFP_KERNEL);
	if (!pmu_idle_nb)
		return -ENOMEM;

	pmu_idle_nb->cpu_pmu = cpu_pmu;
	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
-		return ret;
		goto probe_fail;

-	return probe.present ? 0 : -ENODEV;
	if (!probe.present) {
		ret = -ENODEV;
		goto probe_fail;
	}

	return 0;

probe_fail:
	idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
	return ret;
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1114,9 +1180,65 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{},
};

#ifdef CONFIG_HOTPLUG_CPU
static int perf_event_hotplug_coming_up(unsigned int cpu)
{
	per_cpu(is_hotplugging, cpu) = false;
	return 0;
}

static int perf_event_hotplug_going_down(unsigned int cpu)
{
	per_cpu(is_hotplugging, cpu) = true;
	return 0;
}

static int perf_event_cpu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
				"PERF_EVENT/CPUHP_AP_PERF_ONLINE",
				perf_event_hotplug_coming_up,
				perf_event_hotplug_going_down);
	if (ret)
		pr_err("CPU hotplug notifier for perf_event.c could not be registered: %d\n",
		       ret);

	return ret;
}
#else
static int perf_event_cpu_hp_init(void) { return 0; }
#endif

/*
 * Non DT systems have their micro/arch events probed at run-time.
 * A fairly complete list of generic events are provided and ones that
 * aren't supported by the current PMU are disabled.
 */
static const struct pmu_probe_info armv8_pmu_probe_table[] = {
	PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */
	{ /* sentinel value */ }
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
-	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
	int ret, cpu;

	for_each_possible_cpu(cpu)
		per_cpu(is_hotplugging, cpu) = false;

	ret = perf_event_cpu_hp_init();
	if (ret)
		return ret;

	/* set to true so armv8pmu_idle_update doesn't try to load
	 * hw_events before arm_pmu_device_probe has initialized it.
	 */
	ret = arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
		(acpi_disabled ?  NULL : armv8_pmu_probe_table));

	return ret;
}

static struct platform_driver armv8_pmu_driver = {
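Read together, the pieces above work as follows: probe allocates an arm_pmu_and_idle_nb, and on each IDLE_START the notifier calls armv8pmu_idle_update(), which issues pmu.read() on every active event that set exclude_idle, so its count is synced just before the CPU idles; the core perf changes later in this commit then avoid IPI-reading an idle CPU and return the cached value instead. A condensed, hypothetical restatement of the hook (idle_notifier_register() and IDLE_START are msm-kernel APIs assumed from this patch, not mainline; my_* names are placeholders):

static struct arm_pmu *my_pmu;	/* stashed at probe time */

static int my_idle_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	/* Sync exclude_idle counts before the CPU stops executing, so a
	 * later read can be served without waking the idle CPU. */
	if (action == IDLE_START)
		armv8pmu_idle_update(my_pmu);

	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notifier,
};

/* at probe: idle_notifier_register(&my_idle_nb); */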
+134 −74
@@ -26,6 +26,9 @@

#include <asm/irq_regs.h>

#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
@@ -539,13 +542,18 @@ void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		armpmu->percpu_irq = -1;
		armpmu->pmu_state = ARM_PMU_STATE_OFF;
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	armpmu->pmu_state = ARM_PMU_STATE_OFF;
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
@@ -568,6 +576,7 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		armpmu->percpu_irq = irq;
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);
@@ -604,6 +613,8 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
	if (err)
		goto err_out;

	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;

@@ -625,53 +636,12 @@ int armpmu_request_irqs(struct arm_pmu *armpmu)
	return err;
}

-static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
-{
-	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
-	return per_cpu(hw_events->irq, cpu);
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
-{
-	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
-	int irq;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return 0;
-	if (pmu->reset)
-		pmu->reset(pmu);
-
-	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu(irq)) {
-			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-			return 0;
-		}
-	}
-
-	return 0;
-}
-
-static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
-{
-	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
-	int irq;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return 0;
-
-	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq && irq_is_percpu(irq))
-		disable_percpu_irq(irq);
-
-	return 0;
-}
struct cpu_pm_pmu_args {
	struct arm_pmu	*armpmu;
	unsigned long	cmd;
	int		cpu;
	int		ret;
};

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -719,15 +689,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
	}
}

-static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
-			     void *v)
static void cpu_pm_pmu_common(void *info)
{
-	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct cpu_pm_pmu_args *data	= info;
	struct arm_pmu *armpmu		= data->armpmu;
	unsigned long cmd		= data->cmd;
	int cpu				= data->cpu;
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return NOTIFY_DONE;
	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
		data->ret = NOTIFY_DONE;
		return;
	}

	/*
	 * Always reset the PMU registers on power-up even if
@@ -736,8 +710,12 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

-	if (!enabled)
-		return NOTIFY_OK;
	if (!enabled) {
		data->ret = NOTIFY_OK;
		return;
	}

	data->ret = NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
@@ -745,15 +723,29 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
-		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
-		return NOTIFY_DONE;
		data->ret = NOTIFY_DONE;
		break;
	}

-	return NOTIFY_OK;
	return;
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct cpu_pm_pmu_args data = {
		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
		.cmd	= cmd,
		.cpu	= smp_processor_id(),
	};

	cpu_pm_pmu_common(&data);
	return data.ret;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -766,11 +758,75 @@ static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}

#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static void cpu_pm_pmu_common(void *info) { }
#endif

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd    = CPU_PM_EXIT;
	cpu_pm_pmu_common(&data);
	if (data.ret == NOTIFY_DONE)
		return 0;

	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
		data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			enable_percpu_irq(irq, IRQ_TYPE_NONE);

	}

	return 0;
}

static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd = CPU_PM_ENTER;
	cpu_pm_pmu_common(&data);
	/* Disarm the PMU IRQ before disappearing. */
	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
		data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			disable_percpu_irq(irq);

	}

	return 0;
}

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
@@ -782,12 +838,12 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
-		goto out_unregister;
		goto out_unreg_perf_starting;

	return 0;

-out_unregister:
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
out_unreg_perf_starting:
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
out:
	return err;
@@ -796,7 +852,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
}

@@ -836,6 +892,7 @@ struct arm_pmu *armpmu_alloc(void)
		 * validation).
		 */
		.capabilities		= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
		.events_across_hotplug	= 1,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
@@ -878,6 +935,9 @@ int armpmu_register(struct arm_pmu *pmu)
	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pmu->pmu_state  = ARM_PMU_STATE_OFF;
	pmu->percpu_irq = -1;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

@@ -895,9 +955,9 @@ static int arm_pmu_hp_init(void)
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
-				      arm_perf_teardown_cpu);
				      arm_perf_stopping_cpu);
	if (ret)
-		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		pr_err("CPU hotplug ARM PMU STOPPING registering failed: %d\n",
		       ret);
	return ret;
}
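The refactor above extracts cpu_pm_pmu_common() so the same save/restore body serves both the CPU_PM notifier (idle power collapse) and the new hotplug STARTING/STOPPING callbacks, which synthesize a cpu_pm_pmu_args with CPU_PM_EXIT or CPU_PM_ENTER. For reference, a minimal sketch of how such a CPU_PM notifier is wired up with the generic <linux/cpu_pm.h> API (my_* names are placeholders, not code from this commit):

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:		/* core about to lose PMU state */
		/* stop the PMU and save counters */
		break;
	case CPU_PM_EXIT:		/* core powered back up */
	case CPU_PM_ENTER_FAILED:	/* power-down aborted */
		/* reset the PMU, restore counters, restart */
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_notify,
};

static int my_pm_register(void)
{
	return cpu_pm_register_notifier(&my_cpu_pm_nb);
}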
+8 −0
@@ -90,6 +90,12 @@ enum armpmu_attr_groups {
	ARMPMU_NR_ATTR_GROUPS
};

enum armpmu_pmu_states {
	ARM_PMU_STATE_OFF,
	ARM_PMU_STATE_RUNNING,
	ARM_PMU_STATE_GOING_DOWN,
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
@@ -111,6 +117,8 @@ struct arm_pmu {
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	int		pmu_state;
	int		percpu_irq;
	u64		max_period;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
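The new pmu_state field is set at fixed points in the driver changes above: ARM_PMU_STATE_OFF at registration and once the IRQ is freed, ARM_PMU_STATE_RUNNING after armpmu_request_irq() succeeds, and ARM_PMU_STATE_GOING_DOWN while armpmu_free_irq() runs. A hypothetical condensation of how arm_perf_stopping_cpu() consults it (not new API):

static void maybe_disarm_pmu_irq(struct arm_pmu *pmu)
{
	int irq = pmu->percpu_irq;

	/* Only disarm an IRQ that is actually live; if pmu_state is OFF or
	 * GOING_DOWN, armpmu_free_irq() owns or has already freed it. */
	if (pmu->pmu_state == ARM_PMU_STATE_RUNNING &&
	    irq > 0 && irq_is_percpu(irq))
		disable_percpu_irq(irq);
}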
+2 −0
@@ -266,6 +266,8 @@ struct pmu {
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;
	u32				events_across_hotplug:1,
					reserved:31;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;
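Because the new field is a one-bit flag that defaults to zero, every existing PMU keeps the old behaviour of having its events torn down on hotplug; a PMU opts in by setting the bit in its struct pmu initializer, as the core perf diff below does for the software, tracepoint and clock PMUs. A hypothetical driver would opt in the same way (my_* callbacks are placeholders):

static struct pmu my_sw_pmu = {
	.task_ctx_nr		= perf_sw_context,
	.event_init		= my_event_init,
	.add			= my_add,
	.del			= my_del,
	.start			= my_start,
	.stop			= my_stop,
	.read			= my_read,
	/* keep events on the context list across CPU hotplug instead of
	 * removing them in __perf_event_exit_context() */
	.events_across_hotplug	= 1,
};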
+128 −8
@@ -378,6 +378,8 @@ static atomic_t perf_sched_count;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
static DEFINE_PER_CPU(bool, is_idle);
static DEFINE_PER_CPU(bool, is_hotplugging);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -3631,6 +3633,9 @@ static void __perf_event_read(void *info)
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct pmu *pmu = event->pmu;

	if (__this_cpu_read(is_hotplugging))
		return;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
@@ -3743,22 +3748,32 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
static int perf_event_read(struct perf_event *event, bool group)
{
	int event_cpu, ret = 0;
	bool active_event_skip_read = false;

	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	event_cpu = READ_ONCE(event->oncpu);

	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		if ((unsigned int)event_cpu >= nr_cpu_ids)
			return 0;
		if (cpu_isolated(event_cpu) ||
			(event->attr.exclude_idle &&
				per_cpu(is_idle, event_cpu)) ||
				per_cpu(is_hotplugging, event_cpu))
			active_event_skip_read = true;
	}

	if (event->state == PERF_EVENT_STATE_ACTIVE &&
		!active_event_skip_read) {
		struct perf_read_data data = {
			.event = event,
			.group = group,
			.ret = 0,
		};

-		event_cpu = READ_ONCE(event->oncpu);
-		if ((unsigned)event_cpu >= nr_cpu_ids)
-			return 0;

		preempt_disable();
		event_cpu = __perf_event_read_cpu(event, event_cpu);

@@ -3772,10 +3787,13 @@ static int perf_event_read(struct perf_event *event, bool group)
		 * Therefore, either way, we'll have an up-to-date event count
		 * after this.
		 */
-		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
		(void)smp_call_function_single(event_cpu,
				__perf_event_read, &data, 1);
		preempt_enable();
		ret = data.ret;
-	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
	} else if (event->state == PERF_EVENT_STATE_INACTIVE ||
			(active_event_skip_read &&
			!per_cpu(is_hotplugging, event_cpu))) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

@@ -3868,7 +3886,8 @@ find_get_context(struct pmu *pmu, struct task_struct *task,

	if (!task) {
		/* Must be root to operate on a CPU event: */
-		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		if (!is_kernel_event(event) && perf_paranoid_cpu() &&
			!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
@@ -7910,6 +7929,7 @@ static struct pmu perf_swevent = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

#ifdef CONFIG_EVENT_TRACING
@@ -8059,6 +8079,7 @@ static struct pmu perf_tracepoint = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

static inline void perf_tp_register(void)
@@ -8803,6 +8824,7 @@ static struct pmu perf_cpu_clock = {
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
	.events_across_hotplug = 1,
};

/*
@@ -8884,6 +8906,7 @@ static struct pmu perf_task_clock = {
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
	.events_across_hotplug = 1,
};

static void perf_pmu_nop_void(struct pmu *pmu)
@@ -11085,6 +11108,8 @@ static void __init perf_event_init_all_cpus(void)
		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
#endif
		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
		per_cpu(is_hotplugging, cpu) = false;
		per_cpu(is_idle, cpu) = false;
	}
}

@@ -11104,6 +11129,59 @@ void perf_swevent_init_cpu(unsigned int cpu)
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void
check_hotplug_start_event(struct perf_event *event)
{
	if (event->pmu->events_across_hotplug &&
	    event->attr.type == PERF_TYPE_SOFTWARE &&
	    event->pmu->start)
		event->pmu->start(event, 0);
}

static int perf_event_start_swevents(unsigned int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	struct perf_event *event;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		mutex_lock(&ctx->mutex);
		raw_spin_lock(&ctx->lock);
		list_for_each_entry(event, &ctx->event_list, event_entry)
			check_hotplug_start_event(event);
		raw_spin_unlock(&ctx->lock);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
	per_cpu(is_hotplugging, cpu) = false;
	return 0;
}

/*
 * If keeping events across hotplugging is supported, do not
 * remove the event list so event lives beyond CPU hotplug.
 * The context is exited via an fd close path when userspace
 * is done and the target CPU is online. If software clock
 * event is active, then stop hrtimer associated with it.
 * Start the timer when the CPU comes back online.
 */
static void
check_hotplug_remove_from_context(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx)
{
	if (event->pmu->events_across_hotplug &&
	    event->attr.type == PERF_TYPE_SOFTWARE &&
	    event->pmu->stop)
		event->pmu->stop(event, PERF_EF_UPDATE);
	else if (!event->pmu->events_across_hotplug)
		__perf_remove_from_context(event, cpuctx,
			ctx, (void *)DETACH_GROUP);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
@@ -11112,7 +11190,7 @@ static void __perf_event_exit_context(void *__info)

	raw_spin_lock(&ctx->lock);
	list_for_each_entry(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
		check_hotplug_remove_from_context(event, cpuctx, ctx);
	raw_spin_unlock(&ctx->lock);
}

@@ -11166,6 +11244,7 @@ int perf_event_init_cpu(unsigned int cpu)

int perf_event_exit_cpu(unsigned int cpu)
{
	per_cpu(is_hotplugging, cpu) = true;
	perf_event_exit_cpu_context(cpu);
	return 0;
}
@@ -11190,6 +11269,44 @@ static struct notifier_block perf_reboot_notifier = {
	.priority = INT_MIN,
};

static int event_idle_notif(struct notifier_block *nb, unsigned long action,
							void *data)
{
	switch (action) {
	case IDLE_START:
		__this_cpu_write(is_idle, true);
		break;
	case IDLE_END:
		__this_cpu_write(is_idle, false);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block perf_event_idle_nb = {
	.notifier_call = event_idle_notif,
};

#ifdef CONFIG_HOTPLUG_CPU
static int perf_cpu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
				"PERF/CORE/CPUHP_AP_PERF_ONLINE",
				perf_event_start_swevents,
				perf_event_exit_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
		       ret);

	return ret;
}
#else
static int perf_cpu_hp_init(void) { return 0; }
#endif

void __init perf_event_init(void)
{
	int ret;
@@ -11203,7 +11320,10 @@ void __init perf_event_init(void)
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_event_init_cpu(smp_processor_id());
	idle_notifier_register(&perf_event_idle_nb);
	register_reboot_notifier(&perf_reboot_notifier);
	ret = perf_cpu_hp_init();
	WARN(ret, "core perf_cpu_hp_init() failed with: %d", ret);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);