
Commit deffc0a7 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "perf: enable perf to continue across hotplug" into msm-4.9

parents c695eea3 b02d7648
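
This merge moves the ARM PMU driver from a starting-only CPU hotplug callback to a multi-instance hotplug state with both a starting and a stopping callback, tracks pmu_state and the per-CPU IRQ so the interrupt can be disarmed before a CPU goes down and re-armed when it returns, and adds an events_across_hotplug flag so the perf core can keep such events alive across hotplug. The sketch below shows only the generic registration pattern the patch adopts in arm_pmu_hp_init(); the my_pmu names are hypothetical and stand in for the real driver structures.

/*
 * Minimal sketch, not part of the patch: a multi-instance CPU hotplug
 * state with a "starting" and a "stopping" callback. Each PMU instance
 * later attaches itself with
 * cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, &pmu->node).
 */
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

struct my_pmu {
	struct hlist_node node;		/* hangs off the hotplug state */
	/* ... driver state ... */
};

static int my_pmu_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct my_pmu *pmu = hlist_entry_safe(node, struct my_pmu, node);

	if (!pmu)
		return 0;
	/* CPU is coming online: reset counters, re-arm the per-CPU IRQ. */
	return 0;
}

static int my_pmu_stopping_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct my_pmu *pmu = hlist_entry_safe(node, struct my_pmu, node);

	if (!pmu)
		return 0;
	/* CPU is going offline: stop counters, disarm the per-CPU IRQ. */
	return 0;
}

static int __init my_pmu_hp_init(void)
{
	/* One shared state; instances are added and removed separately. */
	return cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				       "AP_PERF_ARM_STARTING",
				       my_pmu_starting_cpu,
				       my_pmu_stopping_cpu);
}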
+126 −35
@@ -27,6 +27,9 @@
#include <asm/cputype.h>
#include <asm/irq_regs.h>

#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
@@ -366,6 +369,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		return err;
	}

	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;

	return 0;
}

@@ -568,6 +573,7 @@ static void armpmu_init(struct arm_pmu *armpmu)
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= armpmu->attr_groups,
		.events_across_hotplug = 1,
	};
	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;
@@ -620,6 +626,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
@@ -627,6 +635,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpu_pmu->percpu_irq = -1;
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;
@@ -641,6 +650,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -670,6 +680,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)

		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_enable_percpu_irq, &irq, 1);
		cpu_pmu->percpu_irq = irq;
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;
@@ -709,22 +720,12 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);
	return 0;
}
struct cpu_pm_pmu_args {
	struct arm_pmu	*armpmu;
	unsigned long	cmd;
	int		cpu;
	int		ret;
};

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -772,15 +773,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
static void cpu_pm_pmu_common(void *info)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct cpu_pm_pmu_args *data	= info;
	struct arm_pmu *armpmu		= data->armpmu;
	unsigned long cmd		= data->cmd;
	int cpu				= data->cpu;
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;
	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
		data->ret = NOTIFY_DONE;
		return;
	}

	/*
	 * Always reset the PMU registers on power-up even if
@@ -789,8 +794,12 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;
	if (!enabled) {
		data->ret = NOTIFY_OK;
		return;
	}

	data->ret = NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
@@ -798,15 +807,29 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
		data->ret = NOTIFY_DONE;
		break;
	}

	return NOTIFY_OK;
	return;
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct cpu_pm_pmu_args data = {
		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
		.cmd	= cmd,
		.cpu	= smp_processor_id(),
	};

	cpu_pm_pmu_common(&data);
	return data.ret;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -819,11 +842,75 @@ static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}

#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static void cpu_pm_pmu_common(void *info) { }
#endif

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd    = CPU_PM_EXIT;
	cpu_pm_pmu_common(&data);
	if (data.ret == NOTIFY_DONE)
		return 0;

	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
		data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			cpu_pmu_enable_percpu_irq(&irq);

	}

	return 0;
}

static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd = CPU_PM_ENTER;
	cpu_pm_pmu_common(&data);
	/* Disarm the PMU IRQ before disappearing. */
	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
		data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			cpu_pmu_disable_percpu_irq(&irq);

	}

	return 0;
}

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
@@ -834,14 +921,14 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
	if (!cpu_hw_events)
		return -ENOMEM;

	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
	err = cpuhp_state_add_instance_nocalls(USE_CPUHP_STATE,
					       &cpu_pmu->node);
	if (err)
		goto out_free;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;
		goto out_unreg_perf_starting;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
@@ -872,8 +959,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
out_unreg_perf_starting:
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
out_free:
	free_percpu(cpu_hw_events);
@@ -883,7 +970,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
	free_percpu(cpu_pmu->hw_events);
}
@@ -1064,6 +1151,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pmu->pmu_state  = ARM_PMU_STATE_OFF;
	pmu->percpu_irq = -1;

	pr_info("enabled with %s PMU driver, %d counters available\n",
			pmu->name, pmu->num_events);

@@ -1083,11 +1173,12 @@ static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "AP_PERF_ARM_STARTING",
				      arm_perf_starting_cpu, NULL);
	ret = cpuhp_setup_state_multi(USE_CPUHP_STATE,
					USE_CPUHP_STR,
					arm_perf_starting_cpu,
					arm_perf_stopping_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		pr_err("CPU hotplug ARM PMU STOPPING registering failed: %d\n",
		       ret);
	return ret;
}
+8 −0
@@ -84,6 +84,12 @@ enum armpmu_attr_groups {
	ARMPMU_NR_ATTR_GROUPS
};

enum armpmu_pmu_states {
	ARM_PMU_STATE_OFF,
	ARM_PMU_STATE_RUNNING,
	ARM_PMU_STATE_GOING_DOWN,
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
@@ -108,6 +114,8 @@ struct arm_pmu {
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	int		pmu_state;
	int		percpu_irq;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
+2 −0
@@ -270,6 +270,8 @@ struct pmu {
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;
	u32				events_across_hotplug:1,
					reserved:31;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;
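
The new events_across_hotplug bit is how an individual PMU opts in: the perf core only preserves a CPU context's events over hotplug for PMUs that set it, while everything else keeps the existing detach-on-offline behaviour. A minimal sketch of a software-style PMU setting the bit; the my_* callbacks are hypothetical stubs, only the flag itself comes from this patch.

#include <linux/errno.h>
#include <linux/perf_event.h>

static int my_event_init(struct perf_event *event)
{
	/* Only claim events of our registered type. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int  my_event_add(struct perf_event *event, int flags)  { return 0; }
static void my_event_del(struct perf_event *event, int flags)  { }
static void my_event_start(struct perf_event *event, int flags) { }
static void my_event_stop(struct perf_event *event, int flags)  { }
static void my_event_read(struct perf_event *event)             { }

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= my_event_init,
	.add		= my_event_add,
	.del		= my_event_del,
	.start		= my_event_start,
	.stop		= my_event_stop,
	.read		= my_event_read,
	/* Opt in: keep this PMU's events in the context across hotplug. */
	.events_across_hotplug = 1,
};

/* Registered from an initcall, e.g. perf_pmu_register(&my_pmu, "my_pmu", -1); */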
+90 −1
@@ -7586,6 +7586,7 @@ static struct pmu perf_swevent = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

#ifdef CONFIG_EVENT_TRACING
@@ -7730,6 +7731,7 @@ static struct pmu perf_tracepoint = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

static inline void perf_tp_register(void)
@@ -8460,6 +8462,7 @@ static struct pmu perf_cpu_clock = {
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
	.events_across_hotplug = 1,
};

/*
@@ -8541,6 +8544,7 @@ static struct pmu perf_task_clock = {
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
	.events_across_hotplug = 1,
};

static void perf_pmu_nop_void(struct pmu *pmu)
@@ -10715,6 +10719,76 @@ int perf_event_init_cpu(unsigned int cpu)
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void
check_hotplug_start_event(struct perf_event *event)
{
	if (event->attr.type == PERF_TYPE_SOFTWARE) {
		switch (event->attr.config) {
		case PERF_COUNT_SW_CPU_CLOCK:
			cpu_clock_event_start(event, 0);
			break;
		case PERF_COUNT_SW_TASK_CLOCK:
			break;
		default:
			if (event->pmu->start)
				event->pmu->start(event, 0);
			break;
		}
	}
}

static int perf_event_start_swevents(unsigned int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	struct perf_event *event;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		mutex_lock(&ctx->mutex);
		raw_spin_lock(&ctx->lock);
		list_for_each_entry(event, &ctx->event_list, event_entry)
			check_hotplug_start_event(event);
		raw_spin_unlock(&ctx->lock);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
	return 0;
}

/*
 * If keeping events across hotplugging is supported, do not
 * remove the event list so event lives beyond CPU hotplug.
 * The context is exited via an fd close path when userspace
 * is done and the target CPU is online. If software clock
 * event is active, then stop hrtimer associated with it.
 * Start the timer when the CPU comes back online.
 */
static void
check_hotplug_remove_from_context(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx)
{
	if (!event->pmu->events_across_hotplug) {
		__perf_remove_from_context(event, cpuctx,
			ctx, (void *)DETACH_GROUP);
	} else if (event->attr.type == PERF_TYPE_SOFTWARE) {
		switch (event->attr.config) {
		case PERF_COUNT_SW_CPU_CLOCK:
			cpu_clock_event_stop(event, 0);
			break;
		case PERF_COUNT_SW_TASK_CLOCK:
			break;
		default:
			if (event->pmu->stop)
				event->pmu->stop(event, 0);
			break;
		}
	}
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
@@ -10723,7 +10797,7 @@ static void __perf_event_exit_context(void *__info)

	raw_spin_lock(&ctx->lock);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
		check_hotplug_remove_from_context(event, cpuctx, ctx);
	raw_spin_unlock(&ctx->lock);
}

@@ -10842,6 +10916,21 @@ static int __init perf_event_sysfs_init(void)
}
device_initcall(perf_event_sysfs_init);

static int perf_cpu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
				"PERF/CORE/AP_PERF_ONLINE",
				perf_event_start_swevents,
				perf_event_exit_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(perf_cpu_hp_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
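
Taken together, the two sides should let a perf event opened on a CPU keep counting across an offline/online cycle of that CPU. A rough userspace check, assuming root, a hot-pluggable cpu1 and a kernel carrying this patch; the helper names and the choice of PERF_COUNT_SW_CPU_CLOCK are only for illustration.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static void set_cpu1_online(const char *val)
{
	int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* CPU-bound event on cpu1, any task (pid == -1). */
	fd = perf_event_open(&attr, -1, 1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	set_cpu1_online("0");	/* hotplug cpu1 out ... */
	sleep(1);
	set_cpu1_online("1");	/* ... and back in */
	sleep(1);

	/*
	 * For a PMU with events_across_hotplug set, this change intends the
	 * count to keep accumulating over the offline/online cycle.
	 */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cpu-clock on cpu1: %llu ns\n", (unsigned long long)count);

	close(fd);
	return 0;
}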