Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c5a15ee4 authored by Rohit Gupta
Browse files

perf: Improve idle handling for perf events



Use the exclude_idle attribute of perf events to avoid reading the
PMUs of idle CPUs. The counter values are updated when a CPU enters
idle, and the saved value is returned when that idle CPU is queried
for an event, provided the exclude_idle attribute is set in the
perf_event.

Change-Id: Ia4f7b8106afdf53ee6f31049b5ef799afbda8480
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
parent 50e0173f
Loading
Loading
Loading
Loading
+36 −2
Original line number Diff line number Diff line
@@ -1253,8 +1253,6 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
@@ -1416,6 +1414,26 @@ static void armpmu_update_counters(void *x)
	}
}

/*
 * Snapshot the hardware counters of all active events that set
 * attr.exclude_idle, so their saved values can be served while this
 * CPU's PMU is quiescent during idle.  Called from the idle notifier
 * on the CPU that is about to enter idle.
 */
static void armpmu_idle_update(void)
{
	struct pmu_hw_events *events;
	int i;

	/* PMU may not have been probed yet. */
	if (!cpu_pmu)
		return;

	events = cpu_pmu->get_hw_events();

	/*
	 * NOTE(review): inclusive bound (<=) matches the sibling
	 * armpmu_update_counters() iteration over num_events + 1 slots
	 * — confirm the events[] array really has that extra entry.
	 */
	for (i = 0; i <= cpu_pmu->num_events; ++i) {
		struct perf_event *event = events->events[i];

		if (event && event->attr.exclude_idle)
			cpu_pmu->pmu.read(event);
	}
}

static void armpmu_hotplug_enable(void *parm_pmu)
{
	struct arm_pmu *armpmu = parm_pmu;
@@ -1581,6 +1599,19 @@ static struct notifier_block perf_cpu_pm_notifier_block = {
	.notifier_call = perf_cpu_pm_notifier,
};

/*
 * Idle notifier callback: when a CPU begins entering idle, flush the
 * counters of exclude_idle events via armpmu_idle_update() so reads
 * while the CPU is idle can use the saved values.  IDLE_END needs no
 * action here.
 */
static int perf_cpu_idle_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	switch (action) {
	case IDLE_START:
		armpmu_idle_update();
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

/* Registered with the idle notifier chain in register_pmu_driver(). */
static struct notifier_block perf_cpu_idle_nb = {
	.notifier_call = perf_cpu_idle_notifier,
};

/*
 * PMU platform driver and devicetree bindings.
 */
@@ -1633,6 +1664,8 @@ static int __init register_pmu_driver(void)
	if (err)
		goto err_cpu_pm;

	idle_notifier_register(&perf_cpu_idle_nb);

	err = platform_driver_register(&armpmu_driver);
	if (err)
		goto err_driver;
@@ -1640,6 +1673,7 @@ static int __init register_pmu_driver(void)

err_driver:
	cpu_pm_unregister_notifier(&perf_cpu_pm_notifier_block);
	idle_notifier_unregister(&perf_cpu_idle_nb);
err_cpu_pm:
	unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
	return err;
+25 −1
Original line number Diff line number Diff line
@@ -154,6 +154,7 @@ enum event_type_t {
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static DEFINE_PER_CPU(bool, is_idle);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -3147,6 +3148,9 @@ static u64 perf_event_read(struct perf_event *event)
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		if (!event->attr.exclude_idle ||
			(!per_cpu(is_idle, event->oncpu) &&
			event->attr.type == PERF_TYPE_RAW))
				smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
@@ -8316,6 +8320,25 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
	return NOTIFY_OK;
}

/*
 * Track each CPU's idle state in the per-CPU 'is_idle' flag so that
 * perf_event_read() can avoid sending a cross-call IPI to an idle CPU
 * for events marked exclude_idle.  Runs on the CPU entering/leaving
 * idle, hence smp_processor_id() is the CPU being updated.
 */
static int event_idle_notif(struct notifier_block *nb, unsigned long action,
							void *data)
{
	if (action == IDLE_START)
		per_cpu(is_idle, smp_processor_id()) = true;
	else if (action == IDLE_END)
		per_cpu(is_idle, smp_processor_id()) = false;

	return NOTIFY_OK;
}

/* Registered with the idle notifier chain in perf_event_init(). */
static struct notifier_block perf_event_idle_nb = {
	.notifier_call = event_idle_notif,
};

void __init perf_event_init(void)
{
	int ret;
@@ -8329,6 +8352,7 @@ void __init perf_event_init(void)
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	idle_notifier_register(&perf_event_idle_nb);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();