
Commit 948b26b6 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Account freq events globally



Freq events may not always be affine to a particular CPU. As such,
account_event_cpu() may crash if we account a freq event per CPU
when it has event->cpu == -1.

To solve this, let's account freq events globally. In practice this
doesn't change the picture much, because the perf tools create
per-task perf events with one event per CPU by default. Profiling a
single CPU is usually a corner case, so there is not much point in
optimizing for it. (A minimal sketch of the failure mode follows the
commit metadata below.)

Reported-by: Jiri Olsa <jolsa@redhat.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1375460996-16329-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fc3b86d6
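
To make the fixed bug concrete, here is a minimal userspace C sketch, not kernel code: the fixed-size array stands in for the DEFINE_PER_CPU(atomic_t, perf_freq_events) counter, NR_CPUS is arbitrary, and the account_freq_event*() helpers are hypothetical stand-ins for the kernel's account_event_cpu()/account_event() paths. Indexing a per-CPU slot with event->cpu == -1 is out of bounds, while a single global counter is valid no matter which CPU, if any, the event is bound to.

#include <stdio.h>
#include <stdatomic.h>

#define NR_CPUS 4

/* Stand-in for DEFINE_PER_CPU(atomic_t, perf_freq_events). */
static atomic_int per_cpu_freq_events[NR_CPUS];

/* Stand-in for the global nr_freq_events introduced by this patch. */
static atomic_int nr_freq_events;

/* Old scheme: per-CPU accounting. Out-of-bounds access when cpu == -1. */
static void account_freq_event_cpu(int cpu)
{
	atomic_fetch_add(&per_cpu_freq_events[cpu], 1);
}

/* New scheme: one global counter, valid for any event->cpu. */
static void account_freq_event(void)
{
	atomic_fetch_add(&nr_freq_events, 1);
}

int main(void)
{
	int event_cpu = -1;	/* a per-task freq event is not CPU-affine */

	account_freq_event();	/* safe: global accounting */
	/* account_freq_event_cpu(event_cpu); would index slot -1 */

	printf("nr_freq_events = %d (event->cpu = %d)\n",
	       atomic_load(&nr_freq_events), event_cpu);
	return 0;
}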
kernel/events/core.c  +8 −11
@@ -141,11 +141,11 @@ enum event_type_t {
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
-static DEFINE_PER_CPU(atomic_t, perf_freq_events);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
+static atomic_t nr_freq_events __read_mostly;
 
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
@@ -1871,9 +1871,6 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
-	if (atomic_read(&__get_cpu_var(perf_freq_events)))
-		tick_nohz_full_kick();
-
 	return 0;
 }
 
@@ -2811,7 +2808,7 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	if (atomic_read(&nr_freq_events) ||
 	    __this_cpu_read(perf_throttled_count))
 		return false;
 	else
@@ -3140,9 +3137,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_dec(&per_cpu(perf_freq_events, cpu));
 }
 
 static void unaccount_event(struct perf_event *event)
@@ -3158,6 +3152,8 @@ static void unaccount_event(struct perf_event *event)
 		atomic_dec(&nr_comm_events);
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
+	if (event->attr.freq)
+		atomic_dec(&nr_freq_events);
 	if (is_cgroup_event(event))
 		static_key_slow_dec_deferred(&perf_sched_events);
 	if (has_branch_stack(event))
@@ -6489,9 +6485,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_inc(&per_cpu(perf_freq_events, cpu));
 }
 
 static void account_event(struct perf_event *event)
@@ -6507,6 +6500,10 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
+	if (event->attr.freq) {
+		if (atomic_inc_return(&nr_freq_events) == 1)
+			tick_nohz_full_kick_all();
+	}
 	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
 	if (is_cgroup_event(event))
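
One design note on the account_event() hunk above: tick_nohz_full_kick_all() is issued only on the 0 -> 1 transition of nr_freq_events, so full-dynticks CPUs are interrupted once when freq events first appear rather than for every new event. A minimal userspace sketch of that transition pattern, with kick_all() as a hypothetical stand-in for tick_nohz_full_kick_all():

#include <stdio.h>
#include <stdatomic.h>

static atomic_int nr_freq_events;

/* Hypothetical stand-in for tick_nohz_full_kick_all(). */
static void kick_all(void)
{
	puts("kick full-dynticks CPUs to re-evaluate perf_event_can_stop_tick()");
}

static void account_freq_event(void)
{
	/*
	 * atomic_fetch_add() returns the old value, so "== 0" here matches
	 * the kernel's atomic_inc_return(&nr_freq_events) == 1 test.
	 */
	if (atomic_fetch_add(&nr_freq_events, 1) == 0)
		kick_all();
}

int main(void)
{
	account_freq_event();	/* first freq event: kicks */
	account_freq_event();	/* later events: no kick */
	return 0;
}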