
Commit 4beb31f3 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Split the per-cpu accounting part of the event accounting code



This way we can use the per-cpu handling separately.
This is going to be used to fix the event migration
code accounting.
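
As a hypothetical illustration (the helper name and call site below are
assumptions, not code from this series): with the per-cpu counters handled
by their own helpers, a migration fix can re-account an event that moves
between CPUs by pairing the two helpers, leaving the global counters and
static keys untouched:

	/* Sketch only: hypothetical helper, not part of this patch. */
	static void sketch_migrate_event_cpu(struct perf_event *event,
					     int src_cpu, int dst_cpu)
	{
		/* Drop the per-cpu counts on the CPU the event leaves... */
		unaccount_event_cpu(event, src_cpu);
		/* ...and raise them on the CPU it moves to. */
		account_event_cpu(event, dst_cpu);
	}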

Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 766d6c07
+55 −32
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ struct pmu *perf_init_event(struct perf_event *event)
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*