
Commit 766d6c07 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Factor out event accounting code to account_event()/__free_event()



Gather all the event accounting code to a single place,
once all the prerequisites are completed. This simplifies
the refcounting.
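
The resulting ordering is: do every step that can fail first, call account_event() only once nothing can fail any more, and let a single __free_event() helper tear the event down on both the normal free path and the early error path. The snippet below is only a simplified, self-contained sketch of that pattern; the names (obj_alloc(), obj_account(), __obj_free(), nr_live_objects) are made up for the illustration and are not part of the kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Made-up global counter standing in for the accounting state that the
 * patch centralizes in account_event(). */
static int nr_live_objects;

struct obj {
	void *extra;	/* stands in for per-event resources */
};

/* Every construction step that can fail happens here, before any
 * accounting has been touched. */
static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	o->extra = malloc(64);
	if (!o->extra) {
		free(o);	/* nothing was accounted, nothing to undo */
		return NULL;
	}
	return o;
}

/* Single accounting point, called only after the last failure point,
 * the way account_event() is called in the patch. */
static void obj_account(struct obj *o)
{
	(void)o;
	nr_live_objects++;
}

/* Single teardown helper usable from the normal free path and from
 * early error paths, the way __free_event() is used in the patch. */
static void __obj_free(struct obj *o)
{
	free(o->extra);
	free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;

	obj_account(o);
	printf("live objects: %d\n", nr_live_objects);

	nr_live_objects--;	/* undo accounting before teardown */
	__obj_free(o);
	return 0;
}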

Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-4-git-send-email-fweisbec@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 90983b16
+47 −32
@@ -3128,6 +3128,21 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);

+static void __free_event(struct perf_event *event)
+{
+	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+			put_callchain_buffers();
+	}
+
+	if (event->destroy)
+		event->destroy(event);
+
+	if (event->ctx)
+		put_ctx(event->ctx);
+
+	call_rcu(&event->rcu_head, free_event_rcu);
+}
 static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
@@ -3141,8 +3156,6 @@ static void free_event(struct perf_event *event)
 			atomic_dec(&nr_comm_events);
 		if (event->attr.task)
 			atomic_dec(&nr_task_events);
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
 			static_key_slow_dec_deferred(&perf_sched_events);
@@ -3180,13 +3193,8 @@ static void free_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);

-	if (event->destroy)
-		event->destroy(event);
-
-	if (event->ctx)
-		put_ctx(event->ctx);
-
-	call_rcu(&event->rcu_head, free_event_rcu);
+
+	__free_event(event);
 }

 int perf_event_release_kernel(struct perf_event *event)
@@ -6443,6 +6451,29 @@ struct pmu *perf_init_event(struct perf_event *event)
 	return pmu;
 }

+static void account_event(struct perf_event *event)
+{
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_inc(&perf_sched_events.key);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_inc(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_inc(&nr_comm_events);
+	if (event->attr.task)
+		atomic_inc(&nr_task_events);
+	if (has_branch_stack(event)) {
+		static_key_slow_inc(&perf_sched_events.key);
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events,
+					    event->cpu));
+	}
+
+	if (is_cgroup_event(event)) {
+		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+		static_key_slow_inc(&perf_sched_events.key);
+	}
+}
+
 /*
  * Allocate and initialize a event structure
  */
@@ -6556,21 +6587,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 			if (err)
 				goto err_pmu;
 		}
-
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_inc(&perf_sched_events.key);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_inc(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_inc(&nr_comm_events);
-		if (event->attr.task)
-			atomic_inc(&nr_task_events);
-		if (has_branch_stack(event)) {
-			static_key_slow_inc(&perf_sched_events.key);
-			if (!(event->attach_state & PERF_ATTACH_TASK))
-				atomic_inc(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-		}
 	}

 	return event;
@@ -6865,17 +6881,14 @@ SYSCALL_DEFINE5(perf_event_open,

 	if (flags & PERF_FLAG_PID_CGROUP) {
 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
-		if (err)
-			goto err_alloc;
-		/*
-		 * one more event:
-		 * - that has cgroup constraint on event->cpu
-		 * - that may need work on context switch
-		 */
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		static_key_slow_inc(&perf_sched_events.key);
+		if (err) {
+			__free_event(event);
+			goto err_task;
+		}
 	}

+	account_event(event);
+
 	/*
 	 * Special case software events and allow them to be part of
 	 * any hardware group.
@@ -7071,6 +7084,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err;
 	}

+	account_event(event);
+
 	ctx = find_get_context(event->pmu, task, cpu);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);