
Commit 7ae07ea3 authored by Frederic Weisbecker

perf: Humanize the number of contexts



Instead of hardcoding the number of contexts for the recursion
barriers, define a cpp constant to make the code more
self-explanatory.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
parent 927c7a9e
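
The "recursion barriers" the commit message refers to are per-CPU counter arrays with one slot per context in which an event can fire (task, softirq, hardirq, NMI); an event that arrives while its context's slot is already held is dropped instead of recursing into itself. The standalone C sketch below only models that idea: the enum and the get/put helper names are illustrative stand-ins rather than the kernel's actual symbols, and context detection (which the kernel derives from preempt_count()) is left to the caller.

	/*
	 * Standalone model of the per-context recursion barrier.
	 * Not kernel code: names and context selection are simplified.
	 */
	#include <stdio.h>

	#define PERF_NR_CONTEXTS	4	/* task, softirq, hardirq, nmi */

	enum ctx { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

	/* One slot per context; per-CPU in the real kernel. */
	static int recursion[PERF_NR_CONTEXTS];

	/* Grab the slot for this context; return -1 if already nested. */
	static int get_recursion_context(enum ctx rctx)
	{
		if (recursion[rctx])
			return -1;	/* event fired from within itself: drop it */
		recursion[rctx]++;
		return rctx;
	}

	static void put_recursion_context(int rctx)
	{
		recursion[rctx]--;
	}

	int main(void)
	{
		int rctx = get_recursion_context(CTX_HARDIRQ);

		if (rctx >= 0) {
			/* ... record the event ... */
			/* a nested hit in the same context is rejected: */
			printf("nested allowed? %s\n",
			       get_recursion_context(CTX_HARDIRQ) >= 0 ? "yes" : "no");
			put_recursion_context(rctx);
		}
		return 0;
	}
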
+8 −6
@@ -808,6 +808,12 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ * 	task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS	4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -821,12 +827,8 @@ struct perf_cpu_context {
 	struct mutex			hlist_mutex;
 	int				hlist_refcount;
 
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
+	/* Recursion avoidance in each contexts */
+	int				recursion[PERF_NR_CONTEXTS];
 };
 
 struct perf_output_handle {
+2 −2
@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+4 −4
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		char *buf;
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			buf = (char *)alloc_percpu(perf_trace_t);
 			if (!buf)
 				goto fail;
@@ -65,7 +65,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}