
Commit b3165f41 authored by Arnaldo Carvalho de Melo, committed by Ingo Molnar

perf session: Move the global threads list to perf_session



So that we can process two perf.data files.

We still need to add an O_MMAP mode for perf_session so that we
can do all the mmap handling in it.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-5-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ec913369
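
The hunks below (apparently builtin-annotate.c, builtin-kmem.c, builtin-report.c, builtin-sched.c and builtin-top.c, going by the function names) all make the same mechanical change: call sites stop using the global threads__findnew()/threads__fprintf() helpers and resolve threads through the perf_session they were handed. A minimal sketch of the session side this implies follows; only the perf_session__findnew() and perf_session__fprintf() names are confirmed by the hunks, while the struct layout, the last_match cache and the reworked threads__* signatures are assumptions.

#include <stdio.h>
#include <sys/types.h>
#include <linux/rbtree.h>

struct thread;

struct perf_session {
	/* header, fd, etc. omitted */
	struct rb_root	 threads;	/* previously a single global tree */
	struct thread	*last_match;	/* assumed: per-session lookup cache */
};

/* assumed: the old global helpers now take the tree they operate on */
struct thread *threads__findnew(pid_t pid, struct rb_root *threads,
				struct thread **last_match);
size_t threads__fprintf(FILE *fp, struct rb_root *threads);

static inline struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	return threads__findnew(pid, &self->threads, &self->last_match);
}

static inline size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	return threads__fprintf(fp, &self->threads);
}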
+3 −3
@@ -131,14 +131,14 @@ static int hist_entry__add(struct addr_location *al, u64 count)
	return 0;
}

static int process_sample_event(event_t *event, struct perf_session *session __used)
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct addr_location al;

	dump_printf("(IP, %d): %d: %p\n", event->header.misc,
		    event->ip.pid, (void *)(long)event->ip.ip);

	if (event__preprocess_sample(event, &al, symbol_filter) < 0) {
	if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
@@ -479,7 +479,7 @@ static int __cmd_annotate(void)
	}

	if (verbose > 3)
		threads__fprintf(stdout);
		perf_session__fprintf(session, stdout);

	if (verbose > 2)
		dsos__fprintf(stdout);
+2 −2
@@ -311,7 +311,7 @@ process_raw_event(event_t *raw_event __used, void *data,
	}
}

static int process_sample_event(event_t *event, struct perf_session *session __used)
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct sample_data data;
	struct thread *thread;
@@ -329,7 +329,7 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
		(void *)(long)data.ip,
		(long long)data.period);

	thread = threads__findnew(event->ip.pid);
	thread = perf_session__findnew(session, event->ip.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
+5 −5
@@ -600,7 +600,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
	return 0;
}

static int process_sample_event(event_t *event, struct perf_session *session __used)
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct sample_data data;
	int cpumode;
@@ -636,7 +636,7 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
		}
	}

	thread = threads__findnew(data.pid);
	thread = perf_session__findnew(session, data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
@@ -679,9 +679,9 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
	return 0;
}

static int process_comm_event(event_t *event, struct perf_session *session __used)
static int process_comm_event(event_t *event, struct perf_session *session)
{
	struct thread *thread = threads__findnew(event->comm.pid);
	struct thread *thread = perf_session__findnew(session, event->comm.pid);

	dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid);

@@ -780,7 +780,7 @@ static int __cmd_report(void)
	}

	if (verbose > 3)
		threads__fprintf(stdout);
		perf_session__fprintf(session, stdout);

	if (verbose > 2)
		dsos__fprintf(stdout);
+41 −27
@@ -730,18 +730,21 @@ struct trace_migrate_task_event {

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct perf_session *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct perf_session *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct perf_session *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
@@ -754,6 +757,7 @@ struct trace_sched_handler {
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
			   struct perf_session *session,
			   struct event *,
			   int cpu,
			   u64 timestamp,
@@ -763,6 +767,7 @@ struct trace_sched_handler {

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct perf_session *session __used,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
@@ -789,6 +794,7 @@ static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct perf_session *session __used,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
@@ -1022,6 +1028,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)

static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct perf_session *session,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
@@ -1045,8 +1052,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
		die("hm, delta: %Ld < 0 ?\n", delta);


	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);
	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
@@ -1074,12 +1081,13 @@ latency_switch_event(struct trace_switch_event *switch_event,

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		     struct perf_session *session,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *this_thread __used)
{
	struct thread *thread = threads__findnew(runtime_event->pid);
	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1096,6 +1104,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct perf_session *session,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
@@ -1109,7 +1118,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid);
	wakee = perf_session__findnew(session, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
@@ -1143,6 +1152,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,

static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
		     struct perf_session *session,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
@@ -1158,7 +1168,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid);
	migrant = perf_session__findnew(session, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
@@ -1353,7 +1363,7 @@ static void sort_lat(void)
static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(void *data,
process_sched_wakeup_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
@@ -1370,7 +1380,8 @@ process_sched_wakeup_event(void *data,
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
		trace_handler->wakeup_event(&wakeup_event, session, event,
					    cpu, timestamp, thread);
}

/*
@@ -1388,6 +1399,7 @@ static char next_shortname2 = '0';

static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct perf_session *session,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
@@ -1415,8 +1427,8 @@ map_switch_event(struct trace_switch_event *switch_event,
		die("hm, delta: %Ld < 0 ?\n", delta);


	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);
	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

@@ -1466,7 +1478,7 @@ map_switch_event(struct trace_switch_event *switch_event,


static void
process_sched_switch_event(void *data,
process_sched_switch_event(void *data, struct perf_session *session,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
@@ -1493,13 +1505,14 @@ process_sched_switch_event(void *data,
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
		trace_handler->switch_event(&switch_event, session, event,
					    this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

static void
process_sched_runtime_event(void *data,
process_sched_runtime_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
@@ -1513,7 +1526,7 @@ process_sched_runtime_event(void *data,
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
}

static void
@@ -1533,7 +1546,8 @@ process_sched_fork_event(void *data,
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
		trace_handler->fork_event(&fork_event, event,
					  cpu, timestamp, thread);
}

static void
@@ -1547,7 +1561,7 @@ process_sched_exit_event(struct event *event,
}

static void
process_sched_migrate_task_event(void *data,
process_sched_migrate_task_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
@@ -1563,12 +1577,13 @@ process_sched_migrate_task_event(void *data,
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
		trace_handler->migrate_task_event(&migrate_task_event, session,
						 event, cpu, timestamp, thread);
}

static void
process_raw_event(event_t *raw_event __used, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
process_raw_event(event_t *raw_event __used, struct perf_session *session,
		  void *data, int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;
@@ -1578,23 +1593,22 @@ process_raw_event(event_t *raw_event __used, void *data,
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, event, cpu, timestamp, thread);
		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, event, cpu, timestamp, thread);
		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
}

static int process_sample_event(event_t *event,
				struct perf_session *session __used)
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct sample_data data;
	struct thread *thread;
@@ -1615,7 +1629,7 @@ static int process_sample_event(event_t *event,
		(void *)(long)data.ip,
		(long long)data.period);

	thread = threads__findnew(data.pid);
	thread = perf_session__findnew(session, data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
@@ -1627,7 +1641,7 @@ static int process_sample_event(event_t *event,
	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
		return 0;

	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
	process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);

	return 0;
}
+12 −7
@@ -20,8 +20,9 @@

#include "perf.h"

#include "util/symbol.h"
#include "util/color.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
@@ -926,7 +927,8 @@ static int symbol_filter(struct map *map, struct symbol *sym)
	return 0;
}

static void event__process_sample(const event_t *self, int counter)
static void event__process_sample(const event_t *self,
				 struct perf_session *session, int counter)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
@@ -946,7 +948,7 @@ static void event__process_sample(const event_t *self, int counter)
		return;
	}

	if (event__preprocess_sample(self, &al, symbol_filter) < 0 ||
	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
	    al.sym == NULL)
		return;

@@ -1053,7 +1055,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, md->counter);
			event__process_sample(event, self, md->counter);
		else
			event__process(event, self);
		old += size;
@@ -1157,10 +1159,13 @@ static int __cmd_top(void)
	int i, counter;
	int ret;
	/*
	 * XXX perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it.
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = NULL;
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false);

	if (session == NULL)
		return -ENOMEM;

	if (target_pid != -1)
		event__synthesize_thread(target_pid, event__process, session);
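
The payoff of the move is the one stated at the top: once every tool resolves threads through its own perf_session, nothing stops a single run from holding several sessions at once. A hypothetical illustration, not part of this commit (perf_session__delete() and the O_RDONLY read mode are assumptions; the three-argument perf_session__new() appears in the __cmd_top() hunk above):

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>

#include "util/session.h"

/* Hypothetical: open two perf.data files side by side; with per-session
 * thread tables their pid lookups no longer share global state. */
static int open_two_sessions(const char *old_file, const char *new_file)
{
	struct perf_session *old_session, *new_session;
	int ret = -ENOMEM;

	old_session = perf_session__new(old_file, O_RDONLY, false);
	if (old_session == NULL)
		return ret;

	new_session = perf_session__new(new_file, O_RDONLY, false);
	if (new_session == NULL)
		goto out_delete_old;

	/* e.g. perf_session__findnew(old_session, pid) and
	 * perf_session__findnew(new_session, pid) each hit their own tree */
	ret = 0;

	perf_session__delete(new_session);	/* assumed destructor name */
out_delete_old:
	perf_session__delete(old_session);
	return ret;
}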