Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 078c3386 authored by Wang Nan, committed by Arnaldo Carvalho de Melo
Browse files

perf evlist: Map backward events to backward_mmap



In perf_evlist__mmap_per_evsel(), select backward_mmap for backward
events.  Utilize new perf_mmap APIs. Dynamically alloc backward_mmap.

Remove useless functions.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-9-git-send-email-wangnan0@huawei.com


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent b2cb615d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -31,8 +31,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		perf_evlist__mmap_read_catchup(evlist, i);
		while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
			const u32 type = event->header.type;

			switch (type) {
+27 −27
Original line number Diff line number Diff line
@@ -27,7 +27,6 @@
#include <linux/log2.h>
#include <linux/err.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

@@ -692,8 +691,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->mmap[i].fd;
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
@@ -904,16 +906,6 @@ static void perf_mmap__put(struct perf_mmap *md)
		perf_mmap__munmap(md);
}

/*
 * Thin wrapper: take a reference on the idx-th forward mmap of the evlist
 * by delegating to perf_mmap__get(). Removed by this patch, since callers
 * now hold a 'maps' pointer (forward or backward_mmap) and can call
 * perf_mmap__get(&maps[idx]) directly.
 */
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	perf_mmap__get(&evlist->mmap[idx]);
}

/*
 * Thin wrapper: drop a reference on the idx-th forward mmap of the evlist
 * via perf_mmap__put() (which munmaps when the refcount hits zero — see
 * perf_mmap__put() above). Removed by this patch for the same reason as
 * perf_evlist__mmap_get(): callers now address forward and backward maps
 * through a common 'maps' pointer.
 */
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	perf_mmap__put(&evlist->mmap[idx]);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
@@ -1049,12 +1041,6 @@ static int perf_mmap__mmap(struct perf_mmap *map,
	return 0;
}

/*
 * Thin wrapper: mmap the idx-th forward ring buffer of the evlist onto
 * file descriptor fd with parameters mp, returning perf_mmap__mmap()'s
 * result (0 on success, negative on error). Removed by this patch:
 * perf_evlist__mmap_per_evsel() now calls perf_mmap__mmap(&maps[idx], ...)
 * directly so the same path serves both mmap and backward_mmap arrays.
 */
static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	return perf_mmap__mmap(&evlist->mmap[idx], mp, fd);
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
@@ -1066,16 +1052,27 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;

		if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward))
			continue;
		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
			}
		}

		if (evsel->system_wide && thread)
			continue;
@@ -1084,13 +1081,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)

			if (perf_mmap__mmap(&maps[idx], mp, *output)  < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
@@ -1103,8 +1101,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &evlist->mmap[idx], revent) < 0) {
			perf_evlist__mmap_put(evlist, idx);
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

@@ -1130,13 +1128,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}
@@ -1157,12 +1156,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
						&output, &output_backward))
			goto out_unmap;
	}