Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6afad54d authored by Kan Liang, committed by Arnaldo Carvalho de Melo
Browse files

perf mmap: Discard legacy interfaces for mmap read forward



Discards legacy interfaces perf_evlist__mmap_read_forward(),
perf_evlist__mmap_read() and perf_evlist__mmap_consume().

No tools use them.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1519945751-37786-14-git-send-email-kan.liang@linux.intel.com


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 75948730
Loading
Loading
Loading
Loading
+1 −24
Original line number Diff line number Diff line
@@ -702,29 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
	return perf_evlist__set_paused(evlist, false);
}

/*
 * Legacy interface: fetch the next event from the forward ring buffer
 * of mmap slot 'idx'.
 *
 * The messup check matters only for forward overwritable ring buffers,
 * where the memory md->prev points at may already have been overwritten.
 * Read-write ring buffers don't need it: the kernel stops outputting
 * once it hits md->prev (perf_mmap__consume()).
 */
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	return perf_mmap__read_forward(&evlist->mmap[idx]);
}

/*
 * Legacy wrapper: read the next event from mmap slot 'idx'.
 * Simply forwards to perf_evlist__mmap_read_forward(); kept only for
 * older callers of the legacy interface.
 */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

/*
 * Legacy wrapper: mark the current event of mmap slot 'idx' as consumed
 * via perf_mmap__consume().
 * NOTE(review): the second argument is passed as false; its exact
 * semantics are defined by perf_mmap__consume(), not visible here.
 */
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], false);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;
@@ -761,7 +738,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
+0 −4
Original line number Diff line number Diff line
@@ -129,10 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
						 int idx);
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

int perf_evlist__open(struct perf_evlist *evlist);
+1 −20
Original line number Diff line number Diff line
@@ -63,25 +63,6 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
	return event;
}

/*
 * Legacy mmap-read interface; don't use in new code — prefer
 * perf_mmap__read_event().
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	/* The map may have been torn down on POLLHUP/POLLERR. */
	if (!refcount_read(&map->refcnt))
		return NULL;

	return perf_mmap__read(map, &map->prev, perf_mmap__read_head(map));
}

/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
@@ -191,7 +172,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *