
Commit 03b6ea9b authored by Adrian Hunter, committed by Arnaldo Carvalho de Melo

perf evsel: Tidy up sample parsing overflow checking



The size of data retrieved from a sample event must be validated to
ensure it does not go past the end of the event.  That was being done
sporadically and without considering integer overflows.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1377591794-30553-3-git-send-email-adrian.hunter@intel.com


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 314add6b
+71 −41
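The patch replaces the old sample_overlap() helper with an overflow() helper plus OVERFLOW_CHECK()/OVERFLOW_CHECK_u64() macros. The key detail is the order of the two tests: the requested size is first compared against the 16-bit event size, and only then is the offset-plus-size pointer comparison done, so a huge, file-controlled 64-bit size cannot wrap the pointer arithmetic and slip past the end-of-event test. A minimal standalone sketch of the same pattern (the struct record, overflows() and parse_u64() names below are illustrative only, not the perf code):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* A toy variable-size record: only the first "size" bytes are valid. */
struct record {
	uint16_t size;			/* total size in bytes, header included */
	unsigned char payload[32];
};

/* Would reading "size" bytes at "offset" run past the end of the record? */
static int overflows(const void *endp, uint16_t max_size,
		     const void *offset, uint64_t size)
{
	/*
	 * "size > max_size" must come first: size is 64-bit and comes from
	 * the data being parsed, so "offset + size" on its own could wrap
	 * around the address space and wrongly compare as being in bounds.
	 */
	return size > max_size ||
	       (const char *)offset + size > (const char *)endp;
}

static int parse_u64(const struct record *rec, uint64_t *out)
{
	uint16_t max_size = rec->size;
	const void *endp = (const char *)rec + max_size;
	const void *pos = rec->payload;

	if (overflows(endp, max_size, pos, sizeof(*out)))
		return -1;		/* truncated or corrupt record */
	memcpy(out, pos, sizeof(*out));
	return 0;
}

int main(void)
{
	struct record rec = { .size = sizeof(rec) };
	uint64_t v;

	printf("well-formed: %s\n",
	       parse_u64(&rec, &v) == 0 ? "accepted" : "rejected");

	rec.size = offsetof(struct record, payload) + 4;	/* claims only 4 payload bytes */
	printf("truncated:   %s\n",
	       parse_u64(&rec, &v) == 0 ? "accepted" : "rejected");
	return 0;
}

Compiled and run, the second call is rejected because reading a u64 would run past the 6 bytes the record claims to contain.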
@@ -1131,24 +1131,30 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
 	return 0;
 }
 
-static bool sample_overlap(const union perf_event *event,
-			   const void *offset, u64 size)
+static inline bool overflow(const void *endp, u16 max_size, const void *offset,
+			    u64 size)
 {
-	const void *base = event;
+	return size > max_size || offset + size > endp;
+}
 
-	if (offset + size > base + event->header.size)
-		return true;
+#define OVERFLOW_CHECK(offset, size, max_size)				\
+	do {								\
+		if (overflow(endp, (max_size), (offset), (size)))	\
+			return -EFAULT;					\
+	} while (0)
 
-	return false;
-}
+#define OVERFLOW_CHECK_u64(offset) \
+	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
 
 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			     struct perf_sample *data)
 {
 	u64 type = evsel->attr.sample_type;
-	u64 regs_user = evsel->attr.sample_regs_user;
 	bool swapped = evsel->needs_swap;
 	const u64 *array;
+	u16 max_size = event->header.size;
+	const void *endp = (void *)event + max_size;
+	u64 sz;
 
 	/*
 	 * used for cross-endian analysis. See git commit 65014ab3
@@ -1170,6 +1176,11 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 
 	array = event->sample.array;
 
+	/*
+	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
+	 * check the format does not go past the end of the event.
+	 */
 	if (evsel->sample_size + sizeof(event->header) > event->header.size)
 		return -EFAULT;
 
@@ -1235,6 +1246,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	if (type & PERF_SAMPLE_READ) {
 		u64 read_format = evsel->attr.read_format;
 
+		OVERFLOW_CHECK_u64(array);
 		if (read_format & PERF_FORMAT_GROUP)
 			data->read.group.nr = *array;
 		else
@@ -1243,41 +1255,51 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 
 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+			OVERFLOW_CHECK_u64(array);
 			data->read.time_enabled = *array;
 			array++;
 		}
 
 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+			OVERFLOW_CHECK_u64(array);
 			data->read.time_running = *array;
 			array++;
 		}
 
 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
 		if (read_format & PERF_FORMAT_GROUP) {
-			data->read.group.values = (struct sample_read_value *) array;
-			array = (void *) array + data->read.group.nr *
-					sizeof(struct sample_read_value);
+			const u64 max_group_nr = UINT64_MAX /
+					sizeof(struct sample_read_value);
+
+			if (data->read.group.nr > max_group_nr)
+				return -EFAULT;
+			sz = data->read.group.nr *
+			     sizeof(struct sample_read_value);
+			OVERFLOW_CHECK(array, sz, max_size);
+			data->read.group.values =
+					(struct sample_read_value *)array;
+			array = (void *)array + sz;
 		} else {
+			OVERFLOW_CHECK_u64(array);
 			data->read.one.id = *array;
 			array++;
 		}
 	}
 
 	if (type & PERF_SAMPLE_CALLCHAIN) {
-		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
-			return -EFAULT;
-
-		data->callchain = (struct ip_callchain *)array;
+		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
 
-		if (sample_overlap(event, array, data->callchain->nr))
+		OVERFLOW_CHECK_u64(array);
+		data->callchain = (struct ip_callchain *)array++;
+		if (data->callchain->nr > max_callchain_nr)
 			return -EFAULT;
-
-		array += 1 + data->callchain->nr;
+		sz = data->callchain->nr * sizeof(u64);
+		OVERFLOW_CHECK(array, sz, max_size);
+		array = (void *)array + sz;
 	}
 
 	if (type & PERF_SAMPLE_RAW) {
-		const u64 *pdata;
-
+		OVERFLOW_CHECK_u64(array);
 		u.val64 = *array;
 		if (WARN_ONCE(swapped,
 			      "Endianness of raw data not corrected!\n")) {
@@ -1286,65 +1308,73 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			u.val32[0] = bswap_32(u.val32[0]);
 			u.val32[1] = bswap_32(u.val32[1]);
 		}
-
-		if (sample_overlap(event, array, sizeof(u32)))
-			return -EFAULT;
-
 		data->raw_size = u.val32[0];
-		pdata = (void *) array + sizeof(u32);
+		array = (void *)array + sizeof(u32);
 
-		if (sample_overlap(event, pdata, data->raw_size))
-			return -EFAULT;
-
-		data->raw_data = (void *) pdata;
-
-		array = (void *)array + data->raw_size + sizeof(u32);
+		OVERFLOW_CHECK(array, data->raw_size, max_size);
+		data->raw_data = (void *)array;
+		array = (void *)array + data->raw_size;
 	}
 
 	if (type & PERF_SAMPLE_BRANCH_STACK) {
-		u64 sz;
+		const u64 max_branch_nr = UINT64_MAX /
+					  sizeof(struct branch_entry);
 
-		data->branch_stack = (struct branch_stack *)array;
-		array++; /* nr */
+		OVERFLOW_CHECK_u64(array);
+		data->branch_stack = (struct branch_stack *)array++;
 
+		if (data->branch_stack->nr > max_branch_nr)
+			return -EFAULT;
 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
-		sz /= sizeof(u64);
-		array += sz;
+		OVERFLOW_CHECK(array, sz, max_size);
+		array = (void *)array + sz;
 	}
 
 	if (type & PERF_SAMPLE_REGS_USER) {
+		u64 avail;
+
 		/* First u64 tells us if we have any regs in sample. */
-		u64 avail = *array++;
+		OVERFLOW_CHECK_u64(array);
+		avail = *array++;
 
 		if (avail) {
+			u64 regs_user = evsel->attr.sample_regs_user;
+
+			sz = hweight_long(regs_user) * sizeof(u64);
+			OVERFLOW_CHECK(array, sz, max_size);
 			data->user_regs.regs = (u64 *)array;
-			array += hweight_long(regs_user);
+			array = (void *)array + sz;
		}
 	}
 
 	if (type & PERF_SAMPLE_STACK_USER) {
-		u64 size = *array++;
+		OVERFLOW_CHECK_u64(array);
+		sz = *array++;
 
 		data->user_stack.offset = ((char *)(array - 1)
 					  - (char *) event);
 
-		if (!size) {
+		if (!sz) {
 			data->user_stack.size = 0;
 		} else {
+			OVERFLOW_CHECK(array, sz, max_size);
 			data->user_stack.data = (char *)array;
-			array += size / sizeof(*array);
+			array = (void *)array + sz;
+			OVERFLOW_CHECK_u64(array);
 			data->user_stack.size = *array++;
 		}
 	}
 
 	data->weight = 0;
 	if (type & PERF_SAMPLE_WEIGHT) {
+		OVERFLOW_CHECK_u64(array);
 		data->weight = *array;
 		array++;
 	}
 
 	data->data_src = PERF_MEM_DATA_SRC_NONE;
 	if (type & PERF_SAMPLE_DATA_SRC) {
+		OVERFLOW_CHECK_u64(array);
 		data->data_src = *array;
 		array++;
 	}
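The other overflow the patch closes is in the variable-count fields (read group nr, callchain nr, branch stack nr): the count is a u64 taken from the event itself, so count * sizeof(element) can wrap to a small value before it is compared with the remaining event size, which is why each block now first rejects counts above UINT64_MAX / sizeof(element). A small self-contained sketch of that failure mode and the guard (struct entry and nr_bytes() below are illustrative, not the perf code):

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t from, to, flags;	/* 24 bytes, like a branch entry */
};

/* Returns the byte size of "nr" entries, or 0 if the multiplication wraps. */
static uint64_t nr_bytes(uint64_t nr)
{
	const uint64_t max_entry_nr = UINT64_MAX / sizeof(struct entry);

	if (nr > max_entry_nr)
		return 0;		/* nr * sizeof(struct entry) would overflow */
	return nr * sizeof(struct entry);
}

int main(void)
{
	/* A hostile count: wraps to a small number if multiplied blindly. */
	uint64_t bad_nr = (UINT64_MAX / sizeof(struct entry)) + 2;

	printf("unchecked: %llu bytes\n",
	       (unsigned long long)(bad_nr * sizeof(struct entry)));
	printf("checked:   %llu bytes (0 == rejected)\n",
	       (unsigned long long)nr_bytes(bad_nr));
	return 0;
}

With 24-byte entries, the hostile count wraps to 32 bytes if multiplied blindly, so an unchecked parser would accept a buffer that is far too small; the division-based guard rejects it outright.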