Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4d855457 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf_counter: move PERF_RECORD_TIME



Move PERF_RECORD_TIME so that all the fixed length items come before
the variable length ones.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.307926436@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de9ac07b
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -100,9 +100,9 @@ enum sw_event_ids {
enum perf_counter_record_format {
	PERF_RECORD_IP		= 1U << 0,
	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_GROUP	= 1U << 2,
-	PERF_RECORD_CALLCHAIN	= 1U << 3,
-	PERF_RECORD_TIME	= 1U << 4,
+	PERF_RECORD_TIME	= 1U << 2,
+	PERF_RECORD_GROUP	= 1U << 3,
+	PERF_RECORD_CALLCHAIN	= 1U << 4,
};

/*
@@ -250,6 +250,7 @@ enum perf_event_type {
	 *
	 * 	{ u64			ip;	  } && PERF_RECORD_IP
	 * 	{ u32			pid, tid; } && PERF_RECORD_TID
+	 * 	{ u64			time;     } && PERF_RECORD_TIME
	 *
	 * 	{ u64			nr;
	 * 	  { u64 event, val; } 	cnt[nr];  } && PERF_RECORD_GROUP
@@ -259,8 +260,6 @@ enum perf_event_type {
	 * 				kernel,
	 * 				user;
	 * 	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
-	 *
-	 * 	{ u64			time;     } && PERF_RECORD_TIME
	 * };
	 */
};
+13 −13
Original line number Diff line number Diff line
@@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter,
		header.size += sizeof(tid_entry);
	}

+	if (record_type & PERF_RECORD_TIME) {
+		/*
+		 * Maybe do better on x86 and provide cpu_clock_nmi()
+		 */
+		time = sched_clock();
+
+		header.type |= PERF_RECORD_TIME;
+		header.size += sizeof(u64);
+	}
+
	if (record_type & PERF_RECORD_GROUP) {
		header.type |= PERF_RECORD_GROUP;
		header.size += sizeof(u64) +
@@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter,
		}
	}

-	if (record_type & PERF_RECORD_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		time = sched_clock();
-
-		header.type |= PERF_RECORD_TIME;
-		header.size += sizeof(u64);
-	}
-
	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;
@@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter,
	if (record_type & PERF_RECORD_TID)
		perf_output_put(&handle, tid_entry);

+	if (record_type & PERF_RECORD_TIME)
+		perf_output_put(&handle, time);
+
	if (record_type & PERF_RECORD_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;
@@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter,
	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

-	if (record_type & PERF_RECORD_TIME)
-		perf_output_put(&handle, time);
-
	perf_output_end(&handle);
}