Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c8aab2e0 authored by Stephane Eranian, committed by Ingo Molnar
Browse files

perf/x86: Clean up __intel_pmu_pebs_event() code



This patch makes the code more readable. It also renames
precise_store_data_hsw() to precise_datala_hsw() because
the function is called for both loads and stores on HSW.
The patch also gets rid of the hardcoded store events
codes in that same function.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1407785233-32193-5-git-send-email-eranian@google.com


Cc: ak@linux.intel.com
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 770eee1f
Loading
Loading
Loading
Loading
+37 −44
Original line number Diff line number Diff line
@@ -108,10 +108,9 @@ static u64 precise_store_data(u64 status)
	return val;
}

static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;
	u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK;

	dse.val = PERF_MEM_NA;

@@ -128,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0)
		return dse.val;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;

	/* Nothing else supported. Sorry. */
	}
	return dse.val;
}

@@ -825,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
@@ -834,26 +834,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
	struct perf_sample_data data;
	struct pt_regs regs;
	u64 sample_type;
	int fll, fst;
	int fll, fst, dsrc;
	int fl = event->hw.flags;

	if (!intel_pmu_save_and_restart(event))
		return;

	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
	fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
				 PERF_X86_EVENT_PEBS_ST_HSW |
				 PERF_X86_EVENT_PEBS_LD_HSW |
				 PERF_X86_EVENT_PEBS_NA_HSW);
	sample_type = event->attr.sample_type;
	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;

	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	perf_sample_data_init(&data, 0, event->hw.last_period);

	data.period = event->hw.last_period;
	sample_type = event->attr.sample_type;

	/*
	 * if PEBS-LL or PreciseStore
	 */
	if (fll || fst) {
	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
@@ -863,18 +859,15 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
	/*
	 * data.data_src encodes the data source
	 */
		if (sample_type & PERF_SAMPLE_DATA_SRC) {
	if (dsrc) {
		u64 val = PERF_MEM_NA;
		if (fll)
				data.data_src.val = load_latency_data(pebs->dse);
			else if (event->hw.flags &
					(PERF_X86_EVENT_PEBS_ST_HSW|
					 PERF_X86_EVENT_PEBS_LD_HSW|
					 PERF_X86_EVENT_PEBS_NA_HSW))
				data.data_src.val =
					precise_store_data_hsw(event, pebs->dse);
			else
				data.data_src.val = precise_store_data(pebs->dse);
		}
			val = load_latency_data(pebs->dse);
		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
			val = precise_datala_hsw(event, pebs->dse);
		else if (fst)
			val = precise_store_data(pebs->dse);
		data.data_src.val = val;
	}

	/*
@@ -901,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
	if ((sample_type & PERF_SAMPLE_ADDR) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data.addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data.weight = intel_hsw_weight(pebs);

		if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data.txn = intel_hsw_transaction(pebs);
	}