
Commit a098f448 authored by Robert Richter, committed by Ingo Molnar

perf, x86: implement ARCH_PERFMON_EVENTSEL bit masks



ARCH_PERFMON_EVENTSEL bit masks are often used in the kernel. This
patch adds macros for the bit masks and removes the local defines. The
function intel_pmu_raw_event() becomes x86_pmu_raw_event(), which is
generic across x86 models and is also used for p6. Duplicate code is
removed.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100330092821.GH11907@erda.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 948b1bb8
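
For illustration only, a minimal user-space sketch of what the new
x86_pmu_raw_event() filtering amounts to: only the fields covered by
X86_RAW_EVENT_MASK (event code, umask, edge, inv, cmask) survive in a
user-supplied raw config, while bits such as ENABLE remain under kernel
control. The macro definitions are copied from the patch below; the
event code 0x3c and the helper name sanitize_raw_event() are made-up
examples, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* EVENTSEL bit fields, copied from the patch. */
#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)

/* Same filtering as the patch's x86_pmu_raw_event(). */
static uint64_t sanitize_raw_event(uint64_t hw_event)
{
	return hw_event & X86_RAW_EVENT_MASK;
}

int main(void)
{
	/* Event 0x3c (unhalted core cycles) plus ENABLE, which a raw
	 * config must not be able to smuggle in. */
	uint64_t raw = 0x3c | ARCH_PERFMON_EVENTSEL_ENABLE;

	printf("0x%llx\n", (unsigned long long)sanitize_raw_event(raw)); /* prints 0x3c */
	return 0;
}
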
+25 −33
@@ -18,39 +18,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1			     0x187
 
-#define ARCH_PERFMON_EVENTSEL_ENABLE			  (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
-#define INTEL_ARCH_INV_MASK		0x00800000ULL
-#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-	(INTEL_ARCH_CNT_MASK| \
-	 INTEL_ARCH_INV_MASK| \
-	 INTEL_ARCH_EDGE_MASK|\
-	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK	\
+	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK		\
+	(ARCH_PERFMON_EVENTSEL_EVENT |	\
+	 ARCH_PERFMON_EVENTSEL_UMASK |	\
+	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
+	 ARCH_PERFMON_EVENTSEL_INV   |	\
+	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK		\
+	(X86_RAW_EVENT_MASK          |  \
+	 AMD64_EVENTSEL_EVENT)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)

+16 −3
@@ -143,13 +143,21 @@ struct cpu_hw_events {
  * Constraint on the Event code.
  */
 #define INTEL_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
 /*
  * Constraint on the Event code + UMask + fixed-mask
+ *
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
  */
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
 
 /*
  * Constraint on the Event code + UMask
@@ -437,6 +445,11 @@ static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc
 	return 0;
 }
 
+static u64 x86_pmu_raw_event(u64 hw_event)
+{
+	return hw_event & X86_RAW_EVENT_MASK;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -1427,7 +1440,7 @@ void __init init_hw_perf_events(void)
 
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+			if (c->cmask != X86_RAW_EVENT_MASK)
 				continue;
 
 			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
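
As a side note on the fixed-counter comment above: with the new macros,
the "disqualifies for fixed counters" rule boils down to checking that a
config requests nothing beyond event code and unit mask. A hypothetical
helper (not in the patch; it assumes the definitions from the header
change above) could express it as:

/* Hypothetical: eligible for a fixed counter only when the edge, inv
 * and cnt-mask filters are all clear, i.e. nothing beyond event code
 * plus umask is requested. */
static int fits_fixed_counter(u64 config)
{
	return (config & X86_RAW_EVENT_MASK) ==
	       (config & INTEL_ARCH_EVENT_MASK);
}
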
+1 −14
@@ -113,20 +113,7 @@ static u64 amd_pmu_event_map(int hw_event)
 
 static u64 amd_pmu_raw_event(u64 hw_event)
 {
-#define K7_EVNTSEL_EVENT_MASK	0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
-#define K7_EVNTSEL_INV_MASK	0x000800000ULL
-#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK			\
-	(K7_EVNTSEL_EVENT_MASK |	\
-	 K7_EVNTSEL_UNIT_MASK  |	\
-	 K7_EVNTSEL_EDGE_MASK  |	\
-	 K7_EVNTSEL_INV_MASK   |	\
-	 K7_EVNTSEL_REG_MASK)
-
-	return hw_event & K7_EVNTSEL_MASK;
+	return hw_event & AMD64_RAW_EVENT_MASK;
 }
 
 /*
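
For context on AMD64_RAW_EVENT_MASK: on top of X86_RAW_EVENT_MASK it
keeps bits 35:32, which AMD64_EVENTSEL_EVENT reserves for the upper four
bits of AMD's extended event select. A hedged sketch of recovering the
full 12-bit event code from such a config (the helper name is made up,
not from the patch):

/* Hypothetical: event[7:0] lives in EVENTSEL[7:0], event[11:8] in
 * EVENTSEL[35:32]; shifting right by 24 lines the high bits up as
 * bits 11:8 of the result. */
static unsigned int amd64_event_code(u64 config)
{
	return (config & ARCH_PERFMON_EVENTSEL_EVENT) |
	       ((config >> 24) & 0x0F00);
}
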
+2 −20
@@ -452,24 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
  },
 };
 
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK		\
-	(INTEL_ARCH_EVTSEL_MASK |	\
-	 INTEL_ARCH_UNIT_MASK   |	\
-	 INTEL_ARCH_EDGE_MASK   |	\
-	 INTEL_ARCH_INV_MASK    |	\
-	 INTEL_ARCH_CNT_MASK)
-
-	return hw_event & CORE_EVNTSEL_MASK;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -788,7 +770,7 @@ static __initconst struct x86_pmu core_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -827,7 +809,7 @@ static __initconst struct x86_pmu intel_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
+1 −19
@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
  */
 #define P6_NOP_EVENT			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define P6_EVNTSEL_INV_MASK		0x00800000ULL
-#define P6_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define P6_EVNTSEL_MASK			\
-	(P6_EVNTSEL_EVENT_MASK |	\
-	 P6_EVNTSEL_UNIT_MASK  |	\
-	 P6_EVNTSEL_EDGE_MASK  |	\
-	 P6_EVNTSEL_INV_MASK   |	\
-	 P6_EVNTSEL_REG_MASK)
-
-	return hw_event & P6_EVNTSEL_MASK;
-}
-
 static struct event_constraint p6_event_constraints[] =
 {
 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
@@ -114,7 +96,7 @@ static __initconst struct x86_pmu p6_pmu = {
 	.eventsel		= MSR_P6_EVNTSEL0,
 	.perfctr		= MSR_P6_PERFCTR0,
 	.event_map		= p6_pmu_event_map,
-	.raw_event		= p6_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
 	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,