
Commit dfc65094 authored by Ingo Molnar

perf_counter: Rename 'event' to event_id/hw_event



In preparation for the renames, to avoid a namespace clash.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 65abc865
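
As background, the clash being avoided is presumably with the subsequent subsystem-wide renames, after which 'event' becomes the primary noun of the API; locals and parameters that merely hold a numeric event index are therefore moved to 'event_id'/'hw_event' first, so the bare identifier 'event' stays free. A minimal, hypothetical C sketch of the rename pattern (standalone, illustrative names only; not kernel code):

	#include <stdint.h>

	/* A tiny stand-in for a perfmon event map. */
	static const uint64_t example_event_map[] = { 0x003c, 0x00c0, 0x4f2e };

	/*
	 * Before: the index parameter was called 'event'.
	 * After:  it is 'hw_event', so the bare name 'event' stays free for
	 *         the later, subsystem-wide use of 'event' as the main object.
	 */
	static uint64_t example_pmu_event_map(int hw_event)
	{
		return example_event_map[hw_event];
	}

	int main(void)
	{
		/* Usage: look up the raw code for generic index 0. */
		return example_pmu_event_map(0) == 0x003c ? 0 : 1;
	}
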
File 1: +24 −24
@@ -124,9 +124,9 @@ static const u64 p6_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
 };
 
-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
 {
-	return p6_perfmon_event_map[event];
+	return p6_perfmon_event_map[hw_event];
 }
 
 /*
@@ -137,7 +137,7 @@ static u64 p6_pmu_event_map(int event)
  */
 #define P6_NOP_COUNTER			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
 {
 #define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -152,7 +152,7 @@ static u64 p6_pmu_raw_event(u64 event)
 	 P6_EVNTSEL_INV_MASK   |	\
 	 P6_EVNTSEL_COUNTER_MASK)
 
-	return event & P6_EVNTSEL_MASK;
+	return hw_event & P6_EVNTSEL_MASK;
 }
 
 
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };
 
-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
 {
-	return intel_perfmon_event_map[event];
+	return intel_perfmon_event_map[hw_event];
 }
 
 /*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
  * ID.
  */
 
@@ -463,7 +463,7 @@ static const u64 atom_hw_cache_event_ids
  },
 };
 
-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
 {
 #define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -478,7 +478,7 @@ static u64 intel_pmu_raw_event(u64 event)
 	 CORE_EVNTSEL_INV_MASK  |	\
 	 CORE_EVNTSEL_COUNTER_MASK)
 
-	return event & CORE_EVNTSEL_MASK;
+	return hw_event & CORE_EVNTSEL_MASK;
 }
 
 static const u64 amd_hw_cache_event_ids
@@ -585,12 +585,12 @@ static const u64 amd_perfmon_event_map[] =
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };
 
-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
 {
-	return amd_perfmon_event_map[event];
+	return amd_perfmon_event_map[hw_event];
 }
 
-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
 {
 #define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
@@ -605,7 +605,7 @@ static u64 amd_pmu_raw_event(u64 event)
 	 K7_EVNTSEL_INV_MASK   |	\
 	 K7_EVNTSEL_COUNTER_MASK)
 
-	return event & K7_EVNTSEL_MASK;
+	return hw_event & K7_EVNTSEL_MASK;
 }
 
 /*
@@ -956,7 +956,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	}
 
 	/*
-	 * Raw event type provide the config in the event structure
+	 * Raw hw_event type provide the config in the hw_event structure
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		ret = 1;
 	}
 	/*
-	 * Quirk: certain CPUs dont like it if just 1 event is left:
+	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
 	 */
 	if (unlikely(left < 2))
 		left = 2;
@@ -1337,11 +1337,11 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static int
 fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
-	unsigned int event;
+	unsigned int hw_event;
 
-	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-	if (unlikely((event ==
+	if (unlikely((hw_event ==
 		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
 		     (hwc->sample_period == 1)))
 		return X86_PMC_IDX_FIXED_BTS;
@@ -1349,11 +1349,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
 	return -1;
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
 
 	/*
 	 * Check whether the Architectural PerfMon supports
-	 * Branch Misses Retired Event or not.
+	 * Branch Misses Retired hw_event or not.
 	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
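
For orientation: the helpers renamed above (the p6/intel/amd *_pmu_event_map() and *_pmu_raw_event() functions) are the per-vendor callbacks that the common code reaches through x86_pmu.event_map() and x86_pmu.raw_event(), as visible in the __hw_perf_counter_init() and fixed_mode_idx() hunks. A rough, hypothetical sketch of that dispatch shape (simplified and standalone; not the kernel's actual struct x86_pmu definition):

	#include <stdint.h>

	/* Assumed shape of the per-vendor dispatch, simplified for illustration. */
	struct example_x86_pmu {
		uint64_t (*event_map)(int hw_event);	 /* generic index -> raw event code */
		uint64_t (*raw_event)(uint64_t hw_event); /* mask a user-supplied raw config */
	};

	static const uint64_t example_perfmon_event_map[] = { 0x003c, 0x00c0 };

	static uint64_t example_event_map(int hw_event)
	{
		return example_perfmon_event_map[hw_event];
	}

	static uint64_t example_raw_event(uint64_t hw_event)
	{
		return hw_event & 0xFFULL;	/* keep only the event-select field */
	}

	static const struct example_x86_pmu example_pmu = {
		.event_map = example_event_map,
		.raw_event = example_raw_event,
	};

	int main(void)
	{
		/* Usage: translate generic index 0, then mask a raw config value. */
		uint64_t code = example_pmu.event_map(0);
		uint64_t raw  = example_pmu.raw_event(0x1234);

		return (code == 0x003c && raw == 0x34) ? 0 : 1;
	}

With a split like this, the generic code only deals with hw_event indices and raw configs; everything vendor-specific stays behind the two callbacks.
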
File 2: +13 −13
@@ -3044,22 +3044,22 @@ perf_counter_read_event(struct perf_counter *counter,
 			struct task_struct *task)
 {
 	struct perf_output_handle handle;
-	struct perf_read_event event = {
+	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) + perf_counter_read_size(counter),
+			.size = sizeof(read_event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
 	};
 	int ret;
 
-	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_put(&handle, event);
+	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, counter);
 
 	perf_output_end(&handle);
@@ -3698,14 +3698,14 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 
 static int perf_swcounter_match(struct perf_counter *counter,
 				enum perf_type_id type,
-				u32 event, struct pt_regs *regs)
+				u32 event_id, struct pt_regs *regs)
 {
 	if (!perf_swcounter_is_counting(counter))
 		return 0;
 
 	if (counter->attr.type != type)
 		return 0;
-	if (counter->attr.config != event)
+	if (counter->attr.config != event_id)
 		return 0;
 
 	if (regs) {
@@ -3721,7 +3721,7 @@ static int perf_swcounter_match(struct perf_counter *counter,
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
-				     u32 event, u64 nr, int nmi,
+				     u32 event_id, u64 nr, int nmi,
 				     struct perf_sample_data *data,
 				     struct pt_regs *regs)
 {
@@ -3732,7 +3732,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_swcounter_match(counter, type, event, regs))
+		if (perf_swcounter_match(counter, type, event_id, regs))
 			perf_swcounter_add(counter, nr, nmi, data, regs);
 	}
 	rcu_read_unlock();
@@ -4036,17 +4036,17 @@ atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_counter_destroy(struct perf_counter *counter)
 {
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	WARN_ON(counter->parent);
 
-	atomic_dec(&perf_swcounter_enabled[event]);
+	atomic_dec(&perf_swcounter_enabled[event_id]);
 }
 
 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
 	const struct pmu *pmu = NULL;
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	/*
 	 * Software counters (currently) can't in general distinguish
@@ -4055,7 +4055,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
 	 */
-	switch (event) {
+	switch (event_id) {
 	case PERF_COUNT_SW_CPU_CLOCK:
 		pmu = &perf_ops_cpu_clock;
 
@@ -4077,7 +4077,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
 		if (!counter->parent) {
-			atomic_inc(&perf_swcounter_enabled[event]);
+			atomic_inc(&perf_swcounter_enabled[event_id]);
 			counter->destroy = sw_perf_counter_destroy;
 		}
 		pmu = &perf_ops_generic;
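
The second file deals with software counters, where event_id is simply the counter's attr.config within PERF_TYPE_SOFTWARE (for example PERF_COUNT_SW_CONTEXT_SWITCHES, as in the last hunk). A small, hypothetical sketch of the matching idea used by perf_swcounter_match() above (standalone C with stand-in names; not the kernel code):

	#include <stdint.h>

	/* Stand-ins for PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES. */
	enum { EXAMPLE_TYPE_SOFTWARE = 1 };
	enum { EXAMPLE_COUNT_SW_CONTEXT_SWITCHES = 3 };

	struct example_attr {
		uint32_t type;		/* which PMU class the counter belongs to */
		uint64_t config;	/* the event_id within that class */
	};

	/* A software counter matches an incoming event only when both the
	 * type and the per-type event_id (attr.config) agree. */
	static int example_swcounter_match(const struct example_attr *attr,
					   uint32_t type, uint32_t event_id)
	{
		return attr->type == type && attr->config == event_id;
	}

	int main(void)
	{
		struct example_attr attr = {
			.type	= EXAMPLE_TYPE_SOFTWARE,
			.config	= EXAMPLE_COUNT_SW_CONTEXT_SWITCHES,
		};

		return example_swcounter_match(&attr, EXAMPLE_TYPE_SOFTWARE,
					       EXAMPLE_COUNT_SW_CONTEXT_SWITCHES) ? 0 : 1;
	}
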