Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe9081cc authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf, x86: Add simple group validation



Refuse to add events when the group wouldn't fit onto the PMU
anymore.

Naive implementation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@gmail.com>
LKML-Reference: <1254911461.26976.239.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b690081d
Loading
Loading
Loading
Loading
+69 −21
Original line number Diff line number Diff line
@@ -114,7 +114,8 @@ struct x86_pmu {
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);
	int		(*get_event_idx)(struct hw_perf_event *hwc);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
};

static struct x86_pmu x86_pmu __read_mostly;
@@ -1390,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
		x86_pmu_enable_event(hwc, idx);
}

static int
fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
static int fixed_mode_idx(struct hw_perf_event *hwc)
{
	unsigned int hw_event;

@@ -1424,9 +1424,9 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
/*
 * generic counter allocator: get next free counter
 */
static int gen_get_event_idx(struct hw_perf_event *hwc)
static int
gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
@@ -1436,16 +1436,16 @@ static int gen_get_event_idx(struct hw_perf_event *hwc)
/*
 * intel-specific counter allocator: check event constraints
 */
static int intel_get_event_idx(struct hw_perf_event *hwc)
static int
intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	const struct event_constraint *event_constraint;
	int i, code;

	if (!event_constraint)
		goto skip;

	code = hwc->config & 0xff;
	code = hwc->config & CORE_EVNTSEL_EVENT_MASK;

	for_each_event_constraint(event_constraint, event_constraint) {
		if (code == event_constraint->code) {
@@ -1457,19 +1457,15 @@ static int intel_get_event_idx(struct hw_perf_event *hwc)
		}
	}
skip:
	return gen_get_event_idx(hwc);
	return gen_get_event_idx(cpuc, hwc);
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in event:
 */
static int x86_pmu_enable(struct perf_event *event)
static int
x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = fixed_mode_idx(event, hwc);
	idx = fixed_mode_idx(hwc);
	if (idx == X86_PMC_IDX_FIXED_BTS) {
		/* BTS is already occupied. */
		if (test_and_set_bit(idx, cpuc->used_mask))
@@ -1499,7 +1495,7 @@ static int x86_pmu_enable(struct perf_event *event)
		/* Try to get the previous generic event again */
		if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = x86_pmu.get_event_idx(hwc);
			idx = x86_pmu.get_event_idx(cpuc, hwc);
			if (idx == -1)
				return -EAGAIN;

@@ -1510,6 +1506,22 @@ try_generic:
		hwc->event_base  = x86_pmu.perfctr;
	}

	return idx;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in event:
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = x86_schedule_event(cpuc, hwc);
	if (idx < 0)
		return idx;

	perf_events_lapic_init();

	x86_pmu.disable(hwc, idx);
@@ -2212,11 +2224,47 @@ static const struct pmu pmu = {
	.unthrottle	= x86_pmu_unthrottle,
};

/*
 * Check whether a single event can still be scheduled against the
 * fake counter-allocation state in @cpuc.
 *
 * Returns non-zero when the event fits (or belongs to another PMU,
 * e.g. a software event, and therefore consumes no x86 counter),
 * zero when it cannot be scheduled.
 *
 * Note: validate_group() tests this with `!validate_event(...)`, so
 * the result must be a boolean.  x86_schedule_event() returns the
 * allocated counter index — which may legitimately be 0 — or a
 * negative errno, so the raw index must NOT be returned directly:
 * that would reject index 0 and accept negative errors.
 */
static int
validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	/* Copy the hw state so scheduling doesn't disturb the real event. */
	struct hw_perf_event fake_event = event->hw;

	/* Events of other PMUs never occupy an x86 counter: always fit. */
	if (event->pmu != &pmu)
		return 1;

	/* Collapse index-or-errno into the boolean contract. */
	return x86_schedule_event(cpuc, &fake_event) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		if (event->group_leader != event)
			err = validate_group(event);
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);