Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 43ef205b authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf/x86/intel: Remove intel_excl_states::init_state



For some obscure reason intel_{start,stop}_scheduling() copy the HT
state to an intermediate array. This would make sense if we ever were
to make changes to it which we'd have to discard.

Except we don't. By the time we call intel_commit_scheduling() we're —
as the name implies — committed to them. We'll never back out.

A further hint that it's pointless is that intel_stop_scheduling()
unconditionally publishes the state.

So the intermediate array is pointless, modify the state in place and
kill the extra array.

And remove the pointless array initialization: INTEL_EXCL_UNUSED == 0.

Note: all of this is serialized by intel_excl_cntr::lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1fe684e3
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -884,7 +884,6 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
	}

	if (!assign || unsched) {

		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			/*
+0 −1
Original line number Diff line number Diff line
@@ -133,7 +133,6 @@ enum intel_excl_state_type {
};

struct intel_excl_states {
	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};
+2 −20
Original line number Diff line number Diff line
@@ -1927,11 +1927,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
	 * makes scheduling appear as a transaction
	 */
	raw_spin_lock(&excl_cntrs->lock);

	/*
	 * Save a copy of our state to work on.
	 */
	memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
}

static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
@@ -1955,9 +1950,9 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
	lockdep_assert_held(&excl_cntrs->lock);

	if (c->flags & PERF_X86_EVENT_EXCL)
		xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
	else
		xl->init_state[cntr] = INTEL_EXCL_SHARED;
		xl->state[cntr] = INTEL_EXCL_SHARED;
}

static void
@@ -1980,11 +1975,6 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)

	xl = &excl_cntrs->states[tid];

	/*
	 * Commit the working state.
	 */
	memcpy(xl->state, xl->init_state, sizeof(xl->state));

	xl->sched_started = false;
	/*
	 * release shared state lock (acquired in intel_start_scheduling())
@@ -2519,19 +2509,11 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
{
	struct intel_excl_cntrs *c;
	int i;

	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
			 GFP_KERNEL, cpu_to_node(cpu));
	if (c) {
		raw_spin_lock_init(&c->lock);
		for (i = 0; i < X86_PMC_IDX_MAX; i++) {
			c->states[0].state[i] = INTEL_EXCL_UNUSED;
			c->states[0].init_state[i] = INTEL_EXCL_UNUSED;

			c->states[1].state[i] = INTEL_EXCL_UNUSED;
			c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
		}
		c->core_id = -1;
	}
	return c;