Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4295ee62 authored by Robert Richter's avatar Robert Richter Committed by Ingo Molnar
Browse files

perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()



MSR reads and writes are expensive. This patch adds checks to avoid
their usage where possible.

[ Impact: micro-optimization on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4138960a
Loading
Loading
Loading
Loading
+14 −10
Original line number Diff line number Diff line
@@ -334,12 +334,14 @@ static u64 pmc_amd_save_disable_all(void)
	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
	}

	return enabled;
}
@@ -372,15 +374,17 @@ static void pmc_amd_restore_all(u64 ctrl)
		return;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		if (test_bit(idx, cpuc->active_mask)) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}
}

void hw_perf_restore(u64 ctrl)
{