
Commit d4369891 authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: rework counter disable functions



As with the enable function, this patch reworks the disable functions
and introduces x86_pmu_disable_counter(). The internal function
interface in struct x86_pmu changes accordingly.

[ Impact: refactor and generalize code ]
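
A minimal sketch of the pattern the patch converges on may help before reading the diff: the vendor-specific disable hooks in struct x86_pmu share one (hwc, idx) signature and delegate to a single generic helper. The code below is a simplified, self-contained stand-in for illustration (the struct layout and the wrmsrl_stub() logger are invented for this example), not the kernel's actual definitions.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct hw_perf_counter. */
struct hw_perf_counter {
	unsigned long		config_base;	/* base of the control-MSR range */
	unsigned long long	config;		/* control value written on disable */
};

/* Vendor hooks share one signature, as after this patch. */
struct x86_pmu {
	void	(*enable)(struct hw_perf_counter *, int);
	void	(*disable)(struct hw_perf_counter *, int);
};

/* Stand-in for the kernel's MSR write; it only logs the access. */
static void wrmsrl_stub(unsigned long msr, unsigned long long val)
{
	printf("wrmsr %#lx <- %#llx\n", msr, val);
}

/*
 * Generic helper: config_base + idx selects the per-counter control
 * MSR for either vendor, so one body replaces the separate
 * MSR_ARCH_PERFMON_EVENTSEL0 and MSR_K7_EVNTSEL0 writes.
 */
static void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	wrmsrl_stub(hwc->config_base + idx, hwc->config);
}

/* Intel hook; the real one also special-cases fixed counters (elided). */
static void intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

/* AMD hook is now a plain wrapper around the generic helper. */
static void amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

int main(void)
{
	/* 0xc0010000 is MSR_K7_EVNTSEL0, AMD's eventsel base. */
	struct hw_perf_counter hwc = { .config_base = 0xc0010000, .config = 0 };
	struct x86_pmu pmu = { .disable = amd_pmu_disable_counter };

	pmu.disable(&hwc, 0);	/* generic code dispatches through the table */

	/* 0x186 is MSR_ARCH_PERFMON_EVENTSEL0, Intel's eventsel base. */
	hwc.config_base = 0x186;
	pmu.disable = intel_pmu_disable_counter;
	pmu.disable(&hwc, 0);
	return 0;
}

With the signatures unified, call sites can dispatch straight through x86_pmu.disable(hwc, idx) instead of the old __x86_pmu_disable() wrapper, which is exactly what the later hunks change.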

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7c90cc45
+23 −25
arch/x86/kernel/cpu/perf_counter.c
@@ -45,7 +45,7 @@ struct x86_pmu {
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	void		(*enable)(struct hw_perf_counter *, int);
-	void		(*disable)(int, u64);
+	void		(*disable)(struct hw_perf_counter *, int);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void intel_pmu_disable_counter(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void amd_pmu_disable_counter(int idx, u64 config)
-{
-	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-
-}
+	int err;
 
-static void hw_perf_disable(int idx, u64 config)
-{
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	x86_pmu.disable(idx, config);
+	err = checking_wrmsrl(hwc->config_base + idx,
+			      hwc->config);
 }
 
 static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
-		    struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__x86_pmu_disable(struct perf_counter *counter,
-		  struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		__pmc_fixed_disable(counter, hwc, idx);
-	else
-		hw_perf_disable(idx, hwc->config);
+	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+		intel_pmu_disable_fixed(hwc, idx);
+		return;
+	}
+
+	x86_pmu_disable_counter(hwc, idx);
 }
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	x86_pmu_disable_counter(hwc, idx);
+}
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
 	else
-		amd_pmu_disable_counter(idx, hwc->config);
+		x86_pmu_disable_counter(hwc, idx);
 }

static int
@@ -622,7 +620,7 @@ static int x86_pmu_enable(struct perf_counter *counter)

 	perf_counters_lapic_init(hwc->nmi);
 
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	set_bit(idx, cpuc->active);
@@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active);
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	/*
 	 * Make sure the cleared pointer becomes visible before we
@@ -762,7 +760,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)

 		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__x86_pmu_disable(counter, &counter->hw, bit);
+			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
 	intel_pmu_ack_status(ack);