Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 55de0f2e authored by Robert Richter, committed by Ingo Molnar
Browse files

perf_counter, x86: rename intel only functions



[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-13-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 26816c28
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
@@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = hw_perf_save_disable();
	cpuc->throttle_ctrl = intel_pmu_save_disable_all();

	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (!status)
@@ -770,7 +770,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
		if (!counter)
			continue;

		perf_save_and_restart(counter);
		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			__x86_pmu_disable(counter, &counter->hw, bit);
	}
@@ -788,7 +788,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		hw_perf_restore(cpuc->throttle_ctrl);
		intel_pmu_restore_all(cpuc->throttle_ctrl);

	return ret;
}