
Commit b07f8915 authored by Andi Kleen, committed by Andi Kleen

[PATCH] x86: Temporarily revert parts of the Core 2 nmi watchdog support

This makes merging easier.  They are readded a few patches later.

Signed-off-by: Andi Kleen <ak@suse.de>
parent 874c4fe3
+1 −64
@@ -24,7 +24,6 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
-#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -96,9 +95,6 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -211,8 +207,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -222,10 +216,6 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			disable_intel_arch_watchdog();
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -454,53 +444,6 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int evntsel;
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return 0;
-
-	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	return 1;
-}
-
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -510,11 +453,6 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			if (!setup_intel_arch_watchdog())
-				return;
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -619,8 +557,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
 		}
-		else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
-		         nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+		else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
			/* Only P6 based Pentium M need to re-unmask
			 * the apic vector but it doesn't hurt
			 * other P6 variant */
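
The setup_intel_arch_watchdog()/disable_intel_arch_watchdog() pair removed above keys off CPUID leaf 0xA (cpuid_ebx(10)): architectural perfmon reports one bit per event in EBX, and a cleared bit means the event is supported; bit 0 covers the Unhalted Core Cycles event the watchdog counts (the "ebp" in the removed comments appears to be a typo for EBX). As a rough illustration only, not kernel code, the same probe can be done from user space with GCC's <cpuid.h>; the EAX field decoding follows the architectural perfmon layout (version in bits 7:0, counters per core in bits 15:8):

/* Illustrative user-space probe of CPUID leaf 0xA (architectural perfmon).
 * Mirrors the check in the reverted setup_intel_arch_watchdog(): a cleared
 * bit in EBX means the corresponding event is available; bit 0 is
 * "Unhalted Core Cycles". Build with gcc on x86. */
#include <stdio.h>
#include <cpuid.h>

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0xA not supported\n");
		return 1;
	}
	printf("perfmon version: %u, counters per core: %u\n",
	       eax & 0xff, (eax >> 8) & 0xff);
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		printf("Unhalted Core Cycles event is available\n");
	else
		printf("Unhalted Core Cycles event is NOT available\n");
	return 0;
}
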
+5 −76
@@ -26,7 +26,6 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
-#include <asm/intel_arch_perfmon.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -66,9 +65,6 @@ static unsigned int nmi_p4_cccr_val;
 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
 #define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
 
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #define MSR_P4_MISC_ENABLE	0x1A0
 #define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
 #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
@@ -100,10 +96,7 @@ static __cpuinit inline int nmi_known_cpu(void)
 	case X86_VENDOR_AMD:
 		return boot_cpu_data.x86 == 15;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return 1;
-		else
-			return (boot_cpu_data.x86 == 15);
+		return boot_cpu_data.x86 == 15;
 	}
 	return 0;
 }
@@ -209,8 +202,6 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -223,8 +214,6 @@ static void disable_lapic_nmi_watchdog(void)
 		if (boot_cpu_data.x86 == 15) {
 			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
 			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
-		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			disable_intel_arch_watchdog();
 		}
 		break;
 	}
@@ -377,53 +366,6 @@ static void setup_k7_watchdog(void)
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int evntsel;
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return 0;
-
-	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	return 1;
-}
-
 
 static int setup_p4_watchdog(void)
 {
@@ -477,16 +419,10 @@ void setup_apic_nmi_watchdog(void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			if (!setup_intel_arch_watchdog())
-				return;
-		} else if (boot_cpu_data.x86 == 15) {
+		if (boot_cpu_data.x86 != 15)
+			return;
 		if (!setup_p4_watchdog())
 			return;
-		} else {
-			return;
-		}
-
 		break;
 
 	default:
@@ -571,13 +507,6 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			 */
 			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
-		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
-			/*
-			 * For Intel based architectural perfmon
-			 * - LVTPC is masked on interrupt and must be
-			 *   unmasked by the LVTPC handler.
-			 */
-			apic_write(APIC_LVTPC, APIC_DM_NMI);
 		}
 		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 	}
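
Both before and after the revert, the tick path reloads the active counter with -((u64)cpu_khz * 1000 / nmi_hz): the counter counts up and raises a PMI (delivered as an NMI through LVTPC) on overflow, so preloading it with the two's complement of "cycles per watchdog period" makes it wrap after roughly 1/nmi_hz seconds of unhalted execution. A small stand-alone sketch of that preload arithmetic, with assumed cpu_khz and nmi_hz values purely for illustration:

/* Sketch of the NMI watchdog counter preload arithmetic: the counter
 * counts up and interrupts on overflow, so it is preloaded with the
 * negative of the desired period in cycles. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_khz = 2400000;	/* assumed 2.4 GHz CPU */
	unsigned int nmi_hz = 1;	/* assumed once-per-second watchdog rate */

	uint64_t period  = cpu_khz * 1000 / nmi_hz;	/* cycles per watchdog tick */
	uint64_t preload = -(cpu_khz * 1000 / nmi_hz);	/* value written via wrmsrl() */

	printf("cycles per tick         : %llu\n", (unsigned long long)period);
	printf("preload (2's complement): 0x%016llx\n", (unsigned long long)preload);
	/* the counter overflows after 'period' increments, raising the NMI */
	return 0;
}
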
+0 −19
@@ -1,19 +0,0 @@
-#ifndef X86_INTEL_ARCH_PERFMON_H
-#define X86_INTEL_ARCH_PERFMON_H 1
-
-#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE      (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT          (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS           (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR          (1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
-
-#endif	/* X86_INTEL_ARCH_PERFMON_H */
+0 −19
@@ -1,19 +0,0 @@
-#ifndef X86_64_INTEL_ARCH_PERFMON_H
-#define X86_64_INTEL_ARCH_PERFMON_H 1
-
-#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE      (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT          (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS           (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR          (1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
-
-#endif	/* X86_64_INTEL_ARCH_PERFMON_H */
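
The two deleted headers (re-added later in the series, per the commit message) carry identical constants. For reference, the reverted setup code combined them into the event-select word written to MSR_ARCH_PERFMON_EVENTSEL0: OS and USR select ring-0 and ring-3 counting, INT requests the overflow interrupt, and SEL/UMASK pick event 0x3c with umask 0 (Unhalted Core Cycles), with the ENABLE bit ORed in as a second step after the counter is programmed. A user-space sketch that only prints the composed value (illustrative; it never touches an MSR):

/* Illustrative composition of the event-select value the reverted
 * setup_intel_arch_watchdog() built from the deleted header's defines. */
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL0_ENABLE      (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT          (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS           (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR          (1 << 16)

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)

int main(void)
{
	unsigned int evntsel;

	/* count unhalted core cycles in both kernel and user mode, and
	 * raise a PMI (routed as NMI via LVTPC) when the counter overflows */
	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
		| ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

	printf("EVENTSEL before enable: 0x%08x\n", evntsel);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	printf("EVENTSEL after enable : 0x%08x\n", evntsel);
	return 0;
}
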