Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 39dcfa55 authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, AMD: Fix ARAT feature setting again
  Revert "x86, AMD: Fix APIC timer erratum 400 affecting K8 Rev.A-E processors"
  x86, apic: Fix spurious error interrupts triggering on all non-boot APs
  x86, mce, AMD: Fix leaving freed data in a list
  x86: Fix UV BAU for non-consecutive nasids
  x86, UV: Fix NMI handler for UV platforms
parents 7f12b72b 14fb57dc
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -78,6 +78,7 @@
#define		APIC_DEST_LOGICAL	0x00800
#define		APIC_DEST_PHYSICAL	0x00000
#define		APIC_DM_FIXED		0x00000
#define		APIC_DM_FIXED_MASK	0x00700
#define		APIC_DM_LOWEST		0x00100
#define		APIC_DM_SMI		0x00200
#define		APIC_DM_REMRD		0x00300
+13 −4
Original line number Diff line number Diff line
@@ -94,6 +94,8 @@
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD 5

#define UV_LB_SUBNODEID 0x10

/*
 * number of entries in the destination side payload queue
 */
@@ -124,7 +126,7 @@
 * The distribution specification (32 bytes) is interpreted as a 256-bit
 * distribution vector. Adjacent bits correspond to consecutive even numbered
 * nodeIDs. The result of adding the index of a given bit to the 15-bit
 * 'base_dest_nodeid' field of the header corresponds to the
 * 'base_dest_nasid' field of the header corresponds to the
 * destination nodeID associated with that specified bit.
 */
struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
struct bau_msg_header {
	unsigned int dest_subnodeid:6;	/* must be 0x10, for the LB */
	/* bits 5:0 */
	unsigned int base_dest_nodeid:15; /* nasid of the */
	unsigned int base_dest_nasid:15; /* nasid of the */
	/* bits 20:6 */			  /* first bit in uvhub map */
	unsigned int command:8;	/* message type */
	/* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
	unsigned long d_rcanceled; /* number of messages canceled by resets */
};

struct hub_and_pnode {
	short uvhub;
	short pnode;
};
/*
 * one per-cpu; to locate the software tables
 */
@@ -399,10 +405,12 @@ struct bau_control {
	int baudisabled;
	int set_bau_off;
	short cpu;
	short osnode;
	short uvhub_cpu;
	short uvhub;
	short cpus_in_socket;
	short cpus_in_uvhub;
	short partition_base_pnode;
	unsigned short message_number;
	unsigned short uvhub_quiesce;
	short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
	int congested_period;
	cycles_t period_time;
	long period_requests;
	struct hub_and_pnode *target_hub_and_pnode;
};

static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
{
	return constant_test_bit(uvhub, &dstp->bits[0]);
}
static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
{
	__set_bit(uvhub, &dstp->bits[0]);
	__set_bit(pnode, &dstp->bits[0]);
}
static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
				    int nbits)
+2 −0
Original line number Diff line number Diff line
@@ -398,6 +398,8 @@ struct uv_blade_info {
	unsigned short	nr_online_cpus;
	unsigned short	pnode;
	short		memory_nid;
	spinlock_t	nmi_lock;
	unsigned long	nmi_count;
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
+15 −1
Original line number Diff line number Diff line
@@ -5,7 +5,7 @@
 *
 * SGI UV MMR definitions
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
    } s;
};

/* ========================================================================= */
/*                               UVH_SCRATCH5                                */
/* ========================================================================= */
#define UVH_SCRATCH5 0x2d0200UL
#define UVH_SCRATCH5_32 0x00778

#define UVH_SCRATCH5_SCRATCH5_SHFT 0
#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
union uvh_scratch5_u {
    unsigned long	v;
    struct uvh_scratch5_s {
	unsigned long	scratch5 : 64;  /* RW, W1CS */
    } s;
};

#endif /* __ASM_UV_MMRS_X86_H__ */
+43 −5
Original line number Diff line number Diff line
@@ -37,6 +37,13 @@
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
#include <asm/nmi.h>

/* BMC sets a bit this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR				UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR			(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK			(1UL << 63)
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
 */
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
	unsigned long real_uv_nmi;
	int bid;

	if (reason != DIE_NMIUNKNOWN)
		return NOTIFY_OK;

	if (in_crash_kexec)
		/* do nothing if entering the crash kernel */
		return NOTIFY_OK;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
		return NOTIFY_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time
	 * to prevent intermixed output.
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("NMI stack dump cpu %u:\n", smp_processor_id());
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
}

static struct notifier_block uv_dump_stack_nmi_nb = {
	.notifier_call	= uv_handle_nmi
	.notifier_call	= uv_handle_nmi,
	.priority = NMI_LOCAL_LOW_PRIOR - 1,
};

void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;

@@ -747,6 +784,7 @@ void __init uv_system_init(void)
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
Loading