Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1cb2a8e1 authored by Ingo Molnar, committed by H. Peter Anvin
Browse files

x86, mce: clean up mce_amd_64.c



Make the coding style match that of the rest of the x86 arch code.

[ Impact: cleanup ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent cb6f3c15
Loading
Loading
Loading
Loading
+103 −85
Original line number Diff line number Diff line
@@ -13,22 +13,22 @@
 *
 *  All MC4_MISCi registers are shared between multi-cores
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/percpu.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#include <asm/idle.h>

#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"
@@ -110,6 +110,7 @@ static void threshold_restart_bank(void *_tr)
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}
@@ -125,11 +126,11 @@ static void threshold_restart_bank(void *_tr)
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int bank, block;
	unsigned int cpu = smp_processor_id();
	u8 lvt_off;
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct thresh_restart tr;
	u8 lvt_off;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -140,8 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			}
			else
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
@@ -193,9 +193,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;
	u32 low = 0, high = 0, address = 0;

	mce_setup(&m);

@@ -204,16 +204,16 @@ static void amd_threshold_interrupt(void)
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			}
			else
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;
@@ -229,8 +229,10 @@ static void amd_threshold_interrupt(void)
			     (high & MASK_LOCKED_HI))
				continue;

			/* Log the machine check that caused the threshold
			   event. */
			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					&__get_cpu_var(mce_poll_banks));

@@ -266,36 +268,44 @@ static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t store_interrupt_enable(struct threshold_block *b,
				      const char *buf, size_t count)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t count)
{
	char *end;
	struct thresh_restart tr;
	unsigned long new = simple_strtoul(buf, &end, 0);
	unsigned long new;
	char *end;

	new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;

	b->interrupt_enable = !!new;

	tr.b		= b;
	tr.reset	= 0;
	tr.old_limit	= 0;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return end - buf;
}

static ssize_t store_threshold_limit(struct threshold_block *b,
				     const char *buf, size_t count)
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t count)
{
	char *end;
	struct thresh_restart tr;
	unsigned long new = simple_strtoul(buf, &end, 0);
	unsigned long new;
	char *end;

	new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;
@@ -338,7 +348,8 @@ static ssize_t store_error_count(struct threshold_block *b,
	return 1;
}

#define THRESHOLD_ATTR(_name,_mode,_show,_store) {            \
#define THRESHOLD_ATTR(_name, _mode, _show, _store)			\
{									\
	.attr	= {.name = __stringify(_name), .mode = _mode },		\
	.show	= _show,						\
	.store	= _store,						\
@@ -367,7 +378,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

@@ -377,7 +390,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

@@ -396,9 +411,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int block,
					       u32 address)
{
	int err;
	u32 low, high;
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;
@@ -430,11 +445,12 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	else
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
@@ -447,8 +463,9 @@ recurse:
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
@@ -507,6 +524,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

		cpumask_copy(b->cpus, cpu_core_mask(cpu));
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
	}
#endif
@@ -605,15 +623,13 @@ static void deallocate_threshold_block(unsigned int cpu,

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	int i = 0;
	struct threshold_bank *b;
	char name[32];
	int i = 0;

	b = per_cpu(threshold_banks, cpu)[bank];

	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

@@ -624,6 +640,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;

		return;
	}
#endif
@@ -659,8 +676,8 @@ static void threshold_remove_device(unsigned int cpu)
}

/* get notified when a cpu comes on/off */
static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
						     unsigned int cpu)
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	if (cpu >= NR_CPUS)
		return;
@@ -686,11 +703,12 @@ static __init int threshold_init_device(void)
	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}

device_initcall(threshold_init_device);