Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a54123e2 authored by Bob Breuer, committed by David S. Miller
Browse files

[SPARC]: Try to start getting SMP back into shape.



Todo items:
 - IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
 - sun4d
 - re-indent large chunks of sun4m_smp.c
 - some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)

Last I checked (with 2.6.14), random programs segfault with dual
HyperSPARC.  And with SuperSPARC II's, it seems stable but will
eventually die from a write lock error (wrong lock owner or something).

I haven't tried the HyperSPARC + highmem combination recently, so that
may still be a problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 674a396c
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -23,7 +23,6 @@ menu "General machine setup"

config SMP
	bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
	depends on BROKEN
	---help---
	  This enables support for systems with more than one CPU. If you have
	  a system with only one CPU, say N. If you have a system with more
+34 −32
Original line number Diff line number Diff line
@@ -154,9 +154,11 @@ void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;

struct irqaction *irq_action[NR_IRQS] = {
	[0 ... (NR_IRQS-1)] = NULL
};
/*
 * Per-IRQ bookkeeping, replacing the old flat irq_action[] array:
 * the handler chain plus a flags word used by handler_irq() and
 * synchronize_irq() to track whether the irq is being serviced.
 */
struct {
	struct irqaction *action;	/* head of (possibly shared) handler chain */
	int flags;			/* SPARC_IRQ_INPROGRESS while handlers run */
} sparc_irq[NR_IRQS];
#define SPARC_IRQ_INPROGRESS 1	/* set/cleared in handler_irq(), polled in synchronize_irq() */

/* Used to protect the IRQ action lists */
DEFINE_SPINLOCK(irq_action_lock);
@@ -177,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
	}
	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
	        action = *(i + irq_action);
		action = sparc_irq[i].action;
		if (!action) 
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
@@ -186,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				    kstat_cpu(cpu_logical_map(j)).irqs[i]);
				    kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %c %s",
@@ -207,7 +209,7 @@ out_unlock:
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action;
	struct irqaction * tmp = NULL;
	struct irqaction **actionp;
        unsigned long flags;
	unsigned int cpu_irq;
	
@@ -225,7 +227,8 @@ void free_irq(unsigned int irq, void *dev_id)

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(cpu_irq + irq_action);
	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;

	if (!action->handler) {
		printk("Trying to free free IRQ%d\n",irq);
@@ -235,7 +238,7 @@ void free_irq(unsigned int irq, void *dev_id)
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
			actionp = &action->next;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n",irq);
@@ -255,10 +258,7 @@ void free_irq(unsigned int irq, void *dev_id)
		goto out_unlock;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*(cpu_irq + irq_action) = action->next;
	*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

@@ -268,7 +268,7 @@ void free_irq(unsigned int irq, void *dev_id)

	kfree(action);

	if (!(*(cpu_irq + irq_action)))
	if (!sparc_irq[cpu_irq].action)
		disable_irq(irq);

out_unlock:
@@ -287,8 +287,11 @@ EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
/*
 * Spin until the given irq is no longer being serviced.
 * (Diff view: the printk/BUG stub is the pre-patch body being removed;
 * the cpu_irq busy-wait is its replacement.)
 */
void synchronize_irq(unsigned int irq)
{
	/* removed by this patch: */
	printk("synchronize_irq says: implement me!\n");
	BUG();
	/* added by this patch: */
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	/* Wait for handler_irq() to clear the in-progress flag for this irq. */
	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		cpu_relax();
}
#endif /* SMP */

@@ -299,7 +302,7 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
	unsigned int cpu_irq;
	
	cpu_irq = irq & (NR_IRQS - 1);
	action = *(cpu_irq + irq_action);
	action = sparc_irq[cpu_irq].action;

        printk("IO device interrupt, irq = %d\n", irq);
        printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, 
@@ -330,7 +333,8 @@ void handler_irq(int irq, struct pt_regs * regs)
	if(irq < 10)
		smp4m_irq_rotate(cpu);
#endif
	action = *(irq + irq_action);
	action = sparc_irq[irq].action;
	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
	kstat_cpu(cpu).irqs[irq]++;
	do {
		if (!action || !action->handler)
@@ -338,6 +342,7 @@ void handler_irq(int irq, struct pt_regs * regs)
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
	enable_pil_irq(irq);
	irq_exit();
}
@@ -389,7 +394,7 @@ int request_fast_irq(unsigned int irq,

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(cpu_irq + irq_action);
	action = sparc_irq[cpu_irq].action;
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
@@ -452,7 +457,7 @@ int request_fast_irq(unsigned int irq,
	action->dev_id = NULL;
	action->next = NULL;

	*(cpu_irq + irq_action) = action;
	sparc_irq[cpu_irq].action = action;

	enable_irq(irq);

@@ -467,7 +472,7 @@ int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction * action, *tmp = NULL;
	struct irqaction * action, **actionp;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;
@@ -490,20 +495,20 @@ int request_irq(unsigned int irq,
	    
	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(cpu_irq + irq_action);
	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next);
		} else {
		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
		if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL;		/* Or else! */
		for ( ; action; action = *actionp)
			actionp = &action->next;
	}

	/* If this is flagged as statically allocated then we use our
@@ -532,10 +537,7 @@ int request_irq(unsigned int irq,
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*(cpu_irq + irq_action) = action;
	*actionp = action;

	enable_irq(irq);

+63 −21
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ volatile int __cpu_logical_map[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
@@ -57,11 +58,6 @@ cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;

volatile unsigned long ipi_count;

volatile int smp_process_available=0;
volatile int smp_commenced = 0;

void __init smp_store_cpu_info(int id)
{
	int cpu_node;
@@ -79,6 +75,22 @@ void __init smp_store_cpu_info(int id)

/*
 * Called once all secondary cpus have been brought up.  Sums the
 * per-cpu udelay_val calibration values to report aggregate BogoMIPS,
 * then hands off to the sun4m-specific finalization.
 * NOTE(review): only sun4m is handled here; any other model hits BUG_ON.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	unsigned long bogosum = 0;	/* accumulated udelay_val of online cpus */
	int cpu, num;

	/* Count online cpus and accumulate their calibration values. */
	for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu)) {
			num++;
			bogosum += cpu_data(cpu).udelay_val;
		}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	/* Only the sun4m SMP path is wired up at this point. */
	BUG_ON(sparc_cpu_model != sun4m);
	smp4m_smp_done();
}

void cpu_panic(void)
@@ -89,17 +101,6 @@ void cpu_panic(void)

struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

/*
 * Dispatch SMP bringup to the machine-specific implementation.
 * NOTE(review): removed by this patch — superseded by smp_prepare_cpus(),
 * which supports only sun4m (the sun4d path is a listed TODO).
 */
void __init smp_boot_cpus(void)
{
	extern void smp4m_boot_cpus(void);
	extern void smp4d_boot_cpus(void);
	
	if (sparc_cpu_model == sun4m)
		smp4m_boot_cpus();
	else
		smp4d_boot_cpus();
}

void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
@@ -252,20 +253,61 @@ int setup_profiling_timer(unsigned int multiplier)
	return 0;
}

void __init smp_prepare_cpus(unsigned int maxcpus)
/*
 * Enumerate cpus from the PROM and mark up to max_cpus of them (besides
 * the boot cpu) as physically present; count any that cannot be admitted.
 * Finishes by recording the boot cpu's info and starting sun4m bringup.
 * NOTE(review): sun4m only — other models trip the BUG_ON.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void smp4m_boot_cpus(void);
	int i, cpuid, ncpus, extra;

	BUG_ON(sparc_cpu_model != sun4m);
	printk("Entering SMP Mode...\n");

	/* The boot cpu is already accounted for. */
	ncpus = 1;
	extra = 0;	/* cpus rejected because of NR_CPUS or max_cpus */
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid == boot_cpu_id)
			continue;
		if (cpuid < NR_CPUS && ncpus++ < max_cpus)
			cpu_set(cpuid, phys_cpu_present_map);
		else
			extra++;
	}
	/* Warn only when the limit was NR_CPUS, not a user-chosen max_cpus. */
	if (max_cpus >= NR_CPUS && extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	smp4m_boot_cpus();
}

/*
 * Register the boot cpu: validate its hardware id and mark it online
 * and physically present.
 * (Diff view: the three lines right after '{' are the pre-patch body
 * being removed; the cpuid-checking version follows.)
 */
void __devinit smp_prepare_boot_cpu(void)
{
	/* removed by this patch: */
	current_thread_info()->cpu = hard_smp_processor_id();
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
	/* added by this patch: */
	int cpuid = hard_smp_processor_id();

	/* A boot cpu id past NR_CPUS cannot be represented — halt via PROM. */
	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, phys_cpu_present_map);
}

/*
 * Bring the given cpu online.  Boots it via the sun4m path, then
 * releases it through smp_commenced_mask and spins until it shows up
 * in the online map.  Returns the boot result (0 on success).
 * (Diff view: the panic() line is the pre-patch stub being removed.)
 */
int __devinit __cpu_up(unsigned int cpu)
{
	/* removed by this patch: */
	panic("smp doesn't work\n");
	/* added by this patch: */
	extern int smp4m_boot_one_cpu(int);
	int ret;

	ret = smp4m_boot_one_cpu(cpu);

	if (!ret) {
		/* Let the new cpu proceed, then wait for it to mark itself online. */
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void smp_bogo(struct seq_file *m)
+0 −4
Original line number Diff line number Diff line
@@ -136,10 +136,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
/* IRQ implementation. */
EXPORT_SYMBOL(synchronize_irq);

/* Misc SMP information */
EXPORT_SYMBOL(__cpu_number_map);
EXPORT_SYMBOL(__cpu_logical_map);

/* CPU online map and active count. */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(phys_cpu_present_map);
+1 −1
Original line number Diff line number Diff line
@@ -54,7 +54,7 @@ unsigned char cpu_leds[32];
unsigned char sbus_tid[32];
#endif

extern struct irqaction *irq_action[];
static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;

struct sbus_action {
Loading