
Commit e130bedb authored by Paul Mackerras
parents 00557b59 756e7104
+1 −1
@@ -12,7 +12,7 @@ CFLAGS_btext.o += -fPIC
endif

obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
				   signal_32.o pmc.o
				   irq.o signal_32.o pmc.o
obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
				   signal_64.o ptrace32.o systbl.o \
				   paca.o ioctl32.o cpu_setup_power4.o \
+112 −153
@@ -5,8 +5,8 @@
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Cort Dougan
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
@@ -21,6 +21,14 @@
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#include <linux/errno.h>
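
The MPC8xx comment above describes a cached-mask scheme: the SIU mask word keeps a set bit for every enabled interrupt, IRQ0 sits at the most significant end of the word (hence the "weird shifting"), and the cached copy can be written straight into the SIU SMASK register. Below is a minimal illustrative sketch of that scheme, not the in-tree 8xx PIC driver; cached_irq_mask here is a single word, and simask_reg / siu_write_mask are invented stand-ins for the real register access.

#include <stdint.h>

static uint32_t cached_irq_mask;          /* bit set => interrupt enabled */
static volatile uint32_t *simask_reg;     /* would point at the SIU SIMASK register */

static void siu_write_mask(void)
{
	/* In the kernel this would be a big-endian MMIO store (out_be32);
	 * here it is a plain volatile store so the sketch stands alone. */
	*simask_reg = cached_irq_mask;
}

static void m8xx_mask_irq(unsigned int irq_nr)
{
	cached_irq_mask &= ~(1u << (31 - irq_nr));   /* IRQ0 is the MSB; clear = disable */
	siu_write_mask();
}

static void m8xx_unmask_irq(unsigned int irq_nr)
{
	cached_irq_mask |= 1u << (31 - irq_nr);      /* set = enable */
	siu_write_mask();
}
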
@@ -29,6 +37,7 @@
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
@@ -40,9 +49,13 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#ifdef CONFIG_PPC64
#include <linux/kallsyms.h>
#endif

#include <asm/uaccess.h>
#include <asm/system.h>
@@ -52,21 +65,42 @@
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC64
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
#endif

#ifdef CONFIG_SMP
static int ppc_spurious_interrupts;

#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
extern void iSeries_smp_message_recv(struct pt_regs *);
#endif

extern irq_desc_t irq_desc[NR_IRQS];
#ifdef CONFIG_PPC32
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
int __irq_offset_value;
int ppc_spurious_interrupts;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
@@ -76,11 +110,9 @@ int show_interrupts(struct seq_file *p, void *v)
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++) {
			if (cpu_online(j))
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		}
		seq_putc(p, '\n');
	}

@@ -92,17 +124,15 @@ int show_interrupts(struct seq_file *p, void *v)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		}
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_printf(p, "  None      ");
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
@@ -110,8 +140,25 @@ int show_interrupts(struct seq_file *p, void *v)
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized){
			seq_puts(p, "TAU: ");
			for (j = 0; j < NR_CPUS; j++)
				if (cpu_online(j))
					seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}

@@ -144,126 +191,6 @@ void fixup_irqs(cpumask_t map)
}
#endif

extern int noirqdebug;

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = get_irq_desc(irq);
	irqreturn_t action_ret;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		ack_irq(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->handler->end(irq);
		return;
	}

	spin_lock(&desc->lock);
	ack_irq(irq);	
	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/* This is not a real spurious interrupt, we
			 * have to eoi it, so we jump to out
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	   Since we set PENDING, if another processor is handling
	   a different instance of this same irq, the other processor
	   will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);

#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			action_ret = handle_IRQ_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, regs);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
@@ -310,6 +237,9 @@ void do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
	int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

@@ -328,20 +258,44 @@ void do_IRQ(struct pt_regs *regs)
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq >= 0)
		ppc_irq_dispatch_handler(regs, irq);
	else
	if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call___do_IRQ(irq, regs, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			__do_IRQ(irq, regs);
	} else
#ifdef CONFIG_PPC32
		if (irq != -2)
#endif
			/* That's not SMP safe ... but who cares ? */
			ppc_spurious_interrupts++;

        irq_exit();
}

#endif	/* CONFIG_PPC_ISERIES */
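
The comment in do_IRQ() above defines the ppc_md.get_irq() contract: the platform hook returns a pending interrupt number, -1 when nothing more is pending, or -2 for buggy hardware whose interrupt has effectively been handled already. A hypothetical board-level implementation following that contract might look like the sketch below; MY_PIC_NO_IRQ, MY_PIC_SPURIOUS and my_pic_read_pending() are invented names, not part of the kernel.

struct pt_regs;				/* opaque here; the real type comes from <asm/ptrace.h> */

#define MY_PIC_NO_IRQ		0xffff	/* invented sentinel: nothing pending */
#define MY_PIC_SPURIOUS		0xfffe	/* invented sentinel: already handled / bogus vector */

static int my_pic_read_pending(void)	/* stand-in for reading the interrupt controller */
{
	return MY_PIC_NO_IRQ;
}

static int myboard_get_irq(struct pt_regs *regs)
{
	int vec = my_pic_read_pending();

	if (vec == MY_PIC_NO_IRQ)
		return -1;		/* no more pending interrupts */
	if (vec == MY_PIC_SPURIOUS)
		return -2;		/* already handled; do_IRQ() will not count it as spurious */
	return vec;			/* a valid irq number, dispatched via __do_IRQ() */
}
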

void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
	static int once = 0;

	if (once)
@@ -349,10 +303,14 @@ void __init init_IRQ(void)

	once++;

#endif
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
@@ -517,3 +475,4 @@ static int __init setup_noirqdistrib(char *str)
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
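
Earlier in this file's diff, the comment introducing the virtual IRQ mapping code notes that it is used on systems with XICS interrupt controllers: drivers see a small "virtual" irq number while the controller deals in larger hardware source numbers. As a rough, simplified sketch of that idea (a direct-mapped table, allocated on first use), assuming invented names and sizes rather than the actual kernel implementation:

#define SKETCH_NR_IRQS	512		/* illustrative table size, not the kernel's NR_IRQS */
#define SKETCH_NO_IRQ	(-1)

/* Index is the virtual irq number handed to drivers; the entry is the
 * hardware interrupt source number presented by the XICS.  A zero entry
 * is treated as "free" purely for the sake of this illustration. */
static unsigned int virt_to_real[SKETCH_NR_IRQS];

static int sketch_virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq;

	for (virq = 1; virq < SKETCH_NR_IRQS; virq++) {
		if (virt_to_real[virq] == 0) {		/* free slot */
			virt_to_real[virq] = real_irq;
			return virq;			/* this is what drivers use */
		}
	}
	return SKETCH_NO_IRQ;				/* table full */
}

static unsigned int sketch_virt_irq_to_real(unsigned int virq)
{
	return virt_to_real[virq];			/* simple table lookup */
}
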
+4 −4
@@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
	mtlr	r0
	blr

_GLOBAL(call_handle_IRQ_event)
_GLOBAL(call___do_IRQ)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r6)
	mr	r1,r6
	bl	.handle_IRQ_event
	stdu	r1,THREAD_SIZE-112(r5)
	mr	r1,r5
	bl	.__do_IRQ
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
+2 −0
@@ -53,6 +53,8 @@
#include <asm/lmb.h>
#include <asm/xmon.h>

#include "setup.h"

#undef DEBUG

#ifdef DEBUG
+6 −0
#ifndef _POWERPC_KERNEL_SETUP_H
#define _POWERPC_KERNEL_SETUP_H

void check_for_initrd(void);

#endif /* _POWERPC_KERNEL_SETUP_H */