Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8047e247 authored by David S. Miller
Browse files

[SPARC64]: Virtualize IRQ numbers.



Inspired by PowerPC XICS interrupt support code.

All IRQs are virtualized in order to keep NR_IRQS from needing
to be too large.  Interrupts on sparc64 are arbitrary 11-bit
values, but we don't need to define NR_IRQS to 2048 if we
virtualize the IRQs.

As PCI and SBUS controller drivers build device IRQs, we divvy
out virtual IRQ numbers incrementally starting at 1.  Zero is
a special virtual IRQ used for the timer interrupt.

So device drivers all see virtual IRQs, and all the normal
interfaces such as request_irq(), enable_irq(), etc. translate
that into a real IRQ number in order to configure the IRQ.

At this point knowledge of the struct ino_bucket is almost
entirely contained within arch/sparc64/kernel/irq.c.  There are
a few small bits in the PCI controller drivers that need to
be swept away before we can remove ino_bucket's definition
out of asm-sparc64/irq.h and privately into kernel/irq.c

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37cdcd9e
Loading
Loading
Loading
Loading
+179 −84
Original line number Diff line number Diff line
@@ -70,7 +70,10 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)

static struct irqaction *irq_action[NR_IRQS];
static struct irqaction timer_irq_action = {
	.name = "timer",
};
static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
@@ -79,6 +82,59 @@ static struct irqaction *irq_action[NR_IRQS];
 */
static DEFINE_SPINLOCK(irq_action_lock);

static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;

/* Hand out the next free virtual IRQ number and record its mapping
 * to @real_irq in virt_to_real_irq_table[].  Virtual IRQ 0 is
 * reserved for the timer interrupt, so virt_irq_cur starts at 1.
 * Returns 0 when the virtual IRQ space is exhausted.
 *
 * NOTE(review): no lock is visibly taken here -- presumably callers
 * serialize via the boot-time controller-probe path; confirm.
 */
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
	unsigned char ent;

	/* virt_irq_cur and the return value are unsigned char, so the
	 * whole virtual IRQ space must fit in 8 bits.
	 */
	BUILD_BUG_ON(NR_IRQS >= 256);

	ent = virt_irq_cur;
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		return 0;	/* 0 doubles as the "allocation failed" value */
	}

	virt_irq_cur = ent + 1;
	virt_to_real_irq_table[ent] = real_irq;

	return ent;
}

#if 0 /* Currently unused. */
/* Reverse translation: real IRQ -> virtual IRQ, read back from the
 * ino_bucket that backs the real IRQ (build_irq()/sun4v_build_irq()
 * store the allocated virtual number in bucket->virt_irq).
 */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
	struct ino_bucket *bucket = __bucket(real_irq);

	return bucket->virt_irq;
}
#endif

/* Translate a virtual IRQ back to the real (fully-specified) IRQ it
 * was allocated for.  Returns 0 for an unallocated slot, which every
 * caller treats as "no such IRQ".
 */
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq];
}

/* Attach a pre-handler (plus two opaque arguments) to the irq_desc
 * backing @virt_irq.  Used by the PCI controller drivers (sabre,
 * tomatillo) to install write-sync workaround handlers without
 * touching struct ino_bucket themselves.  Silently ignores virtual
 * IRQs that have no real IRQ mapping.
 */
void irq_install_pre_handler(int virt_irq,
			     void (*func)(struct ino_bucket *, void *, void *),
			     void *arg1, void *arg2)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	struct irq_desc *d;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	d = bucket->irq_info;
	d->pre_handler = func;
	d->pre_handler_arg1 = arg1;
	d->pre_handler_arg2 = arg2;
}

static void register_irq_proc (unsigned int irq);

/*
@@ -164,14 +220,18 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
	return tid;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
void enable_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	unsigned long imap, cpuid;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	imap = bucket->imap;
	if (imap == 0UL)
	if (unlikely(imap == 0UL))
		return;

	preempt_disable();
@@ -182,7 +242,7 @@ void enable_irq(unsigned int irq)
	cpuid = real_hard_smp_processor_id();

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(irq);
		unsigned int ino = __irq_ino(real_irq);
		int err;

		err = sun4v_intr_settarget(ino, cpuid);
@@ -211,16 +271,22 @@ void enable_irq(unsigned int irq)
	preempt_enable();
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
void disable_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	unsigned long imap;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	imap = bucket->imap;
	if (imap != 0UL) {
	if (unlikely(imap == 0UL))
		return;

	if (tlb_type == hypervisor) {
			unsigned int ino = __irq_ino(irq);
		unsigned int ino = __irq_ino(real_irq);
		int err;

		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -240,7 +306,6 @@ void disable_irq(unsigned int irq)
		upa_writel(tmp, imap);
	}
}
}

static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
			    unsigned long iclr, unsigned long imap,
@@ -253,14 +318,14 @@ static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
	prom_halt();
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
{
	struct ino_bucket *bucket;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	/* RULE: Both must be specified in all other cases. */
	/* RULE: Both must be specified. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %016lx %016lx\n",
			    inofixup, iclr, imap);
@@ -298,10 +363,12 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->flags = 0;
	if (!bucket->virt_irq)
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	bucket->flags = flags;

out:
	return __irq(bucket);
	return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
@@ -322,7 +389,8 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f
	 */
	bucket->imap = ~0UL - sysino;
	bucket->iclr = ~0UL - sysino;

	if (!bucket->virt_irq)
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	bucket->flags = flags;

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
@@ -331,7 +399,7 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f
		prom_halt();
	}

	return __irq(bucket);
	return bucket->virt_irq;
}

static void atomic_bucket_insert(struct ino_bucket *bucket)
@@ -390,37 +458,42 @@ static struct irqaction *get_action_slot(struct ino_bucket *bucket)
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
int request_irq(unsigned int virt_irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	struct ino_bucket *bucket;
	unsigned long flags;
	unsigned int real_irq;
	int pending = 0;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return -EINVAL;

	if (unlikely(!handler))
		return -EINVAL;

	bucket = __bucket(real_irq);
	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if (irqflags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
	 	 * outside of the atomic block. In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(PIL_DEVICE_IRQ);
		rand_initialize_irq(virt_irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
	if (check_irq_sharing(virt_irq, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}
@@ -441,12 +514,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_ino_in_irqaction(action, __irq_ino(real_irq));
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(PIL_DEVICE_IRQ, action);
	append_irq_action(virt_irq, action);

	enable_irq(irq);
	enable_irq(virt_irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
@@ -456,7 +529,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_

	spin_unlock_irqrestore(&irq_action_lock, flags);

	register_irq_proc(__irq_ino(irq));
	register_irq_proc(virt_irq);

#ifdef CONFIG_SMP
	distribute_irqs();
@@ -466,17 +539,17 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_

EXPORT_SYMBOL(request_irq);

static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
{
	struct irqaction *action, **pp;

	pp = irq_action + PIL_DEVICE_IRQ;
	pp = irq_action + virt_irq;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
		printk("Freeing free IRQ %d\n", virt_irq);
		return NULL;
	}

@@ -491,28 +564,33 @@ static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
	return action;
}

void free_irq(unsigned int irq, void *dev_id)
void free_irq(unsigned int virt_irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned long flags;
	unsigned int real_irq;
	int ent, i;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);
	action = unlink_irq_action(virt_irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);
	synchronize_irq(virt_irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	bucket = __bucket(real_irq);
	desc = bucket->irq_info;

	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
@@ -545,7 +623,7 @@ void free_irq(unsigned int irq, void *dev_id)
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
			disable_irq(virt_irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -554,10 +632,15 @@ void free_irq(unsigned int irq, void *dev_id)
EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
void synchronize_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
@@ -616,7 +699,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
		if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
@@ -637,7 +720,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)

	/* Test and add entropy */
	if (random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(PIL_DEVICE_IRQ);
		add_interrupt_randomness(bp->virt_irq);
out:
	bp->flags &= ~IBF_INPROGRESS;
}
@@ -657,7 +740,7 @@ void timer_irq(int irq, struct pt_regs *regs)
	clear_softint(clr_mask);

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	kstat_this_cpu.irqs[0]++;
	timer_interrupt(irq, NULL, regs);
	irq_exit();
}
@@ -1023,7 +1106,7 @@ void __init init_IRQ(void)
}

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];
static struct proc_dir_entry *irq_dir[NR_IRQS];

#ifdef CONFIG_SMP

@@ -1047,11 +1130,20 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	struct ino_bucket *bp;
	struct irq_desc *desc;
	struct irqaction *ap;
	unsigned int real_irq;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return;

	bp = __bucket(real_irq);
	desc = bp->irq_info;
	ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
@@ -1060,13 +1152,16 @@ static inline void set_intr_affinity(int irq, cpumask_t hw_aff)

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 *
	 * XXX Broken, this doesn't happen anymore...
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
static int irq_affinity_write_proc(struct file *file,
				   const char __user *buffer,
				   unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	int virt_irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
@@ -1080,7 +1175,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);
	set_intr_affinity(virt_irq, new_value);

	return full_count;
}
@@ -1089,18 +1184,18 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
static void register_irq_proc(unsigned int virt_irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
	if (!root_irq_dir || irq_dir[virt_irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);
	sprintf(name, "%d", virt_irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
	irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
@@ -1112,7 +1207,7 @@ static void register_irq_proc (unsigned int irq)

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->data = (void *)(long)virt_irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
+1 −5
Original line number Diff line number Diff line
@@ -280,7 +280,6 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;
@@ -309,10 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	return __irq(bucket);
	return build_irq(inofixup, iclr, imap, IBF_PCI);
}

/* PSYCHO error handling support. */
+9 −9
Original line number Diff line number Diff line
@@ -544,10 +544,10 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
				    struct pci_dev *pdev,
				    unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;
	int virt_irq;

	ino &= PCI_IRQ_INO;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -573,23 +573,23 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;
	virt_irq = build_irq(inofixup, iclr, imap, IBF_PCI);

	if (pdev) {
		struct pcidev_cookie *pcp = pdev->sysdata;

		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
			struct pci_controller_info *p = pcp->pbm->parent;
			struct irq_desc *d = bucket->irq_info;

			d->pre_handler = sabre_wsync_handler;
			d->pre_handler_arg1 = pdev;
			d->pre_handler_arg2 = (void *)
				p->pbm_A.controller_regs + SABRE_WRSYNC;
			irq_install_pre_handler(virt_irq,
						sabre_wsync_handler,
						pdev,
						(void *)
						p->pbm_A.controller_regs +
						SABRE_WRSYNC);
		}
	}
	return __irq(bucket);
	return virt_irq;
}

/* SABRE error handling support. */
+41 −45
Original line number Diff line number Diff line
@@ -270,25 +270,33 @@ static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void
	}
}

/* Compute the ICLR register address for @ino on this PBM.  The + 4
 * matches the open-coded computation this replaces in
 * schizo_irq_build() -- presumably it selects the 32-bit half of the
 * register that upa_writel() actually accesses; confirm.
 */
static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
					unsigned int ino)
{
	ino &= PCI_IRQ_INO;
	return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
}

/* Compute the IMAP register address for @ino on this PBM; same + 4
 * adjustment as schizo_ino_to_iclr() above.
 */
static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
					unsigned int ino)
{
	ino &= PCI_IRQ_INO;
	return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
}

static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int ign_fixup;
	int virt_irq;

	ino &= PCI_IRQ_INO;
	imap_off = schizo_imap_offset(ino);

	/* Now build the IRQ bucket. */
	imap = pbm->pbm_regs + imap_off;
	imap += 4;

	iclr_off = schizo_iclr_offset(ino);
	iclr = pbm->pbm_regs + iclr_off;
	iclr += 4;
	imap = schizo_ino_to_imap(pbm, ino);
	iclr = schizo_ino_to_iclr(pbm, ino);

	/* On Schizo, no inofixup occurs.  This is because each
	 * INO has it's own IMAP register.  On Psycho and Sabre
@@ -305,19 +313,17 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
			ign_fixup = (1 << 6);
	}

	bucket = __bucket(build_irq(ign_fixup, iclr, imap));
	bucket->flags |= IBF_PCI;
	virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);

	if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		struct irq_desc *p = bucket->irq_info;

		p->pre_handler = tomatillo_wsync_handler;
		p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
				       (void *) 1 : (void *) 0);
		p->pre_handler_arg2 = (void *) pbm->sync_reg;
		irq_install_pre_handler(virt_irq,
					tomatillo_wsync_handler,
					((pbm->chip_version <= 4) ?
					 (void *) 1 : (void *) 0),
					(void *) pbm->sync_reg);
	}

	return __irq(bucket);
	return virt_irq;
}

/* SCHIZO error handling support. */
@@ -358,7 +364,6 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
{
	struct pci_pbm_info *pbm;
	struct ino_bucket *bucket;
	unsigned long iclr;

	/* Do not clear the interrupt for the other PCI bus.
@@ -376,11 +381,11 @@ static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
	else
		pbm = &p->pbm_A;

	irq = schizo_irq_build(pbm, NULL,
	schizo_irq_build(pbm, NULL,
			 (pbm->portid << 6) | (irq & IMAP_INO));
	bucket = __bucket(irq);
	iclr = bucket->iclr;

	iclr = schizo_ino_to_iclr(pbm,
				  (pbm->portid << 6) | (irq & IMAP_INO));
	upa_writel(ICLR_IDLE, iclr);
}

@@ -1125,7 +1130,6 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm;
	unsigned int irq;
	struct ino_bucket *bucket;
	u64 tmp, err_mask, err_no_mask;

	/* Build IRQs and register handlers. */
@@ -1137,8 +1141,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_UE_INO) + 4));

@@ -1150,8 +1153,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_CE_INO) + 4));

@@ -1164,8 +1166,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_PCIERR_A_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));

@@ -1178,8 +1180,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_PCIERR_B_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));

@@ -1191,8 +1193,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_SERR_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));

@@ -1263,7 +1265,6 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm;
	unsigned int irq;
	struct ino_bucket *bucket;
	u64 tmp, err_mask, err_no_mask;

	/* Build IRQs and register handlers. */
@@ -1275,8 +1276,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
@@ -1287,8 +1287,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
@@ -1299,8 +1298,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
@@ -1311,8 +1309,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
@@ -1323,8 +1320,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));

	/* Enable UE and CE interrupts for controller. */
+1 −1
Original line number Diff line number Diff line
@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)

		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
	return build_irq(sbus_level, iclr, imap, 0);
}

/* Error interrupt handling. */
Loading