Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0ebfff14 authored by Benjamin Herrenschmidt, committed by Paul Mackerras
Browse files

[POWERPC] Add new interrupt mapping core and change platforms to use it



This adds the new irq remapper core and removes the old one.  Because
there are some fundamental conflicts with the old code, like the value
of NO_IRQ which I'm now setting to 0 (as per discussions with Linus),
etc..., this commit also changes the relevant platform and driver code
over to use the new remapper (so as not to cause difficulties later
in bisecting).

This patch removes the old pre-parsing of the open firmware interrupt
tree along with all the bogus assumptions it made to try to renumber
interrupts according to the platform. This is all to be handled by the
new code now.

For the pSeries XICS interrupt controller, a single remapper host is
created for the whole machine regardless of how many interrupt
presentation and source controllers are found, and it's set to match
any device node that isn't a 8259.  That works fine on pSeries and
avoids having to deal with some of the complexities of split source
controllers vs. presentation controllers in the pSeries device trees.

The powerpc i8259 PIC driver now always requests the legacy interrupt
range. It also has the feature of being able to match any device node
(including NULL) if passed no device node as an input. That will help
porting over platforms with broken device-trees like Pegasos which don't
have a proper interrupt tree.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent f63e115f
Loading
Loading
Loading
Loading
+2 −7
Original line number Original line Diff line number Diff line
@@ -323,13 +323,11 @@ int ibmebus_request_irq(struct ibmebus_dev *dev,
			unsigned long irq_flags, const char * devname,
			unsigned long irq_flags, const char * devname,
			void *dev_id)
			void *dev_id)
{
{
	unsigned int irq = virt_irq_create_mapping(ist);
	unsigned int irq = irq_create_mapping(NULL, ist, 0);
	
	
	if (irq == NO_IRQ)
	if (irq == NO_IRQ)
		return -EINVAL;
		return -EINVAL;
	
	
	irq = irq_offset_up(irq);
	
	return request_irq(irq, handler,
	return request_irq(irq, handler,
			   irq_flags, devname, dev_id);
			   irq_flags, devname, dev_id);
}
}
@@ -337,12 +335,9 @@ EXPORT_SYMBOL(ibmebus_request_irq);


void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id)
void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id)
{
{
	unsigned int irq = virt_irq_create_mapping(ist);
	unsigned int irq = irq_find_mapping(NULL, ist);
	
	
	irq = irq_offset_up(irq);
	free_irq(irq, dev_id);
	free_irq(irq, dev_id);
	
	return;
}
}
EXPORT_SYMBOL(ibmebus_free_irq);
EXPORT_SYMBOL(ibmebus_free_irq);


+507 −122
Original line number Original line Diff line number Diff line
@@ -29,6 +29,8 @@
 * to reduce code space and undefined function references.
 * to reduce code space and undefined function references.
 */
 */


#undef DEBUG

#include <linux/module.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/kernel_stat.h>
@@ -46,7 +48,10 @@
#include <linux/cpumask.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>


#include <asm/uaccess.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/system.h>
@@ -57,6 +62,7 @@
#include <asm/prom.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC_ISERIES
#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#include <asm/paca.h>
#endif
#endif
@@ -88,7 +94,6 @@ extern atomic_t ipi_sent;
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(irq_desc);


int distribute_irqs = 1;
int distribute_irqs = 1;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_PPC64 */


int show_interrupts(struct seq_file *p, void *v)
int show_interrupts(struct seq_file *p, void *v)
@@ -181,7 +186,7 @@ void fixup_irqs(cpumask_t map)


void do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
{
	int irq;
	unsigned int irq;
#ifdef CONFIG_IRQSTACKS
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
	struct thread_info *curtp, *irqtp;
#endif
#endif
@@ -212,7 +217,7 @@ void do_IRQ(struct pt_regs *regs)
	 */
	 */
	irq = ppc_md.get_irq(regs);
	irq = ppc_md.get_irq(regs);


	if (irq >= 0) {
	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		curtp = current_thread_info();
@@ -231,7 +236,7 @@ void do_IRQ(struct pt_regs *regs)
		} else
		} else
#endif
#endif
			generic_handle_irq(irq, regs);
			generic_handle_irq(irq, regs);
	} else if (irq != -2)
	} else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;
		ppc_spurious_interrupts++;


@@ -254,181 +259,561 @@ void __init init_IRQ(void)
#endif
#endif
}
}


#ifdef CONFIG_PPC64
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */


#define UNDEFINED_IRQ 0xffffffff
#ifdef CONFIG_IRQSTACKS
unsigned int virt_irq_to_real_map[NR_IRQS];
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	call_do_softirq(irqtp);
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		account_system_vtime(current);
		local_bh_disable();
		do_softirq_onstack();
		account_system_vtime(current);
		__local_bh_enable();
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);



/*
/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * IRQ controller and virtual interrupts
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to __irq_offet_value less than virt_irq_max so
 * that when we offset them we don't end up with an interrupt
 * number >= virt_irq_max.
 */
 */
#define MIN_VIRT_IRQ	3


unsigned int virt_irq_max;
#ifdef CONFIG_PPC_MERGE
static unsigned int max_virt_irq;

static unsigned int nr_virt_irqs;
static LIST_HEAD(irq_hosts);
static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;


void
struct irq_map_entry irq_map[NR_IRQS];
virt_irq_init(void)
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

struct irq_host *irq_alloc_host(unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
{
	int i;
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;


	if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
	/* Allocate structure and revmap table if using linear mapping */
		virt_irq_max = NR_IRQS - 1;
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
	max_virt_irq = virt_irq_max - __irq_offset_value;
		size += revmap_arg * sizeof(unsigned int);
	nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
	if (mem_init_done)
		host = kzalloc(size, GFP_KERNEL);
	else {
		host = alloc_bootmem(size);
		if (host)
			memset(host, 0, size);
	}
	if (host == NULL)
		return NULL;


	for (i = 0; i < NR_IRQS; i++)
	/* Fill structure */
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use irq 0 host pointer for that
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}
	}


/* Create a mapping for a real_irq if it doesn't already exist.
	list_add(&host->link, &irq_hosts);
 * Return the virtual irq as a convenience.
	spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = 0;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear some flags */
			get_irq_desc(i)->status
				&= ~(IRQ_NOREQUEST | IRQ_LEVEL);

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitely change them
			 */
			 */
int virt_irq_create_mapping(unsigned int real_irq)
			ops->map(host, i, i, 0);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = IRQ_NONE;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
{
	unsigned int virq, first_virq;
	struct irq_host *h, *found = NULL;
	static int warned;
	unsigned long flags;


	if (ppc64_interrupt_controller == IC_OPEN_PIC)
	/* We might want to match the legacy controller last since
		return real_irq;	/* no mapping for openpic (for now) */
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match == NULL || h->ops->match(h, node)) {
			found = h;
			break;
		}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);


	if (ppc64_interrupt_controller == IC_CELL_PIC)
void irq_set_default_host(struct irq_host *host)
		return real_irq;	/* no mapping for iic either */
{
	pr_debug("irq: Default host set to @0x%p\n", host);


	/* don't map interrupts < MIN_VIRT_IRQ */
	irq_default_host = host;
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
}
}


	/* map to a number between MIN_VIRT_IRQ and max_virt_irq */
void irq_set_virq_count(unsigned int count)
	virq = real_irq;
{
	if (virq > max_virt_irq)
	pr_debug("irq: Trying to set virq count to %d\n", count);
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq,
				unsigned int flags)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n",
		 host, hwirq, flags);

	/* Look for default host if nececssary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);


	/* search for this number or a free slot */
	/* Check if mapping already exist, if it does, call
	first_virq = virq;
	 * host->ops->map() to update the flags
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
	 */
		if (virt_irq_to_real_map[virq] == real_irq)
	virq = irq_find_mapping(host, hwirq);
	if (virq != IRQ_NONE) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		host->ops->map(host, virq, hwirq, flags);
		return virq;
		return virq;
		if (++virq > max_virt_irq)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}
	}


	virt_irq_to_real_map[virq] = real_irq;
	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}
	pr_debug("irq: -> obtained virq %d\n", virq);

	/* Clear some flags */
	get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL);


 nospace:
	/* map it */
	if (!warned) {
	if (host->ops->map(host, virq, hwirq, flags)) {
		printk(KERN_CRIT "Interrupt table is full\n");
		pr_debug("irq: -> mapping failed, freeing\n");
		printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
		irq_free_virt(virq, 1);
		       "in your kernel sources and rebuild.\n", virt_irq_max);
		return NO_IRQ;
		warned = 1;
	}
	}
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

extern unsigned int irq_create_of_mapping(struct device_node *controller,
					  u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int flags = IRQ_TYPE_NONE;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL)
		return NO_IRQ;

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &flags))
			return NO_IRQ;
			return NO_IRQ;
	}
	}


/*
	return irq_create_mapping(host, hwirq, flags);
 * In most cases will get a hit on the very first slot checked in the
}
 * virt_irq_to_real_map.  Only when there are a large number of
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 * IRQs will this be expensive.

 */
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
{
	unsigned int virq;
	struct of_irq oirq;
	unsigned int first_virq;


	virq = real_irq;
	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;


	if (virq > max_virt_irq)
	return irq_create_of_mapping(oirq.controller, oirq.specifier,
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);


	first_virq = virq;
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host = irq_map[virq].host;
	irq_hw_number_t hwirq;
	unsigned long flags;


	do {
	WARN_ON (host == NULL);
		if (virt_irq_to_real_map[virq] == real_irq)
	if (host == NULL)
			return virq;
		return;


		virq++;
	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
		break;
	case IRQ_HOST_MAP_TREE:
		/* Check if radix tree allocated yet */
		if (host->revmap_data.tree.gfp_mask == 0)
			break;
		/* XXX radix tree not safe ! remove lock whem it becomes safe
		 * and use some RCU sync to make sure everything is ok before we
		 * can re-use that map entry
		 */
		spin_lock_irqsave(&irq_big_lock, flags);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		spin_unlock_irqrestore(&irq_big_lock, flags);
		break;
	}


		if (virq >= max_virt_irq)
	/* Destroy map */
			virq = 0;
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;


	} while (first_virq != virq);
	/* Set some flags */
	get_irq_desc(virq)->status |= IRQ_NOREQUEST;


	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if nececssary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;
		return NO_IRQ;


	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do  {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while(i != hint);
	return NO_IRQ;
}
}
#endif /* CONFIG_PPC64 */
EXPORT_SYMBOL_GPL(irq_find_mapping);


#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;


void irq_ctx_init(void)
unsigned int irq_radix_revmap(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
{
	struct thread_info *tp;
	struct radix_tree_root *tree;
	int i;
	struct irq_map_entry *ptr;
	unsigned int virq;
	unsigned long flags;


	for_each_possible_cpu(i) {
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;


		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
	/* Check if the radix tree exist yet. We test the value of
		tp = hardirq_ctx[i];
	 * the gfp_mask for that. Sneaky but saves another int in the
		tp->cpu = i;
	 * structure. If not, we fallback to slow mode
		tp->preempt_count = HARDIRQ_OFFSET;
	 */
	tree = &host->revmap_data.tree;
	if (tree->gfp_mask == 0)
		return irq_find_mapping(host, hwirq);

	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
	 * when that is fixed (when Nick's patch gets in
	 */
	spin_lock_irqsave(&irq_big_lock, flags);

	/* Now try to resolve */
	ptr = radix_tree_lookup(tree, hwirq);
	/* Found it, return */
	if (ptr) {
		virq = ptr - irq_map;
		goto bail;
	}
	}

	/* If not there, try to insert it */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ)
		radix_tree_insert(tree, virq, &irq_map[virq]);
 bail:
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return virq;
}
}


static inline void do_softirq_onstack(void)
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
{
	struct thread_info *curtp, *irqtp;
	unsigned int *revmap;


	curtp = current_thread_info();
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
	irqtp = softirq_ctx[smp_processor_id()];

	irqtp->task = curtp->task;
	/* Check revmap bounds */
	call_do_softirq(irqtp);
	if (unlikely(hwirq >= host->revmap_data.linear.size))
	irqtp->task = NULL;
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
}


#else
unsigned int irq_alloc_virt(struct irq_host *host,
#define do_softirq_onstack()	__do_softirq()
			    unsigned int count,
#endif /* CONFIG_IRQSTACKS */
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;
	unsigned int limit = irq_virq_count - count;


void do_softirq(void)
	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS; i <= limit; ) {
		for (j = i; j < (i + count); j++)
			if (irq_map[j].host != NULL) {
				i = j + 1;
				continue;
			}
		found = i;
		break;
	}
	if (found == NO_IRQ) {
		spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
{
	unsigned long flags;
	unsigned long flags;
	unsigned int i;


	if (in_interrupt())
	WARN_ON (virq < NUM_ISA_INTERRUPTS);
		return;
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);


	local_irq_save(flags);
	spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;


	if (local_softirq_pending()) {
		if (i < NUM_ISA_INTERRUPTS ||
		account_system_vtime(current);
		    (virq + count) > irq_virq_count)
		local_bh_disable();
			continue;
		do_softirq_onstack();

		account_system_vtime(current);
		host = irq_map[i].host;
		__local_bh_enable();
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
}
}


	local_irq_restore(flags);
void irq_early_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++)
		get_irq_desc(i)->status |= IRQ_NOREQUEST;
}
}
EXPORT_SYMBOL(do_softirq);

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned long flags;

	spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);

	return 0;
}
arch_initcall(irq_late_init);

#endif /* CONFIG_PPC_MERGE */


#ifdef CONFIG_PCI_MSI
#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev * pdev)
int pci_enable_msi(struct pci_dev * pdev)
+22 −24
Original line number Original line Diff line number Diff line
@@ -28,6 +28,7 @@ static struct legacy_serial_info {
	struct device_node		*np;
	struct device_node		*np;
	unsigned int			speed;
	unsigned int			speed;
	unsigned int			clock;
	unsigned int			clock;
	int				irq_check_parent;
	phys_addr_t			taddr;
	phys_addr_t			taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static unsigned int legacy_serial_count;
static unsigned int legacy_serial_count;
@@ -36,7 +37,7 @@ static int legacy_serial_console = -1;
static int __init add_legacy_port(struct device_node *np, int want_index,
static int __init add_legacy_port(struct device_node *np, int want_index,
				  int iotype, phys_addr_t base,
				  int iotype, phys_addr_t base,
				  phys_addr_t taddr, unsigned long irq,
				  phys_addr_t taddr, unsigned long irq,
				  upf_t flags)
				  upf_t flags, int irq_check_parent)
{
{
	u32 *clk, *spd, clock = BASE_BAUD * 16;
	u32 *clk, *spd, clock = BASE_BAUD * 16;
	int index;
	int index;
@@ -68,7 +69,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
	if (legacy_serial_infos[index].np != 0) {
	if (legacy_serial_infos[index].np != 0) {
		/* if we still have some room, move it, else override */
		/* if we still have some room, move it, else override */
		if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
		if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
			printk(KERN_INFO "Moved legacy port %d -> %d\n",
			printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
			       index, legacy_serial_count);
			       index, legacy_serial_count);
			legacy_serial_ports[legacy_serial_count] =
			legacy_serial_ports[legacy_serial_count] =
				legacy_serial_ports[index];
				legacy_serial_ports[index];
@@ -76,7 +77,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
				legacy_serial_infos[index];
				legacy_serial_infos[index];
			legacy_serial_count++;
			legacy_serial_count++;
		} else {
		} else {
			printk(KERN_INFO "Replacing legacy port %d\n", index);
			printk(KERN_DEBUG "Replacing legacy port %d\n", index);
		}
		}
	}
	}


@@ -95,10 +96,11 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
	legacy_serial_infos[index].np = of_node_get(np);
	legacy_serial_infos[index].np = of_node_get(np);
	legacy_serial_infos[index].clock = clock;
	legacy_serial_infos[index].clock = clock;
	legacy_serial_infos[index].speed = spd ? *spd : 0;
	legacy_serial_infos[index].speed = spd ? *spd : 0;
	legacy_serial_infos[index].irq_check_parent = irq_check_parent;


	printk(KERN_INFO "Found legacy serial port %d for %s\n",
	printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
	       index, np->full_name);
	       index, np->full_name);
	printk(KERN_INFO "  %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
	printk(KERN_DEBUG "  %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
	       (iotype == UPIO_PORT) ? "port" : "mem",
	       (iotype == UPIO_PORT) ? "port" : "mem",
	       (unsigned long long)base, (unsigned long long)taddr, irq,
	       (unsigned long long)base, (unsigned long long)taddr, irq,
	       legacy_serial_ports[index].uartclk,
	       legacy_serial_ports[index].uartclk,
@@ -132,7 +134,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
	/* Add port, irq will be dealt with later. We passed a translated
	/* Add port, irq will be dealt with later. We passed a translated
	 * IO port value. It will be fixed up later along with the irq
	 * IO port value. It will be fixed up later along with the irq
	 */
	 */
	return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags);
	return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
}
}


static int __init add_legacy_isa_port(struct device_node *np,
static int __init add_legacy_isa_port(struct device_node *np,
@@ -170,7 +172,7 @@ static int __init add_legacy_isa_port(struct device_node *np,


	/* Add port, irq will be dealt with later */
	/* Add port, irq will be dealt with later */
	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
			       NO_IRQ, UPF_BOOT_AUTOCONF);
			       NO_IRQ, UPF_BOOT_AUTOCONF, 0);


}
}


@@ -242,7 +244,8 @@ static int __init add_legacy_pci_port(struct device_node *np,
	/* Add port, irq will be dealt with later. We passed a translated
	/* Add port, irq will be dealt with later. We passed a translated
	 * IO port value. It will be fixed up later along with the irq
	 * IO port value. It will be fixed up later along with the irq
	 */
	 */
	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF);
	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
			       UPF_BOOT_AUTOCONF, np != pci_dev);
}
}
#endif
#endif


@@ -373,28 +376,23 @@ static void __init fixup_port_irq(int index,
				  struct device_node *np,
				  struct device_node *np,
				  struct plat_serial8250_port *port)
				  struct plat_serial8250_port *port)
{
{
	DBG("fixup_port_irq(%d)\n", index);
	unsigned int virq;


	/* Check for interrupts in that node */
	DBG("fixup_port_irq(%d)\n", index);
	if (np->n_intrs > 0) {
		port->irq = np->intrs[0].line;
		DBG(" port %d (%s), irq=%d\n",
		    index, np->full_name, port->irq);
		return;
	}


	/* Check for interrupts in the parent */
	virq = irq_of_parse_and_map(np, 0);
	if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) {
		np = of_get_parent(np);
		np = of_get_parent(np);
		if (np == NULL)
		if (np == NULL)
			return;
			return;

		virq = irq_of_parse_and_map(np, 0);
	if (np->n_intrs > 0) {
		port->irq = np->intrs[0].line;
		DBG(" port %d (%s), irq=%d\n",
		    index, np->full_name, port->irq);
	}
		of_node_put(np);
		of_node_put(np);
	}
	}
	if (virq == NO_IRQ)
		return;

	port->irq = virq;
}


static void __init fixup_port_pio(int index,
static void __init fixup_port_pio(int index,
				  struct device_node *np,
				  struct device_node *np,
+37 −0
Original line number Original line Diff line number Diff line
@@ -1404,6 +1404,43 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
	/* XXX FIXME - update OF device tree node interrupt property */
	/* XXX FIXME - update OF device tree node interrupt property */
}
}


#ifdef CONFIG_PPC_MERGE
/* XXX This is a copy of the ppc64 version. This is temporary until we start
 * merging the 2 PCI layers
 */
/*
 * Reads the interrupt pin to determine if interrupt is use by card.
 * If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	DBG("Try to map irq for %s...\n", pci_name(pci_dev));

	if (of_irq_map_pci(pci_dev, &oirq)) {
		DBG(" -> failed !\n");
		return -1;
	}

	DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
	    oirq.size, oirq.specifier[0], oirq.controller->full_name);

	virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
	if(virq == NO_IRQ) {
		DBG(" -> failed to map !\n");
		return -1;
	}
	pci_dev->irq = virq;
	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
#endif /* CONFIG_PPC_MERGE */

int pcibios_enable_device(struct pci_dev *dev, int mask)
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
{
	u16 cmd, old_cmd;
	u16 cmd, old_cmd;
+16 −17

File changed.

Preview size limit exceeded, changes collapsed.

Loading