#ifdef CONFIG_HOTPLUG_CPU
/*
 * route_irq - retarget one IRQ at the given CPU.
 * @data: irq_data of the interrupt being moved
 * @irq:  linear IRQ number (used for the log message and desc lookup)
 * @cpu:  destination CPU
 *
 * Logs the move, then calls the chip's irq_set_affinity() callback under
 * the descriptor lock with interrupts disabled.  The third argument
 * (force == false) asks the chip not to override a fixed affinity.
 *
 * NOTE(review): chip and chip->irq_set_affinity are dereferenced without
 * a NULL check — callers rely on every active IRQ on this platform having
 * a chip that implements irq_set_affinity; confirm against the SH intc
 * drivers before relying on this elsewhere.
 */
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
	       irq, data->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	chip->irq_set_affinity(data, cpumask_of(cpu), false);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * migrate_irqs - evacuate IRQs from the CPU going offline.
 *
 * Called on the CPU being hot-unplugged.  For every active IRQ whose
 * home node matches this CPU, pick another online CPU from the IRQ's
 * affinity mask.  If the affinity mask no longer intersects the online
 * mask, widen it to all CPUs (logging a rate-limited notice) and pick
 * again, then reroute the IRQ via route_irq().
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (data->node == cpu) {
			/* Prefer a CPU that is both allowed and online. */
			unsigned int newcpu = cpumask_any_and(data->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       irq, cpu);

				/*
				 * No allowed CPU remains online: break the
				 * user's affinity setting rather than lose
				 * the interrupt.
				 */
				cpumask_setall(data->affinity);
				newcpu = cpumask_any_and(data->affinity,
							 cpu_online_mask);
			}

			route_irq(data, irq, newcpu);
		}
	}
}
#endif