Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9ec4fa27 authored by Yinghai Lu's avatar Yinghai Lu Committed by Ingo Molnar
Browse files

irq, cpumask: correct CPUMASKS_OFFSTACK typo and fix fallout



CPUMASKS_OFFSTACK is not defined anywhere (it is CPUMASK_OFFSTACK).
It is a typo, and as a result init_alloc_desc_masks() never set
affinity to all cpus as intended...

Split init_alloc_desc_masks() into alloc_desc_masks() and init_desc_masks().

Also use CPUMASK_OFFSTACK in alloc_desc_masks().

[ Impact: fix smp_affinity copying/setup when moving irq_desc between CPUs ]

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
LKML-Reference: <49F6546E.3040406@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e25c2c87
Loading
Loading
Loading
Loading
+18 −9
Original line number Diff line number Diff line
@@ -424,27 +424,25 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#ifdef CONFIG_SMP
/**
 * init_alloc_desc_masks - allocate cpumasks for irq_desc
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @cpu:	cpu which will be handling the cpumasks
 * @boot:	true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 * Side effect: affinity has all bits set, pending_mask has all bits clear.
 */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu,
								bool boot)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	int node;

	if (boot) {
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}
@@ -453,18 +451,25 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,

	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
#endif
	return true;
}

/**
 * init_desc_masks - initialize the cpumasks of an irq_desc
 * @desc:	pointer to irq_desc struct
 *
 * Sets affinity to all CPUs and clears pending_mask (the latter only
 * when CONFIG_GENERIC_PENDING_IRQ is enabled).  Split out of the old
 * init_alloc_desc_masks() so that allocation and initialization of the
 * masks can be done as separate steps.
 */
static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
@@ -478,7 +483,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASKS_OFFSTACK
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -499,12 +504,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc,

#else /* !CONFIG_SMP */

static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
/* !CONFIG_SMP stub: no cpumasks exist, so there is nothing to
 * allocate; always report success. */
static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu,
								bool boot)
{
	return true;
}

/* !CONFIG_SMP stub: no cpumasks to initialize. */
static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
+6 −3
Original line number Diff line number Diff line
@@ -115,10 +115,11 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!init_alloc_desc_masks(desc, cpu, false)) {
	if (!alloc_desc_masks(desc, cpu, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, cpu);
}

@@ -169,7 +170,8 @@ int __init early_irq_init(void)
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		init_alloc_desc_masks(&desc[i], 0, true);
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

@@ -256,7 +258,8 @@ int __init early_irq_init(void)

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		init_alloc_desc_masks(&desc[i], 0, true);
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
+1 −1
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
		 struct irq_desc *desc, int cpu)
{
	memcpy(desc, old_desc, sizeof(struct irq_desc));
	if (!init_alloc_desc_masks(desc, cpu, false)) {
	if (!alloc_desc_masks(desc, cpu, false)) {
		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
				"for migration.\n", irq);
		return false;