Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7f7ace0c authored by Mike Travis, committed by Ingo Molnar
Browse files

cpumask: update irq_desc to use cpumask_var_t



Impact: reduce memory usage, use new cpumask API.

Replace the affinity and pending_masks with cpumask_var_t's.  This adds
to the significant size reduction done with the SPARSE_IRQS changes.

The added functions (init_alloc_desc_masks & init_copy_desc_masks) are
in the include file so they can be inlined (and optimized out for the
!CONFIG_CPUMASK_OFFSTACK case.)  [Naming chosen to be consistent with
the other init*irq functions, as well as the backwards arg declaration
of "from, to" instead of the more common "to, from" standard.]

Includes a slight change to the declaration of struct irq_desc to embed
the pending_mask within ifdef(CONFIG_SMP) to be consistent with other
references, and some small changes to Xen.

Tested: sparse/non-sparse/cpumask_offstack/non-cpumask_offstack/nonuma/nosmp on x86_64

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: virtualization@lists.osdl.org
Cc: xen-devel@lists.xensource.com
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
parent c5976504
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -356,7 +356,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)

	if (!cfg->move_in_progress) {
		/* it means that domain is not changed */
		if (!cpumask_intersects(&desc->affinity, mask))
		if (!cpumask_intersects(desc->affinity, mask))
			cfg->move_desc_pending = 1;
	}
}
@@ -579,9 +579,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
	if (assign_irq_vector(irq, cfg, mask))
		return BAD_APICID;

	cpumask_and(&desc->affinity, cfg->domain, mask);
	cpumask_and(desc->affinity, cfg->domain, mask);
	set_extra_move_desc(desc, mask);
	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
	return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}

static void
@@ -2383,7 +2383,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(&desc->affinity, mask);
	cpumask_copy(desc->affinity, mask);
}

static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2405,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
	migrate_ioapic_irq_desc(desc, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpumask_clear(&desc->pending_mask);
	cpumask_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2434,7 @@ static void ir_irq_migration(struct work_struct *work)
				continue;
			}

			desc->chip->set_affinity(irq, &desc->pending_mask);
			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
@@ -2448,7 +2448,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{
	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(&desc->pending_mask, mask);
		cpumask_copy(desc->pending_mask, mask);
		migrate_irq_remapped_level_desc(desc);
		return;
	}
@@ -2516,7 +2516,7 @@ static void irq_complete_move(struct irq_desc **descp)

		/* domain has not changed, but affinity did */
		me = smp_processor_id();
		if (cpu_isset(me, desc->affinity)) {
		if (cpumask_test_cpu(me, desc->affinity)) {
			*descp = desc = move_irq_desc(desc, me);
			/* get the new one */
			cfg = desc->chip_data;
@@ -4039,7 +4039,7 @@ void __init setup_ioapic_dest(void)
			 */
			if (desc->status &
			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
				mask = &desc->affinity;
				mask = desc->affinity;
			else
				mask = TARGET_CPUS;

+1 −1
Original line number Diff line number Diff line
@@ -248,7 +248,7 @@ void fixup_irqs(void)
		if (irq == 2)
			continue;

		affinity = &desc->affinity;
		affinity = desc->affinity;
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			affinity = cpu_all_mask;
+1 −1
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ void fixup_irqs(void)
		/* interrupt's are disabled at this point */
		spin_lock(&desc->lock);

		affinity = &desc->affinity;
		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			spin_unlock(&desc->lock);
+2 −2
Original line number Diff line number Diff line
@@ -125,7 +125,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
@@ -142,7 +142,7 @@ static void init_evtchn_cpu_bindings(void)

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		desc->affinity = cpumask_of_cpu(0);
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

+78 −3
Original line number Diff line number Diff line
@@ -182,11 +182,11 @@ struct irq_desc {
	unsigned int		irqs_unhandled;
	spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_t		affinity;
	cpumask_var_t		affinity;
	unsigned int		cpu;
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_t		pending_mask;
	cpumask_var_t		pending_mask;
#endif
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
@@ -422,4 +422,79 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * init_alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	NUMA node to allocate the cpumasks on (passed to
 *		alloc_cpumask_var_node(); unused on the bootmem path)
 * @boot:	true if the allocations must come from bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 * Side effect: affinity has all bits set, pending_mask has all bits clear.
 */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int node,
								bool boot)
{
	if (boot) {
		/* Bootmem allocations cannot fail, so no error handling here. */
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}

	/* GFP_ATOMIC — NOTE(review): presumably callable from atomic context;
	 * confirm against callers. */
	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* Roll back the affinity allocation on failure to avoid a leak. */
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
	/*
	 * The Kconfig symbol is CONFIG_CPUMASK_OFFSTACK (lib/Kconfig);
	 * the original guard was misspelled "CPUMASKS", making this
	 * function an unconditional no-op, so off-stack masks were
	 * never copied when an irq_desc migrated between nodes.
	 */
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

#else /* !CONFIG_SMP */

/* !CONFIG_SMP stub: the masks do not exist, so there is nothing to
 * allocate; report success unconditionally. */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int node,
								bool boot)
{
	return true;
}

/* !CONFIG_SMP stub: no masks to copy. */
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

#endif	/* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */
Loading