
Commit 52b166af authored by Thomas Gleixner

x86/apic: Move online masking to core code



All implementations of apic->cpu_mask_to_apicid_and() mask out the offline
cpus. The callsite already has a mask available, which has the offline CPUs
removed. Use that and remove the extra bits.
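
As a rough userspace illustration of the pattern this commit moves around (not kernel code, and not part of the original commit message): each helper used to AND the caller's masks with the online mask itself, whereas afterwards the caller computes one mask that already has offline CPUs removed and the helpers simply pick the first set bit. All names below (first_cpu_old, first_cpu_new, searchmask, the sample masks) are invented for the sketch; the kernel equivalents are cpu_online_mask, vector_searchmask and cpumask_first_and().

/*
 * Userspace model of the change: instead of every APIC-ID lookup helper
 * filtering out offline CPUs on its own, the caller computes one mask
 * with offline CPUs already removed and the helpers just take the first
 * set bit of (cpumask & andmask).  Names are invented for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

/* Old style: helper re-checks the online mask on every call. */
static int first_cpu_old(uint32_t cpumask, uint32_t andmask, uint32_t online_mask)
{
	uint32_t m = cpumask & andmask & online_mask;

	return m ? __builtin_ctz(m) : -1;
}

/* New style: caller guarantees 'andmask' already excludes offline CPUs. */
static int first_cpu_new(uint32_t cpumask, uint32_t andmask)
{
	uint32_t m = cpumask & andmask;

	return m ? __builtin_ctz(m) : -1;
}

int main(void)
{
	uint32_t online_mask = 0x0000000f;	/* CPUs 0-3 online */
	uint32_t irq_mask    = 0x0000000c;	/* affinity: CPUs 2,3 */
	uint32_t domain_mask = 0x000000fe;	/* vector domain: CPUs 1-7 */

	/* Caller filters against the online mask once, like vector_searchmask. */
	uint32_t searchmask = domain_mask & online_mask;

	printf("old: %d\n", first_cpu_old(irq_mask, domain_mask, online_mask));
	printf("new: %d\n", first_cpu_new(irq_mask, searchmask));
	return 0;
}

In the diff below the same idea appears in kernel terms: __assign_irq_vector() now hands vector_searchmask (d->domain with offline CPUs masked out) to apic->cpu_mask_to_apicid_and(), so default_cpu_mask_to_apicid_and(), flat_cpu_mask_to_apicid_and() and x2apic_cpu_mask_to_apicid_and() no longer consult cpu_online_mask themselves.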

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.560868224@linutronix.de
parent bbcf9574
arch/x86/kernel/apic/apic.c  +9 −18
@@ -2205,36 +2205,27 @@ int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 				   const struct cpumask *andmask,
 				   unsigned int *apicid)
 {
-	unsigned int cpu;
+	unsigned int cpu = cpumask_first_and(cpumask, andmask);
 
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-
-	if (likely(cpu < nr_cpu_ids)) {
-		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
-		return 0;
-	}
-
-	return -EINVAL;
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+	return 0;
 }
 
 int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 				const struct cpumask *andmask,
 				unsigned int *apicid)
 {
 	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
 				 cpumask_bits(andmask)[0] &
-				 cpumask_bits(cpu_online_mask)[0] &
 				 APIC_ALL_CPUS;
 
-	if (likely(cpu_mask)) {
-		*apicid = (unsigned int)cpu_mask;
-		return 0;
-	}
-	return -EINVAL;
+	if (!cpu_mask)
+		return -EINVAL;
+	*apicid = (unsigned int)cpu_mask;
+	return 0;
 }
 
 /*
  * Override the generic EOI implementation with an optimized version.
arch/x86/kernel/apic/vector.c  +4 −1
@@ -221,8 +221,11 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
 	 * as we already established, that mask & d->domain & cpu_online_mask
 	 * is not empty.
+	 *
+	 * vector_searchmask is a subset of d->domain and has the offline
+	 * cpus masked out.
 	 */
-	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+	BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
 					    &d->cfg.dest_apicid));
 	return 0;
 }
arch/x86/kernel/apic/x2apic_cluster.c  +9 −16
@@ -108,31 +108,24 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			      const struct cpumask *andmask,
 			      unsigned int *apicid)
 {
+	unsigned int cpu;
 	u32 dest = 0;
 	u16 cluster;
-	int i;
 
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
-			continue;
-		dest = per_cpu(x86_cpu_to_logical_apicid, i);
-		cluster = x2apic_cluster(i);
-		break;
-	}
-
-	if (!dest)
+	cpu = cpumask_first_and(cpumask, andmask);
+	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
-			continue;
+	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+	cluster = x2apic_cluster(cpu);
 
-		if (cluster != x2apic_cluster(i))
-			continue;
-		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cluster != x2apic_cluster(cpu))
+			continue;
+		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
 	}
 
 	*apicid = dest;
 
 	return 0;
 }