Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83a10522 authored by Thomas Gleixner
Browse files

x86/apic: Move common APIC callbacks



Move more apic struct specific functions out of the header and the apic
management code into the common source file.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213153.834421893@linutronix.de
parent 64063505
Loading
Loading
Loading
Loading
+12 −61
Original line number Diff line number Diff line
@@ -476,94 +476,45 @@ DECLARE_PER_CPU(int, x2apic_extra_bits);

extern void generic_bigsmp_probe(void);


#ifdef CONFIG_X86_LOCAL_APIC

#include <asm/smp.h>

#define APIC_DFR_VALUE	(APIC_DFR_FLAT)

/*
 * Default set of CPUs an interrupt may target: all online CPUs on SMP
 * kernels, CPU 0 otherwise.
 */
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

/* Target set for APIC drivers that always route to all online CPUs. */
static inline const struct cpumask *online_target_cpus(void)
{
	return cpu_online_mask;
}

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);

extern struct apic apic_noop;

/*
 * Read the local APIC ID register and let the active apic driver's
 * get_apic_id() callback extract the APIC id from the raw register value.
 */
static inline unsigned int read_apic_id(void)
{
	unsigned int reg = apic_read(APIC_ID);

	return apic->get_apic_id(reg);
}

/* By default an APIC id is considered valid when it is below 255. */
static inline int default_apic_id_valid(int apicid)
{
	return apicid < 255;
}

extern const struct cpumask *default_target_cpus(void);
extern const struct cpumask *online_target_cpus(void);
extern int default_apic_id_valid(int apicid);
extern int default_acpi_madt_oem_check(char *, char *);

extern void default_setup_apic_routing(void);

extern struct apic apic_noop;

extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
				   struct irq_data *irqdata,
				   unsigned int *apicid);
extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
				      struct irq_data *irqdata,
				      unsigned int *apicid);

static inline void
flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
			      const struct cpumask *mask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/* Restrict the vector allocation domain to the single target @cpu. */
static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
				 const struct cpumask *mask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}

/* Return true if @apicid is set in the physical id @map. */
static inline bool default_check_apicid_used(physid_mask_t *map, int apicid)
{
	return physid_isset(apicid, *map);
}

/* Identity mapping: copy the physical cpu map straight into @retmap. */
static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	*retmap = *phys_map;
}

extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
extern void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
				   const struct cpumask *mask);
extern void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
				      const struct cpumask *mask);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);

#endif /* CONFIG_X86_LOCAL_APIC */

extern void irq_enter(void);
extern void irq_exit(void);

+0 −28
Original line number Diff line number Diff line
@@ -2273,34 +2273,6 @@ int hard_smp_processor_id(void)
	return read_apic_id();
}

/*
 * Default cpumask -> APIC id conversion: pick the first CPU in @mask,
 * look up its APIC id and record that single CPU as the effective
 * affinity of @irqdata.
 *
 * Returns 0 on success, -EINVAL if @mask contains no valid CPU.
 */
int default_cpu_mask_to_apicid(const struct cpumask *mask,
			       struct irq_data *irqdata,
			       unsigned int *apicid)
{
	unsigned int cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
	irq_data_update_effective_affinity(irqdata, cpumask_of(cpu));
	return 0;
}

/*
 * Flat-mode cpumask -> APIC id conversion: the low word of @mask,
 * restricted to APIC_ALL_CPUS, is used directly as the destination id
 * and is also recorded as the effective affinity of @irqdata.
 *
 * Returns 0 on success, -EINVAL when the masked low word is empty.
 */
int flat_cpu_mask_to_apicid(const struct cpumask *mask,
			    struct irq_data *irqdata,
			    unsigned int *apicid)
{
	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;

	if (!cpu_mask)
		return -EINVAL;
	*apicid = (unsigned int)cpu_mask;
	cpumask_bits(effmsk)[0] = cpu_mask;
	return 0;
}

/*
 * Override the generic EOI implementation with an optimized version.
 * Only called during early boot when only one CPU is active and with
+78 −0
Original line number Diff line number Diff line
@@ -6,6 +6,64 @@
#include <linux/irq.h>
#include <asm/apic.h>

/*
 * Default cpumask -> APIC id conversion: pick the first CPU in @msk,
 * look up its APIC id and record that single CPU as the effective
 * affinity of @irqd.
 *
 * Returns 0 on success, -EINVAL if @msk contains no valid CPU.
 */
int default_cpu_mask_to_apicid(const struct cpumask *msk, struct irq_data *irqd,
			       unsigned int *apicid)
{
	unsigned int cpu = cpumask_first(msk);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	return 0;
}

/*
 * Flat-mode cpumask -> APIC id conversion: the low word of @mask,
 * restricted to APIC_ALL_CPUS, is used directly as the destination id
 * and is also recorded as the effective affinity of @irqd.
 *
 * Returns 0 on success, -EINVAL when the masked low word is empty.
 */
int flat_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqd,
			    unsigned int *apicid)
{
	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqd);
	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;

	if (!cpu_mask)
		return -EINVAL;
	*apicid = (unsigned int)cpu_mask;
	cpumask_bits(effmsk)[0] = cpu_mask;
	return 0;
}

/* Return true if @apicid is set in the physical id @map. */
bool default_check_apicid_used(physid_mask_t *map, int apicid)
{
	return physid_isset(apicid, *map);
}

void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
				   const struct cpumask *mask)
{
	/*
	 * Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/* Restrict the vector allocation domain to the single target @cpu. */
void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
				      const struct cpumask *mask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}

/* Identity mapping: copy the physical cpu map straight into @retmap. */
void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	*retmap = *phys_map;
}

int default_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -13,8 +71,28 @@ int default_cpu_present_to_apicid(int mps_cpu)
	else
		return BAD_APICID;
}
EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid);

/* Return non-zero if @phys_apicid is set in phys_cpu_present_map. */
int default_check_phys_apicid_present(int phys_apicid)
{
	return physid_isset(phys_apicid, phys_cpu_present_map);
}

/*
 * Default set of CPUs an interrupt may target: all online CPUs on SMP
 * kernels, CPU 0 otherwise.
 */
const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

/* Target set for APIC drivers that always route to all online CPUs. */
const struct cpumask *online_target_cpus(void)
{
	return cpu_online_mask;
}

/* By default an APIC id is considered valid when it is below 255. */
int default_apic_id_valid(int apicid)
{
	return apicid < 255;
}