Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9466d603 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'master' of...

parents 1f3f424a 83b19597
Loading
Loading
Loading
Loading
+7 −6
Original line number Original line Diff line number Diff line
@@ -591,19 +591,20 @@ config IOMMU_HELPER


config MAXSMP
config MAXSMP
	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
	depends on X86_64 && SMP && BROKEN
	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
	select CPUMASK_OFFSTACK
	default n
	default n
	help
	help
	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
	  If unsure, say N.
	  If unsure, say N.


config NR_CPUS
config NR_CPUS
	int "Maximum number of CPUs (2-512)" if !MAXSMP
	int "Maximum number of CPUs" if SMP && !MAXSMP
	range 2 512
	range 2 512 if SMP && !MAXSMP
	depends on SMP
	default "1" if !SMP
	default "4096" if MAXSMP
	default "4096" if MAXSMP
	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
	default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
	default "8"
	default "8" if SMP
	help
	help
	  This allows you to specify the maximum number of CPUs which this
	  This allows you to specify the maximum number of CPUs which this
	  kernel will support.  The maximum supported value is 512 and the
	  kernel will support.  The maximum supported value is 512 and the
+23 −7
Original line number Original line Diff line number Diff line
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
	return (1);
	return (1);
}
}


static inline cpumask_t target_cpus(void)
static inline const cpumask_t *target_cpus(void)
{
{
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
        return cpu_online_map;
	return &cpu_online_map;
#else
#else
        return cpumask_of_cpu(0);
	return &cpumask_of_cpu(0);
#endif
#endif
}
}


@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)


static inline int cpu_present_to_apicid(int mps_cpu)
static inline int cpu_present_to_apicid(int mps_cpu)
{
{
	if (mps_cpu < NR_CPUS)
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);


	return BAD_APICID;
	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
static inline int cpu_to_logical_apicid(int cpu)
{
{
	if (cpu >= NR_CPUS)
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
		return BAD_APICID;
	return cpu_physical_id(cpu);
	return cpu_physical_id(cpu);
}
}
@@ -119,16 +119,32 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
}
}


/* As we are using single CPU as destination, pick only one CPU here */
/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
{
	int cpu;
	int cpu;
	int apicid;	
	int apicid;	


	cpu = first_cpu(cpumask);
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	apicid = cpu_to_logical_apicid(cpu);
	return apicid;
	return apicid;
}
}


/*
 * Intersect @cpumask with @andmask and return the APIC ID of one CPU in
 * the intersection, or BAD_APICID if the intersection is empty.
 * cpumask_any_and() picks an arbitrary set bit; nr_cpu_ids bounds valid
 * CPU numbers.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	/*
	 * NOTE(review): the comment above says "phys APIC ID" but the code
	 * returns cpu_to_logical_apicid(); on this sub-architecture the two
	 * appear to coincide (cpu_physical_id) — confirm against mach_apic.h.
	 */
	cpu = cpumask_any_and(cpumask, andmask);
	if (cpu < nr_cpu_ids)
		return cpu_to_logical_apicid(cpu);

	return BAD_APICID;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
{
	return cpuid_apic >> index_msb;
	return cpuid_apic >> index_msb;
+5 −8
Original line number Original line Diff line number Diff line
#ifndef __ASM_MACH_IPI_H
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H


void send_IPI_mask_sequence(cpumask_t mask, int vector);
void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);


static inline void send_IPI_mask(cpumask_t mask, int vector)
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
{
	send_IPI_mask_sequence(mask, vector);
	send_IPI_mask_sequence(mask, vector);
}
}


static inline void send_IPI_allbutself(int vector)
static inline void send_IPI_allbutself(int vector)
{
{
	cpumask_t mask = cpu_online_map;
	send_IPI_mask_allbutself(cpu_online_mask, vector);
	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		send_IPI_mask(mask, vector);
}
}


static inline void send_IPI_all(int vector)
static inline void send_IPI_all(int vector)
{
{
	send_IPI_mask(cpu_online_map, vector);
	send_IPI_mask(cpu_online_mask, vector);
}
}


#endif /* __ASM_MACH_IPI_H */
#endif /* __ASM_MACH_IPI_H */
+68 −18
Original line number Original line Diff line number Diff line
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
	        return (1);
	        return (1);
}
}


static inline cpumask_t target_cpus_cluster(void)
static inline const cpumask_t *target_cpus_cluster(void)
{
{
	return CPU_MASK_ALL;
	return &CPU_MASK_ALL;
}
}


static inline cpumask_t target_cpus(void)
static inline const cpumask_t *target_cpus(void)
{
{
	return cpumask_of_cpu(smp_processor_id());
	return &cpumask_of_cpu(smp_processor_id());
}
}


#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -82,7 +82,8 @@ static inline void setup_apic_routing(void)
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
		(apic_version[apic] == 0x14) ?
		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
			"Physical Cluster" : "Logical Cluster",
			nr_ioapics, cpus_addr(*target_cpus())[0]);
}
}


static inline int multi_timer_check(int apic, int irq)
static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
{
{
	if (!mps_cpu)
	if (!mps_cpu)
		return boot_cpu_physical_apicid;
		return boot_cpu_physical_apicid;
	else if (mps_cpu < NR_CPUS)
	else if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
	else
		return BAD_APICID;
		return BAD_APICID;
@@ -120,7 +121,7 @@ extern u8 cpu_2_logical_apicid[];
static inline int cpu_to_logical_apicid(int cpu)
static inline int cpu_to_logical_apicid(int cpu)
{
{
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
       if (cpu >= NR_CPUS)
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
	return (int)cpu_2_logical_apicid[cpu];
#else
#else
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
	return (1);
	return (1);
}
}


static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
{
	int num_bits_set;
	int num_bits_set;
	int cpus_found = 0;
	int cpus_found = 0;
	int cpu;
	int cpu;
	int apicid;
	int apicid;


	num_bits_set = cpus_weight(cpumask);
	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	/* Return id to all */
	if (num_bits_set == NR_CPUS)
	if (num_bits_set == NR_CPUS)
		return 0xFF;
		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
	 */
	cpu = first_cpu(cpumask);
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, cpumask)) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
	return apicid;
	return apicid;
}
}


static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
{
	int num_bits_set;
	int num_bits_set;
	int cpus_found = 0;
	int cpus_found = 0;
	int cpu;
	int cpu;
	int apicid;
	int apicid;


	num_bits_set = cpus_weight(cpumask);
	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	/* Return id to all */
	if (num_bits_set == NR_CPUS)
	if (num_bits_set == NR_CPUS)
		return cpu_to_logical_apicid(0);
		return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
	 */
	cpu = first_cpu(cpumask);
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, cpumask)) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
					apicid_cluster(new_apicid)){
@@ -212,6 +214,54 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
	return apicid;
	return apicid;
}
}


/*
 * ES7000 variant: compute the target APIC ID for the CPUs in the
 * intersection of @cpumask and @andmask.
 *
 * All CPUs in the intersection must live in the same APIC cluster; if a
 * CPU from a different cluster is found, fall back to the default target
 * (0xFF broadcast in clustered-APIC mode, otherwise the logical APIC ID
 * of CPU 0).  Returns the logical APIC ID of the (single-cluster) set.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int num_bits_set;
	int num_bits_set2;
	int cpus_found = 0;
	int cpu;
	int apicid = 0;

	/*
	 * min() of the two weights is an upper bound on the size of the
	 * intersection; the loop below stops after visiting that many
	 * CPUs that are set in BOTH masks.
	 */
	num_bits_set = cpumask_weight(cpumask);
	num_bits_set2 = cpumask_weight(andmask);
	num_bits_set = min(num_bits_set, num_bits_set2);
	/* Return id to all */
	if (num_bits_set >= nr_cpu_ids)
#if defined CONFIG_ES7000_CLUSTERED_APIC
		return 0xFF;
#else
		return cpu_to_logical_apicid(0);
#endif
	/*
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
	cpu = cpumask_first_and(cpumask, andmask);
	apicid = cpu_to_logical_apicid(cpu);

	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask) &&
		    cpumask_test_cpu(cpu, andmask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				/* Mixed clusters: bail out to the default. */
				printk(KERN_WARNING
					"%s: Not a valid mask!\n", __func__);
#if defined CONFIG_ES7000_CLUSTERED_APIC
				return 0xFF;
#else
				return cpu_to_logical_apicid(0);
#endif
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
		/*
		 * NOTE(review): cpu advances unbounded until cpus_found
		 * reaches num_bits_set; relies on the weight bound above to
		 * terminate before running past nr_cpu_ids — verify upstream.
		 */
	}
	return apicid;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
{
	return cpuid_apic >> index_msb;
	return cpuid_apic >> index_msb;
+5 −7
Original line number Original line Diff line number Diff line
#ifndef __ASM_ES7000_IPI_H
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H


void send_IPI_mask_sequence(cpumask_t mask, int vector);
void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);


static inline void send_IPI_mask(cpumask_t mask, int vector)
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
{
	send_IPI_mask_sequence(mask, vector);
	send_IPI_mask_sequence(mask, vector);
}
}


static inline void send_IPI_allbutself(int vector)
static inline void send_IPI_allbutself(int vector)
{
{
	cpumask_t mask = cpu_online_map;
	send_IPI_mask_allbutself(cpu_online_mask, vector);
	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, vector);
}
}


static inline void send_IPI_all(int vector)
static inline void send_IPI_all(int vector)
{
{
	send_IPI_mask(cpu_online_map, vector);
	send_IPI_mask(cpu_online_mask, vector);
}
}


#endif /* __ASM_ES7000_IPI_H */
#endif /* __ASM_ES7000_IPI_H */
Loading