
Commit 238a5b4b authored by Ingo Molnar

Merge branch 'cpus4096' of...

parents 17d85bc7 73e907de
+0 −18
@@ -3,8 +3,6 @@
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)

static inline void setup_cpu_local_masks(void) { }

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
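With the 32-bit cpumask_t variants gone, both configurations go through the cpumask_var_t externs kept above. A minimal sketch of the cpumask_var_t allocation pattern those externs assume, not taken from this commit (the function name is made up for illustration):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical example: with CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a
 * pointer that must be allocated; without it, it is an ordinary mask.
 * The same calls work in both cases. */
static int example_build_mask(void)
{
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(tmp, cpu_online_mask);     /* start from the online CPUs */
        cpumask_clear_cpu(0, tmp);              /* drop the boot CPU, say */

        free_cpumask_var(tmp);
        return 0;
}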
+0 −5
@@ -109,11 +109,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
	return sd->node;
}

static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
{
	return node_to_cpumask(__pcibus_to_node(bus));
}

static inline const struct cpumask *
cpumask_of_pcibus(const struct pci_bus *bus)
{
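The removed helper returned a whole cpumask_t by value, which costs stack space when NR_CPUS is large; callers are expected to use the pointer-returning cpumask_of_pcibus() shown above instead. A hypothetical usage sketch:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

/* Hypothetical example: walk the CPUs local to a PCI bus without copying
 * a NR_CPUS-bit mask onto the stack. */
static void example_walk_bus_cpus(struct pci_bus *bus)
{
        const struct cpumask *mask = cpumask_of_pcibus(bus);
        int cpu;

        for_each_cpu(cpu, mask)
                pr_info("bus %02x: local CPU %d\n", bus->number, cpu);
}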
+1 −1
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_t		llc_shared_map;
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			 x86_max_cores;
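Turning llc_shared_map into a cpumask_var_t means the mask is allocated at CPU bring-up and handed around by pointer rather than embedded in cpuinfo_x86. A hypothetical check (assuming CONFIG_SMP) that two CPUs share a last-level cache:

#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/processor.h>

/* Hypothetical example: the field is now passed directly as a
 * struct cpumask pointer instead of being taken by address. */
static bool example_share_llc(int a, int b)
{
        return cpumask_test_cpu(b, cpu_data(a).llc_shared_map);
}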
+7 −6
@@ -21,19 +21,19 @@
extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return &per_cpu(cpu_sibling_map, cpu);
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return &per_cpu(cpu_core_map, cpu);
	return per_cpu(cpu_core_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
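Since cpu_sibling_map and cpu_core_map are now cpumask_var_t, the accessors above return the per-cpu value directly rather than taking its address. A hypothetical caller iterating a CPU's hyperthread siblings:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/smp.h>

/* Hypothetical example: cpu_sibling_mask() already yields a
 * struct cpumask *, so it feeds straight into the iterators. */
static void example_print_siblings(int cpu)
{
        int sibling;

        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                pr_info("CPU %d and CPU %d share a core\n", cpu, sibling);
}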
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(&mask);
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
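The arch hook now forwards a const struct cpumask * instead of taking a cpumask_t by value, matching the generic cross-call API. A hypothetical caller built on that API (smp_call_function_many() expects preemption to be disabled):

#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <asm/smp.h>

static void example_ipi_func(void *info)
{
        /* runs on every CPU the mask selects, other than the caller */
}

/* Hypothetical example: hand a mask pointer down to the IPI machinery. */
static void example_kick_core_siblings(int cpu)
{
        preempt_disable();
        smp_call_function_many(cpu_core_mask(cpu), example_ipi_func, NULL, 1);
        preempt_enable();
}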
+18 −71
@@ -44,9 +44,6 @@

#ifdef CONFIG_X86_32

/* Mappings between node number and cpus on that node. */
extern cpumask_t node_to_cpumask_map[];

/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];

@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
}
#define early_cpu_to_node(cpu)	cpu_to_node(cpu)

/* Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}

static inline void setup_node_to_cpumask_map(void) { }

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_t *node_to_cpumask_map;

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

@@ -108,34 +81,25 @@ static inline int early_cpu_to_node(int cpu)
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* CONFIG_X86_64 */

/* Returns a bitmask of CPUs on Node 'node'. */
static inline cpumask_t node_to_cpumask(int node)
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return node_to_cpumask_map[node];
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif

extern void setup_node_to_cpumask_map(void);

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			   v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */

/*
 * Returns the number of the node containing Node 'node'. This
 * architecture is flat, so it is a pretty simple function!
@@ -143,7 +107,6 @@ extern void setup_node_to_cpumask_map(void);
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];
@@ -209,30 +172,17 @@ static inline int early_cpu_to_node(int cpu)
	return 0;
}

static inline const cpumask_t *cpumask_of_node(int node)
{
	return &cpu_online_map;
}
static inline cpumask_t node_to_cpumask(int node)
static inline const struct cpumask *cpumask_of_node(int node)
{
	return cpu_online_map;
	return cpu_online_mask;
}
static inline int node_to_first_cpu(int node)
{
	return first_cpu(cpu_online_map);
	return cpumask_first(cpu_online_mask);
}

static inline void setup_node_to_cpumask_map(void) { }

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			   v = cpumask_of_node(node)
#endif

#include <asm-generic/topology.h>
@@ -245,16 +195,13 @@ static inline int node_to_first_cpu(int node)
}
#endif

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers		yes
@@ -268,7 +215,7 @@ struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
#define smt_capable()			(smp_num_siblings > 1)
#endif
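With node_to_cpumask() and the node_to_cpumask_ptr macros gone, callers take a pointer from cpumask_of_node() and operate on it in place. A hypothetical example:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical example: no NR_CPUS-bit copy lands on the stack; the
 * node's mask is only read through the returned pointer. */
static void example_report_node(int node)
{
        const struct cpumask *mask = cpumask_of_node(node);

        pr_info("node %d: %u CPUs, first is CPU %u\n",
                node, cpumask_weight(mask), cpumask_first(mask));
}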
