Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d1e57508 authored by Heiko Carstens, committed by Martin Schwidefsky
Browse files

s390/topology: cleanup topology code



Mainly merge all different per-cpu arrays into a single array which
holds all topology information per logical cpu.
Also fix the broken core vs socket variable naming and simplify the
locking a bit.
When running in environments without topology information also
invent book, socket and core ids, so that not all ids are zero.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 0a4ccc99
Loading
Loading
Loading
Loading
+18 −16
Original line number Diff line number Diff line
@@ -8,32 +8,34 @@ struct cpu;

#ifdef CONFIG_SCHED_BOOK

extern unsigned char cpu_socket_id[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
struct cpu_topology_s390 {
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	cpumask_t core_mask;
	cpumask_t book_mask;
};

extern struct cpu_topology_s390 cpu_topology[NR_CPUS];

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu)		(&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu)			(cpu_topology[cpu].book_id)
#define topology_book_cpumask(cpu)		(&cpu_topology[cpu].book_mask)

extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
#define mc_capable() 1

static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_core_map[cpu];
	return &cpu_topology[cpu].core_mask;
}

#define topology_core_id(cpu)		(cpu_core_id[cpu])
#define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
#define mc_capable()			(1)

extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];

static inline const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_book_map[cpu];
	return &cpu_topology[cpu].book_mask;
}

#define topology_book_id(cpu)		(cpu_book_id[cpu])
#define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])

int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
+54 −59
Original line number Diff line number Diff line
@@ -29,48 +29,38 @@ struct mask_info {
	cpumask_t mask;
};

static int topology_enabled = 1;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);

/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

struct cpu_topology_s390 cpu_topology[NR_CPUS];

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	unsigned int cpu;

@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu >= 0) {
		if (lcpu < 0)
			continue;
		cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
			cpumask_set_cpu(lcpu, &core->mask);
			cpu_core_id[lcpu] = rcpu;
			if (one_core_per_cpu) {
				cpu_socket_id[lcpu] = rcpu;
				core = core->next;
		cpu_topology[lcpu].book_id = book->id;
		cpumask_set_cpu(lcpu, &socket->mask);
		cpu_topology[lcpu].core_id = rcpu;
		if (one_socket_per_cpu) {
			cpu_topology[lcpu].socket_id = rcpu;
			socket = socket->next;
		} else {
				cpu_socket_id[lcpu] = core->id;
			cpu_topology[lcpu].socket_id = socket->id;
		}
		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
	}
	}
	return core;
	return socket;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			add_cpus_to_mask(&tle->cpu, book, socket, 0);
			break;
		default:
			clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
	}
}

static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
			break;
		default:
			clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
	}
}

static void tl_to_cores(struct sysinfo_15_1_x *info)
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
		__tl_to_masks_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
	return rc;
}

static void update_cpu_core_map(void)
static void update_cpu_masks(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			cpu_topology[cpu].core_id = cpu;
			cpu_topology[cpu].socket_id = cpu;
			cpu_topology[cpu].book_id = cpu;
		}
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		update_cpu_masks();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	tl_to_masks(info);
	update_cpu_masks();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
}

@@ -454,7 +449,7 @@ static int __init topology_init(void)
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	update_cpu_masks();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);