Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f78eae2e authored by David S. Miller, committed by David S. Miller
Browse files

[SPARC64]: Proper multi-core scheduling support.



The scheduling domain hierarchy is:

   all cpus -->
      cpus that share an instruction cache -->
          cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
parent d887ab3a
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -396,6 +396,15 @@ config SCHED_SMT
	  when dealing with UltraSPARC cpus at a cost of slightly increased
	  overhead in some places. If unsure say N here.

# Multi-core aware scheduler domain; companion to SCHED_SMT above.
# Requires SMP (see "depends on" below); enabled by default.
config SCHED_MC
	bool "Multi-core scheduler support"
	depends on SMP
	default y
	help
	  Multi-core scheduler support improves the CPU scheduler's decision
	  making when dealing with multi-core CPU chips at a cost of slightly
	  increased overhead in some places. If unsure say N here.

source "kernel/Kconfig.preempt"

config CMDLINE_BOOL
+49 −0
Original line number Diff line number Diff line
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
	}
}

/* Walk the "back" arcs of the execution-unit MD node @mp and record
 * @proc_id in the cpuinfo of every "cpu" node found, so cpus sharing
 * this integer execution unit end up with the same proc_id.
 */
static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
{
	int i;

	for (i = 0; i < mp->num_arcs; i++) {
		struct mdesc_node *t = mp->arcs[i].arc;
		const u64 *id;

		/* Only follow arcs named "back". */
		if (strcmp(mp->arcs[i].name, "back"))
			continue;

		/* And only those arcs that land on a "cpu" node. */
		if (strcmp(t->name, "cpu"))
			continue;

		id = md_get_property(t, "id", NULL);
		/* A cpu node should always carry an "id" property, but
		 * guard against a NULL return before dereferencing.
		 */
		if (id && *id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}

/* Enumerate MD nodes named @exec_unit_name, and for each one whose
 * "type" property lists "int" or "integer" (i.e. an integer execution
 * unit), assign a unique proc_id to all cpus attached to it.
 */
static void __init __set_proc_ids(const char *exec_unit_name)
{
	struct mdesc_node *mp;
	int idx;

	idx = 0;
	md_for_each_node_by_name(mp, exec_unit_name) {
		const char *type;
		int len;

		/* Skip nodes lacking a "type" property rather than
		 * handing NULL to find_in_proplist().
		 */
		type = md_get_property(mp, "type", &len);
		if (!type)
			continue;

		if (!find_in_proplist(type, "int", len) &&
		    !find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(mp, idx);

		idx++;
	}
}

/* Assign proc_ids using both spellings of the execution-unit node
 * name seen in machine descriptions: "exec_unit" and "exec-unit".
 */
static void __init set_proc_ids(void)
{
	const char *unit_names[2] = { "exec_unit", "exec-unit" };
	int i;

	for (i = 0; i < 2; i++)
		__set_proc_ids(unit_names[i]);
}

static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
{
	u64 val;
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
#endif

		c->core_id = 0;
		c->proc_id = -1;
	}

	set_core_ids();
	set_proc_ids();

	smp_fill_in_sib_core_maps();
}
+1 −0
Original line number Diff line number Diff line
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)

			cpu_data(cpuid).core_id = 0;
		}
		cpu_data(cpuid).proc_id = -1;

#ifdef CONFIG_SMP
		cpu_set(cpuid, cpu_present_map);
+18 −1
Original line number Diff line number Diff line
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
/* Per-cpu mask of cpus sharing an integer execution unit (matching
 * proc_id); filled in by smp_fill_in_sib_core_maps().
 */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
/* Per-cpu mask of cpus on the same core (matching core_id); also
 * filled in by smp_fill_in_sib_core_maps().
 */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
		unsigned int j;

		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_sibling_map[i]);
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, cpu_sibling_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
+1 −1
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@ typedef struct {
	unsigned int	ecache_size;
	unsigned int	ecache_line_size;
	int		core_id;
	unsigned int	__pad3;
	int		proc_id;
} cpuinfo_sparc;

DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
Loading