Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d6a3e2d3 authored by Lingutla Chandrasekhar's avatar Lingutla Chandrasekhar
Browse files

sched: walt: fix sched_cluster initialization



If cluster topology parsing fails, or the system boots with a single cpu,
then the topology possible-sibling mask can be empty. This leads to
initialization of sched_clusters with empty cpus and there are many
call sites, which use the cluster cpu mask without checking for empty mask.
They end up accessing invalid cpus (cpu id -1), leading to crashes.
Fix it by resetting to an initial cluster structure that has all possible
cpus as cluster mask. Also, warn when an empty cluster is detected.

To continue using init_cluster on device tree failures:
- Use init_cluster as default entry in cluster_head, and once
  cluster topology parsed successfully, cluster_head gets updated with
  proper cpu cluster information.
- If cluster topology parsing failed, free allocated sched_clusters and
  make sure rq points to init_cluster instead of invalid address.
- Remove unused cluster id bitmaps.

Change-Id: Ic65ba86ff90a271098fb593221d1679d634930b1
Signed-off-by: default avatarLingutla Chandrasekhar <clingutla@codeaurora.org>
parent 3654d89f
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -2861,8 +2861,6 @@ static inline void restore_cgroup_boost_settings(void) { }

extern int alloc_related_thread_groups(void);

extern unsigned long all_cluster_ids[];

extern void check_for_migration(struct rq *rq, struct task_struct *p);

static inline int is_reserved(int cpu)
+46 −30
Original line number Diff line number Diff line
@@ -2250,14 +2250,36 @@ static void walt_cpus_capacity_changed(const cpumask_t *cpus)
}


static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
struct sched_cluster *sched_cluster[NR_CPUS];
int num_clusters;
static int num_sched_clusters;

struct list_head cluster_head;
cpumask_t asym_cap_sibling_cpus = CPU_MASK_NONE;

/*
 * Fallback cluster spanning all possible CPUs (cpus is filled in by
 * init_clusters()). It is pre-linked into cluster_head at boot and is
 * the cluster each rq falls back to when topology parsing fails (see
 * cleanup_clusters()). The numeric fields are safe non-zero defaults
 * so scale-factor math never divides by zero before real values are
 * discovered.
 */
static struct sched_cluster init_cluster = {
	.list			=	LIST_HEAD_INIT(init_cluster.list),
	.id			=	0,
	.max_power_cost		=	1,
	.min_power_cost		=	1,
	.max_possible_capacity	=	1024,
	.efficiency		=	1,
	.cur_freq		=	1,
	.max_freq		=	1,
	.max_mitigated_freq	=	UINT_MAX,
	.min_freq		=	1,
	.max_possible_freq	=	1,
	.exec_scale_factor	=	1024,
	.aggr_grp_load		=	0,
};

/*
 * Install init_cluster as the default (and only) entry on cluster_head.
 * It covers every possible CPU, so the scheduler has a valid cluster to
 * consult even before — or instead of — a successful topology parse.
 */
void init_clusters(void)
{
	raw_spin_lock_init(&init_cluster.load_lock);
	init_cluster.cpus = *cpu_possible_mask;

	INIT_LIST_HEAD(&cluster_head);
	list_add(&init_cluster.list, &cluster_head);
}

static void
insert_cluster(struct sched_cluster *cluster, struct list_head *head)
{
@@ -2319,8 +2341,22 @@ static void add_cluster(const struct cpumask *cpus, struct list_head *head)
		cpu_rq(i)->cluster = cluster;

	insert_cluster(cluster, head);
	set_bit(num_clusters, all_cluster_ids);
	num_clusters++;
	num_sched_clusters++;
}

/*
 * Undo a partially-built cluster list after a topology parse failure:
 * every CPU covered by a node on @head is pointed back at init_cluster
 * (so no rq is left holding a soon-to-be-freed cluster), then each node
 * is unlinked and freed, keeping num_sched_clusters in step.
 */
static void cleanup_clusters(struct list_head *head)
{
	struct sched_cluster *c, *next;
	int cpu;

	list_for_each_entry_safe(c, next, head, list) {
		for_each_cpu(cpu, &c->cpus)
			cpu_rq(cpu)->cluster = &init_cluster;

		num_sched_clusters--;
		list_del(&c->list);
		kfree(c);
	}
}

static int compute_max_possible_capacity(struct sched_cluster *cluster)
@@ -2436,7 +2472,11 @@ void update_cluster_topology(void)

	for_each_cpu(i, &cpus) {
		cluster_cpus = topology_possible_sibling_cpumask(i);
		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
		if (cpumask_empty(cluster_cpus)) {
			WARN(1, "WALT: Invalid cpu topology!!");
			cleanup_clusters(&new_head);
			return;
		}
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
		add_cluster(cluster_cpus, &new_head);
	}
@@ -2460,30 +2500,6 @@ void update_cluster_topology(void)
		cpumask_clear(&asym_cap_sibling_cpus);
}

/*
 * Default cluster covering all possible CPUs (cpus is assigned in
 * init_clusters()). Non-static because other files reference it via an
 * extern declaration. Fields default to safe non-zero values so
 * frequency/capacity scaling math has sane inputs before the real
 * topology is known.
 */
struct sched_cluster init_cluster = {
	.list			=	LIST_HEAD_INIT(init_cluster.list),
	.id			=	0,
	.max_power_cost		=	1,
	.min_power_cost		=	1,
	.max_possible_capacity	=	1024,
	.efficiency		=	1,
	.cur_freq		=	1,
	.max_freq		=	1,
	.max_mitigated_freq	=	UINT_MAX,
	.min_freq		=	1,
	.max_possible_freq	=	1,
	.exec_scale_factor	=	1024,
	.aggr_grp_load		=	0,
};

/*
 * Reset cluster bookkeeping at boot: clear the cluster-id bitmap, make
 * init_cluster span every possible CPU, and start with an empty
 * cluster_head list. NOTE(review): init_cluster is not linked onto
 * cluster_head here, so the list stays empty until clusters are added
 * elsewhere — the newer variant of this function fixes that.
 */
void init_clusters(void)
{
	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
	init_cluster.cpus = *cpu_possible_mask;
	raw_spin_lock_init(&init_cluster.load_lock);
	INIT_LIST_HEAD(&cluster_head);
}

static unsigned long cpu_max_table_freq[NR_CPUS];

static int cpufreq_notifier_policy(struct notifier_block *nb,
+0 −2
Original line number Diff line number Diff line
@@ -45,8 +45,6 @@ extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;

extern struct sched_cluster init_cluster;

extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
						u64 wallclock, u64 irqtime);