Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7bb7779c authored by Avaneesh Kumar Dwivedi, committed by Chetan C R
Browse files

core_ctl: Add check for available cpus before accessing per_cpus



For the qultivate target, per_cpu() is being called for
CPUs which are fused out, leading to a crash. So, add
a check for available CPUs before calling per_cpu().

Change-Id: Idfd97fcfc83baa59afe9010396e7b6314087bf13
Signed-off-by: Avaneesh Kumar Dwivedi <quic_akdwived@quicinc.com>
Signed-off-by: Chetan C R <quic_cchinnad@quicinc.com>
parent 1de0be8d
Loading
Loading
Loading
Loading
+20 −8
Original line number Diff line number Diff line
@@ -340,7 +340,7 @@ static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int i, mask;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;
@@ -353,10 +353,16 @@ static ssize_t store_not_preferred(struct cluster_data *state,
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
	for (i = 0, mask = 0; i < state->num_cpus;) {
		if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) {
			mask++;
			continue;
		}

		c = &per_cpu(cpu_state, i + mask + state->first_cpu);
		c->not_preferred = val[i];
		not_preferred_count += !!val[i];
		i++;
	}
	state->nr_not_preferred_cpus = not_preferred_count;
	spin_unlock_irqrestore(&state_lock, flags);
@@ -369,20 +375,26 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;
	int i, mask;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
	for (i = 0, mask = 0; i < state->num_cpus;) {
		if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) {
			mask++;
			continue;
		}

		c = &per_cpu(cpu_state, i + mask + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
			"CPU#%d: %u\n", c->cpu, c->not_preferred);
		i++;
	}

	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}


struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);