Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d22fa70a, authored by qctecmdr Service and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "sched/walt: Fix stale max_capacity issue during CPU hotplug"

parents feabd355 d752014c
Loading
Loading
Loading
Loading
+22 −5
Original line number Diff line number Diff line
@@ -523,19 +523,29 @@ static void pm_qos_irq_release(struct kref *ref)
}

static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
		const cpumask_t *mask)
		const cpumask_t *unused_mask)
{
	unsigned long flags;
	struct pm_qos_request *req = container_of(notify,
					struct pm_qos_request, irq_notify);
	struct pm_qos_constraints *c =
				pm_qos_array[req->pm_qos_class]->constraints;
	struct irq_desc *desc = irq_to_desc(req->irq);
	struct cpumask *new_affinity =
			irq_data_get_effective_affinity_mask(&desc->irq_data);
	bool affinity_changed = false;

	spin_lock_irqsave(&pm_qos_lock, flags);
	cpumask_copy(&req->cpus_affine, mask);
	if (!cpumask_equal(&req->cpus_affine, new_affinity)) {
		cpumask_copy(&req->cpus_affine, new_affinity);
		affinity_changed = true;
	}

	spin_unlock_irqrestore(&pm_qos_lock, flags);

	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio);
	if (affinity_changed)
		pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
				     req->node.prio);
}
#endif

@@ -580,9 +590,16 @@ void pm_qos_add_request(struct pm_qos_request *req,
			if (!desc)
				return;

			mask = desc->irq_data.common->affinity;
			/*
			 * If the IRQ is not started, the effective affinity
			 * won't be set. So fallback to the default affinity.
			 */
			mask = irq_data_get_effective_affinity_mask(
						&desc->irq_data);
			if (cpumask_empty(mask))
				mask = irq_data_get_affinity_mask(
						&desc->irq_data);

			/* Get the current affinity */
			cpumask_copy(&req->cpus_affine, mask);
			req->irq_notify.irq = req->irq;
			req->irq_notify.notify = pm_qos_irq_notify;
+2 −0
Original line number Diff line number Diff line
@@ -6316,6 +6316,7 @@ int sched_cpu_activate(unsigned int cpu)
	rq_unlock_irqrestore(rq, &rf);

	update_max_interval();
	walt_update_min_max_capacity();

	return 0;
}
@@ -6351,6 +6352,7 @@ int sched_cpu_deactivate(unsigned int cpu)
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	walt_update_min_max_capacity();
	return 0;
}

+6 −1
Original line number Diff line number Diff line
@@ -2819,7 +2819,10 @@ static inline void __update_min_max_capacity(void)
	int i;
	int max_cap = 0, min_cap = INT_MAX;

	for_each_online_cpu(i) {
	for_each_possible_cpu(i) {
		if (!cpu_active(i))
			continue;

		max_cap = max(max_cap, cpu_capacity(i));
		min_cap = min(min_cap, cpu_capacity(i));
	}
@@ -3103,6 +3106,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
}

extern void walt_map_freq_to_load(void);
extern void walt_update_min_max_capacity(void);

static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
{
@@ -3256,6 +3260,7 @@ static inline unsigned int power_cost(int cpu, u64 demand)

static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
static inline void walt_map_freq_to_load(void) { }
static inline void walt_update_min_max_capacity(void) { }
#endif	/* CONFIG_SCHED_WALT */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+2 −2
Original line number Diff line number Diff line
@@ -2180,7 +2180,7 @@ static int compute_max_possible_capacity(struct sched_cluster *cluster)
	return capacity;
}

static void update_min_max_capacity(void)
void walt_update_min_max_capacity(void)
{
	unsigned long flags;

@@ -2351,7 +2351,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
	if (val != CPUFREQ_NOTIFY)
		return 0;

	update_min_max_capacity();
	walt_update_min_max_capacity();

	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
	if (min_max_freq == 1)