Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 06c68199 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: use the CPU true capacity while sorting the min/mid/max CPUs"

parents 41d55caa 3b70c457
Loading
Loading
Loading
Loading
+0 −24
Original line number Diff line number Diff line
@@ -9208,10 +9208,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
	struct sched_group *sdg = sd->groups;
	struct max_cpu_capacity *mcc;
	unsigned long max_capacity;
	int max_cap_cpu;
	unsigned long flags;

	capacity *= arch_scale_max_freq_capacity(sd, cpu);
	capacity >>= SCHED_CAPACITY_SHIFT;
@@ -9219,26 +9215,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
	capacity = min(capacity, thermal_cap(cpu));
	cpu_rq(cpu)->cpu_capacity_orig = capacity;

	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;

	raw_spin_lock_irqsave(&mcc->lock, flags);
	max_capacity = mcc->val;
	max_cap_cpu = mcc->cpu;

	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
	    (max_capacity < capacity)) {
		mcc->val = capacity;
		mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
		raw_spin_unlock_irqrestore(&mcc->lock, flags);
		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
				cpu, capacity);
		goto skip_unlock;
#endif
	}
	raw_spin_unlock_irqrestore(&mcc->lock, flags);

skip_unlock: __attribute__ ((unused));
	capacity = scale_rt_capacity(cpu, capacity);

	if (!capacity)
+20 −8
Original line number Diff line number Diff line
@@ -1976,12 +1976,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att

		sd = *per_cpu_ptr(d.sd, i);

		if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
				cpu_rq(max_cpu)->cpu_capacity_orig))
		if ((max_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) >
				arch_scale_cpu_capacity(NULL, max_cpu)))
			WRITE_ONCE(d.rd->max_cap_orig_cpu, i);

		if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
				cpu_rq(min_cpu)->cpu_capacity_orig))
		if ((min_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) <
				arch_scale_cpu_capacity(NULL, min_cpu)))
			WRITE_ONCE(d.rd->min_cap_orig_cpu, i);

		cpu_attach_domain(sd, d.rd, i);
@@ -1992,14 +1992,26 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);

		if ((cpu_rq(i)->cpu_capacity_orig
				!=  cpu_rq(min_cpu)->cpu_capacity_orig) &&
				(cpu_rq(i)->cpu_capacity_orig
				!=  cpu_rq(max_cpu)->cpu_capacity_orig)) {
		if ((arch_scale_cpu_capacity(NULL, i)
				!=  arch_scale_cpu_capacity(NULL, min_cpu)) &&
				(arch_scale_cpu_capacity(NULL, i)
				!=  arch_scale_cpu_capacity(NULL, max_cpu))) {
			WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
			break;
		}
	}

	/*
	 * The max_cpu_capacity reflects the original capacity which does not
	 * change dynamically. So update the max cap CPU and its capacity
	 * here.
	 */
	if (d.rd->max_cap_orig_cpu != -1) {
		d.rd->max_cpu_capacity.cpu = d.rd->max_cap_orig_cpu;
		d.rd->max_cpu_capacity.val = arch_scale_cpu_capacity(NULL,
						d.rd->max_cap_orig_cpu);
	}

	rcu_read_unlock();

	if (has_asym)