Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe764fe4 authored by Greg Kroah-Hartman, committed by Alistair Delva
Browse files

Revert "ANDROID: sched: Update max cpu capacity in case of max frequency constraints"



This reverts commit bfc73d18.

We want to back-out the eas-dev merge that happened in the tree after
5.3-rc1 as those patches "should" all be in Linus's tree now.

This is done to handle the merge conflicts with 5.4-rc1.

Cc: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6e5f8f45be6a2ea01446dd2cea0e3da1fa839404
parent e99623b3
Loading
Loading
Loading
Loading
+2 −32
Original line number Diff line number Diff line
@@ -6231,7 +6231,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
		return 0;

	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity.val;
	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;

	/* Minimum capacity is close to max, no need to abort wake_affine */
	if (max_cap - min_cap < max_cap >> 3)
@@ -7781,46 +7781,16 @@ static unsigned long scale_rt_capacity(int cpu, unsigned long max)
	return scale_irq_capacity(free, irq, max);
}

/*
 * Reset a max_cpu_capacity tracker to its pristine state: no capacity
 * recorded yet (val == 0) and no owning CPU (cpu == -1), with the
 * protecting raw spinlock freshly initialized.
 */
void init_max_cpu_capacity(struct max_cpu_capacity *mcc) {
	mcc->cpu = -1;
	mcc->val = 0;
	raw_spin_lock_init(&mcc->lock);
}

static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long capacity = arch_scale_cpu_capacity(cpu);
	struct sched_group *sdg = sd->groups;
	struct max_cpu_capacity *mcc;
	unsigned long max_capacity;
	int max_cap_cpu;
	unsigned long flags;

	cpu_rq(cpu)->cpu_capacity_orig = capacity;

	capacity *= arch_scale_max_freq_capacity(sd, cpu);
	capacity >>= SCHED_CAPACITY_SHIFT;

	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;

	raw_spin_lock_irqsave(&mcc->lock, flags);
	max_capacity = mcc->val;
	max_cap_cpu = mcc->cpu;

	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
	    (max_capacity < capacity)) {
		mcc->val = capacity;
		mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
		raw_spin_unlock_irqrestore(&mcc->lock, flags);
		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
				cpu, capacity);
		goto skip_unlock;
#endif
	}
	raw_spin_unlock_irqrestore(&mcc->lock, flags);

skip_unlock: __attribute__ ((unused));
	capacity = scale_rt_capacity(cpu, capacity);

	if (!capacity)
@@ -7925,7 +7895,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
/*
 * Return non-zero when @rq carries a misfit task and either this CPU's
 * original capacity is below the root domain's maximum CPU capacity, or
 * the CPU's capacity is significantly reduced (see check_cpu_capacity()).
 *
 * NOTE(review): the two comparison lines below are a diff artifact — the
 * first (max_cpu_capacity.val) is the pre-revert form, the second the
 * post-revert form reading the plain unsigned long field. Only one of
 * them exists in the actual source; confirm against the applied tree.
 */
static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
{
	return rq->misfit_task_load &&
		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity.val ||
		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
		 check_cpu_capacity(rq, sd));
}

+1 −9
Original line number Diff line number Diff line
@@ -710,12 +710,6 @@ struct perf_domain {
	struct rcu_head rcu;
};

/*
 * Tracker for the largest CPU capacity seen in a root domain, together
 * with the CPU that owns it. Removed by this revert in favour of a bare
 * unsigned long in struct root_domain.
 */
struct max_cpu_capacity {
	raw_spinlock_t lock;	/* serializes updates to val/cpu */
	unsigned long val;	/* current maximum capacity value */
	int cpu;		/* CPU holding that capacity; -1 if unset */
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */
@@ -774,8 +768,7 @@ struct root_domain {
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	/* Maximum cpu capacity in the system. */
	struct max_cpu_capacity max_cpu_capacity;
	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
@@ -788,7 +781,6 @@ extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
+12 −3
Original line number Diff line number Diff line
@@ -510,9 +510,6 @@ static int init_rootdomain(struct root_domain *rd)

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;

	init_max_cpu_capacity(&rd->max_cpu_capacity);

	return 0;

free_cpudl:
@@ -1933,6 +1930,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;
	struct sched_domain_topology_level *tl_asym;
	bool has_asym = false;
@@ -1995,7 +1993,13 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();
@@ -2003,6 +2007,11 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
	if (has_asym)
		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);

	if (rq && sched_debug_enabled) {
		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);