Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c234ba42 authored by Ivaylo Georgiev
Browse files

Revert "BACKPORT: sched/rt: Make RT capacity-aware"



This reverts commit cdadd914.

This is a preparation change for merging android-4.19.110 into
msm-4.19 branch.

Logic for RT task placement has already been added.

Change-Id: I2373371283cd6fe318047ea9635ed4db7d0c020a
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
parent 3f355772
Loading
Loading
Loading
Loading
+2 −23
Original line number Diff line number Diff line
@@ -50,8 +50,6 @@ static int convert_prio(int prio)
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits a specific criteria so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
@@ -63,8 +61,7 @@ static int convert_prio(int prio)
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);
@@ -105,8 +102,6 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
			continue;

		if (lowest_mask) {
			int cpu;

			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
@@ -117,23 +112,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_empty(lowest_mask))
				continue;

			if (!fitness_fn)
				return 1;

			/* Ensure the capacity of the CPUs fit the task */
			for_each_cpu(cpu, lowest_mask) {
				if (!fitness_fn(p, cpu))
					cpumask_clear_cpu(cpu, lowest_mask);
			}

			/*
			 * If no CPU at the current priority can fit the task
			 * continue looking
			 */
			if (cpumask_empty(lowest_mask))
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

+1 −3
Original line number Diff line number Diff line
@@ -18,9 +18,7 @@ struct cpupri {
};

#ifdef CONFIG_SMP
int  cpupri_find(struct cpupri *cp, struct task_struct *p,
		 struct cpumask *lowest_mask,
		 bool (*fitness_fn)(struct task_struct *p, int cpu));
int  cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int  cpupri_init(struct cpupri *cp);
void cpupri_cleanup(struct cpupri *cp);
+15 −68
Original line number Diff line number Diff line
@@ -436,45 +436,6 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where uclamp_min value
 * is higher than the capacity of a @cpu. For non-heterogeneous system this
 * function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
@@ -1434,7 +1395,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
{
	struct task_struct *curr;
	struct rq *rq;
	bool test;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1466,16 +1426,10 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	test = curr &&
	       unlikely(rt_task(curr)) &&
	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

	if (test || !rt_task_fits_capacity(p, cpu)) {
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio)) {
		int target = find_lowest_rq(p);

		/*
@@ -1499,15 +1453,15 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
	if (p->nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
@@ -1662,8 +1616,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_allowed) &&
	    rt_task_fits_capacity(p, cpu))
	    cpumask_test_cpu(cpu, &p->cpus_allowed))
		return 1;

	return 0;
@@ -1705,8 +1658,7 @@ static int find_lowest_rq(struct task_struct *task)
	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
			 rt_task_fits_capacity))
	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
@@ -2212,14 +2164,12 @@ static void pull_rt_task(struct rq *this_rq)
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	bool need_to_push = !task_running(rq, p) &&
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
			     rq->curr->prio <= p->prio);

	if (need_to_push || !rt_task_fits_capacity(p, cpu_of(rq)))
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}

@@ -2291,10 +2241,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
	 */
	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		bool need_to_push = rq->rt.overloaded ||
				    !rt_task_fits_capacity(p, cpu_of(rq));

		if (p->nr_cpus_allowed > 1 && need_to_push)
		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
			rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))