
Commit db1f2312 authored by Stephen Dickey

sched/cpupri: skip isolated cpus



Currently there exists a path where cpupri_find() can
return a lowest_mask without removing isolated cpus. This
happens when __cpupri_find() does not consider isolated
cpus and indicates that the lowest_mask is not empty.
Subsequently, cpupri_find_fitness() finds that the
lowest_mask is empty after removing isolated cpus.
This leads to cpupri_find() being called again and
cpupri_find_fitness() being re-entered; this time,
since fitness_fn is NULL, it returns true. This means
there is a case where lowest_mask holds only isolated
cpus and cpupri_find_fitness() reports that cpus were
found.
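
For reference, this is roughly how that fallback is wired up
in mainline kernels of this era (a condensed sketch, not this
tree's exact code): cpupri_find() is a thin wrapper that
re-enters cpupri_find_fitness() with a NULL fitness_fn, and
the tail of cpupri_find_fitness() uses that wrapper when no
fitting cpu was found.

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	/* No fitness criteria: any non-empty lowest_mask counts as a hit */
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

	/*
	 * Tail of cpupri_find_fitness(): if no fitting lowest_mask was
	 * found, kick off a new search without fitness criteria. With
	 * fitness_fn == NULL the retry returns 1 as soon as a non-empty
	 * lowest_mask is built, even when that mask holds only isolated
	 * cpus, which is the problem described above.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;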

When invoked in the rt wakeup path through find_lowest_rq(),
this causes pushes of rt tasks to be placed on isolated
cpus. It also causes wakeups of rt tasks on isolated cpus,
although this is a rare occurrence.
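
To make the consequence concrete, here is a simplified sketch
of the consumer, loosely based on find_lowest_rq() in mainline
kernel/sched/rt.c (rt_task_fits_capacity and the elided
locality heuristics are mainline assumptions, not this tree's
exact code):

static int find_lowest_rq(struct task_struct *task)
{
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);

	/* Ask cpupri for the lowest-priority cpus this task may run on */
	if (!cpupri_find_fitness(&task_rq(task)->rd->cpupri, task,
				 lowest_mask, rt_task_fits_capacity))
		return -1; /* no suitable cpu found */

	/*
	 * Cache/locality heuristics elided: any cpu left in lowest_mask
	 * can be chosen, so a mask containing only isolated cpus means
	 * the rt push or wakeup lands on an isolated cpu.
	 */
	return cpumask_any(lowest_mask);
}

In mainline, both the push path (push_rt_task() via
find_lock_lowest_rq()) and the wakeup path (select_task_rq_rt())
funnel through this helper, which is why a false-positive
lowest_mask shows up as pushes and, more rarely, wakeups on
isolated cpus.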

Fix it by ensuring __cpupri_find() removes isolated cpus
from lowest_mask, which in turn causes cpupri_find_fitness()
to skip the current task_pri index before making any other
decision. For the case where lowest_mask only has isolated
cpus left, this results in no cpu being found and a return
of false from cpupri_find_fitness().
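
Condensed view of the net effect inside __cpupri_find()
(WALT-only #ifdefs and the drop_nopreempts handling from the
diff below are elided; the empty-mask check follows the
mainline shape and is an assumption here):

	if (lowest_mask) {
		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
		/* New: strip isolated cpus before deciding anything else */
		cpumask_andnot(lowest_mask, lowest_mask, cpu_isolated_mask);

		/*
		 * If only isolated cpus remained, the mask is now empty and
		 * this priority index is treated as having no usable cpus,
		 * so the caller skips it instead of reporting success.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;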

Change-Id: I3e28b8366513ee51dc5c2d4eb0a2eb146c82f255
Signed-off-by: Stephen Dickey <dickey@codeaurora.org>
parent 0afd6cbe
+27 −7
@@ -41,6 +41,7 @@ static int convert_prio(int prio)
	return cpupri;
}

#ifdef CONFIG_SCHED_WALT
/**
 * drop_nopreempt_cpus - remove a cpu from the mask if it is likely
 *			 non-preemptible
@@ -61,9 +62,16 @@ drop_nopreempt_cpus(struct cpumask *lowest_mask)
		cpu = cpumask_next(cpu, lowest_mask);
	}
}
#endif /* CONFIG_SCHED_WALT */

#ifndef CONFIG_SCHED_WALT
static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
#else
static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx,
				bool drop_nopreempts)
#endif
{
	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
	int skip = 0;
@@ -100,6 +108,13 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
	if (lowest_mask) {
		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

#ifdef CONFIG_SCHED_WALT
		if (drop_nopreempts)
			drop_nopreempt_cpus(lowest_mask);

		cpumask_andnot(lowest_mask, lowest_mask,
			       cpu_isolated_mask);
#endif
		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
@@ -144,26 +159,28 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;

#ifdef CONFIG_SCHED_WALT
	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
#endif

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

#ifdef CONFIG_SCHED_WALT
retry:
#endif
	for (idx = 0; idx < task_pri; idx++) {

#ifndef CONFIG_SCHED_WALT
		if (!__cpupri_find(cp, p, lowest_mask, idx))
#else
		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
#endif
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

#ifdef CONFIG_SCHED_WALT
		cpumask_andnot(lowest_mask, lowest_mask,
			       cpu_isolated_mask);
#endif
		if (drop_nopreempts)
			drop_nopreempt_cpus(lowest_mask);

		/* Ensure the capacity of the CPUs fit the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
@@ -179,6 +196,8 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,

		return 1;
	}

#ifdef CONFIG_SCHED_WALT
	/*
	 * If we can't find any non-preemptible cpu's, retry so we can
	 * find the lowest priority target and avoid priority inversion.
@@ -187,6 +206,7 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
		drop_nopreempts = false;
		goto retry;
	}
#endif

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search