Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 433e9848 authored by John Dias, committed by Satya Durga Srinivasu Prabhala
Browse files

sched: avoid scheduling RT threads on cores currently handling softirqs

Bug: 31501544
Change-Id: I99dd7aaa12c11270b28dbabea484bcc8fb8ba0c1
Git-commit: 080ea011fd9f47315e1fc53185872ef813b59d00
Git-repo: https://android.googlesource.com/kernel/msm


[pkondeti@codeaurora.org: resolved minor merge conflicts and fixed
checkpatch warnings]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent b31361a5
Loading
Loading
Loading
Loading
+35 −2
Original line number Diff line number Diff line
@@ -27,6 +27,8 @@
 *  of the License.
 */

#include "sched.h"

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
@@ -50,6 +52,27 @@ static int convert_prio(int prio)
	return cpupri;
}

/**
 * drop_nopreempt_cpus - prune CPUs that are likely to stay
 *			 non-preemptible for a while
 * @lowest_mask: candidate CPU mask to filter in place (non-NULL)
 *
 * Walks every CPU still set in @lowest_mask and clears those whose
 * current task is flagged by task_may_not_preempt() (e.g. running in
 * softirq context or being the per-cpu ksoftirqd thread).
 */
static void
drop_nopreempt_cpus(struct cpumask *lowest_mask)
{
	unsigned int cpu;

	for (cpu = cpumask_first(lowest_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, lowest_mask)) {
		/* Unlocked read of the remote runqueue's current task. */
		struct task_struct *curr = READ_ONCE(cpu_rq(cpu)->curr);

		if (task_may_not_preempt(curr, cpu))
			cpumask_clear_cpu(cpu, lowest_mask);
	}
}

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
@@ -70,9 +93,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);
	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

retry:
	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;
@@ -108,7 +133,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			if (drop_nopreempts)
				drop_nopreempt_cpus(lowest_mask);
			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
@@ -123,7 +149,14 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,

		return 1;
	}

	/*
	 * If we can't find any non-preemptible cpu's, retry so we can
	 * find the lowest priority target and avoid priority inversion.
	 */
	if (drop_nopreempts) {
		drop_nopreempts = false;
		goto retry;
	}
	return 0;
}

+36 −6
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@

#include "sched.h"

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq_work.h>

@@ -1450,12 +1451,27 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

/*
 * Return whether the task on the given cpu is currently non-preemptible
 * while handling a softirq or is likely to block preemptions soon because
 * it is a ksoftirq thread.
 */
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
	       task == cpu_ksoftirqd;
}

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
		  int sibling_count_hint)
{
	struct task_struct *curr;
	struct rq *rq;
	bool may_not_preempt;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1467,7 +1483,17 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * If the current task on @p's runqueue is a softirq task,
	 * it may run without preemption for a time that is
	 * ill-suited for a waiting RT task. Therefore, try to
	 * wake this RT task on another runqueue.
	 *
	 * Also, if the current task on @p's runqueue is an RT task, then
	 * it may run without preemption for a time that is
	 * ill-suited for a waiting RT task. Therefore, try to
	 * wake this RT task on another runqueue.
	 *
	 * Also, if the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
@@ -1488,18 +1514,22 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (energy_aware() ||
	    (curr && unlikely(rt_task(curr)) &&
	may_not_preempt = task_may_not_preempt(curr, cpu);
	if (energy_aware() || may_not_preempt ||
	    (unlikely(rt_task(curr)) &&
	     (curr->nr_cpus_allowed < 2 ||
	      curr->prio <= p->prio))) {
		int target = find_lowest_rq(p);

		/*
		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 * If cpu is non-preemptible, prefer remote cpu
		 * even if it's running a higher-prio task.
		 * Otherwise: Don't bother moving it if the
		 * destination CPU is not running a lower priority task.
		 */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
		   (may_not_preempt ||
		    p->prio < cpu_rq(target)->rt.highest_prio.curr))
			cpu = target;
	}
	rcu_read_unlock();
+5 −0
Original line number Diff line number Diff line
@@ -2279,6 +2279,11 @@ extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

/*
 * task_may_not_preempt - check whether the task currently running on a
 * CPU is (or is about to become) non-preemptible, e.g. because it is
 * handling softirqs or is the per-cpu ksoftirqd thread.
 */
extern bool task_may_not_preempt(struct task_struct *task, int cpu);

#else /* CONFIG_SMP */

/*