Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 07b4032c authored by Gregory Haskins, committed by Ingo Molnar
Browse files

sched: break out search for RT tasks



Isolate the search logic into a function so that it can be used later
in places other than find_lock_lowest_rq().

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e7693a36
Loading
Loading
Loading
Loading
+39 −27
Original line number Diff line number Diff line
@@ -263,25 +263,21 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *this_rq)
static int find_lowest_rq(struct task_struct *task)
{
	struct rq *lowest_rq = NULL;
	int cpu;
	int tries;
	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
	struct rq *lowest_rq = NULL;

	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *cpu_mask) {
			struct rq *rq = &per_cpu(runqueues, cpu);
		struct rq *rq = cpu_rq(cpu);

			if (cpu == this_rq->cpu)
		if (cpu == rq->cpu)
			continue;

		/* We look for lowest RT prio or non-rt CPU */
@@ -297,20 +293,36 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
		}
	}

		if (!lowest_rq)
	return lowest_rq ? lowest_rq->cpu : -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int cpu;
	int tries;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if (cpu == -1)
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(this_rq, lowest_rq)) {
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != this_rq ||
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
				     task_running(this_rq, task) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;