Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b34662c authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa Committed by Gerrit - the friendly Code Review server
Browse files

sched: Ensure proper task migration when a CPU is isolated



migrate_tasks() migrates all tasks of a CPU by using pick_next_task().
This works in the hotplug case as we force migrate every single task
allowing pick_next_task() to return a new task on every loop iteration.
In the case of isolation, however, task migration is not guaranteed
which causes pick_next_task() to keep returning the same task over and
over again until we terminate the loop without having migrated all the
tasks that were supposed to be migrated.

Fix the above problem by temporarily dequeuing tasks that are pinned
and marking them with TASK_ON_RQ_MIGRATING. This not only allows
pick_next_task() to properly walk the runqueue but also prevents any
migrations or changes in affinity for the dequeued tasks. Once we are
done with migrating all possible tasks, we re-enqueue all the dequeued
tasks.

While at it, ensure consistent ordering between task de-activation and
setting the TASK_ON_RQ_MIGRATING flag across all scheduling classes.

Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[markivx: Fix minor comment conflict]
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>

Change-Id: Ia6f8aea264e441ff9a8e3ce85828ac2d7a9a1781
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 9342b762
Loading
Loading
Loading
Loading
+39 −6
Original line number Original line Diff line number Diff line
@@ -5572,6 +5572,37 @@ static struct task_struct fake_task = {
	.sched_class = &fake_sched_class,
	.sched_class = &fake_sched_class,
};
};


/*
 * Remove a task from the runqueue and pretend that it's migrating. This
 * should prevent migrations for the detached task and disallow further
 * changes to tsk_cpus_allowed.
 *
 * Caller must hold rq->lock. The dequeued task is parked on @tasks so it
 * can be re-enqueued later (see attach_tasks()).
 */
static void
detach_one_task(struct task_struct *p, struct rq *rq, struct list_head *tasks)
{
	lockdep_assert_held(&rq->lock);

	/*
	 * Set TASK_ON_RQ_MIGRATING *before* dequeueing: while the task is
	 * in this state, migrations and affinity changes are disallowed,
	 * and pick_next_task() will no longer keep returning it.
	 */
	p->on_rq = TASK_ON_RQ_MIGRATING;
	deactivate_task(rq, p, 0);
	list_add(&p->se.group_node, tasks);
}

/*
 * Re-enqueue every task previously parked by detach_one_task() and drop
 * the temporary TASK_ON_RQ_MIGRATING state. Caller must hold rq->lock.
 */
static void attach_tasks(struct list_head *tasks, struct rq *rq)
{
	lockdep_assert_held(&rq->lock);

	for (;;) {
		struct task_struct *task;

		if (list_empty(tasks))
			break;

		task = list_first_entry(tasks, struct task_struct,
					se.group_node);
		list_del_init(&task->se.group_node);

		/* Detached tasks must come back to the rq they left. */
		BUG_ON(task_rq(task) != rq);

		/* Enqueue first, then restore the normal queued state. */
		activate_task(rq, task, 0);
		task->on_rq = TASK_ON_RQ_QUEUED;
	}
}

/*
/*
 * Migrate all tasks (not pinned if pinned argument say so) from the rq,
 * Migrate all tasks (not pinned if pinned argument say so) from the rq,
 * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq().
 * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq().
@@ -5589,6 +5620,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
	struct pin_cookie cookie;
	struct pin_cookie cookie;
	int dest_cpu;
	int dest_cpu;
	unsigned int num_pinned_kthreads = 1; /* this thread */
	unsigned int num_pinned_kthreads = 1; /* this thread */
	LIST_HEAD(tasks);
	cpumask_t avail_cpus;
	cpumask_t avail_cpus;


	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
@@ -5613,12 +5645,10 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,


	for (;;) {
	for (;;) {
		/*
		/*
		 * There's this thread running + pinned threads, bail when
		 * There's this thread running, bail when that's the only
		 * those are the only remaining threads:
		 * remaining thread.
		 */
		 */
		if ((migrate_pinned_tasks && rq->nr_running == 1) ||
		if (rq->nr_running == 1)
		   (!migrate_pinned_tasks &&
		    rq->nr_running <= num_pinned_kthreads))
			break;
			break;


		/*
		/*
@@ -5630,7 +5660,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,


		if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
		if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
			!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
			!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
			lockdep_unpin_lock(&rq->lock, cookie);
			detach_one_task(next, rq, &tasks);
			num_pinned_kthreads += 1;
			num_pinned_kthreads += 1;
			continue;
			continue;
		}
		}
@@ -5675,6 +5705,9 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
	}
	}


	rq->stop = stop;
	rq->stop = stop;

	if (num_pinned_kthreads > 1)
		attach_tasks(&tasks, rq);
}
}


void set_rq_online(struct rq *rq);
void set_rq_online(struct rq *rq);