Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 013ff13a authored by Pavankumar Kondeti's avatar Pavankumar Kondeti
Browse files

cpuset: Restore tasks affinity while moving across cpusets



When tasks move across cpusets, the current affinity settings
are lost. Cache the task affinity and restore it during cpuset
migration. The restoring happens only when the cached affinity
is a subset of the current cpuset settings.

Change-Id: I6c2ec1d5e3d994e176926d94b9e0cc92418020cc
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 2758920d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -236,6 +236,7 @@ extern struct cred init_cred;
	.policy		= SCHED_NORMAL,					\
	.cpus_allowed	= CPU_MASK_ALL,					\
	.nr_cpus_allowed= NR_CPUS,					\
	.cpus_requested	= CPU_MASK_ALL,					\
	.mm		= NULL,						\
	.active_mm	= &init_mm,					\
	.restart_block = {						\
+1 −0
Original line number Diff line number Diff line
@@ -810,6 +810,7 @@ struct task_struct {
	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;
	cpumask_t			cpus_requested;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
+16 −2
Original line number Diff line number Diff line
@@ -867,6 +867,20 @@ void rebuild_sched_domains(void)
	mutex_unlock(&cpuset_mutex);
}

/*
 * Set @p's allowed CPUs when it moves into (or within) cpuset @cs.
 *
 * If the task's previously requested affinity (p->cpus_requested) is
 * still contained in the cpuset's requested mask, try to restore it so
 * a task-level sched_setaffinity() survives cpuset migration.  On a
 * mismatch — or if that restore fails — fall back to @new_mask, the
 * cpuset-wide effective mask.  Returns 0 on success or the error from
 * set_cpus_allowed_ptr().
 */
static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
			       const struct cpumask *new_mask)
{
	if (cpumask_subset(&p->cpus_requested, cs->cpus_requested) &&
	    !set_cpus_allowed_ptr(p, &p->cpus_requested))
		return 0;

	return set_cpus_allowed_ptr(p, new_mask);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -882,7 +896,7 @@ static void update_tasks_cpumask(struct cpuset *cs)

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
		update_cpus_allowed(cs, task, cs->effective_cpus);
	css_task_iter_end(&it);
}

@@ -1550,7 +1564,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
		 * can_attach beforehand should guarantee that this doesn't
		 * fail.  TODO: have a better way to handle failure here
		 */
		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
		WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));

		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
		cpuset_update_task_spread_flag(cs, task);
+4 −0
Original line number Diff line number Diff line
@@ -4889,6 +4889,9 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
		retval = -EINVAL;
	}

	if (!retval && !(p->flags & PF_KTHREAD))
		cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask);

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
@@ -6318,6 +6321,7 @@ void __init sched_init_smp(void)
	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	cpumask_copy(&current->cpus_requested, cpu_possible_mask);
	sched_init_granularity();
	free_cpumask_var(non_isolated_cpus);