Loading include/linux/sched.h +1 −0 Original line number Diff line number Diff line Loading @@ -823,6 +823,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; cpumask_t cpus_requested; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; Loading init/init_task.c +1 −0 Original line number Diff line number Diff line Loading @@ -73,6 +73,7 @@ struct task_struct init_task .policy = SCHED_NORMAL, .cpus_allowed = CPU_MASK_ALL, .nr_cpus_allowed= NR_CPUS, .cpus_requested = CPU_MASK_ALL, .mm = NULL, .active_mm = &init_mm, .restart_block = { Loading kernel/cgroup/cpuset.c +16 −2 Original line number Diff line number Diff line Loading @@ -863,6 +863,20 @@ void rebuild_sched_domains(void) mutex_unlock(&cpuset_mutex); } static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask) { int ret; if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) { ret = set_cpus_allowed_ptr(p, &p->cpus_requested); if (!ret) return ret; } return set_cpus_allowed_ptr(p, new_mask); } /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed Loading @@ -878,7 +892,7 @@ static void update_tasks_cpumask(struct cpuset *cs) css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) set_cpus_allowed_ptr(task, cs->effective_cpus); update_cpus_allowed(cs, task, cs->effective_cpus); css_task_iter_end(&it); } Loading Loading @@ -1546,7 +1560,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * can_attach beforehand should guarantee that this doesn't * fail. 
TODO: have a better way to handle failure here */ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach)); cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, task); Loading kernel/sched/core.c +4 −0 Original line number Diff line number Diff line Loading @@ -5005,6 +5005,9 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) retval = -EINVAL; } if (!retval && !(p->flags & PF_KTHREAD)) cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask); out_free_new_mask: free_cpumask_var(new_mask); out_free_cpus_allowed: Loading Loading @@ -6434,6 +6437,7 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) BUG(); cpumask_copy(&current->cpus_requested, cpu_possible_mask); sched_init_granularity(); init_sched_rt_class(); Loading Loading
include/linux/sched.h +1 −0 Original line number Diff line number Diff line Loading @@ -823,6 +823,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; cpumask_t cpus_requested; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; Loading
init/init_task.c +1 −0 Original line number Diff line number Diff line Loading @@ -73,6 +73,7 @@ struct task_struct init_task .policy = SCHED_NORMAL, .cpus_allowed = CPU_MASK_ALL, .nr_cpus_allowed= NR_CPUS, .cpus_requested = CPU_MASK_ALL, .mm = NULL, .active_mm = &init_mm, .restart_block = { Loading
kernel/cgroup/cpuset.c +16 −2 Original line number Diff line number Diff line Loading @@ -863,6 +863,20 @@ void rebuild_sched_domains(void) mutex_unlock(&cpuset_mutex); } static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask) { int ret; if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) { ret = set_cpus_allowed_ptr(p, &p->cpus_requested); if (!ret) return ret; } return set_cpus_allowed_ptr(p, new_mask); } /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed Loading @@ -878,7 +892,7 @@ static void update_tasks_cpumask(struct cpuset *cs) css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) set_cpus_allowed_ptr(task, cs->effective_cpus); update_cpus_allowed(cs, task, cs->effective_cpus); css_task_iter_end(&it); } Loading Loading @@ -1546,7 +1560,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * can_attach beforehand should guarantee that this doesn't * fail. TODO: have a better way to handle failure here */ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach)); cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, task); Loading
kernel/sched/core.c +4 −0 Original line number Diff line number Diff line Loading @@ -5005,6 +5005,9 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) retval = -EINVAL; } if (!retval && !(p->flags & PF_KTHREAD)) cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask); out_free_new_mask: free_cpumask_var(new_mask); out_free_cpus_allowed: Loading Loading @@ -6434,6 +6437,7 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) BUG(); cpumask_copy(&current->cpus_requested, cpu_possible_mask); sched_init_granularity(); init_sched_rt_class(); Loading