Loading include/linux/init_task.h +1 −0 Original line number Diff line number Diff line Loading @@ -236,6 +236,7 @@ extern struct cred init_cred; .policy = SCHED_NORMAL, \ .cpus_allowed = CPU_MASK_ALL, \ .nr_cpus_allowed= NR_CPUS, \ .cpus_requested = CPU_MASK_ALL, \ .mm = NULL, \ .active_mm = &init_mm, \ .restart_block = { \ Loading include/linux/sched.h +1 −0 Original line number Diff line number Diff line Loading @@ -810,6 +810,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; cpumask_t cpus_requested; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; Loading kernel/cgroup/cpuset.c +16 −2 Original line number Diff line number Diff line Loading @@ -867,6 +867,20 @@ void rebuild_sched_domains(void) mutex_unlock(&cpuset_mutex); } static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask) { int ret; if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) { ret = set_cpus_allowed_ptr(p, &p->cpus_requested); if (!ret) return ret; } return set_cpus_allowed_ptr(p, new_mask); } /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed Loading @@ -882,7 +896,7 @@ static void update_tasks_cpumask(struct cpuset *cs) css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) set_cpus_allowed_ptr(task, cs->effective_cpus); update_cpus_allowed(cs, task, cs->effective_cpus); css_task_iter_end(&it); } Loading Loading @@ -1550,7 +1564,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * can_attach beforehand should guarantee that this doesn't * fail. 
TODO: have a better way to handle failure here */ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach)); cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, task); Loading kernel/sched/core.c +4 −0 Original line number Diff line number Diff line Loading @@ -4889,6 +4889,9 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) retval = -EINVAL; } if (!retval && !(p->flags & PF_KTHREAD)) cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask); out_free_new_mask: free_cpumask_var(new_mask); out_free_cpus_allowed: Loading Loading @@ -6318,6 +6321,7 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) BUG(); cpumask_copy(&current->cpus_requested, cpu_possible_mask); sched_init_granularity(); free_cpumask_var(non_isolated_cpus); Loading Loading
include/linux/init_task.h +1 −0 Original line number Diff line number Diff line Loading @@ -236,6 +236,7 @@ extern struct cred init_cred; .policy = SCHED_NORMAL, \ .cpus_allowed = CPU_MASK_ALL, \ .nr_cpus_allowed= NR_CPUS, \ .cpus_requested = CPU_MASK_ALL, \ .mm = NULL, \ .active_mm = &init_mm, \ .restart_block = { \ Loading
include/linux/sched.h +1 −0 Original line number Diff line number Diff line Loading @@ -810,6 +810,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; cpumask_t cpus_requested; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; Loading
kernel/cgroup/cpuset.c +16 −2 Original line number Diff line number Diff line Loading @@ -867,6 +867,20 @@ void rebuild_sched_domains(void) mutex_unlock(&cpuset_mutex); } static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask) { int ret; if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) { ret = set_cpus_allowed_ptr(p, &p->cpus_requested); if (!ret) return ret; } return set_cpus_allowed_ptr(p, new_mask); } /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed Loading @@ -882,7 +896,7 @@ static void update_tasks_cpumask(struct cpuset *cs) css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) set_cpus_allowed_ptr(task, cs->effective_cpus); update_cpus_allowed(cs, task, cs->effective_cpus); css_task_iter_end(&it); } Loading Loading @@ -1550,7 +1564,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * can_attach beforehand should guarantee that this doesn't * fail. TODO: have a better way to handle failure here */ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach)); cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, task); Loading
kernel/sched/core.c +4 −0 Original line number Diff line number Diff line Loading @@ -4889,6 +4889,9 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) retval = -EINVAL; } if (!retval && !(p->flags & PF_KTHREAD)) cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask); out_free_new_mask: free_cpumask_var(new_mask); out_free_cpus_allowed: Loading Loading @@ -6318,6 +6321,7 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) BUG(); cpumask_copy(&current->cpus_requested, cpu_possible_mask); sched_init_granularity(); free_cpumask_var(non_isolated_cpus); Loading