kernel/cgroup/cpuset.c  +27 −14

--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -105,6 +105,7 @@ struct cpuset {
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;
 	nodemask_t mems_allowed;
 
 	/* effective CPUs and Memory Nodes allow to tasks */
@@ -432,7 +433,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -469,8 +470,13 @@ static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 		goto free_two;
 
+	if (cs && !zalloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto free_three;
+
 	return 0;
 
+free_three:
+	free_cpumask_var(*pmask3);
 free_two:
 	free_cpumask_var(*pmask2);
 free_one:
@@ -487,6 +493,7 @@ static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 {
 	if (cs) {
 		free_cpumask_var(cs->cpus_allowed);
+		free_cpumask_var(cs->cpus_requested);
 		free_cpumask_var(cs->effective_cpus);
 		free_cpumask_var(cs->subparts_cpus);
 	}
@@ -515,6 +522,7 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 	}
 
 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
 }
@@ -583,7 +591,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -1017,7 +1025,7 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
 #ifdef CONFIG_SCHED_WALT
 	int ret;
 
-	if (cpumask_subset(&p->wts.cpus_requested, cs->cpus_allowed)) {
+	if (cpumask_subset(&p->wts.cpus_requested, cs->cpus_requested)) {
 		ret = set_cpus_allowed_ptr(p, &p->wts.cpus_requested);
 		if (!ret)
 			return ret;
@@ -1063,10 +1071,10 @@ static void compute_effective_cpumask(struct cpumask *new_cpus,
 	if (parent->nr_subparts_cpus) {
 		cpumask_or(new_cpus, parent->effective_cpus,
 			   parent->subparts_cpus);
-		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
+		cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
 	} else {
-		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
+		cpumask_and(new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
 	}
 }
@@ -1489,25 +1497,26 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		return -EACCES;
 
 	/*
-	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
+	 * An empty cpus_requested is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing.  The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpumask_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_requested);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed, top_cpuset.cpus_allowed))
+		if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
 			return -EINVAL;
 	}
 
+	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
+
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;
 
 	retval = validate_change(cs, trialcs);
@@ -1535,6 +1544,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
 	/*
 	 * Make sure that subparts_cpus is a subset of cpus_allowed.
@@ -2416,7 +2426,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -2785,6 +2795,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2901,8 +2912,10 @@ int __init cpuset_init(void)
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));
 
 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
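Taken together, the patch keeps the user-supplied mask in cpus_requested (validated only against cpu_present_mask) and exposes it through cpuset.cpus, while cpus_allowed stays clipped to cpu_active_mask for scheduling. The snippet below is a minimal userspace sketch of how that behaviour could be checked on a cgroup-v1 cpuset hierarchy; the mount point /sys/fs/cgroup/cpuset, the group name "example", and the write_file() helper are assumptions for illustration, not part of the patch.

/*
 * Hypothetical userspace check: write a CPU list to a cpuset and read it
 * back. With this change, reading cpuset.cpus reports the requested mask
 * even if some of the requested CPUs are currently offline.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Assumed group, created beforehand with mkdir(2) under the mount. */
	const char *cpus = "/sys/fs/cgroup/cpuset/example/cpuset.cpus";
	char buf[256] = "";
	int fd;

	/* Request CPUs 0-7; some of them may be offline at this point. */
	if (write_file(cpus, "0-7"))
		perror("write cpuset.cpus");

	/* Read back: expected to show the requested list, e.g. "0-7". */
	fd = open(cpus, O_RDONLY);
	if (fd >= 0) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);

		if (n > 0)
			buf[n] = '\0';
		close(fd);
	}
	printf("cpuset.cpus: %s", buf);
	return 0;
}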