
Commit 96f874e2 authored by Rusty Russell, committed by Ingo Molnar

sched: convert remaining old-style cpumask operators



Impact: Trivial API conversion

  NR_CPUS -> nr_cpu_ids
  cpumask_t -> struct cpumask
  sizeof(cpumask_t) -> cpumask_size()
  cpumask_a = cpumask_b -> cpumask_copy(&cpumask_a, &cpumask_b)

  cpu_set() -> cpumask_set_cpu()
  first_cpu() -> cpumask_first()
  cpumask_of_cpu() -> cpumask_of()
  cpus_* -> cpumask_*
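
For reference, a minimal before/after sketch of a call site (do_something() is
just a stand-in, not code from this patch):

    /* old style: value-based ops on a fixed, NR_CPUS-sized cpumask_t */
    cpumask_t mask;
    cpus_clear(mask);
    cpu_set(cpu, mask);
    if (cpu_isset(cpu, p->cpus_allowed))
            do_something();

    /* new style: pointer-based ops on struct cpumask, sized by nr_cpu_ids */
    cpumask_var_t mask;
    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
    cpumask_clear(mask);
    cpumask_set_cpu(cpu, mask);
    if (cpumask_test_cpu(cpu, &p->cpus_allowed))
            do_something();
    free_cpumask_var(mask);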

There are some FIXMEs where we need all archs to complete infrastructure
(patches have been sent):

  cpu_coregroup_map -> cpu_coregroup_mask
  node_to_cpumask* -> cpumask_of_node
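
Once those helpers land, the FIXME sites below collapse to something like this
(a sketch of the intended end state, not code in this patch):

    /* interim form, as left in the diff below */
    *mask = cpu_coregroup_map(cpu);
    cpus_and(*mask, *mask, *cpu_map);
    group = first_cpu(*mask);

    /* intended form: the helper returns const struct cpumask *, no copy */
    cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
    group = cpumask_first(mask);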

There is also one FIXME where we pass an array of cpumasks to
partition_sched_domains(): this implies knowing the definition of
'struct cpumask' and the size of a cpumask.  This will be fixed in a
future patch.
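
Per the FIXME left above partition_sched_domains() in the diff, the eventual
prototype will probably take an array of cpumask pointers instead, roughly:

    /* sketch only: callers then never need sizeof(struct cpumask) */
    void partition_sched_domains(int ndoms_new, struct cpumask *doms_new[],
                                 struct sched_domain_attr *dattr_new);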

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0e3900e6
include/linux/sched.h: +8 −8
@@ -879,7 +879,7 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
				    struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);

@@ -888,7 +888,7 @@ extern int arch_reinit_sched_domains(void);
struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			struct sched_domain_attr *dattr_new)
{
}
@@ -970,7 +970,7 @@ struct sched_class {
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const cpumask_t *newmask);
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
@@ -1612,12 +1612,12 @@ extern cputime_t task_gtime(struct task_struct *p);

#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const cpumask_t *new_mask);
				const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const cpumask_t *new_mask)
				       const struct cpumask *new_mask)
{
	if (!cpu_isset(0, *new_mask))
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
@@ -2230,8 +2230,8 @@ __trace_special(void *__tr, void *__data,
}
#endif

extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern int sched_mc_power_savings, sched_smt_power_savings;

kernel/sched.c: +113 −99
@@ -2829,7 +2829,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	if (!cpu_isset(dest_cpu, p->cpus_allowed)
	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
	    || unlikely(!cpu_active(dest_cpu)))
		goto out;

@@ -2895,7 +2895,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpu_isset(this_cpu, p->cpus_allowed)) {
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.nr_failed_migrations_affine);
		return 0;
	}
@@ -3070,7 +3070,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   int *sd_idle, const cpumask_t *cpus, int *balance)
		   int *sd_idle, const struct cpumask *cpus, int *balance)
{
	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -3387,7 +3387,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 */
static struct rq *
find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
		   unsigned long imbalance, const cpumask_t *cpus)
		   unsigned long imbalance, const struct cpumask *cpus)
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
@@ -3396,7 +3396,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
	for_each_cpu(i, sched_group_cpus(group)) {
		unsigned long wl;

		if (!cpu_isset(i, *cpus))
		if (!cpumask_test_cpu(i, cpus))
			continue;

		rq = cpu_rq(i);
@@ -3426,7 +3426,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *balance, cpumask_t *cpus)
			int *balance, struct cpumask *cpus)
{
	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
	struct sched_group *group;
@@ -3434,7 +3434,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
	struct rq *busiest;
	unsigned long flags;

	cpus_setall(*cpus);
	cpumask_setall(cpus);

	/*
	 * When power savings policy is enabled for the parent domain, idle
@@ -3494,8 +3494,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(all_pinned)) {
			cpu_clear(cpu_of(busiest), *cpus);
			if (!cpus_empty(*cpus))
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus))
				goto redo;
			goto out_balanced;
		}
@@ -3512,7 +3512,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
			/* don't kick the migration_thread, if the curr
			 * task on busiest cpu can't be moved to this_cpu
			 */
			if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
			if (!cpumask_test_cpu(this_cpu,
					      &busiest->curr->cpus_allowed)) {
				spin_unlock_irqrestore(&busiest->lock, flags);
				all_pinned = 1;
				goto out_one_pinned;
@@ -3587,7 +3588,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 */
static int
load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
			cpumask_t *cpus)
			struct cpumask *cpus)
{
	struct sched_group *group;
	struct rq *busiest = NULL;
@@ -3596,7 +3597,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
	int sd_idle = 0;
	int all_pinned = 0;

	cpus_setall(*cpus);
	cpumask_setall(cpus);

	/*
	 * When power savings policy is enabled for the parent domain, idle
@@ -3640,8 +3641,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
		double_unlock_balance(this_rq, busiest);

		if (unlikely(all_pinned)) {
			cpu_clear(cpu_of(busiest), *cpus);
			if (!cpus_empty(*cpus))
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus))
				goto redo;
		}
	}
@@ -5376,7 +5377,7 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
	return retval;
}

long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
@@ -5445,13 +5446,13 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     cpumask_t *new_mask)
			     struct cpumask *new_mask)
{
	if (len < sizeof(cpumask_t)) {
		memset(new_mask, 0, sizeof(cpumask_t));
	} else if (len > sizeof(cpumask_t)) {
		len = sizeof(cpumask_t);
	}
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

@@ -5477,7 +5478,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
	return retval;
}

long sched_getaffinity(pid_t pid, cpumask_t *mask)
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;
@@ -5494,7 +5495,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
	if (retval)
		goto out_unlock;

	cpus_and(*mask, p->cpus_allowed, cpu_online_map);
	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);

out_unlock:
	read_unlock(&tasklist_lock);
@@ -5872,7 +5873,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
	idle->se.exec_start = sched_clock();

	idle->prio = idle->normal_prio = MAX_PRIO;
	idle->cpus_allowed = cpumask_of_cpu(cpu);
	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
	__set_task_cpu(idle, cpu);

	rq->curr = rq->idle = idle;
@@ -5956,7 +5957,7 @@ static inline void sched_init_granularity(void)
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	struct migration_req req;
	unsigned long flags;
@@ -5964,13 +5965,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
	int ret = 0;

	rq = task_rq_lock(p, &flags);
	if (!cpus_intersects(*new_mask, cpu_online_map)) {
	if (!cpumask_intersects(new_mask, cpu_online_mask)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
		     !cpus_equal(p->cpus_allowed, *new_mask))) {
		     !cpumask_equal(&p->cpus_allowed, new_mask))) {
		ret = -EINVAL;
		goto out;
	}
@@ -5978,12 +5979,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);
	else {
		p->cpus_allowed = *new_mask;
		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
		cpumask_copy(&p->cpus_allowed, new_mask);
		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpu_isset(task_cpu(p), *new_mask))
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
@@ -6028,7 +6029,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpu_isset(dest_cpu, p->cpus_allowed))
	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
		goto fail;

	on_rq = p->se.on_rq;
@@ -6629,13 +6630,13 @@ early_initcall(migration_init);
#ifdef CONFIG_SCHED_DEBUG

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  cpumask_t *groupmask)
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd));
	cpus_clear(*groupmask);
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

@@ -6936,24 +6937,25 @@ __setup("isolcpus=", isolated_cpu_setup);
/*
 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
 * to a function which identifies what group(along with sched group) a CPU
 * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
 * (due to the fact that we keep track of groups covered with a cpumask_t).
 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
 * (due to the fact that we keep track of groups covered with a struct cpumask).
 *
 * init_sched_build_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 */
static void
init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
			int (*group_fn)(int cpu, const cpumask_t *cpu_map,
init_sched_build_groups(const struct cpumask *span,
			const struct cpumask *cpu_map,
			int (*group_fn)(int cpu, const struct cpumask *cpu_map,
					struct sched_group **sg,
					cpumask_t *tmpmask),
			cpumask_t *covered, cpumask_t *tmpmask)
					struct cpumask *tmpmask),
			struct cpumask *covered, struct cpumask *tmpmask)
{
	struct sched_group *first = NULL, *last = NULL;
	int i;

	cpus_clear(*covered);
	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
@@ -6970,7 +6972,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
				continue;

			cpu_set(j, *covered);
			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}
		if (!first)
@@ -7035,9 +7037,10 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 * should be one that prevents unnecessary balancing, but also spreads tasks
 * out optimally.
 */
static void sched_domain_node_span(int node, cpumask_t *span)
static void sched_domain_node_span(int node, struct cpumask *span)
{
	nodemask_t used_nodes;
	/* FIXME: use cpumask_of_node() */
	node_to_cpumask_ptr(nodemask, node);
	int i;

@@ -7081,8 +7084,8 @@ static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);

static int
cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
		 cpumask_t *unused)
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
		 struct sched_group **sg, struct cpumask *unused)
{
	if (sg)
		*sg = &per_cpu(sched_group_cpus, cpu).sg;
@@ -7100,22 +7103,21 @@ static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);

#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
		  cpumask_t *mask)
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;

	*mask = per_cpu(cpu_sibling_map, cpu);
	cpus_and(*mask, *mask, *cpu_map);
	group = first_cpu(*mask);
	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
	group = cpumask_first(mask);
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
	return group;
}
#elif defined(CONFIG_SCHED_MC)
static int
cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
		  cpumask_t *unused)
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *unused)
{
	if (sg)
		*sg = &per_cpu(sched_group_core, cpu).sg;
@@ -7127,18 +7129,18 @@ static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);

static int
cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
		  cpumask_t *mask)
cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;
#ifdef CONFIG_SCHED_MC
	/* FIXME: Use cpu_coregroup_mask. */
	*mask = cpu_coregroup_map(cpu);
	cpus_and(*mask, *mask, *cpu_map);
	group = first_cpu(*mask);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	*mask = per_cpu(cpu_sibling_map, cpu);
	cpus_and(*mask, *mask, *cpu_map);
	group = first_cpu(*mask);
	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
	group = cpumask_first(mask);
#else
	group = cpu;
#endif
@@ -7159,14 +7161,16 @@ static struct sched_group ***sched_group_nodes_bycpu;
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);

static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
				 struct sched_group **sg, cpumask_t *nodemask)
static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
				 struct sched_group **sg,
				 struct cpumask *nodemask)
{
	int group;
	/* FIXME: use cpumask_of_node */
	node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));

	cpus_and(*nodemask, *pnodemask, *cpu_map);
	group = first_cpu(*nodemask);
	cpumask_and(nodemask, pnodemask, cpu_map);
	group = cpumask_first(nodemask);

	if (sg)
		*sg = &per_cpu(sched_group_allnodes, group).sg;
@@ -7202,7 +7206,8 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)

#ifdef CONFIG_NUMA
/* Free memory allocated for various sched_group structures */
static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
static void free_sched_groups(const struct cpumask *cpu_map,
			      struct cpumask *nodemask)
{
	int cpu, i;

@@ -7215,10 +7220,11 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)

		for (i = 0; i < nr_node_ids; i++) {
			struct sched_group *oldsg, *sg = sched_group_nodes[i];
			/* FIXME: Use cpumask_of_node */
			node_to_cpumask_ptr(pnodemask, i);

			cpus_and(*nodemask, *pnodemask, *cpu_map);
			if (cpus_empty(*nodemask))
			if (cpumask_empty(nodemask))
				continue;

			if (sg == NULL)
@@ -7236,7 +7242,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
	}
}
#else /* !CONFIG_NUMA */
static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
static void free_sched_groups(const struct cpumask *cpu_map,
			      struct cpumask *nodemask)
{
}
#endif /* CONFIG_NUMA */
@@ -7366,7 +7373,7 @@ static void set_domain_attribute(struct sched_domain *sd,
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int __build_sched_domains(const cpumask_t *cpu_map,
static int __build_sched_domains(const struct cpumask *cpu_map,
				 struct sched_domain_attr *attr)
{
	int i, err = -ENOMEM;
@@ -7416,7 +7423,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
	}

#ifdef CONFIG_NUMA
	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
#endif

	/*
@@ -7425,12 +7432,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd = NULL, *p;

		/* FIXME: use cpumask_of_node */
		*nodemask = node_to_cpumask(cpu_to_node(i));
		cpus_and(*nodemask, *nodemask, *cpu_map);

#ifdef CONFIG_NUMA
		if (cpus_weight(*cpu_map) >
				SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
		if (cpumask_weight(cpu_map) >
				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
			sd = &per_cpu(allnodes_domains, i);
			SD_INIT(sd, ALLNODES);
			set_domain_attribute(sd, attr);
@@ -7491,9 +7499,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#ifdef CONFIG_SCHED_SMT
	/* Set up CPU (sibling) groups */
	for_each_cpu(i, cpu_map) {
		*this_sibling_map = per_cpu(cpu_sibling_map, i);
		cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
		if (i != first_cpu(*this_sibling_map))
		cpumask_and(this_sibling_map,
			    &per_cpu(cpu_sibling_map, i), cpu_map);
		if (i != cpumask_first(this_sibling_map))
			continue;

		init_sched_build_groups(this_sibling_map, cpu_map,
@@ -7505,9 +7513,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#ifdef CONFIG_SCHED_MC
	/* Set up multi-core groups */
	for_each_cpu(i, cpu_map) {
		/* FIXME: Use cpu_coregroup_mask */
		*this_core_map = cpu_coregroup_map(i);
		cpus_and(*this_core_map, *this_core_map, *cpu_map);
		if (i != first_cpu(*this_core_map))
		if (i != cpumask_first(this_core_map))
			continue;

		init_sched_build_groups(this_core_map, cpu_map,
@@ -7518,9 +7527,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

	/* Set up physical groups */
	for (i = 0; i < nr_node_ids; i++) {
		/* FIXME: Use cpumask_of_node */
		*nodemask = node_to_cpumask(i);
		cpus_and(*nodemask, *nodemask, *cpu_map);
		if (cpus_empty(*nodemask))
		if (cpumask_empty(nodemask))
			continue;

		init_sched_build_groups(nodemask, cpu_map,
@@ -7541,17 +7551,18 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
		struct sched_group *sg, *prev;
		int j;

		/* FIXME: Use cpumask_of_node */
		*nodemask = node_to_cpumask(i);
		cpus_clear(*covered);
		cpumask_clear(covered);

		cpus_and(*nodemask, *nodemask, *cpu_map);
		if (cpus_empty(*nodemask)) {
		if (cpumask_empty(nodemask)) {
			sched_group_nodes[i] = NULL;
			continue;
		}

		sched_domain_node_span(i, domainspan);
		cpus_and(*domainspan, *domainspan, *cpu_map);
		cpumask_and(domainspan, domainspan, cpu_map);

		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
				  GFP_KERNEL, i);
@@ -7570,21 +7581,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
		sg->__cpu_power = 0;
		cpumask_copy(sched_group_cpus(sg), nodemask);
		sg->next = sg;
		cpus_or(*covered, *covered, *nodemask);
		cpumask_or(covered, covered, nodemask);
		prev = sg;

		for (j = 0; j < nr_node_ids; j++) {
			int n = (i + j) % nr_node_ids;
			/* FIXME: Use cpumask_of_node */
			node_to_cpumask_ptr(pnodemask, n);

			cpus_complement(*notcovered, *covered);
			cpus_and(*tmpmask, *notcovered, *cpu_map);
			cpus_and(*tmpmask, *tmpmask, *domainspan);
			if (cpus_empty(*tmpmask))
			cpumask_complement(notcovered, covered);
			cpumask_and(tmpmask, notcovered, cpu_map);
			cpumask_and(tmpmask, tmpmask, domainspan);
			if (cpumask_empty(tmpmask))
				break;

			cpus_and(*tmpmask, *tmpmask, *pnodemask);
			if (cpus_empty(*tmpmask))
			cpumask_and(tmpmask, tmpmask, pnodemask);
			if (cpumask_empty(tmpmask))
				continue;

			sg = kmalloc_node(sizeof(struct sched_group) +
@@ -7598,7 +7610,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
			sg->__cpu_power = 0;
			cpumask_copy(sched_group_cpus(sg), tmpmask);
			sg->next = prev->next;
			cpus_or(*covered, *covered, *tmpmask);
			cpumask_or(covered, covered, tmpmask);
			prev->next = sg;
			prev = sg;
		}
@@ -7634,7 +7646,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
	if (sd_allnodes) {
		struct sched_group *sg;

		cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
		cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
								tmpmask);
		init_numa_sched_groups_power(sg);
	}
@@ -7690,12 +7702,12 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#endif
}

static int build_sched_domains(const cpumask_t *cpu_map)
static int build_sched_domains(const struct cpumask *cpu_map)
{
	return __build_sched_domains(cpu_map, NULL);
}

static cpumask_t *doms_cur;	/* current sched domains */
static struct cpumask *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attribues of custom domains in 'doms_cur' */
@@ -7716,13 +7728,13 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int arch_init_sched_domains(const cpumask_t *cpu_map)
static int arch_init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
	if (!doms_cur)
		doms_cur = fallback_doms;
	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
@@ -7733,8 +7745,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
	return err;
}

static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
				       cpumask_t *tmpmask)
static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
				       struct cpumask *tmpmask)
{
	free_sched_groups(cpu_map, tmpmask);
}
@@ -7743,15 +7755,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const cpumask_t *cpu_map)
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	cpumask_t tmpmask;
	/* Save because hotplug lock held. */
	static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
	int i;

	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	synchronize_sched();
	arch_destroy_sched_domains(cpu_map, &tmpmask);
	arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
}

/* handle null as "default" */
@@ -7776,7 +7789,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
 * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
@@ -7790,13 +7803,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
 * the single partition 'fallback_doms', it also forces the domains
 * to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_map.
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
/* FIXME: Change to struct cpumask *doms_new[] */
void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
@@ -7811,7 +7825,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n; j++) {
			if (cpus_equal(doms_cur[i], doms_new[j])
			if (cpumask_equal(&doms_cur[i], &doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
@@ -7831,7 +7845,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < ndoms_cur; j++) {
			if (cpus_equal(doms_new[i], doms_cur[j])
			if (cpumask_equal(&doms_new[i], &doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
kernel/sched_fair.c: +2 −2
@@ -1017,7 +1017,7 @@ static void yield_task_fair(struct rq *rq)
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 * Domains may include CPUs that are not usable for migration,
 * hence we need to mask them out (cpu_active_map)
 * hence we need to mask them out (cpu_active_mask)
 *
 * Returns the CPU we should wake onto.
 */
@@ -1244,7 +1244,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
		goto out;

	/*
kernel/sched_rt.c: +9 −9
@@ -923,7 +923,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

@@ -997,7 +997,7 @@ static int find_lowest_rq(struct task_struct *task)
	 * I guess we might want to change cpupri_find() to ignore those
	 * in the first place.
	 */
	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);

	/*
	 * At this point we have built a mask of cpus representing the
@@ -1007,7 +1007,7 @@ static int find_lowest_rq(struct task_struct *task)
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
@@ -1064,8 +1064,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

@@ -1315,9 +1315,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
				const struct cpumask *new_mask)
{
	int weight = cpus_weight(*new_mask);
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

@@ -1338,7 +1338,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
		update_rt_migration(rq);
	}

	p->cpus_allowed    = *new_mask;
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}