
Commit c6c4927b authored by Rusty Russell, committed by Ingo Molnar

sched: convert struct root_domain to cpumask_var_t.



Impact: (future) size reduction for large NR_CPUS.
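
(For a sense of scale, assuming a 64-bit build: with CONFIG_NR_CPUS=4096 an embedded struct cpumask is 4096/8 = 512 bytes, so the three masks in struct root_domain cost roughly 1.5 KB per instance, whereas with CONFIG_CPUMASK_OFFSTACK they shrink to three pointers plus allocations sized for the runtime nr_cpu_ids.)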

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space for small nr_cpu_ids but big CONFIG_NR_CPUS.  cpumask_var_t
is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.
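
For context, cpumask_var_t is defined roughly like this in <linux/cpumask.h> (a simplified sketch, not part of this patch):

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;   /* a pointer: the mask is allocated at run time */
#else
typedef struct cpumask cpumask_var_t[1]; /* a one-element array: the mask lives inline */
#endif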

def_root_domain is static, and so its masks are initialized with
alloc_bootmem_cpumask_var.  After that, alloc_cpumask_var is used.
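
A rough sketch of how the two allocators behave (simplified from the cpumask API; when CONFIG_CPUMASK_OFFSTACK is not set they degenerate to no-ops, which is why only the runtime path needs error handling):

#ifdef CONFIG_CPUMASK_OFFSTACK
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);  /* may fail, result must be checked */
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);       /* early boot, cannot fail */
void free_cpumask_var(cpumask_var_t mask);
#else
/* the mask is embedded in cpumask_var_t itself, so there is nothing to allocate or free */
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { return true; }
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { }
static inline void free_cpumask_var(cpumask_var_t mask) { }
#endif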

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6a7b3dc3
kernel/sched.c  +51 −18
@@ -487,14 +487,14 @@ struct rt_rq {
 */
struct root_domain {
	atomic_t refcount;
-	cpumask_t span;
-	cpumask_t online;
+	cpumask_var_t span;
+	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
-	cpumask_t rto_mask;
+	cpumask_var_t rto_mask;
	atomic_t rto_count;
#ifdef CONFIG_SMP
	struct cpupri cpupri;
@@ -6444,7 +6444,7 @@ static void set_rq_online(struct rq *rq)
	if (!rq->online) {
		const struct sched_class *class;

-		cpu_set(rq->cpu, rq->rd->online);
+		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
@@ -6464,7 +6464,7 @@ static void set_rq_offline(struct rq *rq)
				class->rq_offline(rq);
		}

-		cpu_clear(rq->cpu, rq->rd->online);
+		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}
@@ -6505,7 +6505,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
		rq = cpu_rq(cpu);
		spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

			set_rq_online(rq);
		}
@@ -6567,7 +6567,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
		rq = cpu_rq(cpu);
		spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		spin_unlock_irqrestore(&rq->lock, flags);
@@ -6768,6 +6768,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
	return 1;
}

+static void free_rootdomain(struct root_domain *rd)
+{
+	free_cpumask_var(rd->rto_mask);
+	free_cpumask_var(rd->online);
+	free_cpumask_var(rd->span);
+	kfree(rd);
+}
+
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	unsigned long flags;
@@ -6777,38 +6785,60 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
	if (rq->rd) {
		struct root_domain *old_rd = rq->rd;

-		if (cpu_isset(rq->cpu, old_rd->online))
+		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

-		cpu_clear(rq->cpu, old_rd->span);
+		cpumask_clear_cpu(rq->cpu, old_rd->span);

		if (atomic_dec_and_test(&old_rd->refcount))
-			kfree(old_rd);
+			free_rootdomain(old_rd);
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

-	cpu_set(rq->cpu, rd->span);
-	if (cpu_isset(rq->cpu, cpu_online_map))
+	cpumask_set_cpu(rq->cpu, rd->span);
+	if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
		set_rq_online(rq);

	spin_unlock_irqrestore(&rq->lock, flags);
}

-static void init_rootdomain(struct root_domain *rd)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
{
	memset(rd, 0, sizeof(*rd));

-	cpus_clear(rd->span);
-	cpus_clear(rd->online);
+	if (bootmem) {
+		alloc_bootmem_cpumask_var(&def_root_domain.span);
+		alloc_bootmem_cpumask_var(&def_root_domain.online);
+		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
+		cpupri_init(&rd->cpupri);
+		return 0;
+	}
+
+	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+		goto free_rd;
+	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+		goto free_span;
+	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+		goto free_online;

	cpupri_init(&rd->cpupri);
+	return 0;
+
+free_online:
+	free_cpumask_var(rd->online);
+free_span:
+	free_cpumask_var(rd->span);
+free_rd:
+	kfree(rd);
+	return -ENOMEM;
}

static void init_defrootdomain(void)
{
-	init_rootdomain(&def_root_domain);
+	init_rootdomain(&def_root_domain, true);

	atomic_set(&def_root_domain.refcount, 1);
}

@@ -6820,7 +6850,10 @@ static struct root_domain *alloc_rootdomain(void)
	if (!rd)
		return NULL;

-	init_rootdomain(rd);
+	if (init_rootdomain(rd, false) != 0) {
+		kfree(rd);
+		return NULL;
+	}

	return rd;
}
@@ -7632,7 +7665,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#ifdef CONFIG_NUMA
error:
	free_sched_groups(cpu_map, tmpmask);
-	kfree(rd);
+	free_rootdomain(rd);
	goto free_tmpmask;
#endif
}
kernel/sched_rt.c  +13 −13
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
	if (!rq->online)
		return;

-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
}

#ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
{
-	return cpu_online_map;
+	return cpu_online_mask;
}
#endif

@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
	return rt_rq->rt_throttled;
}

-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
{
-	return cpu_online_map;
+	return cpu_online_mask;
}

static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
	int i, weight, more = 0;
	u64 rt_period;

-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
		/*
		 * Greedy reclaim, take back as much as we can.
		 */
-		for_each_cpu_mask(i, rd->span) {
+		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1181,7 +1181,7 @@ static int pull_rt_task(struct rq *this_rq)

	next = pick_next_task_rt(this_rq);

-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;