
Commit eb755805 authored by Peter Zijlstra, committed by Ingo Molnar

sched: extract walk_tg_tree()



Extract walk_tg_tree() and make it a little more generic so we can use it
in the schedulability test.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0b148fa0
+46 −33
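
The patch replaces the visitor signature void (*)(struct task_group *, int, struct sched_domain *) with int (*)(struct task_group *, void *): callers now pass an opaque data pointer, and a nonzero return from either callback aborts the walk and is propagated out of walk_tg_tree(). A minimal sketch of a caller against the new interface; tg_count() and count_task_groups() are hypothetical, not part of this patch:

	static int tg_count(struct task_group *tg, void *data)
	{
		int *count = data;

		(*count)++;
		return 0;	/* nonzero here would stop the walk */
	}

	static int count_task_groups(void)
	{
		int count = 0;

		/* tg_nop is the no-op visitor introduced by this patch */
		walk_tg_tree(tg_count, tg_nop, &count);
		return count;
	}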
@@ -1387,38 +1387,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
-#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-
-	return rq->avg_load_per_task;
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
  * Iterate the full tree, calling @down when first entering a node and @up when
  * leaving it for the final time.
  */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
+	int ret;
 
 	rcu_read_lock();
 	parent = &root_task_group;
 down:
-	(*down)(parent, cpu, sd);
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1426,14 +1412,42 @@ walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
 up:
 		continue;
 	}
-	(*up)(parent, cpu, sd);
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
+out_unlock:
 	rcu_read_unlock();
+
+	return ret;
 }
 
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
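
walk_tg_tree() deliberately avoids recursion: the down/up labels and gotos walk the group hierarchy iteratively, calling @down on first entry to a node and @up when leaving it for the final time, so kernel stack usage stays constant regardless of hierarchy depth. The same enter/leave pattern in a self-contained user-space form, using nested loops instead of the kernel's gotos; struct node, walk_tree() and the visitors below are illustrative only, not kernel code:

	#include <stdio.h>

	struct node {
		const char *name;
		struct node *parent;
		struct node *child;	/* first child */
		struct node *next;	/* next sibling */
	};

	typedef int (*visitor)(struct node *, void *);

	/*
	 * Call down() on first entry to each node and up() when leaving it
	 * for the final time; a nonzero return from either visitor stops
	 * the walk, mirroring the walk_tg_tree() contract.
	 */
	static int walk_tree(struct node *root, visitor down, visitor up, void *data)
	{
		struct node *n = root;
		int ret;

		for (;;) {
			ret = down(n, data);
			if (ret)
				return ret;
			if (n->child) {		/* descend to first child */
				n = n->child;
				continue;
			}
			for (;;) {		/* no children left: leave nodes */
				ret = up(n, data);
				if (ret)
					return ret;
				if (n == root)
					return 0;
				if (n->next) {	/* sibling pending: enter it next */
					n = n->next;
					break;
				}
				n = n->parent;	/* last sibling: leave parent too */
			}
		}
	}

	static int print_enter(struct node *n, void *data)
	{
		(void)data;
		printf("enter %s\n", n->name);
		return 0;
	}

	static int print_leave(struct node *n, void *data)
	{
		(void)data;
		printf("leave %s\n", n->name);
		return 0;
	}

	int main(void)
	{
		struct node a1   = { "A1",   NULL, NULL, NULL };
		struct node b    = { "B",    NULL, NULL, NULL };
		struct node a    = { "A",    NULL, &a1,  &b   };
		struct node root = { "root", NULL, &a,   NULL };

		a.parent = b.parent = &root;
		a1.parent = &a;

		/* prints enter/leave pairs in depth-first order */
		return walk_tree(&root, print_enter, print_leave, NULL);
	}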