Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e201c56b authored by Quentin Perret
Browse files

FROMLIST: sched/fair: Clean-up update_sg_lb_stats parameters



In preparation for the introduction of a new root domain flag which can
be set during load balance (the 'overutilized' flag), clean-up the set
of parameters passed to update_sg_lb_stats(). More specifically, the
'local_group' and 'local_idx' parameters can be removed since they can
easily be reconstructed from within the function.

While at it, transform the 'overload' parameter into a flag stored in
the 'sg_status' parameter hence facilitating the definition of new flags
when needed.

Change-Id: Ic2ccb51fdc08d7da0f8cc0442ef97cbcb4a52c86
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Message-Id: <20181016101513.26919-12-quentin.perret@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent b78eec5f
Loading
Loading
Loading
Loading
+11 −16
Original line number Diff line number Diff line
@@ -7884,16 +7884,16 @@ static bool update_nohz_stats(struct rq *rq, bool force)
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @sgs: variable to hold the statistics for this group.
 * @overload: Indicate pullable load (e.g. >1 runnable task).
 * @sg_status: Holds flag indicating the status of the sched_group
 */
static inline void update_sg_lb_stats(struct lb_env *env,
			struct sched_group *group, int load_idx,
			int local_group, struct sg_lb_stats *sgs,
			bool *overload)
				      struct sched_group *group,
				      struct sg_lb_stats *sgs,
				      int *sg_status)
{
	int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
	int load_idx = get_sd_load_idx(env->sd, env->idle);
	unsigned long load;
	int i, nr_running;

@@ -7917,7 +7917,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,

		nr_running = rq->nr_running;
		if (nr_running > 1)
			*overload = true;
			*sg_status |= SG_OVERLOAD;

#ifdef CONFIG_NUMA_BALANCING
		sgs->nr_numa_running += rq->nr_numa_running;
@@ -7933,7 +7933,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
		    sgs->group_misfit_task_load < rq->misfit_task_load) {
			sgs->group_misfit_task_load = rq->misfit_task_load;
			*overload = 1;
			*sg_status |= SG_OVERLOAD;
		}
	}

@@ -8078,17 +8078,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
	struct sched_group *sg = env->sd->groups;
	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats tmp_sgs;
	int load_idx;
	bool overload = false;
	bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
	int sg_status = 0;

#ifdef CONFIG_NO_HZ_COMMON
	if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
		env->flags |= LBF_NOHZ_STATS;
#endif

	load_idx = get_sd_load_idx(env->sd, env->idle);

	do {
		struct sg_lb_stats *sgs = &tmp_sgs;
		int local_group;
@@ -8103,8 +8100,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
				update_group_capacity(env->sd, env->dst_cpu);
		}

		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
						&overload);
		update_sg_lb_stats(env, sg, sgs, &sg_status);

		if (local_group)
			goto next_group;
@@ -8154,8 +8150,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd

	if (!env->sd->parent) {
		/* update overload indicator if we are at root domain */
		if (READ_ONCE(env->dst_rq->rd->overload) != overload)
			WRITE_ONCE(env->dst_rq->rd->overload, overload);
		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
	}
}

+3 −0
Original line number Diff line number Diff line
@@ -709,6 +709,9 @@ struct perf_domain {
	struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by