
Commit 369da7fc authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
  sched/fair: Fix effective_load() to consistently use smoothed load
parents 612807fe ea1dc6fc
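
To see what the first fix ("fixed point arithmetics width confusion") is about: on 64-bit kernels, weights such as tg->shares and cfs_rq->load.weight carry SCHED_FIXEDPOINT_SHIFT (10) extra bits of resolution, while the PELT sums in tg->load_avg do not, so the old calc_cfs_shares() added a high-resolution weight into a low-resolution group sum. Below is a minimal standalone sketch of that mismatch; the macros mirror the kernel's 64-bit definitions, but the numbers and the simplified arithmetic are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Simplified mirrors of the kernel's 64-bit fixed-point helpers. */
#define SCHED_FIXEDPOINT_SHIFT	10
#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	long tg_shares = scale_load(1024L);	/* group's tg->shares (high-res) */
	long weight    = scale_load(1024L);	/* cfs_rq->load.weight: one nice-0 task */
	long tg_load   = 2048L;			/* tg->load_avg: two busy CPUs, low-res */
	long contrib   = 1024L;			/* this cfs_rq's tg_load_avg_contrib */

	/* Old code: high-res weight mixed into the low-res group sum. */
	long tg_weight_old = tg_load - contrib + weight;
	long shares_old    = tg_shares * weight / tg_weight_old;

	/* New code: scale the weight down first so both operands match. */
	long load          = scale_load_down(weight);
	long tg_weight_new = tg_load - contrib + load;
	long shares_new    = tg_shares * load / tg_weight_new;

	/* Two equally loaded CPUs should split the shares evenly. */
	printf("old: %ld  new: %ld  (expected: %ld)\n",
	       shares_old, shares_new, tg_shares / 2);
	return 0;
}

With two equally loaded runqueues, the old arithmetic hands nearly all of tg->shares to this CPU (the other CPU's low-resolution 1024 is noise next to the high-resolution 1048576), while the rescaled version yields the expected even split.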
kernel/sched/fair.c +20 −22
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
+	load = scale_load_down(cfs_rq->load.weight);
 
-	return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
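
The rewritten function keeps the "Ensure tg_weight >= load" step for a reason worth spelling out: tg->load_avg is updated lazily (see update_tg_load_avg()), so the group sum can lag this runqueue's actual weight. Swapping the runqueue's stale contribution for its current load makes the denominator at least as large as the load in the numerator, so the result can never exceed tg->shares. Here is a stand-alone model of the new flow, with plain longs in place of the kernel's atomics and struct fields; the function name and the division step are assumptions for illustration, since the rest of calc_cfs_shares() is not shown in this hunk.

/* Hypothetical model of the new calc_cfs_shares() arithmetic. */
static long shares_model(long tg_shares, long tg_load_avg,
			 long rq_contrib, long rq_load)
{
	long tg_weight, shares;

	/* Replace the stale per-rq contribution with the current load. */
	tg_weight = tg_load_avg - rq_contrib + rq_load;

	shares = tg_shares * rq_load;
	if (tg_weight)
		shares /= tg_weight;

	/* tg_weight >= rq_load guarantees shares <= tg_shares. */
	return shares;
}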
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
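
The capture ends just before the computation this last comment announces. In the notation of effective_load()'s own comments (S is the group's tg->shares, rw_i this runqueue's runnable weight, wg and wl the group- and task-level weight deltas being propagated up the hierarchy), the visible steps of each loop iteration amount to, plausibly followed by the shares division itself:

	W = wg + \Sum_j rw_j,    w = rw_i + wl,    wl = S * s'_i ≈ S * w / W

The substance of the second fix is in the added lines: both w and W are now built from the same PELT-smoothed quantities (cfs_rq_load_avg() and tg->load_avg, corrected by the fresh per-rq term) instead of mixing smoothed and instantaneous load, which is exactly the "consistently use smoothed load" of the commit title.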