
Commit 141965c7 authored by Alex Shi, committed by Ingo Molnar

Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"



Remove the CONFIG_FAIR_GROUP_SCHED dependency that guards the runnable-load
info, so that the runnable load variables can be used.

Also remove two CONFIG_FAIR_GROUP_SCHED guards that are not part of the
reverted patch (they were introduced in 9ee474f5) but also need to be
reverted.
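
In effect the revert collapses the compound guard back to a plain SMP
guard. A minimal before/after sketch of the pattern (illustration only,
not the kernel sources: the *_before/*_after struct names and the stubbed
sched_avg are hypothetical, while the guards and the avg field mirror the
include/linux/sched.h hunk below):

	/* Stub standing in for the kernel's per-entity averages; the
	 * field names are taken from the kernel/sched/core.c hunk. */
	struct sched_avg {
		unsigned long runnable_avg_sum, runnable_avg_period;
	};

	/* Before the revert: the temporary compound dependency */
	struct sched_entity_before {
	#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
		struct sched_avg	avg;	/* compiled out on SMP kernels
						 * without group scheduling */
	#endif
	};

	/* After the revert: load tracking depends on SMP alone */
	struct sched_entity_after {
	#ifdef CONFIG_SMP
		struct sched_avg	avg;	/* present on every SMP build */
	#endif
	};

With the old guard an SMP kernel built without CONFIG_FAIR_GROUP_SCHED
compiled out se->avg entirely, so per-entity load tracking could not feed
the load balancer; after the revert the variables exist whenever
CONFIG_SMP is set.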

Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/51CA76A3.3050207@intel.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent be7002e6
include/linux/sched.h  +1 −6
@@ -994,12 +994,7 @@ struct sched_entity {
 	struct cfs_rq		*my_q;
 #endif
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	/* Per-entity load-tracking */
 	struct sched_avg	avg;
 #endif
kernel/sched/core.c  +1 −6
@@ -1611,12 +1611,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.vruntime			= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
 #endif
kernel/sched/fair.c  +4 −13
@@ -1128,8 +1128,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 /*
  * We choose a half-life close to 1 scheduling period.
  * Note: The tables below are dependent on this value.
@@ -3430,12 +3429,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	return new_cpu;
 }
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -3459,7 +3452,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
 	}
 }
-#endif
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5861,7 +5853,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
 
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	/*
 	* Remove our load from contribution when we leave sched_fair
 	* and ensure we don't carry in an old decay_count if we
@@ -5920,7 +5912,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 #endif
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	atomic64_set(&cfs_rq->decay_counter, 1);
 	atomic64_set(&cfs_rq->removed_load, 0);
 #endif
@@ -6162,9 +6154,8 @@ const struct sched_class fair_sched_class = {

 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	.migrate_task_rq	= migrate_task_rq_fair,
-#endif
 
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,

kernel/sched/sched.h  +2 −17
@@ -269,12 +269,6 @@ struct cfs_rq {
 #endif
 
 #ifdef CONFIG_SMP
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * CFS Load tracking
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
@@ -284,9 +278,9 @@ struct cfs_rq {
 	u64 runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-/* These always depend on CONFIG_FAIR_GROUP_SCHED */
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* Required to track per-cpu representation of a task_group */
 	u32 tg_runnable_contrib;
 	u64 tg_load_contrib;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1027,17 +1021,8 @@ extern void update_group_power(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq, int cpu);
 extern void idle_balance(int this_cpu, struct rq *this_rq);
 
-/*
- * Only depends on SMP, FAIR_GROUP_SCHED may be removed when runnable_avg
- * becomes useful in lb
- */
-#if defined(CONFIG_FAIR_GROUP_SCHED)
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
-#else
-static inline void idle_enter_fair(struct rq *this_rq) {}
-static inline void idle_exit_fair(struct rq *this_rq) {}
-#endif
 
 #else	/* CONFIG_SMP */