
Commit 55e16d30 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Rework throttle_count sync



Since we already take rq->lock when creating a cgroup, use it to also
sync the throttle_count and avoid the extra state and enqueue path
branch.
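
For illustration, here is a minimal userspace model of the idea (not kernel code; the struct layouts, NR_CPUS, and the helper name sync_throttle_model are simplified stand-ins): a newly created child group copies its parent's throttle_count once, at creation time, mirroring the sync_throttle() call made under rq->lock in online_fair_sched_group() in the diff below, instead of lazily walking up the hierarchy on first enqueue as the removed check_enqueue_throttle() branch did.

/* Minimal userspace sketch of the rework; illustration only, not kernel code. */
#include <stdio.h>

#define NR_CPUS 2	/* stand-in; the kernel uses per-CPU allocations */

struct cfs_rq {
	int throttle_count;	/* hierarchical count of throttled ancestors */
};

struct task_group {
	struct task_group *parent;
	struct cfs_rq cfs_rq[NR_CPUS];
};

/* Mirrors the new sync_throttle(): a single parent->child copy, no walk. */
static void sync_throttle_model(struct task_group *tg, int cpu)
{
	if (!tg->parent)
		return;
	tg->cfs_rq[cpu].throttle_count = tg->parent->cfs_rq[cpu].throttle_count;
}

int main(void)
{
	struct task_group root = { 0 };
	struct task_group child = { .parent = &root };
	int cpu;

	root.cfs_rq[0].throttle_count = 1;	/* pretend CPU 0 is throttled */

	/* As in online_fair_sched_group(): sync each CPU at group creation. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sync_throttle_model(&child, cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: child throttle_count=%d\n",
		       cpu, child.cfs_rq[cpu].throttle_count);
	return 0;
}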

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: linux-kernel@vger.kernel.org
[ Fixed build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 599b4840
kernel/sched/fair.c +19 −20
@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	/* Synchronize hierarchical throttle counter: */
-	if (unlikely(!cfs_rq->throttle_uptodate)) {
-		struct rq *rq = rq_of(cfs_rq);
-		struct cfs_rq *pcfs_rq;
-		struct task_group *tg;
-
-		cfs_rq->throttle_uptodate = 1;
-
-		/* Get closest up-to-date node, because leaves go first: */
-		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-			if (pcfs_rq->throttle_uptodate)
-				break;
-		}
-		if (tg) {
-			cfs_rq->throttle_count = pcfs_rq->throttle_count;
-			cfs_rq->throttled_clock_task = rq_clock_task(rq);
-		}
-	}
-
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 		throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *pcfs_rq, *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (!tg->parent)
+		return;
+
+	cfs_rq = tg->cfs_rq[cpu];
+	pcfs_rq = tg->parent->cfs_rq[cpu];
+
+	cfs_rq->throttle_count = pcfs_rq->throttle_count;
+	pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)

 		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		sync_throttle(tg, i);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }
kernel/sched/sched.h +1 −1
@@ -438,7 +438,7 @@ struct cfs_rq {

 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count, throttle_uptodate;
+	int throttled, throttle_count;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */