Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4da3abce authored by Luca Abeni, committed by Ingo Molnar
Browse files

sched/deadline: Do not reclaim the whole CPU bandwidth



Original GRUB tends to reclaim 100% of the CPU time... And this
allows a CPU hog to starve non-deadline tasks.
To address this issue, allow the scheduler to reclaim only a
specified fraction of CPU time, stored in the new "bw_ratio"
field of the dl runqueue structure.

Tested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Claudio Scordino <claudio@evidence.eu.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Link: http://lkml.kernel.org/r/1495138417-6203-6-git-send-email-luca.abeni@santannapisa.it


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c52f14d3
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -6759,6 +6759,16 @@ static int sched_dl_global_validate(void)
	return ret;
}

void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
	}
}

static void sched_dl_do_global(void)
{
	u64 new_bw = -1;
@@ -6784,6 +6794,7 @@ static void sched_dl_do_global(void)
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
	}
}

+11 −1
Original line number Diff line number Diff line
@@ -268,6 +268,7 @@ void init_dl_rq(struct dl_rq *dl_rq)
#endif

	dl_rq->running_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP
@@ -924,11 +925,20 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 * Uact is the (per-runqueue) active utilization.
 * Since rq->dl.running_bw contains Uact * 2^BW_SHIFT, the result
 * has to be shifted right by BW_SHIFT.
 * To reclaim only a fraction Umax of the CPU time, the
 * runtime accounting rule is modified as
 * "dq = -Uact / Umax dt"; since rq->dl.bw_ratio contains
 * 2^RATIO_SHIFT / Umax, delta is multiplied by bw_ratio and shifted
 * right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
u64 grub_reclaim(u64 delta, struct rq *rq)
{
	/* Uact, scaled by 2^BW_SHIFT (see the comment above). */
	delta *= rq->dl.running_bw;
	/* 1/Umax, scaled by 2^RATIO_SHIFT. */
	delta *= rq->dl.bw_ratio;
	/*
	 * Drop both fixed-point scale factors with a single shift.  An
	 * intermediate ">>= BW_SHIFT" between the two multiplications
	 * (the pre-bw_ratio accounting rule) must NOT remain here: the
	 * combined shift below would then be applied on top of it,
	 * shifting by BW_SHIFT twice and zeroing out the reclaimed time.
	 */
	delta >>= BW_SHIFT + RATIO_SHIFT;

	return delta;
}
+8 −0
Original line number Diff line number Diff line
@@ -565,6 +565,12 @@ struct dl_rq {
	 * task blocks
	 */
	u64 running_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64 bw_ratio;
};

#ifdef CONFIG_SMP
@@ -1495,9 +1501,11 @@ extern struct dl_bandwidth def_dl_bandwidth;
/* SCHED_DEADLINE initialization helpers, defined in kernel/sched/deadline.c. */
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

/*
 * Fixed-point scale factors for deadline bandwidth accounting:
 * utilizations (e.g. dl_rq->running_bw) are stored scaled by 2^BW_SHIFT,
 * while dl_rq->bw_ratio (1/Umax for GRUB reclaiming) uses the coarser
 * 2^RATIO_SHIFT scale so the combined product still fits in 64 bits.
 */
#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)
#define RATIO_SHIFT	8
/* Returns runtime/period in 2^BW_SHIFT fixed point. */
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);