Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db292ca3 authored by Ingo Molnar
Browse files

sched: default to more aggressive yield for SCHED_BATCH tasks



do more aggressive yield for SCHED_BATCH tuned tasks: they are all
about throughput anyway. This allows a gentler migration path for
any apps that relied on the stronger yield behavior.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 77034937
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 */
static void yield_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
	struct sched_entity *rightmost, *se = &rq->curr->se;
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		__update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.