Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 61d531a1 authored by Linux Build Service Account, committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: fair: add support to check for misfit tasks"

parents 551a21d3 ea6dd087
Loading
Loading
Loading
Loading
+45 −0
Original line number Diff line number Diff line
@@ -45,6 +45,8 @@
static inline bool task_fits_max(struct task_struct *p, int cpu);
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
					u32 new_task_load, u32 new_pred_demand);
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
					int delta, bool inc);
#endif /* CONFIG_SCHED_WALT */

#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
@@ -5016,6 +5018,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	struct sched_entity *se = &p->se;
	int task_new = !(flags & ENQUEUE_WAKEUP);

#ifdef CONFIG_SCHED_WALT
	p->misfit = !task_fits_max(p, rq->cpu);
#endif
	/*
	 * If in_iowait is set, the code below may not trigger any cpufreq
	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -11228,6 +11233,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;
#ifdef CONFIG_SCHED_WALT
	bool old_misfit = curr->misfit;
	bool misfit;
#endif

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
@@ -11239,6 +11248,15 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)

	update_misfit_task(rq, curr);

#ifdef CONFIG_SCHED_WALT
	misfit = rq->misfit_task;

	if (old_misfit != misfit) {
		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
		curr->misfit = misfit;
	}
#endif

	update_overutilized_status(rq);
}

@@ -11885,6 +11903,27 @@ static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
	}
}

/*
 * Propagate a change in task @p's "big task" classification up the
 * scheduling-entity hierarchy, updating the WALT big-task count of
 * every cfs_rq that @p runs on.
 *
 * @delta: magnitude of the adjustment
 * @inc:   true to add @delta, false to subtract it
 *
 * The walk stops at the first throttled cfs_rq: entities above it do
 * not account for @p while it is throttled. Only when the walk reaches
 * the top unthrottled (se ends up NULL) is the root rq's count fixed
 * up as well, mirroring the per-cfs_rq accounting.
 */
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
							int delta, bool inc)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq;
	int adjustment = inc ? delta : -delta;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		cfs_rq->walt_stats.nr_big_tasks += adjustment;
		/* Count must never go negative; that would be an accounting bug. */
		BUG_ON(cfs_rq->walt_stats.nr_big_tasks < 0);

		if (cfs_rq_throttled(cfs_rq))
			break;
	}

	/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
	if (!se)
		walt_adjust_nr_big_tasks(rq, delta, inc);
}

/*
 * Check if task is part of a hierarchy where some cfs_rq does not have any
 * runtime left.
@@ -11920,6 +11959,12 @@ static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
	fixup_walt_sched_stats_common(rq, p, new_task_load, new_pred_demand);
}

/*
 * Adjust the rq-level WALT big-task count for task @p by @delta
 * (added when @inc is true, subtracted otherwise).
 *
 * NOTE(review): this appears to be the fallback variant for configs
 * without a per-group cfs_rq hierarchy to walk (the hierarchy-walking
 * version exists elsewhere in this file) — confirm against the
 * enclosing #ifdef/#else context, which is not visible here.
 */
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
							int delta, bool inc)
{
	walt_adjust_nr_big_tasks(rq, delta, inc);
}

static int task_will_be_throttled(struct task_struct *p)
{
	return false;