Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3c1ecd8 authored by Srivatsa Vaddagiri; committed by Steve Muckle
Browse files

sched: window-stats: Account idle time as busy time



Provide a knob to account idle time as busy time when a cpu becomes
idle as a result of an io_schedule() call. This lets the governor
parameter 'io_is_busy' be appropriately honored.

Change-Id: Id9fb4fe448e8e4909696aa8a3be5a165ad7529d3
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
parent c55cc8b6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1719,8 +1719,8 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
extern int task_free_register(struct notifier_block *n);
extern int task_free_unregister(struct notifier_block *n);
extern int sched_set_window(u64 window_start, unsigned int window_size);
static inline void sched_set_io_is_busy(int val) {};
extern unsigned long sched_get_busy(int cpu);
extern void sched_set_io_is_busy(int val);

/*
 * Per process flags
+39 −8
Original line number Diff line number Diff line
@@ -1136,6 +1136,7 @@ unsigned int min_possible_efficiency = 1024;
__read_mostly int sysctl_sched_freq_inc_notify_slack_pct;
__read_mostly int sysctl_sched_freq_dec_notify_slack_pct = 25;
static __read_mostly unsigned int sched_account_wait_time = 1;
static __read_mostly unsigned int sched_io_is_busy;

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
@@ -1301,6 +1302,14 @@ static int __init set_sched_ravg_window(char *str)

early_param("sched_ravg_window", set_sched_ravg_window);

/*
 * cpu_is_waiting_on_io - does @rq's cpu count as busy while idling on I/O?
 *
 * Returns non-zero when the sched_io_is_busy knob is set and at least one
 * task on this runqueue is blocked in io_schedule() (rq->nr_iowait > 0).
 * Callers use this to keep idle-with-pending-I/O time accounted as busy
 * time in the window statistics.
 */
static inline int cpu_is_waiting_on_io(struct rq *rq)
{
	return sched_io_is_busy ? atomic_read(&rq->nr_iowait) : 0;
}

static inline void
move_window_start(struct rq *rq, u64 wallclock, int update_sum,
						 struct task_struct *p)
@@ -1316,7 +1325,7 @@ move_window_start(struct rq *rq, u64 wallclock, int update_sum,
	nr_windows = div64_u64(delta, sched_ravg_window);
	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;

	if (is_idle_task(rq->curr)) {
	if (is_idle_task(rq->curr) && !cpu_is_waiting_on_io(rq)) {
		if (nr_windows == 1)
			rq->prev_runnable_sum = rq->curr_runnable_sum;
		else
@@ -1372,6 +1381,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
	int update_sum, new_window;
	u64 mark_start = p->ravg.mark_start;
	u64 window_start;
	s64 delta = 0;

	if (sched_use_pelt || !rq->window_start)
		return;
@@ -1385,15 +1395,30 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
	move_window_start(rq, wallclock, update_sum, p);
	window_start = rq->window_start;

	/*
	 * Don't bother accounting for idle task, also we would not want
	 * to attribute its time to the aggregate RQ busy time
	 */
	if (is_idle_task(p))
		return;
	if (is_idle_task(p)) {
		if (!(event == PUT_PREV_TASK && cpu_is_waiting_on_io(rq)))
			goto done;

		if (window_start > mark_start) {
			delta = window_start - mark_start;
			if (delta > window_size) {
				rq->curr_runnable_sum = 0;
				delta = window_size;
			}
			delta = scale_exec_time(delta, rq);
			rq->curr_runnable_sum += delta;
			rq->prev_runnable_sum = rq->curr_runnable_sum;
			rq->curr_runnable_sum = 0;
			mark_start = window_start;
		}
		delta = wallclock - mark_start;
		delta = scale_exec_time(delta, rq);
		rq->curr_runnable_sum += delta;

		goto done;
	}

	do {
		s64 delta = 0;
		int nr_full_windows = 0;
		u64 now = wallclock;
		u32 sum = 0;
@@ -1473,6 +1498,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
		}
	}

done:
	trace_sched_update_task_ravg(p, rq, event, wallclock);

	p->ravg.mark_start = wallclock;
@@ -1630,6 +1656,11 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size,
	}
}

/*
 * sched_set_io_is_busy - set the "treat iowait idle as busy" knob.
 * @val: non-zero enables accounting idle time spent waiting on I/O
 *	 (io_schedule) as busy time in the window statistics.
 *
 * Exported so cpufreq governors can propagate their 'io_is_busy'
 * tunable into the scheduler's busy-time accounting.
 */
void sched_set_io_is_busy(int val)
{
	sched_io_is_busy = val;
}

int sched_set_window(u64 window_start, unsigned int window_size)
{
	u64 ws, now;