Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d99db09 authored by Abhijeet Dharmapurikar, committed by Lingutla Chandrasekhar
Browse files

sched: walt: Improve the scheduler



This change is for general scheduler improvements.

Change-Id: Ib8505248b91fb33395fe53f2dfacc8ec68c0273c
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
parent 13dd4dd6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -250,7 +250,7 @@ TRACE_EVENT(sched_update_task_ravg,
		__entry->evt            = evt;
		__entry->cpu            = rq->cpu;
		__entry->cur_pid        = rq->curr->pid;
		__entry->cur_freq       = rq->freq;
		__entry->cur_freq       = rq->task_exec_scale;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid            = p->pid;
		__entry->mark_start     = p->ravg.mark_start;
+1 −1
Original line number Diff line number Diff line
@@ -993,7 +993,7 @@ struct rq {
	u64			avg_irqload;
	u64			irqload_ts;
	struct task_struct	*ed_task;
	u64			freq;
	u64			task_exec_scale;
	u64			old_busy_time, old_busy_time_group;
	u64			old_estimated_time;
	u64			curr_runnable_sum;
+8 −8
Original line number Diff line number Diff line
@@ -1430,11 +1430,7 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,

static inline u64 scale_exec_time(u64 delta, struct rq *rq)
{
	delta = DIV64_U64_ROUNDUP(delta * rq->freq, max_possible_freq);
	delta *= rq->cluster->exec_scale_factor;
	delta >>= 10;

	return delta;
	return (delta * rq->task_exec_scale) >> 10;
}

/* Convert busy time to frequency equivalent
@@ -2018,7 +2014,9 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
	lockdep_assert_held(&rq->lock);

	if (!use_cycle_counter) {
		rq->freq = cpu_cur_freq(cpu);
		rq->task_exec_scale = DIV64_U64_ROUNDUP(cpu_cur_freq(cpu) *
				topology_get_cpu_scale(NULL, cpu),
				rq->cluster->max_possible_freq);
		return;
	}

@@ -2053,7 +2051,9 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
			time_delta = wallclock - p->ravg.mark_start;
		SCHED_BUG_ON((s64)time_delta < 0);

		rq->freq = DIV64_U64_ROUNDUP(cycles_delta, time_delta);
		rq->task_exec_scale = DIV64_U64_ROUNDUP(cycles_delta *
				topology_get_cpu_scale(NULL, cpu),
				time_delta * rq->cluster->max_possible_freq);
	}

	p->cpu_cycles = cur_cycles;
@@ -3620,7 +3620,7 @@ void walt_sched_init_rq(struct rq *rq)
	rq->cur_irqload = 0;
	rq->avg_irqload = 0;
	rq->irqload_ts = 0;
	rq->freq = 1;
	rq->task_exec_scale = 1024;

	/*
	 * All cpus part of same cluster by default. This avoids the
+1 −1
Original line number Diff line number Diff line
@@ -375,7 +375,7 @@ static inline void walt_rq_dump(int cpu)
	SCHED_PRINT(rq->nt_curr_runnable_sum);
	SCHED_PRINT(rq->nt_prev_runnable_sum);
	SCHED_PRINT(rq->cum_window_demand_scaled);
	SCHED_PRINT(rq->freq);
	SCHED_PRINT(rq->task_exec_scale);
	SCHED_PRINT(rq->grp_time.curr_runnable_sum);
	SCHED_PRINT(rq->grp_time.prev_runnable_sum);
	SCHED_PRINT(rq->grp_time.nt_curr_runnable_sum);