Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d0a662ae authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: walt: Improve the scheduler"

parents ac809c69 ef9b421b
Loading
Loading
Loading
Loading
+5 −5
Original line number | Diff line number | Diff line
@@ -204,10 +204,10 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
TRACE_EVENT(sched_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 u64 wallclock, u64 irqtime,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
	TP_ARGS(p, rq, evt, wallclock, irqtime, cpu_time),

	TP_STRUCT__entry(
		__array(char,			comm, TASK_COMM_LEN)
@@ -250,7 +250,7 @@ TRACE_EVENT(sched_update_task_ravg,
		__entry->evt            = evt;
		__entry->cpu            = rq->cpu;
		__entry->cur_pid        = rq->curr->pid;
		__entry->cur_freq       = cpu_cycles_to_freq(cycles, exec_time);
		__entry->cur_freq       = rq->task_exec_scale;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid            = p->pid;
		__entry->mark_start     = p->ravg.mark_start;
@@ -301,10 +301,10 @@ TRACE_EVENT(sched_update_task_ravg,
TRACE_EVENT(sched_update_task_ravg_mini,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 u64 wallclock, u64 irqtime,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
	TP_ARGS(p, rq, evt, wallclock, irqtime, cpu_time),

	TP_STRUCT__entry(
		__array(char,			comm, TASK_COMM_LEN)
+1 −6
Original line number | Diff line number | Diff line
@@ -108,11 +108,6 @@ struct walt_sched_stats {
	u64 pred_demands_sum_scaled;
};

struct cpu_cycle {
	u64 cycles;
	u64 time;
};

struct group_cpu_time {
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
@@ -998,7 +993,7 @@ struct rq {
	u64			avg_irqload;
	u64			irqload_ts;
	struct task_struct	*ed_task;
	struct cpu_cycle	cc;
	u64			task_exec_scale;
	u64			old_busy_time, old_busy_time_group;
	u64			old_estimated_time;
	u64			curr_runnable_sum;
+25 −27
Original line number | Diff line number | Diff line
@@ -1430,14 +1430,7 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,

static inline u64 scale_exec_time(u64 delta, struct rq *rq)
{
	u32 freq;

	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
	delta *= rq->cluster->exec_scale_factor;
	delta >>= 10;

	return delta;
	return (delta * rq->task_exec_scale) >> 10;
}

/* Convert busy time to frequency equivalent
@@ -2014,13 +2007,16 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
			  u64 wallclock, u64 irqtime)
{
	u64 cur_cycles;
	u64 cycles_delta;
	u64 time_delta;
	int cpu = cpu_of(rq);

	lockdep_assert_held(&rq->lock);

	if (!use_cycle_counter) {
		rq->cc.cycles = cpu_cur_freq(cpu);
		rq->cc.time = 1;
		rq->task_exec_scale = DIV64_U64_ROUNDUP(cpu_cur_freq(cpu) *
				topology_get_cpu_scale(NULL, cpu),
				rq->cluster->max_possible_freq);
		return;
	}

@@ -2035,10 +2031,10 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
	 */
	if (!is_idle_task(rq->curr) || irqtime) {
		if (unlikely(cur_cycles < p->cpu_cycles))
			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
			cycles_delta = cur_cycles + (U64_MAX - p->cpu_cycles);
		else
			rq->cc.cycles = cur_cycles - p->cpu_cycles;
		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
			cycles_delta = cur_cycles - p->cpu_cycles;
		cycles_delta = cycles_delta * NSEC_PER_MSEC;

		if (event == IRQ_UPDATE && is_idle_task(p))
			/*
@@ -2046,20 +2042,24 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
			 * entry time is CPU cycle counter stall period.
			 * Upon IRQ handler entry sched_account_irqstart()
			 * replenishes idle task's cpu cycle counter so
			 * rq->cc.cycles now represents increased cycles during
			 * cycles_delta now represents increased cycles during
			 * IRQ handler rather than time between idle entry and
			 * IRQ exit.  Thus use irqtime as time delta.
			 */
			rq->cc.time = irqtime;
			time_delta = irqtime;
		else
			rq->cc.time = wallclock - p->ravg.mark_start;
		SCHED_BUG_ON((s64)rq->cc.time < 0);
			time_delta = wallclock - p->ravg.mark_start;
		SCHED_BUG_ON((s64)time_delta < 0);

		rq->task_exec_scale = DIV64_U64_ROUNDUP(cycles_delta *
				topology_get_cpu_scale(NULL, cpu),
				time_delta * rq->cluster->max_possible_freq);
	}

	p->cpu_cycles = cur_cycles;

	trace_sched_get_task_cpu_cycles(cpu, event,
					rq->cc.cycles, rq->cc.time, p);
					cycles_delta, time_delta, p);
}

static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
@@ -2103,9 +2103,9 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
		goto done;

	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
				rq->cc.cycles, rq->cc.time, &rq->grp_time);
				&rq->grp_time);
	trace_sched_update_task_ravg_mini(p, rq, event, wallclock, irqtime,
				rq->cc.cycles, rq->cc.time, &rq->grp_time);
				&rq->grp_time);

done:
	p->ravg.mark_start = wallclock;
@@ -2224,10 +2224,9 @@ void mark_task_starting(struct task_struct *p)

#define pct_to_min_scaled(tunable) \
		div64_u64(((u64)sched_ravg_window * tunable *		\
			  cluster_max_freq(sched_cluster[0]) *	\
			  sched_cluster[0]->efficiency),	\
			  ((u64)max_possible_freq *		\
			  max_possible_efficiency * 100))
			 topology_get_cpu_scale(NULL,			\
			 cluster_first_cpu(sched_cluster[0]))),	\
			 ((u64)SCHED_CAPACITY_SCALE * 100))

static inline void walt_update_group_thresholds(void)
{
@@ -3620,8 +3619,7 @@ void walt_sched_init_rq(struct rq *rq)
	rq->cur_irqload = 0;
	rq->avg_irqload = 0;
	rq->irqload_ts = 0;
	rq->cc.cycles = 1;
	rq->cc.time = 1;
	rq->task_exec_scale = 1024;

	/*
	 * All cpus part of same cluster by default. This avoids the
+1 −2
Original line number | Diff line number | Diff line
@@ -375,8 +375,7 @@ static inline void walt_rq_dump(int cpu)
	SCHED_PRINT(rq->nt_curr_runnable_sum);
	SCHED_PRINT(rq->nt_prev_runnable_sum);
	SCHED_PRINT(rq->cum_window_demand_scaled);
	SCHED_PRINT(rq->cc.time);
	SCHED_PRINT(rq->cc.cycles);
	SCHED_PRINT(rq->task_exec_scale);
	SCHED_PRINT(rq->grp_time.curr_runnable_sum);
	SCHED_PRINT(rq->grp_time.prev_runnable_sum);
	SCHED_PRINT(rq->grp_time.nt_curr_runnable_sum);