
Commit 3fecaa6e authored by Lingutla Chandrasekhar, committed by Gerrit - the friendly Code Review server

sched: improve the scheduler



Drop the redundant runqueue argument from the predicted-demand helpers
(get_pred_busy(), calc_pred_demand(), predict_and_update_buckets()) and
from the sched_update_pred_demand tracepoint: the traced CPU can be
obtained with task_cpu(p) instead of rq->cpu.
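
The rq argument was threaded through get_pred_busy(), calc_pred_demand()
and predict_and_update_buckets() only so the sched_update_pred_demand
tracepoint could fill in rq->cpu. For a task queued on rq, the generic
task_cpu() accessor from <linux/sched.h> reports the same CPU, so the
parameter can be dropped along the whole path. A minimal illustrative
sketch (not part of the patch), assuming kernel context where p is
enqueued on rq:

	/* task_cpu(p) and rq->cpu agree for a task enqueued on rq,
	 * so the tracepoint no longer needs rq threaded through:
	 */
	WARN_ON_ONCE(task_cpu(p) != rq->cpu);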

Change-Id: I28584a336ed8834116173c520564300214505e2d
Signed-off-by: default avatarLingutla Chandrasekhar <clingutla@codeaurora.org>
parent 307d1bd6
+3 −3
@@ -83,10 +83,10 @@ __get_update_sum(struct rq *rq, enum migrate_types migrate_type,

 TRACE_EVENT(sched_update_pred_demand,

-	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+	TP_PROTO(struct task_struct *p, u32 runtime, int pct,
 		 unsigned int pred_demand),

-	TP_ARGS(rq, p, runtime, pct, pred_demand),
+	TP_ARGS(p, runtime, pct, pred_demand),

 	TP_STRUCT__entry(
 		__array(char,		comm, TASK_COMM_LEN)
@@ -106,7 +106,7 @@ TRACE_EVENT(sched_update_pred_demand,
 		__entry->pred_demand     = pred_demand;
 		memcpy(__entry->bucket, p->ravg.busy_buckets,
 					NUM_BUSY_BUCKETS * sizeof(u8));
-		__entry->cpu            = rq->cpu;
+		__entry->cpu            = task_cpu(p);
 	),

 	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
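
Since TP_PROTO/TP_ARGS define the prototype of the trace_<name>() helper
that TRACE_EVENT generates, callers of trace_sched_update_pred_demand()
must change in step (see the second file below). A sketch of the
effective signatures before and after this change; the bodies are
generated by the tracing machinery:

	/* before */
	void trace_sched_update_pred_demand(struct rq *rq, struct task_struct *p,
					    u32 runtime, int pct,
					    unsigned int pred_demand);

	/* after: the CPU is derived inside the event via task_cpu(p) */
	void trace_sched_update_pred_demand(struct task_struct *p, u32 runtime,
					    int pct, unsigned int pred_demand);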
+8 −9
@@ -1047,7 +1047,6 @@ static inline int busy_to_bucket(u32 normalized_rt)
 /*
  * get_pred_busy - calculate predicted demand for a task on runqueue
  *
- * @rq: runqueue of task p
  * @p: task whose prediction is being updated
  * @start: starting bucket. returned prediction should not be lower than
  *         this bucket.
@@ -1063,7 +1062,7 @@ static inline int busy_to_bucket(u32 normalized_rt)
  * time and returns the latest that falls into the bucket. If no such busy
  * time exists, it returns the medium of that bucket.
  */
-static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+static u32 get_pred_busy(struct task_struct *p,
 				int start, u32 runtime)
 {
 	int i;
@@ -1121,18 +1120,18 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 	 */
 	ret = max(runtime, ret);
 out:
-	trace_sched_update_pred_demand(rq, p, runtime,
+	trace_sched_update_pred_demand(p, runtime,
 		mult_frac((unsigned int)cur_freq_runtime, 100,
 			  sched_ravg_window), ret);
 	return ret;
 }

-static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+static inline u32 calc_pred_demand(struct task_struct *p)
 {
 	if (p->ravg.pred_demand >= p->ravg.curr_window)
 		return p->ravg.pred_demand;

-	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+	return get_pred_busy(p, busy_to_bucket(p->ravg.curr_window),
 			     p->ravg.curr_window);
 }

@@ -1167,7 +1166,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
 			return;
 	}

-	new = calc_pred_demand(rq, p);
+	new = calc_pred_demand(p);
 	old = p->ravg.pred_demand;

 	if (old >= new)
@@ -1693,7 +1692,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 }


-static inline u32 predict_and_update_buckets(struct rq *rq,
+static inline u32 predict_and_update_buckets(
 			struct task_struct *p, u32 runtime) {

 	int bidx;
@@ -1703,7 +1702,7 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
 		return 0;

 	bidx = busy_to_bucket(runtime);
-	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	pred_demand = get_pred_busy(p, bidx, runtime);
 	bucket_increase(p->ravg.busy_buckets, bidx);

 	return pred_demand;
@@ -1801,7 +1800,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
 		else
 			demand = max(avg, runtime);
 	}
-	pred_demand = predict_and_update_buckets(rq, p, runtime);
+	pred_demand = predict_and_update_buckets(p, runtime);
 	demand_scaled = scale_demand(demand);
 	pred_demand_scaled = scale_demand(pred_demand);
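
Taken together, the change removes rq from the entire prediction path;
only the outer entry points update_history() and update_task_pred_demand(),
which use rq for other purposes, keep it in their signatures. An
illustrative summary of the resulting call chain (not part of the diff):

	/* update_history(rq, p, ...)
	 *   -> predict_and_update_buckets(p, runtime)
	 *        -> get_pred_busy(p, bidx, runtime)
	 *             -> trace_sched_update_pred_demand(p, ...)
	 *                  (cpu recorded as task_cpu(p))
	 */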