
Commit f43931e8 authored by Rohit Gupta

sched: Delete heavy task heuristics in prediction code



Heavy task prediction code needs further tuning to avoid any
negative power impact. Delete the code for now, rather than adding
tunables, to avoid inefficiencies in the scheduler path.

Change-Id: I71e3b37a5c99e24bc5be93cc825d7e171e8ff7ce
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
parent e6993466
+1 −43
@@ -774,13 +774,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 /* Temporarily disable window-stats activity on all cpus */
 unsigned int __read_mostly sched_disable_window_stats;
 
-/*
- * Major task runtime. If a task runs for more than sched_major_task_runtime
- * in a window, it's considered to be generating majority of workload
- * for this window. Prediction could be adjusted for such tasks.
- */
-__read_mostly unsigned int sched_major_task_runtime = 10000000;
-
 static unsigned int sync_cpu;
 
 struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
@@ -1015,9 +1008,6 @@ void set_hmp_defaults(void)
 
 	update_up_down_migrate();
 
-	sched_major_task_runtime =
-		mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
-
 	sched_init_task_load_windows =
 		div64_u64((u64)sysctl_sched_init_task_load_pct *
 			  (u64)sched_ravg_window, 100);
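(For context: the recomputation deleted above kept the heavy-task threshold at MAJOR_TASK_PCT percent of the averaging window, where MAJOR_TASK_PCT is 85 per the header change below. Assuming the 10 ms window implied by the deleted 10000000 ns static initializer, mult_frac(10000000, 85, 100) = 8500000 ns, i.e. a task had to run for roughly 8.5 ms of a 10 ms window to be treated as heavy.)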
@@ -1961,8 +1951,6 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
 	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
 }
 
-#define HEAVY_TASK_SKIP 2
-#define HEAVY_TASK_SKIP_LIMIT 4
 /*
  * get_pred_busy - calculate predicted demand for a task on runqueue
  *
@@ -1990,7 +1978,7 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 	u32 *hist = p->ravg.sum_history;
 	u32 dmin, dmax;
 	u64 cur_freq_runtime = 0;
-	int first = NUM_BUSY_BUCKETS, final, skip_to;
+	int first = NUM_BUSY_BUCKETS, final;
 	u32 ret = runtime;
 
 	/* skip prediction for new tasks due to lack of history */
@@ -2010,36 +1998,6 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 
 	/* compute the bucket for prediction */
 	final = first;
-	if (first < HEAVY_TASK_SKIP_LIMIT) {
-		/* compute runtime at current CPU frequency */
-		cur_freq_runtime = mult_frac(runtime, max_possible_efficiency,
-					     rq->cluster->efficiency);
-		cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
-				max_possible_freq, rq->cluster->cur_freq);
-		/*
-		 * if the task runs for majority of the window, try to
-		 * pick higher buckets.
-		 */
-		if (cur_freq_runtime >= sched_major_task_runtime) {
-			int next = NUM_BUSY_BUCKETS;
-			/*
-			 * if there is a higher bucket that's consistently
-			 * hit, don't jump beyond that.
-			 */
-			for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT &&
-			     i < NUM_BUSY_BUCKETS; i++) {
-				if (buckets[i] > CONSISTENT_THRES) {
-					next = i;
-					break;
-				}
-			}
-			skip_to = min(next, start + HEAVY_TASK_SKIP);
-			/* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
-			skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
-			/* don't go below first non-empty bucket, if any */
-			final = max(first, skip_to);
-		}
-	}
 
 	/* determine demand range for the predicted bucket */
 	if (final < 2) {
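For readers skimming the diff, the heuristic removed above is compact enough to illustrate standalone. The following userspace sketch reproduces the deleted bucket-skip logic (the frequency/efficiency rescaling of the runtime is elided); NUM_BUSY_BUCKETS, CONSISTENT_THRES, and the sample inputs are assumed placeholder values, not taken from this commit.

/*
 * Minimal standalone sketch of the deleted bucket-skip heuristic.
 * NUM_BUSY_BUCKETS and CONSISTENT_THRES are stand-ins for the
 * kernel's values; only the control flow mirrors the original.
 */
#include <stdio.h>

#define NUM_BUSY_BUCKETS	10	/* assumed bucket count */
#define CONSISTENT_THRES	16	/* assumed hit-count threshold */
#define HEAVY_TASK_SKIP		2
#define HEAVY_TASK_SKIP_LIMIT	4

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Pick the prediction bucket for a task whose (frequency-scaled)
 * runtime covered the majority of the window: jump up to
 * HEAVY_TASK_SKIP buckets above the current one, but never past a
 * higher bucket that is consistently hit, never beyond
 * HEAVY_TASK_SKIP_LIMIT, and never below the first non-empty bucket.
 */
static int heavy_task_bucket(const unsigned char *buckets,
			     int start, int first)
{
	int i, next = NUM_BUSY_BUCKETS, skip_to;

	/* if a higher bucket is consistently hit, don't jump beyond it */
	for (i = start + 1;
	     i <= HEAVY_TASK_SKIP_LIMIT && i < NUM_BUSY_BUCKETS; i++) {
		if (buckets[i] > CONSISTENT_THRES) {
			next = i;
			break;
		}
	}
	skip_to = min(next, start + HEAVY_TASK_SKIP);
	/* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
	skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
	/* don't go below first non-empty bucket, if any */
	return max(first, skip_to);
}

int main(void)
{
	unsigned char buckets[NUM_BUSY_BUCKETS] = { 0 };

	/* task currently lands in bucket 1; bucket 3 is hit often */
	buckets[3] = 20;
	printf("final bucket: %d\n", heavy_task_bucket(buckets, 1, 1));
	return 0;
}

With start = first = 1 and bucket 3 consistently hit, the sketch predicts bucket 3: next = 3, skip_to = min(3, 1 + 2) = 3, which is within HEAVY_TASK_SKIP_LIMIT, and max(first, 3) = 3.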
+0 −2
@@ -1066,7 +1066,6 @@ static inline void sched_ttwu_pending(void) { }
 #define FREQ_REPORT_CPU_LOAD			1
 #define FREQ_REPORT_TOP_TASK			2
 
-#define MAJOR_TASK_PCT 85
 #define SCHED_UPMIGRATE_MIN_NICE 15
 #define EXITING_TASK_MARKER	0xdeaddead
 
@@ -1093,7 +1092,6 @@ extern unsigned int sched_init_task_load_windows;
 extern unsigned int up_down_migrate_scale_factor;
 extern unsigned int sysctl_sched_restrict_cluster_spill;
 extern unsigned int sched_pred_alert_load;
-extern unsigned int sched_major_task_runtime;
 extern struct sched_cluster init_cluster;
 extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
 extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;