Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aa07bbe6 authored by Abhijeet Dharmapurikar
Browse files

sched: Improve the scheduler



This change adds a per-task `u8 unfilter` counter and a tunable,
`sysctl_sched_task_unfilter_nr_windows` (default 10). When a task's
scaled demand exceeds `sched_task_filter_util` in `update_history()`,
the counter is reloaded to the tunable's value; otherwise it decays by
one each window. `get_start_cpu()` then uses the non-zero counter
(together with RTG status and boost state) instead of an instantaneous
`task_util()` comparison to decide whether a task may skip the minimum
capacity cluster, and the `sched_task_util` tracepoint reports the
counter as `unfilter=%d`.

Change-Id: I18364c6061ed7525755aaf187bf15a8cb9b54a8a
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
parent 66e38d99
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -801,6 +801,7 @@ struct task_struct {
	struct list_head grp_list;
	u64 cpu_cycles;
	bool misfit;
	u8 unfilter;
#endif

#ifdef CONFIG_CGROUP_SCHED
+2 −0
Original line number Diff line number Diff line
@@ -42,6 +42,8 @@ extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;
extern unsigned int sysctl_sched_coloc_downmigrate_ns;
extern unsigned int sysctl_sched_task_unfilter_nr_windows;

extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
+5 −2
Original line number Diff line number Diff line
@@ -1234,6 +1234,7 @@ TRACE_EVENT(sched_task_util,
		__field(bool,		is_rtg)
		__field(bool,		rtg_skip_min)
		__field(int,		start_cpu)
		__field(int,		unfilter)
	),

	TP_fast_assign(
@@ -1252,14 +1253,16 @@ TRACE_EVENT(sched_task_util,
		__entry->is_rtg                 = is_rtg;
		__entry->rtg_skip_min		= rtg_skip_min;
		__entry->start_cpu		= start_cpu;
		__entry->unfilter		= p->unfilter;
	),

	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d",
	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d",
		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
		__entry->candidates, __entry->best_energy_cpu, __entry->sync,
		__entry->need_idle, __entry->fastpath, __entry->placement_boost,
		__entry->latency, __entry->stune_boosted,
		__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu)
		__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu,
		__entry->unfilter)
)

/*
+2 −2
Original line number Diff line number Diff line
@@ -6733,8 +6733,8 @@ static int get_start_cpu(struct task_struct *p)
	int start_cpu = rd->min_cap_orig_cpu;
	bool boosted = schedtune_task_boost(p) > 0 ||
			task_boost_policy(p) == SCHED_BOOST_ON_BIG;
	bool task_skip_min = get_rtg_status(p) &&
		(task_util(p) > sched_task_filter_util);
	bool task_skip_min = (sched_boost() != CONSERVATIVE_BOOST)
				&& get_rtg_status(p) && p->unfilter;

	/*
	 * note about min/mid/max_cap_orig_cpu - either all of them will be -ve
+9 −0
Original line number Diff line number Diff line
@@ -1727,6 +1727,8 @@ account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
	return 1;
}

unsigned int sysctl_sched_task_unfilter_nr_windows = 10;

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
@@ -1807,6 +1809,12 @@ static void update_history(struct rq *rq, struct task_struct *p,
	p->ravg.pred_demand = pred_demand;
	p->ravg.pred_demand_scaled = pred_demand_scaled;

	if (demand_scaled > sched_task_filter_util)
		p->unfilter = sysctl_sched_task_unfilter_nr_windows;
	else
		if (p->unfilter)
			p->unfilter = p->unfilter - 1;

done:
	trace_sched_update_history(rq, p, runtime, samples, event);
}
@@ -2087,6 +2095,7 @@ void init_new_task_load(struct task_struct *p)
	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
		p->ravg.sum_history[i] = init_load_windows;
	p->misfit = false;
	p->unfilter = sysctl_sched_task_unfilter_nr_windows;
}

/*
Loading