Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52030b82 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "Merge remote-tracking branch '318/dev/msm-3.18-sched' into msm318"

parents d1d953cf 8aac8dcc
Loading
Loading
Loading
Loading
+18 −1
Original line number Diff line number Diff line
@@ -1293,7 +1293,7 @@ categorized as small wakee tasks. Scheduler places small wakee tasks on the
waker's cluster.


*** 7.26 sched_big_waker_task_load
*** 7.27 sched_big_waker_task_load

Appears at: /proc/sys/kernel/sched_big_waker_task_load

@@ -1303,6 +1303,23 @@ This tunable is a percentage. Configure the minimum demand of big sync waker
task.  Scheduler places small wakee tasks woken up by big sync waker on the
waker's cluster.

*** 7.28 sched_prefer_sync_wakee_to_waker

Appears at: /proc/sys/kernel/sched_prefer_sync_wakee_to_waker

Default value: 0

The default sync wakee policy prefers an idle CPU in the waker's cluster over
the waker CPU running only 1 task. By selecting an idle CPU, it eliminates
the chance of the waker migrating to a different CPU after the wakee preempts
it. This policy is also not susceptible to incorrect "sync" usage, i.e. the
waker does not go to sleep after waking up the wakee.

However, the LPM exit latency associated with an idle CPU outweighs the above
benefits on some targets. When this knob is turned on, the waker CPU is
selected if it has only 1 runnable task.

=========================
8. HMP SCHEDULER TRACE POINTS
=========================
+1 −0
Original line number Diff line number Diff line
@@ -1324,6 +1324,7 @@ struct task_struct {
	u32 init_load_pct;
	u64 last_wake_ts;
	u64 last_switch_out_ts;
	u64 last_cpu_selected_ts;
#ifdef CONFIG_SCHED_QHMP
	u64 run_start;
#endif
+1 −0
Original line number Diff line number Diff line
@@ -69,6 +69,7 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;

#ifdef CONFIG_SCHED_QHMP
extern unsigned int sysctl_sched_min_runtime;
+1 −0
Original line number Diff line number Diff line
@@ -2822,6 +2822,7 @@ static inline void mark_task_starting(struct task_struct *p)

	wallclock = sched_ktime_clock();
	p->ravg.mark_start = p->last_wake_ts = wallclock;
	p->last_cpu_selected_ts = wallclock;
	p->last_switch_out_ts = 0;
}

+31 −6
Original line number Diff line number Diff line
@@ -2479,6 +2479,13 @@ unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
unsigned int __read_mostly sched_big_waker_task_load;
unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;

/*
 * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable
 * task. This eliminates the LPM exit latency associated with the idle
 * CPUs in the waker cluster.
 */
unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;

/*
 * CPUs with load greater than the sched_spill_load_threshold are not
 * eligible for task placement. When all CPUs in a cluster achieve a
@@ -2548,6 +2555,9 @@ static unsigned int __read_mostly
sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;

static unsigned int __read_mostly
sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;

unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;

void update_up_down_migrate(void)
@@ -3239,6 +3249,7 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
	struct sched_cluster *cluster;

	if (env->boost || env->reason || env->need_idle ||
				!task->ravg.mark_start ||
				!sched_short_sleep_task_threshold)
		return false;

@@ -3247,6 +3258,10 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
					unlikely(!cpu_active(prev_cpu)))
		return false;

	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
				sched_long_cpu_selection_threshold)
		return false;

	/*
	 * This function should be used by task wake up path only as it's
	 * assuming p->last_switch_out_ts as last sleep time.
@@ -3307,6 +3322,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
	struct cluster_cpu_stats stats;
	bool fast_path = false;
	struct related_thread_group *grp;
	int cpu = raw_smp_processor_id();

	struct cpu_select_env env = {
		.p			= p,
@@ -3336,12 +3352,20 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
		else
			env.rtg = grp;
	} else {
		cluster = cpu_rq(smp_processor_id())->cluster;
		if (wake_to_waker_cluster(&env) &&
		    cluster_allowed(p, cluster)) {
		cluster = cpu_rq(cpu)->cluster;
		if (wake_to_waker_cluster(&env)) {
			if (sysctl_sched_prefer_sync_wakee_to_waker &&
				cpu_rq(cpu)->nr_running == 1 &&
				cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
				cpu_active(cpu)) {
				fast_path = true;
				target = cpu;
				goto out;
			} else if (cluster_allowed(p, cluster)) {
				env.need_waker_cluster = 1;
				bitmap_zero(env.candidate_list, NR_CPUS);
				__set_bit(cluster->id, env.candidate_list);
			}
		} else if (bias_to_prev_cpu(&env, &stats)) {
			fast_path = true;
			goto out;
@@ -3384,6 +3408,7 @@ retry:
		if (stats.best_capacity_cpu >= 0)
			target = stats.best_capacity_cpu;
	}
	p->last_cpu_selected_ts = sched_ktime_clock();

out:
	rcu_read_unlock();
Loading