Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 95019eec authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa
Browse files

Merge remote-tracking branch '318/dev/msm-3.18-sched' into msm318



* 318/dev/msm-3.18-sched:
  sched: qhmp: add a knob to restrict tasks spreading
  sched: qhmp: remove the dead code in qhmp_rt.c

Change-Id: Ia7dd2e7620e0441a677795a4633e363423851162
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parents e551e7a6 b20a943e
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -1485,6 +1485,22 @@ overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
how overcommitment threshold is defined and also notes on
'sched_upmigrate_min_nice' tunable.

*** 7.26 sched_restrict_tasks_spread

Default value: 0

Appears at /proc/sys/kernel/sched_restrict_tasks_spread

When this knob is enabled, task spreading is restricted by applying the
following two policies:

- Small tasks are packed onto CPUs up to the spill thresholds; when the
knob is disabled they are packed only up to the mostly_idle thresholds.

- The CPU selection algorithm for RT tasks normally looks for the least loaded
CPU in the lower power cluster. With this knob enabled, the search is
restricted to the first available lower power CPU. When sched_boost is ON,
this restriction is not applied.

=========================
8. HMP SCHEDULER TRACE POINTS
=========================
+1 −0
Original line number Diff line number Diff line
@@ -74,6 +74,7 @@ extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
#ifdef CONFIG_SCHED_QHMP
extern unsigned int sysctl_sched_min_runtime;
extern unsigned int sysctl_sched_small_task_pct;
extern unsigned int sysctl_sched_restrict_tasks_spread;
#else
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_enable_colocation;
+46 −17
Original line number Diff line number Diff line
@@ -2559,6 +2559,15 @@ unsigned int up_down_migrate_scale_factor = 1024;
 */
unsigned int sysctl_sched_boost;

/*
 * When sched_restrict_tasks_spread is enabled, small tasks are packed
 * up to spill thresholds, which otherwise are packed up to mostly_idle
 * thresholds. RT tasks are also placed on the first available lowest
 * power CPU, which are otherwise placed on the least loaded CPU,
 * including idle CPUs.
 */
unsigned int __read_mostly sysctl_sched_restrict_tasks_spread;

static inline int available_cpu_capacity(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
@@ -3085,13 +3094,30 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
		}

		cpu_load = cpu_load_sync(i, sync);

		if (sysctl_sched_restrict_tasks_spread) {
			tload = scale_load_to_cpu(task_load(p), i);
			if (!spill_threshold_crossed(tload, cpu_load, rq)) {
				if (cpu_load < min_load) {
					min_load = cpu_load;
					best_busy_cpu = i;
				}
			}
			continue;
		}

		if (mostly_idle_cpu_sync(i, cpu_load, sync))
			return i;

	} while ((i = cpumask_first(&search_cpu)) < nr_cpu_ids);

	if (best_busy_cpu != -1)
		return best_busy_cpu;

	if (min_cstate_cpu != -1)
		return min_cstate_cpu;

	if (!sysctl_sched_restrict_tasks_spread) {
		cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
		cpumask_andnot(&search_cpu, &search_cpu, &fb_search_cpu);
		for_each_cpu(i, &search_cpu) {
@@ -3105,7 +3131,8 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
			cpu_load = cpu_load_sync(i, sync);
			if (!spill_threshold_crossed(tload, cpu_load, rq)) {
				if (cpu_load < min_load ||
			    (prev_cpu && cpu_load == min_load)) {
						(prev_cpu &&
						 cpu_load == min_load)) {
					min_load = cpu_load;
					best_busy_cpu = i;
				}
@@ -3115,6 +3142,8 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
		if (best_busy_cpu != -1)
			return best_busy_cpu;

	}

	for_each_cpu(i, &fb_search_cpu) {
		rq = cpu_rq(i);
		prev_cpu = (i == task_cpu(p));
+14 −18
Original line number Diff line number Diff line
@@ -1178,23 +1178,12 @@ dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

#ifdef CONFIG_SCHED_QHMP
/*
 * Adjust this rq's cumulative runnable average for an RT task whose
 * load estimate changed. QHMP build: the helper is handed the absolute
 * new task load.
 */
static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
{
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
}
#else
/*
 * Non-QHMP build: the helper is handed a signed delta instead, so
 * convert the absolute new load to (new - current) before applying.
 * NOTE(review): the two builds pass different quantities to the same
 * helper — presumably fixup_cumulative_runnable_avg() differs per
 * config; confirm against its QHMP/non-QHMP definitions.
 */
static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
}
#endif

#else	/* CONFIG_SCHED_HMP */

@@ -1653,6 +1642,8 @@ static int find_lowest_rq_hmp(struct task_struct *task)
	int best_cpu = -1;
	int prev_cpu = task_cpu(task);
	int i;
	int restrict_tasks_spread = sched_boost() ? 0 :
			sysctl_sched_restrict_tasks_spread;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
@@ -1679,18 +1670,12 @@ static int find_lowest_rq_hmp(struct task_struct *task)
		cpu_load = scale_load_to_cpu(
			cpu_rq(i)->hmp_stats.cumulative_runnable_avg, i);

#ifdef CONFIG_SCHED_QHMP
		cpu_cost = power_cost(cpu_load, i);
		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i), mostly_idle_cpu(i),
				     sched_irqload(i), cpu_cost, cpu_temp(i));

		if (sched_boost() && capacity(cpu_rq(i)) != max_capacity)
			continue;
#else
		cpu_cost = power_cost(i, cpu_cravg_sync(i, 0));
		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
			sched_irqload(i), cpu_cost, cpu_temp(i));
#endif

		if (power_delta_exceeded(cpu_cost, min_cost)) {
			if (cpu_cost > min_cost)
@@ -1704,7 +1689,18 @@ static int find_lowest_rq_hmp(struct task_struct *task)
		if (sched_cpu_high_irqload(i))
			continue;

		if (cpu_load < min_load ||
		if (restrict_tasks_spread) {
			if (best_cpu == -1) {
				best_cpu = i;
				continue;

			}

			if (cpu_cost < min_cost) {
				min_cost = cpu_cost;
				best_cpu = i;
			}
		} else if (cpu_load < min_load ||
		    (cpu_load == min_load &&
		     (i == prev_cpu || (best_cpu != prev_cpu &&
					cpus_share_cache(prev_cpu, i))))) {
+2 −0
Original line number Diff line number Diff line
@@ -915,6 +915,8 @@ extern unsigned int sched_init_task_load_pelt;
extern unsigned int sched_init_task_load_windows;
extern unsigned int sched_heavy_task;
extern unsigned int up_down_migrate_scale_factor;
extern unsigned int sysctl_sched_restrict_tasks_spread;

extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern void fixup_nr_big_small_task(int cpu, int reset_stats);
extern unsigned int max_task_load(void);
Loading