Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fdbc39e6 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Improve the scheduler"

parents 8be8c0c3 6228645c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;

extern int
walt_proc_update_handler(struct ctl_table *table, int write,
+4 −2
Original line number Diff line number Diff line
@@ -9883,9 +9883,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
			/* TODO:don't assume same cap cpus are in same domain */
			capacity_local = capacity_orig_of(cpu_local);
			capacity_busiest = capacity_orig_of(cpu_busiest);
			if (capacity_local > capacity_busiest) {
			if ((sds.busiest->group_weight > 1) &&
				capacity_local > capacity_busiest) {
				goto out_balanced;
			} else if (capacity_local == capacity_busiest) {
			} else if (capacity_local == capacity_busiest ||
				   asym_cap_siblings(cpu_local, cpu_busiest)) {
				if (cpu_rq(cpu_busiest)->nr_running < 2)
					goto out_balanced;
			}
+41 −1
Original line number Diff line number Diff line
@@ -2161,7 +2161,7 @@ u64 freq_policy_load(struct rq *rq);
extern u64 walt_load_reported_window;

static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	u64 util, util_unboosted;
	struct rq *rq = cpu_rq(cpu);
@@ -2197,6 +2197,41 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
	return (util >= capacity) ? capacity : util;
}

/*
 * Lift a CPU's own utilization (@orig) to at least @x percent of its
 * asym-capacity sibling's utilization (@other), so that siblings sharing
 * a frequency domain request comparable frequencies.  Relies on the
 * kernel's max() macro, which evaluates each argument only once.
 */
#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x)	\
			(max(orig, mult_frac(other, x, 100)))

static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	struct sched_walt_cpu_load wl_other = {0};
	unsigned long util = 0, util_other = 0;
	unsigned long capacity = capacity_orig_of(cpu);
	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;

	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
		return __cpu_util_freq_walt(cpu, walt_load);

	for_each_cpu(i, &asym_cap_sibling_cpus) {
		if (i == cpu)
			util = __cpu_util_freq_walt(cpu, walt_load);
		else
			util_other = __cpu_util_freq_walt(i, &wl_other);
	}

	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
		mpct = 100;

	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
	walt_load->prev_window_util = util;

	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
						   mpct);
	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
						   mpct);

	return (util >= capacity) ? capacity : util;
}

static inline unsigned long
cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
{
@@ -2931,6 +2966,9 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
	if (src_cpu == dst_cpu)
		return 1;

	if (asym_cap_siblings(src_cpu, dst_cpu))
		return 1;

	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

@@ -3141,6 +3179,8 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
	return NULL;
}

/* Stub for builds without WALT support: no CPUs are asym-capacity siblings. */
static inline int asym_cap_siblings(int cpu1, int cpu2)
{
	return 0;
}

static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
	return load;
+11 −1
Original line number Diff line number Diff line
@@ -114,6 +114,7 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
unsigned int sysctl_sched_walt_rotate_big_tasks;
unsigned int walt_rotation_enabled;

__read_mostly unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct = 100;
__read_mostly unsigned int sched_ravg_hist_size = 5;

static __read_mostly unsigned int sched_io_is_busy = 1;
@@ -3160,7 +3161,7 @@ void walt_irq_work(struct irq_work *irq_work)
	struct rq *rq;
	int cpu;
	u64 wc;
	bool is_migration = false;
	bool is_migration = false, is_asym_migration = false;
	u64 total_grp_load = 0, min_cluster_grp_load = 0;
	int level = 0;

@@ -3191,6 +3192,11 @@ void walt_irq_work(struct irq_work *irq_work)
				account_load_subtractions(rq);
				aggr_grp_load += rq->grp_time.prev_runnable_sum;
			}
			if (is_migration && rq->notif_pending &&
			    cpumask_test_cpu(cpu, &asym_cap_sibling_cpus)) {
				is_asym_migration = true;
				rq->notif_pending = false;
			}
		}

		cluster->aggr_grp_load = aggr_grp_load;
@@ -3232,6 +3238,10 @@ void walt_irq_work(struct irq_work *irq_work)
				}
			}

			if (is_asym_migration && cpumask_test_cpu(cpu,
							&asym_cap_sibling_cpus))
				flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;

			if (i == num_cpus)
				cpufreq_update_util(cpu_rq(cpu), flag);
			else
+10 −0
Original line number Diff line number Diff line
@@ -414,6 +414,16 @@ static struct ctl_table kern_table[] = {
		.extra1		= &zero,
		.extra2		= &two_million,
	},

	{
		.procname       = "sched_asym_cap_sibling_freq_match_pct",
		.data           = &sysctl_sched_asym_cap_sibling_freq_match_pct,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = &one,
		.extra2         = &one_hundred,
	},
#endif
#ifdef CONFIG_SMP
	{