Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7a124aaf authored by qctecmdr; committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: Improve the scheduler"

parents c984db76 da935c6b
Loading
Loading
Loading
Loading
+1 −0
Original line number | Diff line number | Diff line
@@ -63,6 +63,7 @@ extern unsigned int sysctl_walt_rtg_cfs_boost_prio;
extern unsigned int sysctl_walt_low_latency_task_threshold;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_walt_cpu_high_irqload;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_en;

extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+53 −15
Original line number | Diff line number | Diff line
@@ -119,6 +119,9 @@ unsigned int sysctl_sched_walt_rotate_big_tasks;
unsigned int walt_rotation_enabled;

__read_mostly unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct = 100;
__read_mostly unsigned int sysctl_sched_asym_cap_sibling_freq_match_en;
static cpumask_t asym_freq_match_cpus = CPU_MASK_NONE;

__read_mostly unsigned int sched_ravg_hist_size = 5;

static __read_mostly unsigned int sched_io_is_busy = 1;
@@ -645,23 +648,31 @@ __cpu_util_freq_walt(int cpu, struct walt_cpu_load *walt_load)
unsigned long
cpu_util_freq_walt(int cpu, struct walt_cpu_load *walt_load)
{
	struct walt_cpu_load wl_other = {0};
	unsigned long util = 0, util_other = 0;
	static unsigned long util_other;
	static struct walt_cpu_load wl_other;
	unsigned long util = 0;
	unsigned long capacity = capacity_orig_of(cpu);
	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;
	int mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;
	int max_cap_cpu;

	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus) &&
		!(sysctl_sched_asym_cap_sibling_freq_match_en &&
		cpumask_test_cpu(cpu, &asym_freq_match_cpus)))
		return __cpu_util_freq_walt(cpu, walt_load);

	for_each_cpu(i, &asym_cap_sibling_cpus) {
		if (i == cpu)
	/* FIXME: Prime always last cpu */
	max_cap_cpu = cpumask_last(&asym_freq_match_cpus);
	util = __cpu_util_freq_walt(cpu, walt_load);
		else
			util_other = __cpu_util_freq_walt(i, &wl_other);
	}

	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
	if (cpu != max_cap_cpu) {
		if (cpumask_first(&asym_freq_match_cpus) == cpu)
			util_other =
				__cpu_util_freq_walt(max_cap_cpu, &wl_other);
		else
			goto out;
	} else {
		mpct = 100;
	}

	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);

@@ -669,6 +680,18 @@ cpu_util_freq_walt(int cpu, struct walt_cpu_load *walt_load)
						   mpct);
	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
						   mpct);
out:
	if (cpu != max_cap_cpu) {
		if (util > util_other) {
			util_other = util;
			wl_other.nl = walt_load->nl;
		}
		if (wl_other.pl < walt_load->pl)
			wl_other.pl = walt_load->pl;
	} else {
		util_other = 0;
		memset(&wl_other, 0, sizeof(wl_other));
	}

	return (util >= capacity) ? capacity : util;
}
@@ -2616,6 +2639,14 @@ void walt_update_cluster_topology(void)
				   &asym_cap_sibling_cpus, &cluster->cpus);
	}

	if (num_sched_clusters > 2) {
		for_each_sched_cluster(cluster) {
			if (!is_min_capacity_cluster(cluster))
				cpumask_or(&asym_freq_match_cpus,
					&asym_freq_match_cpus, &cluster->cpus);
		}
	}

	if (cpumask_weight(&asym_cap_sibling_cpus) == 1)
		cpumask_clear(&asym_cap_sibling_cpus);

@@ -3426,6 +3457,13 @@ void walt_irq_work(struct irq_work *irq_work)
	int level = 0;
	u64 cur_jiffies_ts;
	unsigned long flags;
	struct cpumask freq_match_cpus;

	if (sysctl_sched_asym_cap_sibling_freq_match_en &&
		!cpumask_empty(&asym_freq_match_cpus))
		cpumask_copy(&freq_match_cpus, &asym_freq_match_cpus);
	else
		cpumask_copy(&freq_match_cpus, &asym_cap_sibling_cpus);

	/* Am I the window rollover work or the migration work? */
	if (irq_work == &walt_migration_irq_work)
@@ -3457,7 +3495,7 @@ void walt_irq_work(struct irq_work *irq_work)
					rq->wrq.grp_time.prev_runnable_sum;
			}
			if (is_migration && rq->wrq.notif_pending &&
			    cpumask_test_cpu(cpu, &asym_cap_sibling_cpus)) {
				cpumask_test_cpu(cpu, &freq_match_cpus)) {
				is_asym_migration = true;
				rq->wrq.notif_pending = false;
			}
@@ -3472,11 +3510,11 @@ void walt_irq_work(struct irq_work *irq_work)
	}

	if (total_grp_load) {
		if (cpumask_weight(&asym_cap_sibling_cpus)) {
		if (cpumask_weight(&freq_match_cpus)) {
			u64 big_grp_load =
					  total_grp_load - min_cluster_grp_load;

			for_each_cpu(cpu, &asym_cap_sibling_cpus)
			for_each_cpu(cpu, &freq_match_cpus)
				cpu_cluster(cpu)->aggr_grp_load = big_grp_load;
		}
		rtgb_active = is_rtgb_active();
@@ -3508,7 +3546,7 @@ void walt_irq_work(struct irq_work *irq_work)
			}

			if (is_asym_migration && cpumask_test_cpu(cpu,
							&asym_cap_sibling_cpus))
							&freq_match_cpus))
				flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;

			if (i == num_cpus)
+9 −0
Original line number | Diff line number | Diff line
@@ -461,6 +461,15 @@ static struct ctl_table kern_table[] = {
		.extra1		= SYSCTL_ONE,
		.extra2		= &one_hundred,
	},
	{
		.procname	= "sched_asym_cap_sibling_freq_match_enable",
		.data		= &sysctl_sched_asym_cap_sibling_freq_match_en,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "sched_coloc_downmigrate_ns",
		.data		= &sysctl_sched_coloc_downmigrate_ns,