
Commit 0bf0f948 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "sched: introduce per CPU sched_load_boost knob"

parents 56c06772 858d5751
+55 −0
@@ -208,6 +208,59 @@ static struct attribute_group cpu_isolated_attr_group = {

#endif

+static ssize_t show_sched_load_boost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t rc;
+	unsigned int boost;
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int cpuid = cpu->dev.id;
+
+	boost = per_cpu(sched_load_boost, cpuid);
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", boost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_load_boost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int err;
+	int boost;
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int cpuid = cpu->dev.id;
+
+	err = kstrtoint(strstrip((char *)buf), 0, &boost);
+	if (err)
+		return err;
+
+	/*
+	 * -100 is low enough to cancel out the CPU's load and make it near zero.
+	 * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt}
+	 * can take without overflow.
+	 */
+	if (boost < -100 || boost > 1000)
+		return -EINVAL;
+
+	per_cpu(sched_load_boost, cpuid) = boost;
+
+	return count;
+}
+
+static DEVICE_ATTR(sched_load_boost, 0644,
+		   show_sched_load_boost,
+		   store_sched_load_boost);
+
+static struct attribute *sched_cpu_attrs[] = {
+	&dev_attr_sched_load_boost.attr,
+	NULL
+};
+
+static struct attribute_group sched_cpu_attr_group = {
+	.attrs = sched_cpu_attrs,
+};
+
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
@@ -215,6 +268,7 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
+	&sched_cpu_attr_group,
	NULL
};

@@ -225,6 +279,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
+	&sched_cpu_attr_group,
	NULL
};
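The attribute group added above is attached to each CPU device, so the knob should surface as /sys/devices/system/cpu/cpu<N>/sched_load_boost (the path is an assumption based on the standard CPU sysfs layout; it is not spelled out in the diff). A minimal userspace sketch of writing the knob, with the cpu0 path hard-coded:

/*
 * Hedged usage sketch, not part of the commit: write a boost value for cpu0.
 * The sysfs path is assumed from how cpu device attributes are normally
 * exposed; store_sched_load_boost() accepts values in the range -100..1000.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/sched_load_boost", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 10);	/* request a boost of 10 (valid range -100..1000) */
	return fclose(f) ? 1 : 0;
}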

+2 −0
@@ -3964,4 +3964,6 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */

+extern DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
+
#endif
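The extern declaration above makes the per-CPU value visible to the rest of the scheduler. Its consumer is not part of this diff; going by the comment in store_sched_load_boost() (a boost of -100 cancels the load, 1000 sits near the overflow limit of cpu_util_freq_{walt,pelt}), the value is presumably applied as a percentage. A hedged illustration only, not the actual cpu_util_freq code:

/*
 * Illustration only -- not part of this commit. Shows how a percentage-style
 * per-CPU boost could be folded into a utilization value: -100 zeroes the
 * contribution, 1000 scales it by 11x.
 */
static inline unsigned long apply_load_boost(unsigned long util, int cpu)
{
	int boost = per_cpu(sched_load_boost, cpu);

	return util * (100 + boost) / 100;
}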
+3 −3
@@ -3232,9 +3232,9 @@ unsigned int capacity_margin_freq = 1280; /* ~20% margin */

static inline
unsigned long sum_capacity_reqs(unsigned long cfs_cap,
-				struct sched_capacity_reqs *scr)
+				struct sched_capacity_reqs *scr, int cpu)
{
-	unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
+	unsigned long total = add_capacity_margin(cfs_cap + scr->rt, cpu);
	return total += scr->dl;
}

@@ -3246,7 +3246,7 @@ static void sched_freq_tick_pelt(int cpu)
	struct sched_capacity_reqs *scr;

	scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
-	if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
+	if (sum_capacity_reqs(cpu_utilization, scr, cpu) < capacity_curr)
		return;

	/*
+2 −1
@@ -89,6 +89,7 @@ unsigned int sysctl_sched_is_big_little = 1;
unsigned int sysctl_sched_sync_hint_enable = 1;
unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;
+DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);

#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_use_walt_cpu_util = 1;
@@ -7043,7 +7044,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
			cpu_idle_idx = idle_get_state_idx(cpu_rq(i));

			if (!need_idle &&
-			    add_capacity_margin(new_util_cum) <
+			    add_capacity_margin(new_util_cum, i) <
			    capacity_curr_of(i)) {
				if (sysctl_sched_cstate_aware) {
					if (cpu_idle_idx < min_idle_idx) {
+1 −1
@@ -1811,7 +1811,7 @@ static int find_lowest_rq(struct task_struct *task)
					cpu_idle_idx =
					     idle_get_state_idx(cpu_rq(cpu));

-				if (add_capacity_margin(new_util_cum) <
+				if (add_capacity_margin(new_util_cum, cpu) <
				    capacity_curr_of(cpu)) {
					if (cpu_idle_idx < best_cpu_idle_idx ||
					    (best_cpu != task_cpu(task) &&
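The remaining hunks thread the target CPU through sum_capacity_reqs() and into add_capacity_margin() at every call site; the body of add_capacity_margin() itself is outside this diff. A sketch of what the per-CPU-aware helper might look like, assuming it stacks sched_load_boost on top of the existing capacity_margin_freq scaling (the exact arithmetic below is an assumption, not taken from the commit):

/*
 * Sketch only -- add_capacity_margin()'s real body is not shown in this
 * commit. Assumed shape: scale the capacity request by the frequency margin
 * and by the per-CPU boost percentage before comparing against capacity.
 */
static inline unsigned long add_capacity_margin(unsigned long cpu_capacity,
						int cpu)
{
	cpu_capacity = cpu_capacity * capacity_margin_freq *
		       (100 + per_cpu(sched_load_boost, cpu));
	cpu_capacity /= 100;
	cpu_capacity /= SCHED_CAPACITY_SCALE;
	return cpu_capacity;
}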