Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bada57af authored by Olav Haugan
Browse files

sched: Add tunables for static cpu and cluster cost



Add per-cpu tunable to set the extra cost to use a CPU that is idle.
Add the same for a cluster.

Change-Id: I4aa53f3c42c963df7abc7480980f747f0413d389
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
parent 32e5f45f
Loading
Loading
Loading
Loading
+104 −5
Original line number Diff line number Diff line
@@ -179,6 +179,99 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_SCHED_HMP

/*
 * show_sched_static_cpu_pwr_cost - sysfs show() callback for the per-CPU
 * static power cost tunable.
 *
 * Formats the value returned by sched_get_static_cpu_pwr_cost() for the
 * CPU backing @dev into @buf.  Returns the number of characters written
 * (snprintf()'s return value), as sysfs expects.
 */
static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);

	/* pwr_cost is unsigned: print with %u, not %d, so large values
	 * are not misreported as negative. */
	rc = snprintf(buf, PAGE_SIZE-2, "%u\n", pwr_cost);

	return rc;
}

/*
 * store_sched_static_cpu_pwr_cost - sysfs store() callback for the
 * per-CPU static power cost tunable.
 *
 * Parses an unsigned integer (any base accepted by kstrtouint() with
 * base 0) from the user-supplied @buf, after stripping surrounding
 * whitespace, and applies it to this CPU.  Returns @count on success
 * or a negative errno from parsing/setting on failure.
 */
static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;
	int ret;

	/* sysfs input buffers are writable; the const cast lets
	 * strstrip() trim in place. */
	ret = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
	if (ret)
		return ret;

	ret = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
	if (ret < 0)
		return ret;

	return count;
}

/*
 * show_sched_static_cluster_pwr_cost - sysfs show() callback for the
 * static cluster power cost tunable.
 *
 * Formats the value returned by sched_get_static_cluster_pwr_cost()
 * for the CPU backing @dev into @buf.  Returns the number of
 * characters written, as sysfs expects.
 */
static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);

	/* pwr_cost is unsigned: print with %u, not %d, so large values
	 * are not misreported as negative. */
	rc = snprintf(buf, PAGE_SIZE-2, "%u\n", pwr_cost);

	return rc;
}

/*
 * store_sched_static_cluster_pwr_cost - sysfs store() callback for the
 * static cluster power cost tunable.
 *
 * Parses an unsigned integer from the whitespace-stripped @buf and
 * applies it via sched_set_static_cluster_pwr_cost().  Returns @count
 * on success, or a negative errno on parse/set failure.
 */
static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;
	int ret;

	/* sysfs input buffers are writable; the const cast lets
	 * strstrip() trim in place. */
	ret = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
	if (ret)
		return ret;

	ret = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
	if (ret < 0)
		return ret;

	return count;
}

/*
 * sysfs attributes for the HMP static power cost tunables.
 * Mode 0644: writable by root, readable by everyone.
 */
static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
					show_sched_static_cpu_pwr_cost,
					store_sched_static_cpu_pwr_cost);
static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
					show_sched_static_cluster_pwr_cost,
					store_sched_static_cluster_pwr_cost);

/* NULL-terminated list of the HMP per-CPU attributes declared above. */
static struct attribute *hmp_sched_cpu_attrs[] = {
	&dev_attr_sched_static_cpu_pwr_cost.attr,
	&dev_attr_sched_static_cluster_pwr_cost.attr,
	NULL
};

/* Attribute group registered for each CPU device (see the
 * *_cpu_attr_groups arrays later in this file). */
static struct attribute_group sched_hmp_cpu_attr_group = {
	.attrs = hmp_sched_cpu_attrs,
};

#endif /* CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_QHMP
static ssize_t show_sched_mostly_idle_load(struct device *dev,
		 struct device_attribute *attr, char *buf)
@@ -333,7 +426,7 @@ static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664,
static DEVICE_ATTR(sched_prefer_idle, 0664,
		show_sched_prefer_idle, store_sched_prefer_idle);

static struct attribute *hmp_sched_cpu_attrs[] = {
static struct attribute *qhmp_sched_cpu_attrs[] = {
	&dev_attr_sched_mostly_idle_load.attr,
	&dev_attr_sched_mostly_idle_nr_run.attr,
	&dev_attr_sched_mostly_idle_freq.attr,
@@ -341,8 +434,8 @@ static struct attribute *hmp_sched_cpu_attrs[] = {
	NULL
};

static struct attribute_group sched_hmp_cpu_attr_group = {
	.attrs = hmp_sched_cpu_attrs,
static struct attribute_group sched_qhmp_cpu_attr_group = {
	.attrs = qhmp_sched_cpu_attrs,
};

#endif	/* CONFIG_SCHED_QHMP */
@@ -350,8 +443,11 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_QHMP
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_QHMP
	&sched_qhmp_cpu_attr_group,
#endif
	NULL
};
@@ -360,8 +456,11 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_QHMP
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_QHMP
	&sched_qhmp_cpu_attr_group,
#endif
	NULL
};
+4 −0
Original line number Diff line number Diff line
@@ -2195,6 +2195,10 @@ extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
#ifdef CONFIG_SCHED_QHMP
extern int sched_set_cpu_prefer_idle(int cpu, int prefer_idle);
extern int sched_get_cpu_prefer_idle(int cpu);
+4 −2
Original line number Diff line number Diff line
@@ -177,6 +177,7 @@ TRACE_EVENT(sched_cpu_load,
		__field(unsigned int, max_freq			)
		__field(unsigned int, power_cost		)
		__field(	 int, cstate			)
		__field(	 int, dstate			)
		__field(	 int, temp			)
	),

@@ -195,16 +196,17 @@ TRACE_EVENT(sched_cpu_load,
		__entry->max_freq		= rq->max_freq;
		__entry->power_cost		= power_cost;
		__entry->cstate			= rq->cstate;
		__entry->dstate			= rq->dstate;
		__entry->temp			= temp;
	),

	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u nr_small %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u nr_small %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d dstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
	__entry->nr_big_tasks, __entry->nr_small_tasks,
	__entry->load_scale_factor, __entry->capacity,
	__entry->cumulative_runnable_avg, __entry->irqload,
	__entry->cur_freq, __entry->max_freq,
	__entry->power_cost, __entry->cstate, __entry->temp)
	__entry->power_cost, __entry->cstate, __entry->dstate, __entry->temp)
);

TRACE_EVENT(sched_set_boost,
+5 −2
Original line number Diff line number Diff line
@@ -174,6 +174,7 @@ TRACE_EVENT(sched_cpu_load,
		__field(unsigned int, max_freq			)
		__field(unsigned int, power_cost		)
		__field(	 int, cstate			)
		__field(	 int, dstate			)
		__field(	 int, temp			)
	),

@@ -190,14 +191,16 @@ TRACE_EVENT(sched_cpu_load,
		__entry->max_freq		= rq->max_freq;
		__entry->power_cost		= power_cost;
		__entry->cstate			= rq->cstate;
		__entry->dstate			= rq->dstate;
		__entry->temp			= temp;
	),

	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d dstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
	__entry->load_scale_factor, __entry->capacity,
	__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
	__entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
	__entry->max_freq, __entry->power_cost, __entry->cstate,
	__entry->dstate, __entry->temp)
);

TRACE_EVENT(sched_set_boost,
+29 −0
Original line number Diff line number Diff line
@@ -1171,6 +1171,32 @@ static inline void clear_hmp_request(int cpu)
	}
}

int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
{
	struct rq *rq = cpu_rq(cpu);

	rq->static_cpu_pwr_cost = cost;
	return 0;
}

unsigned int sched_get_static_cpu_pwr_cost(int cpu)
{
	return cpu_rq(cpu)->static_cpu_pwr_cost;
}

int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
{
	struct rq *rq = cpu_rq(cpu);

	rq->static_cluster_pwr_cost = cost;
	return 0;
}

unsigned int sched_get_static_cluster_pwr_cost(int cpu)
{
	return cpu_rq(cpu)->static_cluster_pwr_cost;
}

#else

static inline int got_boost_kick(void)
@@ -9033,6 +9059,9 @@ void __init sched_init(void)
		rq->cur_irqload = 0;
		rq->avg_irqload = 0;
		rq->irqload_ts = 0;
		rq->static_cpu_pwr_cost = 0;
		rq->static_cluster_pwr_cost = 0;

#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->old_busy_time = 0;
Loading