Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 367e4c9c authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa
Browse files

sched: Update the wakeup placement logic for fair and rt tasks



For the fair sched class, update the select_best_cpu() policy to do
power-based placement. The hope is to minimize the voltage at which
the CPU runs.

While RT tasks already use power-based placement, their placement
preference now has to take into account the power cost of all tasks
on a given CPU. Also remove the check for sched_boost, since
sched_boost no longer intends to elevate all tasks to the highest
capacity cluster.

Change-Id: Ic6a7625c97d567254d93b94cec3174a91727cb87
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent 824efcd5
Loading
Loading
Loading
Loading
+0 −173
Original line number Diff line number Diff line
@@ -179,179 +179,9 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_SCHED_HMP
/*
 * sysfs "show" handler: report this CPU's mostly-idle load threshold
 * (a percentage) as a decimal string followed by a newline.
 */
static ssize_t show_sched_mostly_idle_load(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int pct = sched_get_cpu_mostly_idle_load(c->dev.id);

	/* PAGE_SIZE-2 leaves room for the trailing newline and NUL */
	return snprintf(buf, PAGE_SIZE-2, "%d\n", pct);
}

/*
 * sysfs "store" handler: parse a mostly-idle load percentage from
 * userspace and apply it to this CPU.  Returns the number of bytes
 * consumed on success or a negative errno from parsing/setting.
 */
static ssize_t __ref store_sched_mostly_idle_load(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int val;
	int rc;

	rc = kstrtoint(strstrip((char *)buf), 0, &val);
	if (rc)
		return rc;

	rc = sched_set_cpu_mostly_idle_load(c->dev.id, val);
	if (rc < 0)
		return rc;

	return count;
}

/*
 * sysfs "show" handler: report this CPU's mostly-idle frequency
 * threshold as a decimal string followed by a newline.
 *
 * Fix: mostly_idle_freq is unsigned int, so it must be printed with
 * "%u" rather than "%d" — a mismatched conversion specifier is
 * undefined behavior and would misprint values above INT_MAX.
 */
static ssize_t show_sched_mostly_idle_freq(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpunum;
	unsigned int mostly_idle_freq;

	cpunum = cpu->dev.id;

	mostly_idle_freq = sched_get_cpu_mostly_idle_freq(cpunum);

	/* PAGE_SIZE-2 leaves room for the trailing newline and NUL */
	rc = snprintf(buf, PAGE_SIZE-2, "%u\n", mostly_idle_freq);

	return rc;
}

/*
 * sysfs "store" handler: parse a mostly-idle frequency threshold from
 * userspace and apply it to this CPU.  Returns the number of bytes
 * consumed on success or a negative errno from parsing/setting.
 *
 * Fix: parse with kstrtouint — mostly_idle_freq is unsigned int, and
 * passing its address to kstrtoint (which expects int *) is a pointer
 * type mismatch; kstrtoint would also accept negative input that a
 * frequency cannot represent.
 */
static ssize_t __ref store_sched_mostly_idle_freq(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id, err;
	unsigned int mostly_idle_freq;

	err = kstrtouint(strstrip((char *)buf), 0, &mostly_idle_freq);
	if (err)
		return err;

	err = sched_set_cpu_mostly_idle_freq(cpuid, mostly_idle_freq);
	if (err >= 0)
		err = count;

	return err;
}

/*
 * sysfs "show" handler: report this CPU's mostly-idle nr_running
 * threshold as a decimal string followed by a newline.
 */
static ssize_t show_sched_mostly_idle_nr_run(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int nr_run = sched_get_cpu_mostly_idle_nr_run(c->dev.id);

	/* PAGE_SIZE-2 leaves room for the trailing newline and NUL */
	return snprintf(buf, PAGE_SIZE-2, "%d\n", nr_run);
}

/*
 * sysfs "store" handler: parse a mostly-idle nr_running threshold from
 * userspace and apply it to this CPU.  Returns the number of bytes
 * consumed on success or a negative errno from parsing/setting.
 */
static ssize_t __ref store_sched_mostly_idle_nr_run(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int val;
	int rc;

	rc = kstrtoint(strstrip((char *)buf), 0, &val);
	if (rc)
		return rc;

	rc = sched_set_cpu_mostly_idle_nr_run(c->dev.id, val);
	if (rc < 0)
		return rc;

	return count;
}

/*
 * sysfs "show" handler: report this CPU's prefer_idle flag as a
 * decimal string followed by a newline.
 */
static ssize_t show_sched_prefer_idle(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int prefer = sched_get_cpu_prefer_idle(c->dev.id);

	/* PAGE_SIZE-2 leaves room for the trailing newline and NUL */
	return snprintf(buf, PAGE_SIZE-2, "%d\n", prefer);
}

/*
 * sysfs "store" handler: parse a prefer_idle value from userspace and
 * apply it to this CPU.  Returns the number of bytes consumed on
 * success or a negative errno from parsing/setting.
 */
static ssize_t __ref store_sched_prefer_idle(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *c = container_of(dev, struct cpu, dev);
	int val;
	int rc;

	rc = kstrtoint(strstrip((char *)buf), 0, &val);
	if (rc)
		return rc;

	rc = sched_set_cpu_prefer_idle(c->dev.id, val);
	if (rc < 0)
		return rc;

	return count;
}

/*
 * Per-CPU HMP scheduler tunables, exposed as 0664 sysfs attributes.
 * DEVICE_ATTR(name, mode, show, store) generates dev_attr_<name>
 * bound to the show/store handlers defined above.
 */
static DEVICE_ATTR(sched_mostly_idle_freq, 0664, show_sched_mostly_idle_freq,
						store_sched_mostly_idle_freq);
static DEVICE_ATTR(sched_mostly_idle_load, 0664, show_sched_mostly_idle_load,
						store_sched_mostly_idle_load);
static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664,
		show_sched_mostly_idle_nr_run, store_sched_mostly_idle_nr_run);
static DEVICE_ATTR(sched_prefer_idle, 0664,
		show_sched_prefer_idle, store_sched_prefer_idle);

/* NULL-terminated list of the HMP attributes registered per CPU. */
static struct attribute *hmp_sched_cpu_attrs[] = {
	&dev_attr_sched_mostly_idle_load.attr,
	&dev_attr_sched_mostly_idle_nr_run.attr,
	&dev_attr_sched_mostly_idle_freq.attr,
	&dev_attr_sched_prefer_idle.attr,
	NULL
};

/* Group wrapper so the HMP attributes can be added/removed as a unit. */
static struct attribute_group sched_hmp_cpu_attr_group = {
	.attrs = hmp_sched_cpu_attrs,
};

#endif	/* CONFIG_SCHED_HMP */
/*
 * Attribute groups attached to every CPU device; membership is
 * config-dependent (crash notes for kexec, HMP scheduler tunables).
 * NULL-terminated as required by the sysfs group API.
 */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
	NULL
};
@@ -359,9 +189,6 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
/*
 * Attribute groups attached to hotpluggable CPU devices; currently
 * mirrors common_cpu_attr_groups.  NULL-terminated as required by the
 * sysfs group API.
 */
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
	NULL
};
+0 −9
Original line number Diff line number Diff line
@@ -2172,15 +2172,6 @@ extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_cpu_prefer_idle(int cpu, int prefer_idle);
extern int sched_get_cpu_prefer_idle(int cpu);
extern int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct);
extern int sched_get_cpu_mostly_idle_load(int cpu);
extern int sched_set_cpu_mostly_idle_nr_run(int cpu, int nr_run);
extern int sched_get_cpu_mostly_idle_nr_run(int cpu);
extern int
sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq);
extern unsigned int sched_get_cpu_mostly_idle_freq(int cpu);

#else
static inline int sched_set_boost(int enable)
+11 −15
Original line number Diff line number Diff line
@@ -116,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,
TRACE_EVENT(sched_task_load,

	TP_PROTO(struct task_struct *p, int boost, int reason,
		 int sync, int prefer_idle),
		 int sync, int need_idle),

	TP_ARGS(p, boost, reason, sync, prefer_idle),
	TP_ARGS(p, boost, reason, sync, need_idle),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -130,7 +130,7 @@ TRACE_EVENT(sched_task_load,
		__field(	int,	boost			)
		__field(	int,	reason			)
		__field(	int,	sync			)
		__field(	int,	prefer_idle		)
		__field(	int,	need_idle		)
	),

	TP_fast_assign(
@@ -143,27 +143,24 @@ TRACE_EVENT(sched_task_load,
		__entry->boost		= boost;
		__entry->reason		= reason;
		__entry->sync		= sync;
		__entry->prefer_idle	= prefer_idle;
		__entry->need_idle	= need_idle;
	),

	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d prefer_idle=%d",
	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d, need_idle=%d",
		__entry->pid, __entry->comm, __entry->sum,
		__entry->sum_scaled, __entry->period, __entry->demand,
		__entry->boost, __entry->reason, __entry->sync,
		__entry->prefer_idle)
		__entry->boost, __entry->reason, __entry->sync, __entry->need_idle)
);

TRACE_EVENT(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, int mostly_idle, u64 irqload,
		 unsigned int power_cost, int temp),
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),

	TP_ARGS(rq, idle, mostly_idle, irqload, power_cost, temp),
	TP_ARGS(rq, idle, irqload, power_cost, temp),

	TP_STRUCT__entry(
		__field(unsigned int, cpu			)
		__field(unsigned int, idle			)
		__field(unsigned int, mostly_idle		)
		__field(unsigned int, nr_running		)
		__field(unsigned int, nr_big_tasks		)
		__field(unsigned int, load_scale_factor		)
@@ -180,7 +177,6 @@ TRACE_EVENT(sched_cpu_load,
	TP_fast_assign(
		__entry->cpu			= rq->cpu;
		__entry->idle			= idle;
		__entry->mostly_idle		= mostly_idle;
		__entry->nr_running		= rq->nr_running;
		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
		__entry->load_scale_factor	= rq->load_scale_factor;
@@ -194,9 +190,9 @@ TRACE_EVENT(sched_cpu_load,
		__entry->temp			= temp;
	),

	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
	__entry->nr_big_tasks, __entry->load_scale_factor, __entry->capacity,
	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
	__entry->load_scale_factor, __entry->capacity,
	__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
	__entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
);
+1 −11
Original line number Diff line number Diff line
@@ -2068,7 +2068,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
	u64 start_ts = sched_clock();
	int reason = WINDOW_CHANGE;
	unsigned int old = 0, new = 0;
	unsigned int old_window_size = sched_ravg_window;

	disable_window_stats();

@@ -2091,13 +2090,8 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);

		if (window_start) {
			u32 mostly_idle_load = rq->mostly_idle_load;

		if (window_start)
			rq->window_start = window_start;
			rq->mostly_idle_load = div64_u64((u64)mostly_idle_load *
				 (u64)sched_ravg_window, (u64)old_window_size);
		}
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
#endif
@@ -8935,13 +8929,9 @@ void __init sched_init(void)
		rq->window_start = 0;
		rq->hmp_stats.nr_big_tasks = 0;
		rq->hmp_flags = 0;
		rq->mostly_idle_load = pct_to_real(20);
		rq->mostly_idle_nr_run = 3;
		rq->mostly_idle_freq = 0;
		rq->cur_irqload = 0;
		rq->avg_irqload = 0;
		rq->irqload_ts = 0;
		rq->prefer_idle = 1;
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->old_busy_time = 0;
+0 −2
Original line number Diff line number Diff line
@@ -316,8 +316,6 @@ do { \
	P(cpu_capacity);
#endif
#ifdef CONFIG_SCHED_HMP
	P(mostly_idle_load);
	P(mostly_idle_nr_run);
	P(load_scale_factor);
	P(capacity);
	P(max_possible_capacity);
Loading