Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0fefab9c authored by Linux Build Service Account; committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: add frequency zone awareness to the load balancer"

parents c1206fe4 c083fe17
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -21,7 +21,7 @@
	interrupt-parent = <&intc>;

	chosen {
		bootargs = "sched_enable_hmp=1";
		bootargs = "sched_enable_hmp=1 sched_enable_power_aware=1";
	};

	aliases {
+0 −173
Original line number Diff line number Diff line
@@ -179,179 +179,9 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_SCHED_HMP
/*
 * sysfs show handler: report this CPU's mostly-idle load threshold
 * (a percentage) as a decimal string in @buf.
 */
static ssize_t show_sched_mostly_idle_load(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int pct = sched_get_cpu_mostly_idle_load(cpu->dev.id);

	return snprintf(buf, PAGE_SIZE-2, "%d\n", pct);
}

/*
 * sysfs store handler: parse a decimal value from @buf and apply it as
 * this CPU's mostly-idle load threshold.  Returns @count on success or
 * a negative errno from parsing / the setter.
 */
static ssize_t __ref store_sched_mostly_idle_load(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int load, ret;

	/* sysfs store buffers are writable; strstrip drops whitespace */
	ret = kstrtoint(strstrip((char *)buf), 0, &load);
	if (ret)
		return ret;

	ret = sched_set_cpu_mostly_idle_load(cpu->dev.id, load);
	return (ret >= 0) ? count : ret;
}

/*
 * sysfs show handler: report this CPU's mostly-idle frequency threshold
 * as a decimal string in @buf.  Returns the number of bytes written.
 */
static ssize_t show_sched_mostly_idle_freq(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpunum;
	unsigned int mostly_idle_freq;

	cpunum = cpu->dev.id;

	mostly_idle_freq = sched_get_cpu_mostly_idle_freq(cpunum);

	/* %u: mostly_idle_freq is unsigned; %d would misprint values > INT_MAX */
	rc = snprintf(buf, PAGE_SIZE-2, "%u\n", mostly_idle_freq);

	return rc;
}

/*
 * sysfs store handler: parse a decimal value from @buf and apply it as
 * this CPU's mostly-idle frequency threshold.  Returns @count on success
 * or a negative errno.
 */
static ssize_t __ref store_sched_mostly_idle_freq(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id, err;
	unsigned int mostly_idle_freq;

	/*
	 * kstrtouint, not kstrtoint: mostly_idle_freq is unsigned int, and
	 * passing its address to kstrtoint(int *) was a pointer-type mismatch
	 * that also silently accepted negative input.
	 */
	err = kstrtouint(strstrip((char *)buf), 0, &mostly_idle_freq);
	if (err)
		return err;

	err = sched_set_cpu_mostly_idle_freq(cpuid, mostly_idle_freq);
	if (err >= 0)
		err = count;

	return err;
}

/*
 * sysfs show handler: report this CPU's mostly-idle nr_run threshold
 * as a decimal string in @buf.
 */
static ssize_t show_sched_mostly_idle_nr_run(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int nr_run = sched_get_cpu_mostly_idle_nr_run(cpu->dev.id);

	return snprintf(buf, PAGE_SIZE-2, "%d\n", nr_run);
}

/*
 * sysfs store handler: parse a decimal value from @buf and apply it as
 * this CPU's mostly-idle nr_run threshold.  Returns @count on success
 * or a negative errno.
 */
static ssize_t __ref store_sched_mostly_idle_nr_run(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int nr_run, ret;

	ret = kstrtoint(strstrip((char *)buf), 0, &nr_run);
	if (ret)
		return ret;

	ret = sched_set_cpu_mostly_idle_nr_run(cpu->dev.id, nr_run);
	return (ret >= 0) ? count : ret;
}

/*
 * sysfs show handler: report this CPU's prefer_idle flag as a decimal
 * string in @buf.
 */
static ssize_t show_sched_prefer_idle(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int prefer_idle = sched_get_cpu_prefer_idle(cpu->dev.id);

	return snprintf(buf, PAGE_SIZE-2, "%d\n", prefer_idle);
}

/*
 * sysfs store handler: parse a decimal value from @buf and apply it as
 * this CPU's prefer_idle flag.  Returns @count on success or a negative
 * errno.
 */
static ssize_t __ref store_sched_prefer_idle(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int prefer_idle, ret;

	ret = kstrtoint(strstrip((char *)buf), 0, &prefer_idle);
	if (ret)
		return ret;

	ret = sched_set_cpu_prefer_idle(cpu->dev.id, prefer_idle);
	return (ret >= 0) ? count : ret;
}

/*
 * Per-CPU HMP scheduler tunables exposed under /sys/devices/system/cpu/cpuN/.
 * Mode 0664: world-readable, owner/group-writable.
 */
static DEVICE_ATTR(sched_mostly_idle_freq, 0664, show_sched_mostly_idle_freq,
						store_sched_mostly_idle_freq);
static DEVICE_ATTR(sched_mostly_idle_load, 0664, show_sched_mostly_idle_load,
						store_sched_mostly_idle_load);
static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664,
		show_sched_mostly_idle_nr_run, store_sched_mostly_idle_nr_run);
static DEVICE_ATTR(sched_prefer_idle, 0664,
		show_sched_prefer_idle, store_sched_prefer_idle);

/* NULL-terminated list of the HMP attributes registered per CPU device. */
static struct attribute *hmp_sched_cpu_attrs[] = {
	&dev_attr_sched_mostly_idle_load.attr,
	&dev_attr_sched_mostly_idle_nr_run.attr,
	&dev_attr_sched_mostly_idle_freq.attr,
	&dev_attr_sched_prefer_idle.attr,
	NULL
};

/* Group wrapper so the HMP attrs can be added to cpu attr group lists below. */
static struct attribute_group sched_hmp_cpu_attr_group = {
	.attrs = hmp_sched_cpu_attrs,
};

#endif	/* CONFIG_SCHED_HMP */
/* Attribute groups attached to every CPU device (hotpluggable or not). */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
	NULL
};
@@ -359,9 +189,6 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
/* Attribute groups attached to hotpluggable CPU devices. */
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
	NULL
};
+0 −9
Original line number Diff line number Diff line
@@ -2172,15 +2172,6 @@ extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_cpu_prefer_idle(int cpu, int prefer_idle);
extern int sched_get_cpu_prefer_idle(int cpu);
extern int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct);
extern int sched_get_cpu_mostly_idle_load(int cpu);
extern int sched_set_cpu_mostly_idle_nr_run(int cpu, int nr_run);
extern int sched_get_cpu_mostly_idle_nr_run(int cpu);
extern int
sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq);
extern unsigned int sched_get_cpu_mostly_idle_freq(int cpu);

#else
static inline int sched_set_boost(int enable)
+2 −1
Original line number Diff line number Diff line
@@ -62,11 +62,12 @@ extern int sysctl_sched_freq_dec_notify;
#ifdef CONFIG_SCHED_HMP
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_small_task_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
extern unsigned int sysctl_sched_downmigrate_pct;
extern int sysctl_sched_upmigrate_min_nice;
extern unsigned int sysctl_sched_powerband_limit_pct;
extern unsigned int sysctl_sched_lowspill_freq;
extern unsigned int sysctl_sched_pack_freq;
extern unsigned int sysctl_sched_boost;

#else /* CONFIG_SCHED_HMP */
+15 −28
Original line number Diff line number Diff line
@@ -115,10 +115,10 @@ TRACE_EVENT(sched_enq_deq_task,

TRACE_EVENT(sched_task_load,

	TP_PROTO(struct task_struct *p, int small_task, int boost, int reason,
		 int sync, int prefer_idle),
	TP_PROTO(struct task_struct *p, int boost, int reason,
		 int sync, int need_idle),

	TP_ARGS(p, small_task, boost, reason, sync, prefer_idle),
	TP_ARGS(p, boost, reason, sync, need_idle),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -127,11 +127,10 @@ TRACE_EVENT(sched_task_load,
		__field(unsigned int,	sum_scaled		)
		__field(unsigned int,	period			)
		__field(unsigned int,	demand			)
		__field(	int,	small_task		)
		__field(	int,	boost			)
		__field(	int,	reason			)
		__field(	int,	sync			)
		__field(	int,	prefer_idle		)
		__field(	int,	need_idle		)
	),

	TP_fast_assign(
@@ -141,34 +140,29 @@ TRACE_EVENT(sched_task_load,
		__entry->sum_scaled	= p->se.avg.runnable_avg_sum_scaled;
		__entry->period		= p->se.avg.runnable_avg_period;
		__entry->demand		= p->ravg.demand;
		__entry->small_task	= small_task;
		__entry->boost		= boost;
		__entry->reason		= reason;
		__entry->sync		= sync;
		__entry->prefer_idle	= prefer_idle;
		__entry->need_idle	= need_idle;
	),

	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u small=%d boost=%d reason=%d sync=%d prefer_idle=%d",
	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d, need_idle=%d",
		__entry->pid, __entry->comm, __entry->sum,
		__entry->sum_scaled, __entry->period, __entry->demand,
		__entry->small_task, __entry->boost, __entry->reason,
		__entry->sync, __entry->prefer_idle)
		__entry->boost, __entry->reason, __entry->sync, __entry->need_idle)
);

TRACE_EVENT(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, int mostly_idle, u64 irqload,
		 unsigned int power_cost, int temp),
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),

	TP_ARGS(rq, idle, mostly_idle, irqload, power_cost, temp),
	TP_ARGS(rq, idle, irqload, power_cost, temp),

	TP_STRUCT__entry(
		__field(unsigned int, cpu			)
		__field(unsigned int, idle			)
		__field(unsigned int, mostly_idle		)
		__field(unsigned int, nr_running		)
		__field(unsigned int, nr_big_tasks		)
		__field(unsigned int, nr_small_tasks		)
		__field(unsigned int, load_scale_factor		)
		__field(unsigned int, capacity			)
		__field(	 u64, cumulative_runnable_avg	)
@@ -183,10 +177,8 @@ TRACE_EVENT(sched_cpu_load,
	TP_fast_assign(
		__entry->cpu			= rq->cpu;
		__entry->idle			= idle;
		__entry->mostly_idle		= mostly_idle;
		__entry->nr_running		= rq->nr_running;
		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
		__entry->nr_small_tasks		= rq->hmp_stats.nr_small_tasks;
		__entry->load_scale_factor	= rq->load_scale_factor;
		__entry->capacity		= rq->capacity;
		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
@@ -198,13 +190,11 @@ TRACE_EVENT(sched_cpu_load,
		__entry->temp			= temp;
	),

	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u nr_small %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
	__entry->nr_big_tasks, __entry->nr_small_tasks,
	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
	__entry->load_scale_factor, __entry->capacity,
	__entry->cumulative_runnable_avg, __entry->irqload,
	__entry->cur_freq, __entry->max_freq,
	__entry->power_cost, __entry->cstate, __entry->temp)
	__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
	__entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
);

TRACE_EVENT(sched_set_boost,
@@ -310,7 +300,6 @@ TRACE_EVENT(sched_update_history,
		__field(unsigned int,	demand			)
		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
		__field(unsigned int,	nr_big_tasks		)
		__field(unsigned int,	nr_small_tasks		)
		__field(	 int,	cpu			)
	),

@@ -324,18 +313,16 @@ TRACE_EVENT(sched_update_history,
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
		__entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
		__entry->cpu            = rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u nr_small %u",
	TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->samples,
		task_event_names[__entry->evt],
		__entry->demand, __entry->hist[0],
		__entry->hist[1], __entry->hist[2], __entry->hist[3],
		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks,
		__entry->nr_small_tasks)
		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
);

TRACE_EVENT(sched_reset_all_window_stats,
Loading