Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20acfe73 authored by Syed Rameez Mustafa
Browse files

sched: improve the scheduler



This change moves the sched_boost infrastructure (sched_set_boost(), the
sysctl_sched_boost tunable, the sched_set_boost tracepoint, and boost.o in
the scheduler Makefile) from CONFIG_SCHED_HMP to CONFIG_SCHED_WALT so that
boost is available on WALT-only configurations.

CRs-Fixed: 2040904
Change-Id: I6efa77cd260228a29a0105146fcdecc4b5ee176e
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent bab41885
Loading
Loading
Loading
Loading
+8 −6
Original line number Diff line number Diff line
@@ -179,6 +179,7 @@ extern u64 nr_running_integral(unsigned int cpu);

extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
extern unsigned int sched_get_cpu_util(int cpu);

extern void calc_global_load(unsigned long ticks);

@@ -1675,6 +1676,7 @@ struct task_struct {
	struct related_thread_group *grp;
	struct list_head grp_list;
	u64 cpu_cycles;
	bool misfit;
#endif

#ifdef CONFIG_CGROUP_SCHED
@@ -2638,7 +2640,6 @@ extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
				const struct cpumask *query_cpus);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
@@ -2671,11 +2672,6 @@ static inline unsigned long sched_get_busy(int cpu)
static inline void sched_get_cpus_busy(struct sched_load *busy,
				       const struct cpumask *query_cpus) {};

static inline int sched_set_boost(int enable)
{
	return -EINVAL;
}

static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
{
	return 0;
@@ -2701,6 +2697,7 @@ extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
					  u32 fmax);
extern int sched_set_boost(int enable);
#else
static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -2708,6 +2705,11 @@ register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
	return 0;
}
static inline void sched_set_io_is_busy(int val) {};

static inline int sched_set_boost(int enable)
{
	return -EINVAL;
}
#endif /* CONFIG_SCHED_WALT */

#ifndef CONFIG_SCHED_WALT
+1 −1
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@ extern unsigned int sysctl_sched_init_task_load_pct;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_boost;
#endif

#ifdef CONFIG_SCHED_HMP
@@ -55,7 +56,6 @@ extern unsigned int sysctl_sched_downmigrate_pct;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
+27 −17
Original line number Diff line number Diff line
@@ -523,6 +523,23 @@ TRACE_EVENT(sched_migration_update_sum,
		__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
);

/*
 * sched_set_boost - tracepoint emitted when the scheduler boost level is
 * changed; records the requested boost type and prints it as "type %d".
 * NOTE(review): presumably fired from sched_set_boost() — confirm at the
 * call site.
 */
TRACE_EVENT(sched_set_boost,

	TP_PROTO(int type),

	TP_ARGS(type),

	TP_STRUCT__entry(
		/* requested boost type (integer enum value) */
		__field(int, type			)
	),

	TP_fast_assign(
		__entry->type = type;
	),

	TP_printk("type %d", __entry->type)
);

#endif

#ifdef CONFIG_SCHED_WALT
@@ -639,23 +656,6 @@ DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
	TP_ARGS(rq, idle, irqload, power_cost, temp)
);

/*
 * sched_set_boost - tracepoint emitted when the scheduler boost level is
 * changed; records the requested boost type and prints it as "type %d".
 * NOTE(review): presumably fired from sched_set_boost() — confirm at the
 * call site.
 */
TRACE_EVENT(sched_set_boost,

	TP_PROTO(int type),

	TP_ARGS(type),

	TP_STRUCT__entry(
		/* requested boost type (integer enum value) */
		__field(int, type			)
	),

	TP_fast_assign(
		__entry->type = type;
	),

	TP_printk("type %d", __entry->type)
);

TRACE_EVENT(sched_reset_all_window_stats,

	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
@@ -869,6 +869,16 @@ DECLARE_EVENT_CLASS(sched_task_util,
		__entry->comm, __entry->pid, __entry->task_cpu, __entry->task_util, __entry->nominated_cpu, __entry->target_cpu, __entry->ediff, __entry->need_idle, __entry->latency)
);

/*
 * Instance of the sched_task_util event class: traces a task-placement
 * decision that biased the task toward the waker's CPU.
 */
DEFINE_EVENT(sched_task_util, sched_task_util_bias_to_waker,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

/*
 * Instance of the sched_task_util event class: traces a task-placement
 * decision driven by thread-group colocation.
 */
DEFINE_EVENT(sched_task_util, sched_task_util_colocated,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+1 −1
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@ obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o swait.o completion.o idle.o sched_avg.o
obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
obj-$(CONFIG_SCHED_WALT) += walt.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
+3 −1
Original line number Diff line number Diff line
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
 * GNU General Public License for more details.
 */

#include <linux/jiffies.h>
#include "sched.h"
#include <linux/of.h>
#include <linux/sched/core_ctl.h>
@@ -139,6 +140,7 @@ static void _sched_set_boost(int old_val, int type)
	case RESTRAINED_BOOST:
		freq_aggr_threshold_backup =
			update_freq_aggregate_threshold(1);
		mod_timer(&sched_grp_timer, jiffies + 1);
		break;

	default:
Loading