Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 303c7f06 authored by Lingutla Chandrasekhar
Browse files

sched: Improve the scheduler



This change moves the WALT CPU-frequency utilization helpers (__cpu_util_freq_walt() and cpu_util_freq_walt()) from the scheduler header into walt.c, and narrows the linkage of several WALT symbols (walt_load_reported_window, walt_cpu_util_freq_divisor, freq_policy_load(), rtgb_active) to static.

Change-Id: Ie8a902708242f79adc38d5ab406e10d27e6524e9
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
parent 46e7ca64
Loading
Loading
Loading
Loading
+14 −98
Original line number Diff line number Diff line
@@ -91,9 +91,16 @@ extern __read_mostly bool sched_predl;
extern unsigned int sched_capacity_margin_up[NR_CPUS];
extern unsigned int sched_capacity_margin_down[NR_CPUS];

/*
 * Per-CPU load snapshot filled in by cpu_util_freq_walt() and reported
 * to the cpufreq governor.
 */
struct sched_walt_cpu_load {
	unsigned long prev_window_util;	/* boosted utilization of the previous WALT window */
	unsigned long nl;		/* new-task load, derived from nt_prev_runnable_sum */
	unsigned long pl;		/* predicted load, derived from pred_demands_sum_scaled */
	bool rtgb_active;		/* snapshot of the global rtgb_active flag */
	u64 ws;				/* window start: walt_load_reported_window at sample time */
};

#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
extern unsigned int walt_cpu_util_freq_divisor;

struct walt_sched_stats {
	int nr_big_tasks;
@@ -2040,7 +2047,10 @@ static inline int hrtick_enabled(struct rq *rq)

#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load);
#else
#define sched_ravg_window TICK_NSEC
static inline u64 sched_ktime_clock(void)
{
	return 0;
@@ -2142,14 +2152,6 @@ static inline unsigned long cpu_util(int cpu)
	return min_t(unsigned long, util, capacity_orig_of(cpu));
}

/*
 * Per-CPU load snapshot handed to the cpufreq governor; populated by
 * the WALT frequency-utilization helpers.
 */
struct sched_walt_cpu_load {
	unsigned long prev_window_util;	/* boosted previous-window utilization */
	unsigned long nl;		/* new-task load (from nt_prev_runnable_sum) */
	unsigned long pl;		/* predicted load (from pred_demands_sum_scaled) */
	bool rtgb_active;		/* related-thread-group boost flag at sample time */
	u64 ws;				/* reporting window start (walt_load_reported_window) */
};

static inline unsigned long cpu_util_cum(int cpu, int delta)
{
	u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
@@ -2170,103 +2172,17 @@ extern unsigned long boosted_cpu_util(int cpu, unsigned long other_util,
				      struct sched_walt_cpu_load *walt_load);
#endif

#ifdef CONFIG_SCHED_WALT
u64 freq_policy_load(struct rq *rq);

extern u64 walt_load_reported_window;
extern bool rtgb_active;

/*
 * Compute the boosted WALT frequency utilization of @cpu and, when
 * @walt_load is non-NULL, fill it with the signals the governor consumes
 * (previous-window util, new-task load, predicted load, window start,
 * rtgb flag).  Also records the unboosted busy/predicted times in
 * rq->old_busy_time / rq->old_estimated_time for do_pl_notif().
 *
 * Returns the boosted utilization clamped to capacity_orig_of(@cpu).
 */
static inline unsigned long
__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	u64 util, util_unboosted;
	struct rq *rq = cpu_rq(cpu);
	unsigned long capacity = capacity_orig_of(cpu);
	int boost;

	/* Per-cpu load boost, applied as a percentage on top of 100. */
	boost = per_cpu(sched_load_boost, cpu);
	util_unboosted = util = freq_policy_load(rq);
	util = div64_u64(util * (100 + boost),
			walt_cpu_util_freq_divisor);

	if (walt_load) {
		/* Include related-thread-group time in the new-task load. */
		u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
				rq->grp_time.nt_prev_runnable_sum;
		u64 pl = rq->walt_stats.pred_demands_sum_scaled;

		/* do_pl_notif() needs unboosted signals */
		rq->old_busy_time = div64_u64(util_unboosted,
						sched_ravg_window >>
						SCHED_CAPACITY_SHIFT);
		rq->old_estimated_time = pl;

		/* Boost nl the same way as util; pl is scaled by percent only. */
		nl = div64_u64(nl * (100 + boost),
		walt_cpu_util_freq_divisor);
		pl = div64_u64(pl * (100 + boost), 100);

		walt_load->prev_window_util = util;
		walt_load->nl = nl;
		walt_load->pl = pl;
		walt_load->ws = walt_load_reported_window;
		walt_load->rtgb_active = rtgb_active;
	}

	/* Never report more than the CPU's original capacity. */
	return (util >= capacity) ? capacity : util;
}

#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x)	\
			(max(orig, mult_frac(other, x, 100)))

/*
 * Frequency-domain utilization of @cpu for cpufreq.  CPUs outside
 * asym_cap_sibling_cpus take the plain __cpu_util_freq_walt() path.
 * For asym-capacity siblings, each reported signal is floored by
 * mpct percent of the other sibling's corresponding signal (see
 * ADJUSTED_ASYM_CAP_CPU_UTIL) so the pair's frequencies stay matched;
 * the last CPU in the mask always matches 100% of its sibling.
 *
 * NOTE(review): on the asym-sibling path @walt_load is dereferenced
 * unconditionally — presumably callers on that path always pass a
 * non-NULL pointer; verify against callers.
 */
static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	struct sched_walt_cpu_load wl_other = {0};
	unsigned long util = 0, util_other = 0;
	unsigned long capacity = capacity_orig_of(cpu);
	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;

	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
		return __cpu_util_freq_walt(cpu, walt_load);

	/* Gather this CPU's signals and (one) sibling's signals. */
	for_each_cpu(i, &asym_cap_sibling_cpus) {
		if (i == cpu)
			util = __cpu_util_freq_walt(cpu, walt_load);
		else
			util_other = __cpu_util_freq_walt(i, &wl_other);
	}

	/* The last sibling in the mask always tracks the other fully. */
	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
		mpct = 100;

	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
	walt_load->prev_window_util = util;

	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
						   mpct);
	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
						   mpct);

	return (util >= capacity) ? capacity : util;
}

/* CONFIG_SCHED_WALT: cpufreq utilization comes from the WALT signals. */
static inline unsigned long
cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
{
	return cpu_util_freq_walt(cpu, walt_load);
}

#ifdef CONFIG_SCHED_WALT
	return cpu_util_freq_walt(cpu, walt_load);
#else

static inline unsigned long
cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
{
	return cpu_util(cpu);
#endif
}

#define sched_ravg_window TICK_NSEC

#endif /* CONFIG_SCHED_WALT */

extern unsigned int capacity_margin_freq;

static inline unsigned long
+77 −5
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@ static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
static bool use_cycle_counter;
DEFINE_MUTEX(cluster_lock);
static atomic64_t walt_irq_work_lastq_ws;
u64 walt_load_reported_window;
static u64 walt_load_reported_window;

static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;
@@ -129,7 +129,7 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
 * boost.
 */
__read_mostly unsigned int walt_cpu_util_freq_divisor;
static __read_mostly unsigned int walt_cpu_util_freq_divisor;

/* Initial task load. Newly created tasks are assigned this load. */
unsigned int __read_mostly sched_init_task_load_windows;
@@ -476,7 +476,7 @@ static u32 top_task_load(struct rq *rq)
	}
}

u64 freq_policy_load(struct rq *rq)
static inline u64 freq_policy_load(struct rq *rq)
{
	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
	struct sched_cluster *cluster = rq->cluster;
@@ -513,6 +513,80 @@ u64 freq_policy_load(struct rq *rq)
	return load;
}

static bool rtgb_active;

/*
 * Compute the boosted WALT frequency utilization of @cpu.  When
 * @walt_load is non-NULL, also fill it with the governor-facing signals
 * (previous-window util, new-task load, predicted load, window start,
 * rtgb flag) and stash the unboosted busy/predicted times in
 * rq->old_busy_time / rq->old_estimated_time for do_pl_notif().
 *
 * Returns the boosted utilization clamped to capacity_orig_of(@cpu).
 */
static inline unsigned long
__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long max_cap = capacity_orig_of(cpu);
	int boost = per_cpu(sched_load_boost, cpu);
	u64 raw_load = freq_policy_load(rq);
	u64 load;

	/* Apply the per-cpu boost percentage to the raw window load. */
	load = div64_u64(raw_load * (100 + boost), walt_cpu_util_freq_divisor);

	if (walt_load) {
		u64 new_task_load = cpu_rq(cpu)->nt_prev_runnable_sum +
					rq->grp_time.nt_prev_runnable_sum;
		u64 pred_load = rq->walt_stats.pred_demands_sum_scaled;

		/* do_pl_notif() needs unboosted signals */
		rq->old_busy_time = div64_u64(raw_load,
					      sched_ravg_window >>
					      SCHED_CAPACITY_SHIFT);
		rq->old_estimated_time = pred_load;

		/* Boost nl like util; pl is scaled by percentage only. */
		new_task_load = div64_u64(new_task_load * (100 + boost),
					  walt_cpu_util_freq_divisor);
		pred_load = div64_u64(pred_load * (100 + boost), 100);

		walt_load->prev_window_util = load;
		walt_load->nl = new_task_load;
		walt_load->pl = pred_load;
		walt_load->ws = walt_load_reported_window;
		walt_load->rtgb_active = rtgb_active;
	}

	/* Never report beyond the CPU's original capacity. */
	if (load >= max_cap)
		return max_cap;
	return load;
}

#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x)	\
			(max(orig, mult_frac(other, x, 100)))

unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	struct sched_walt_cpu_load wl_other = {0};
	unsigned long util = 0, util_other = 0;
	unsigned long capacity = capacity_orig_of(cpu);
	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;

	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
		return __cpu_util_freq_walt(cpu, walt_load);

	for_each_cpu(i, &asym_cap_sibling_cpus) {
		if (i == cpu)
			util = __cpu_util_freq_walt(cpu, walt_load);
		else
			util_other = __cpu_util_freq_walt(i, &wl_other);
	}

	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
		mpct = 100;

	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
	walt_load->prev_window_util = util;

	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
						   mpct);
	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
						   mpct);

	return (util >= capacity) ? capacity : util;
}

/*
 * In this function we match the accumulated subtractions with the current
 * and previous windows we are operating with. Ignore any entries where
@@ -3051,8 +3125,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}

bool rtgb_active;

static bool is_rtgb_active(void)
{
	struct related_thread_group *grp;