
Commit cd35a9bc authored by Vikram Mulukutla, committed by Satya Durga Srinivasu Prabhala

sched: sched_avg: Add snapshot of sched_avg



This snapshot is taken from msm-4.9 as of commit 12a64233c6b30a7
("sched: Fix for 32bit '__udivdi3' build error").

Change-Id: I2bac563ddd0985f0bc85a8d7fc6f3c9694c84264
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
[satyap@codeaurora.org:
 1. Resolve merge conflicts
 2. Fix compilation issues for ARCH=um
 3. Comment out parts of code for compilation]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent bb5b6b3e
+12 −0
@@ -23,9 +23,21 @@ extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

#ifdef CONFIG_SMP
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
					unsigned int *max_nr,
					unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
extern u64 sched_get_cpu_last_busy_time(int cpu);
#else
static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
{
}
static inline void sched_get_nr_running_avg(int *avg, int *iowait_avg,
				int *big_avg, unsigned int *max_nr,
				unsigned int *big_max_nr)
{
}
static inline unsigned int sched_get_cpu_util(int cpu)
{
	return 0;
+31 −0
@@ -1005,6 +1005,37 @@ TRACE_EVENT(sched_find_best_target,
		__entry->target)
);

/*
 * Tracepoint for sched_get_nr_running_avg
 */
TRACE_EVENT(sched_get_nr_running_avg,

	TP_PROTO(int avg, int big_avg, int iowait_avg,
		unsigned int max_nr, unsigned int big_max_nr),

	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),

	TP_STRUCT__entry(
		__field( int,   avg                     )
		__field( int,   big_avg                 )
		__field( int,   iowait_avg              )
		__field( unsigned int,  max_nr          )
		__field( unsigned int,  big_max_nr      )
	),

	TP_fast_assign(
		__entry->avg            = avg;
		__entry->big_avg        = big_avg;
		__entry->iowait_avg     = iowait_avg;
		__entry->max_nr         = max_nr;
		__entry->big_max_nr     = big_max_nr;
	),

	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
		__entry->avg, __entry->big_avg, __entry->iowait_avg,
		__entry->max_nr, __entry->big_max_nr)
);

#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
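For orientation, each poll emits one line in this format to the trace buffer; with purely hypothetical values (and the usual trace prefix omitted), it would render roughly as:

	sched_get_nr_running_avg: avg=2 big_avg=1 iowait_avg=0 max_nr=4 big_max_nr=2

The values come from sched_get_nr_running_avg() in the new sched_avg.c below, which fires this tracepoint once per poll.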

+1 −1
@@ -19,7 +19,7 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o
obj-y += wait.o wait_bit.o swait.o completion.o idle.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o sched_avg.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += energy.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+2 −0
@@ -1742,6 +1742,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	sched_update_nr_prod(cpu_of(rq), count, true);
	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1756,6 +1757,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	sched_update_nr_prod(cpu_of(rq), count, false);
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
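These two hook calls feed the per-CPU time-weighted accounting implemented in the new sched_avg.c below (built via the sched_avg.o Makefile addition above). As a worked example, assuming a single CPU that runs 2 tasks for 6 ms and then 3 tasks for 4 ms between two polls 10 ms apart: nr_prod_sum accumulates 2*6 + 3*4 = 24 task-milliseconds, and sched_get_nr_running_avg() reports DIV64_U64_ROUNDUP(24, 10) = 3, i.e. 2.4 runnable tasks on average, rounded up.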
+216 −0
/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Scheduler hook for average runqueue determination
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/math64.h>

#include "sched.h"
#include "walt.h"
#include <trace/events/sched.h>

static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
static DEFINE_PER_CPU(u64, nr_max);

static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;

static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);

#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
 * sched_get_nr_running_avg
 * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
 *	    Returns the avg * 100 to return up to two decimal points
 *	    of accuracy.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
			      unsigned int *max_nr, unsigned int *big_max_nr)
{
	int cpu;
	u64 curr_time = sched_clock();
	u64 diff = curr_time - last_get_time;
	u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;

	*avg = 0;
	*iowait_avg = 0;
	*big_avg = 0;
	*max_nr = 0;
	*big_max_nr = 0;

	if (!diff)
		return;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		curr_time = sched_clock();
		diff = curr_time - per_cpu(last_time, cpu);
		BUG_ON((s64)diff < 0);

		tmp_avg += per_cpu(nr_prod_sum, cpu);
		tmp_avg += per_cpu(nr, cpu) * diff;

		tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
		tmp_big_avg += nr_eligible_big_tasks(cpu) * diff;

		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
		tmp_iowait +=  nr_iowait_cpu(cpu) * diff;

		per_cpu(last_time, cpu) = curr_time;

		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(nr_big_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;

		if (*max_nr < per_cpu(nr_max, cpu))
			*max_nr = per_cpu(nr_max, cpu);

		if (is_max_capacity_cpu(cpu)) {
			if (*big_max_nr < per_cpu(nr_max, cpu))
				*big_max_nr = per_cpu(nr_max, cpu);
		}

		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	diff = curr_time - last_get_time;
	last_get_time = curr_time;

	/*
	 * Any task running on BIG cluster and BIG tasks running on little
	 * cluster contributes to big_avg. Small or medium tasks can also
	 * run on BIG cluster when co-location and scheduler boost features
	 * are activated. We don't want these tasks to downmigrate to little
	 * cluster when BIG CPUs are available but isolated. Round up the
	 * average values so that core_ctl aggressively unisolate BIG CPUs.
	 */
	*avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
	*big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
	*iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);

	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
				       *max_nr, *big_max_nr);

	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
				 __func__, *avg, *big_avg, *iowait_avg);
}
EXPORT_SYMBOL(sched_get_nr_running_avg);
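/*
 * Illustration only, not part of this patch: a core-control style
 * consumer could poll these averages from a periodic work item, e.g.:
 *
 *	int avg, iowait_avg, big_avg;
 *	unsigned int max_nr, big_max_nr;
 *
 *	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
 *				 &max_nr, &big_max_nr);
 *
 * and use avg/big_avg to decide how many CPUs to keep unisolated; the
 * threshold and the unisolation helper are policy, not part of this file.
 */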

#define BUSY_NR_RUN		3
#define BUSY_LOAD_FACTOR	10
static inline void update_last_busy_time(int cpu, bool dequeue,
				unsigned long prev_nr_run, u64 curr_time)
{
	bool nr_run_trigger = false, load_trigger = false;

	if (!hmp_capable() || is_min_capacity_cpu(cpu))
		return;

	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
		nr_run_trigger = true;

	if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
			capacity_orig_of(cpu))
		load_trigger = true;

	if (nr_run_trigger || load_trigger)
		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
}
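/*
 * Note on the thresholds above (illustrative, not part of this patch):
 * with BUSY_LOAD_FACTOR of 10, the load trigger fires on dequeue once
 * cpu_util(cpu) exceeds one tenth of capacity_orig_of(cpu), e.g. a
 * utilization above ~102 on a CPU with capacity 1024. The nr_run
 * trigger fires when the runqueue drops below BUSY_NR_RUN (3) tasks
 * after having been at or above it.
 */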

/**
 * sched_update_nr_prod
 * @cpu: The core id of the nr running driver.
 * @delta: Adjust nr by 'delta' amount
 * @inc: Whether we are increasing or decreasing the count
 * @return: N/A
 *
 * Update average with latest nr_running value for CPU
 */
void sched_update_nr_prod(int cpu, long delta, bool inc)
{
	u64 diff;
	u64 curr_time;
	unsigned long flags, nr_running;

	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
	nr_running = per_cpu(nr, cpu);
	curr_time = sched_clock();
	diff = curr_time - per_cpu(last_time, cpu);
	BUG_ON((s64)diff < 0);
	per_cpu(last_time, cpu) = curr_time;
	per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);

	BUG_ON((s64)per_cpu(nr, cpu) < 0);

	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

	update_last_busy_time(cpu, !inc, nr_running, curr_time);

	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);

/*
 * Returns the CPU utilization % in the last window.
 *
 */
unsigned int sched_get_cpu_util(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 util;
	unsigned long capacity, flags;
	unsigned int busy;

	raw_spin_lock_irqsave(&rq->lock, flags);

	util = rq->cfs.avg.util_avg;
	capacity = capacity_orig_of(cpu);

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
		util = div64_u64(util,
				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
	}
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	util = (util >= capacity) ? capacity : util;
	busy = div64_ul((util * 100), capacity);
	return busy;
}
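/*
 * Worked example (illustrative, not part of this patch): on the default
 * PELT path, util_avg = 512 on a CPU whose capacity_orig is 1024 yields
 * busy = (512 * 100) / 1024 = 50, i.e. 50% utilization; utilization at
 * or above capacity is clamped, giving 100.
 */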

u64 sched_get_cpu_last_busy_time(int cpu)
{
	return atomic64_read(&per_cpu(last_busy_time, cpu));
}
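Finally, sched_get_cpu_last_busy_time() pairs naturally with a grace-period check on the consumer side. A minimal sketch, assuming the declarations from the header hunk above are visible; the helper name and the 100 ms grace period are hypothetical, chosen purely for illustration:

	/* Hypothetical helper, not part of this patch. */
	static bool cpu_recently_busy(int cpu)
	{
		u64 grace_ns = 100 * NSEC_PER_MSEC;

		return sched_clock() - sched_get_cpu_last_busy_time(cpu) < grace_ns;
	}

A policy built on top of this could, for example, skip isolating a CPU while cpu_recently_busy(cpu) is true.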