
Commit 271e314b authored by Pavankumar Kondeti

sched: add sched_get_cpu_last_busy_time() API



sched_get_cpu_last_busy_time() returns the last time stamp at which
a given CPU was busy, i.e. had more than 2 runnable tasks or a load
greater than 50% of its max capacity. The LPM driver can make use
of this API to build a policy that prevents a recently loaded CPU
from entering a deep sleep state.

This API is implemented only for the higher capacity CPUs in
the system. It returns 0 for other CPUs.

Change-Id: I97ef47970a71647f4f55f21165d0cc1351770a53
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent eb5b60ca
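
Before the diffs, a minimal sketch of the kind of idle-state gate the commit message describes. Only sched_get_cpu_last_busy_time() comes from this commit; the function name lpm_cpu_allows_deep_idle(), the BUSY_GRACE_PERIOD_NS knob, and the use of sched_clock() as the reference clock are assumptions for illustration (sched_clock() matches the curr_time stamped on the update path in sched_avg.c below).

#include <linux/ktime.h>
#include <linux/sched.h>

/* Hypothetical policy knob: how recently "busy" should veto deep idle. */
#define BUSY_GRACE_PERIOD_NS	(5 * NSEC_PER_MSEC)

static bool lpm_cpu_allows_deep_idle(int cpu)
{
	u64 last_busy = sched_get_cpu_last_busy_time(cpu);

	/* 0 means no busy stamp was ever recorded (e.g. lower-capacity CPUs). */
	if (!last_busy)
		return true;

	/* Allow deep sleep only once the grace period has elapsed. */
	return sched_clock() - last_busy > BUSY_GRACE_PERIOD_NS;
}

An LPM driver could call such a check from its idle-state selection path and fall back to a shallower state whenever it returns false.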
+5 −0
@@ -183,6 +183,7 @@ extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 				     unsigned int *max_nr,
 				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -196,6 +197,10 @@ static inline unsigned int sched_get_cpu_util(int cpu)
 {
 	return 0;
 }
+static inline u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return 0;
+}
 #endif
 
 extern void calc_global_load(unsigned long ticks);
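
The header hunks above add the declaration next to the existing per-CPU statistics helpers and, in the #else branch, an inline stub that returns 0, so callers compile unchanged when this scheduler statistics code is configured out.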
+7 −0
@@ -2474,6 +2474,11 @@ static inline bool is_max_capacity_cpu(int cpu)
 	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
 }
 
+static inline bool is_min_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
@@ -2673,7 +2678,9 @@ static inline int sched_boost(void)
 	return 0;
 }
 
+static inline bool hmp_capable(void) { return false; }
 static inline bool is_max_capacity_cpu(int cpu) { return true; }
+static inline bool is_min_capacity_cpu(int cpu) { return true; }
 
 static inline int
 preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
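
These two hunks supply the helpers that update_last_busy_time() relies on: is_min_capacity_cpu() to identify the lower-capacity cluster, plus stub versions in the second hunk (hmp_capable() returning false, the capacity checks returning true) for builds without the HMP scheduler, where the busy-time tracking bails out early and stays inert.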
+30 −0
@@ -33,6 +33,8 @@ static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
+static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
+
 #define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
 /**
  * sched_get_nr_running_avg
@@ -120,6 +122,27 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
+#define BUSY_NR_RUN		3
+#define BUSY_LOAD_FACTOR	2
+static inline void update_last_busy_time(int cpu, bool dequeue,
+				unsigned long prev_nr_run, u64 curr_time)
+{
+	bool nr_run_trigger = false, load_trigger = false;
+
+	if (!hmp_capable() || is_min_capacity_cpu(cpu))
+		return;
+
+	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
+		nr_run_trigger = true;
+
+	if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
+			capacity_orig_of(cpu))
+		load_trigger = true;
+
+	if (nr_run_trigger || load_trigger)
+		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
+}
+
 /**
  * sched_update_nr_prod
  * @cpu: The core id of the nr running driver.
@@ -148,6 +171,8 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
 	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
 		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 
+	update_last_busy_time(cpu, !inc, nr_running, curr_time);
+
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
@@ -184,3 +209,8 @@ unsigned int sched_get_cpu_util(int cpu)
 	busy = (util * 100) / capacity;
 	return busy;
 }
+
+u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return atomic64_read(&per_cpu(last_busy_time, cpu));
+}
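
Reading update_last_busy_time() above, the stamp is refreshed on two triggers that match the commit message. The nr_run trigger fires when the runnable count drops below BUSY_NR_RUN (3) after having been at or above it, i.e. the CPU has just stopped running more than 2 tasks. The load trigger fires on dequeue when cpu_util(cpu) * BUSY_LOAD_FACTOR exceeds capacity_orig_of(cpu); with a factor of 2 this means utilization above half the CPU's original capacity, the "50%" from the commit message (for example, util above 512 on a CPU at the usual 1024 capacity scale). Keeping the stamp in a per-CPU atomic64_t lets sched_get_cpu_last_busy_time() read it lock-free instead of taking the per-CPU nr_lock held on the update path.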