Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6df49f6 authored by Linux Build Service Account; committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: prevent task migration while governor queries CPUs' load"

parents c16f08c4 0133d85e
Loading
Loading
Loading
Loading
+2 −0
Original line number Original line Diff line number Diff line
@@ -1963,6 +1963,8 @@ extern int task_free_unregister(struct notifier_block *n);
#if defined(CONFIG_SCHED_FREQ_INPUT)
#if defined(CONFIG_SCHED_FREQ_INPUT)
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(unsigned long *busy,
				const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
extern void sched_set_io_is_busy(int val);
#else
#else
static inline int sched_set_window(u64 window_start, unsigned int window_size)
static inline int sched_set_window(u64 window_start, unsigned int window_size)
+87 −19
Original line number Original line Diff line number Diff line
@@ -1345,6 +1345,8 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
static int send_notification(struct rq *rq)
static int send_notification(struct rq *rq)
{
{
	unsigned int cur_freq, freq_required;
	unsigned int cur_freq, freq_required;
	unsigned long flags;
	int rc = 0;


	if (!sched_enable_hmp)
	if (!sched_enable_hmp)
		return 0;
		return 0;
@@ -1355,7 +1357,14 @@ static int send_notification(struct rq *rq)
	if (nearly_same_freq(cur_freq, freq_required))
	if (nearly_same_freq(cur_freq, freq_required))
		return 0;
		return 0;


	return 1;
	raw_spin_lock_irqsave(&rq->lock, flags);
	if (!rq->notifier_sent) {
		rq->notifier_sent = 1;
		rc = 1;
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return rc;
}
}


/* Alert governor if there is a need to change frequency */
/* Alert governor if there is a need to change frequency */
@@ -2141,35 +2150,93 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)


#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_FREQ_INPUT


unsigned long sched_get_busy(int cpu)
static inline u64
scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
{
	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
}

void sched_get_cpus_busy(unsigned long *busy, const struct cpumask *query_cpus)
{
{
	unsigned long flags;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);
	struct rq *rq;
	u64 load;
	const int cpus = cpumask_weight(query_cpus);
	u64 load[cpus];
	unsigned int cur_freq[cpus], max_freq[cpus];
	int notifier_sent[cpus];
	int cpu, i = 0;
	unsigned int window_size;

	if (unlikely(cpus == 0))
		return;


	/*
	/*
	 * This function could be called in timer context, and the
	 * This function could be called in timer context, and the
	 * current task may have been executing for a long time. Ensure
	 * current task may have been executing for a long time. Ensure
	 * that the window stats are current by doing an update.
	 * that the window stats are current by doing an update.
	 */
	 */
	raw_spin_lock_irqsave(&rq->lock, flags);
	local_irq_save(flags);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	for_each_cpu(cpu, query_cpus)
	load = rq->old_busy_time = rq->prev_runnable_sum;
		raw_spin_lock(&cpu_rq(cpu)->lock);
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	window_size = sched_ravg_window;

	for_each_cpu(cpu, query_cpus) {
		rq = cpu_rq(cpu);


		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
		load[i] = rq->old_busy_time = rq->prev_runnable_sum;
		/*
		/*
		 * Scale load in reference to rq->max_possible_freq.
		 * Scale load in reference to rq->max_possible_freq.
		 *
		 *
		 * Note that scale_load_to_cpu() scales load in reference to
		 * Note that scale_load_to_cpu() scales load in reference to
	 * rq->max_freq
		 * rq->max_freq.
		 */
		 */
	load = scale_load_to_cpu(load, cpu);
		load[i] = scale_load_to_cpu(load[i], cpu);
	load = div64_u64(load * (u64)rq->max_freq, (u64)rq->max_possible_freq);

	load = div64_u64(load, NSEC_PER_USEC);
		notifier_sent[i] = rq->notifier_sent;
		rq->notifier_sent = 0;
		cur_freq[i] = rq->cur_freq;
		max_freq[i] = rq->max_freq;
		i++;
	}

	for_each_cpu(cpu, query_cpus)
		raw_spin_unlock(&(cpu_rq(cpu))->lock);
	local_irq_restore(flags);

	i = 0;
	for_each_cpu(cpu, query_cpus) {
		rq = cpu_rq(cpu);

		if (!notifier_sent[i]) {
			load[i] = scale_load_to_freq(load[i], max_freq[i],
						     cur_freq[i]);
			if (load[i] > window_size)
				load[i] = window_size;
			load[i] = scale_load_to_freq(load[i], cur_freq[i],
						     rq->max_possible_freq);
		} else {
			load[i] = scale_load_to_freq(load[i], max_freq[i],
						     rq->max_possible_freq);
		}

		busy[i] = div64_u64(load[i], NSEC_PER_USEC);

		trace_sched_get_busy(cpu, busy[i]);
		i++;
	}
}

unsigned long sched_get_busy(int cpu)
{
	struct cpumask query_cpu = CPU_MASK_NONE;
	unsigned long busy;


	trace_sched_get_busy(cpu, load);
	cpumask_set_cpu(cpu, &query_cpu);
	sched_get_cpus_busy(&busy, &query_cpu);


	return load;
	return busy;
}
}


void sched_set_io_is_busy(int val)
void sched_set_io_is_busy(int val)
@@ -8882,6 +8949,7 @@ void __init sched_init(void)
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->old_busy_time = 0;
		rq->old_busy_time = 0;
		rq->notifier_sent = 0;
#endif
#endif
#endif
#endif
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
+1 −0
Original line number Original line Diff line number Diff line
@@ -649,6 +649,7 @@ struct rq {


#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_FREQ_INPUT
	unsigned int old_busy_time;
	unsigned int old_busy_time;
	int notifier_sent;
#endif
#endif
#endif
#endif