Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 72ff2188 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Fix incorrect usage of SCHED_CPUFREQ_INTERCLUSTER_MIG flag"

parents a44c232a 4d5dd1ca
Loading
Loading
Loading
Loading
+1 −0
Original line number | Diff line number | Diff line
@@ -3971,6 +3971,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
#define SCHED_CPUFREQ_WALT (1U << 4)
#define SCHED_CPUFREQ_PL	(1U << 5)
#define SCHED_CPUFREQ_EARLY_DET	(1U << 6)
#define SCHED_CPUFREQ_FORCE_UPDATE (1U << 7)

#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

+3 −1
Original line number | Diff line number | Diff line
@@ -822,6 +822,7 @@ struct rq {
	u8 curr_table;
	int prev_top;
	int curr_top;
	bool notif_pending;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -2266,7 +2267,8 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)

#ifdef CONFIG_SCHED_WALT
	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET;
				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET |
				SCHED_CPUFREQ_FORCE_UPDATE;

	/*
	 * Skip if we've already reported, but not if this is an inter-cluster
+26 −6
Original line number | Diff line number | Diff line
@@ -822,8 +822,11 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)

	migrate_top_tasks(p, src_rq, dest_rq);

	if (!same_freq_domain(new_cpu, task_cpu(p)))
	if (!same_freq_domain(new_cpu, task_cpu(p))) {
		src_rq->notif_pending = true;
		dest_rq->notif_pending = true;
		irq_work_queue(&walt_migration_irq_work);
	}

	if (p == src_rq->ed_task) {
		src_rq->ed_task = NULL;
@@ -3113,10 +3116,11 @@ void walt_irq_work(struct irq_work *irq_work)
	int cpu;
	u64 wc;
	int flag = SCHED_CPUFREQ_WALT;
	bool is_migration = false;

	/* Am I the window rollover work or the migration work? */
	if (irq_work == &walt_migration_irq_work)
		flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;
		is_migration = true;

	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3143,14 +3147,29 @@ void walt_irq_work(struct irq_work *irq_work)
		raw_spin_unlock(&cluster->load_lock);
	}

	for_each_sched_cluster(cluster)
		for_each_cpu(cpu, &cluster->cpus)
			cpufreq_update_util(cpu_rq(cpu), flag);
	for_each_sched_cluster(cluster) {
		for_each_cpu(cpu, &cluster->cpus) {
			int nflag = flag;

			rq = cpu_rq(cpu);

			if (is_migration) {
				if (rq->notif_pending) {
					nflag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;
					rq->notif_pending = false;
				} else {
					nflag |= SCHED_CPUFREQ_FORCE_UPDATE;
				}
			}

			cpufreq_update_util(rq, nflag);
		}
	}

	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_unlock(&cpu_rq(cpu)->lock);

	if (irq_work != &walt_migration_irq_work)
	if (!is_migration)
		core_ctl_check(this_rq()->window_start);
}

@@ -3246,6 +3265,7 @@ void walt_sched_init(struct rq *rq)
		clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
	}
	rq->cum_window_demand = 0;
	rq->notif_pending = false;

	walt_cpu_util_freq_divisor =
	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;