Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1742238c authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Remove unnecessary calls to cpufreq_update_util()"

parents f6f6c391 256be6b2
Loading
Loading
Loading
Loading
+0 −4
Original line number | Diff line number | Diff line
@@ -2174,7 +2174,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	wallclock = ktime_get_ns();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	cpufreq_update_util(rq, 0);
	raw_spin_unlock(&rq->lock);

	rcu_read_lock();
@@ -2263,7 +2262,6 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
		cpufreq_update_util(rq, 0);
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
		note_task_waking(p, wallclock);
	}
@@ -3636,7 +3634,6 @@ static void __sched notrace __schedule(bool preempt)

		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
		cpufreq_update_util(rq, 0);
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;
@@ -3645,7 +3642,6 @@ static void __sched notrace __schedule(bool preempt)
		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
	} else {
		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
		cpufreq_update_util(rq, 0);
		lockdep_unpin_lock(&rq->lock, cookie);
		raw_spin_unlock_irq(&rq->lock);
	}
+3 −16
Original line number | Diff line number | Diff line
@@ -798,7 +798,6 @@ struct rq {
	int cstate, wakeup_latency, wakeup_energy;
	u64 window_start;
	s64 cum_window_start;
	u64 load_reported_window;
	unsigned long walt_flags;

	u64 cur_irqload;
@@ -1862,6 +1861,8 @@ cpu_util_freq_pelt(int cpu)
}

#ifdef CONFIG_SCHED_WALT
extern atomic64_t walt_irq_work_lastq_ws;

static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
@@ -1898,7 +1899,7 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
		walt_load->prev_window_util = util;
		walt_load->nl = nl;
		walt_load->pl = pl;
		walt_load->ws = rq->load_reported_window;
		walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
	}

	return (util >= capacity) ? capacity : util;
@@ -2268,22 +2269,8 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	struct update_util_data *data;

#ifdef CONFIG_SCHED_WALT
	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET |
				SCHED_CPUFREQ_FORCE_UPDATE;

	/*
	 * Skip if we've already reported, but not if this is an inter-cluster
	 * migration. Also only allow WALT update sites.
	 */
	if (!(flags & SCHED_CPUFREQ_WALT))
		return;
	if (!sched_disable_window_stats &&
		(rq->load_reported_window == rq->window_start) &&
		!(flags & exception_flags))
		return;
	if (!(flags & exception_flags))
		rq->load_reported_window = rq->window_start;
#endif

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+1 −1
Original line number | Diff line number | Diff line
@@ -44,7 +44,7 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
static bool use_cycle_counter;
DEFINE_MUTEX(cluster_lock);
static atomic64_t walt_irq_work_lastq_ws;
atomic64_t walt_irq_work_lastq_ws;

static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;