Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3c813a9 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: walt: Add BUG_ON() when wallclock goes backwards"

parents 671a9748 ee5b4c10
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -2172,7 +2172,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	rq = cpu_rq(task_cpu(p));
	raw_spin_lock(&rq->lock);
	old_load = task_load(p);
	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);
@@ -2259,7 +2259,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
	trace_sched_waking(p);

	if (!task_on_rq_queued(p)) {
		u64 wallclock = ktime_get_ns();
		u64 wallclock = sched_ktime_clock();

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
@@ -3261,7 +3261,7 @@ void scheduler_tick(void)
	old_load = task_load(curr);
	set_window_start(rq);

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);

	update_rq_clock(rq);
@@ -3633,7 +3633,7 @@ static void __sched notrace __schedule(bool preempt)
	clear_preempt_need_resched();
	rq->clock_skip_update = 0;

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();
	if (likely(prev != next)) {
		if (!prev->on_rq)
			prev->last_sleep_ts = wallclock;
@@ -9611,7 +9611,7 @@ void sched_exit(struct task_struct *p)
	rq = task_rq_lock(p, &rf);

	/* rq->curr == p */
	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	/*
+2 −2
Original line number Diff line number Diff line
@@ -466,7 +466,7 @@ static void sugov_work(struct kthread_work *work)
	mutex_lock(&sg_policy->work_lock);
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   ktime_get_ns());
			   sched_ktime_clock());
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
@@ -919,7 +919,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
		mutex_lock(&sg_policy->work_lock);
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
				   ktime_get_ns());
				   sched_ktime_clock());
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
+1 −1
Original line number Diff line number Diff line
@@ -11820,7 +11820,7 @@ static void walt_check_for_rotation(struct rq *src_rq)
	if (is_max_capacity_cpu(src_cpu))
		return;

	wc = ktime_get_ns();
	wc = sched_ktime_clock();
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);

+6 −1
Original line number Diff line number Diff line
@@ -2234,8 +2234,13 @@ static inline u64 irq_time_read(int cpu)
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
void note_task_waking(struct task_struct *p, u64 wallclock);
#else /* CONFIG_SCHED_WALT */
/*
 * Stub for kernels built without CONFIG_SCHED_WALT: there is no WALT
 * wallclock, so callers always see 0.
 */
static inline u64 sched_ktime_clock(void)
{
	return 0;
}
static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
#endif /* CONFIG_SCHED_WALT */

@@ -2276,7 +2281,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
					cpu_of(rq)));
	if (data)
		data->func(data, ktime_get_ns(), flags);
		data->func(data, sched_ktime_clock(), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+46 −16
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@
 *             and Todd Kjos
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <linux/list_sort.h>
#include <linux/jiffies.h>
@@ -41,6 +42,8 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",

#define EARLY_DETECTION_DURATION 9500000

static ktime_t ktime_last;
static bool sched_ktime_suspended;
static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
static bool use_cycle_counter;
DEFINE_MUTEX(cluster_lock);
@@ -50,6 +53,37 @@ u64 walt_load_reported_window;
static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;

/*
 * WALT's wallclock source.  Normally this is just ktime_get_ns();
 * while the system is in the suspend path (between sched_suspend()
 * and sched_resume()) it returns the value latched at suspend entry,
 * so WALT's clock does not advance across suspend.
 */
u64 sched_ktime_clock(void)
{
	if (likely(!sched_ktime_suspended))
		return ktime_get_ns();

	return ktime_to_ns(ktime_last);
}

/* syscore resume hook: let sched_ktime_clock() read the live clock again. */
static void sched_resume(void)
{
	sched_ktime_suspended = false;
}

/*
 * syscore suspend hook: latch the current time and switch
 * sched_ktime_clock() over to the latched value for the duration of
 * the suspend.  ktime_last is written before the flag so a reader
 * that observes sched_ktime_suspended == true finds it populated.
 * NOTE(review): there is no memory barrier between the two stores;
 * presumably this relies on syscore suspend running with other CPUs
 * quiesced — confirm.
 */
static int sched_suspend(void)
{
	ktime_last = ktime_get();
	sched_ktime_suspended = true;
	return 0;
}

/* Suspend/resume hooks for the WALT wallclock (registered in sched_init_ops()). */
static struct syscore_ops sched_syscore_ops = {
	.resume	= sched_resume,
	.suspend = sched_suspend
};

/* Register the suspend/resume hooks once at boot. */
static int __init sched_init_ops(void)
{
	register_syscore_ops(&sched_syscore_ops);
	return 0;
}
late_initcall(sched_init_ops);

static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
				     unsigned long *flags)
{
@@ -270,12 +304,7 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
	u64 old_window_start = rq->window_start;

	delta = wallclock - rq->window_start;
	/* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */
	if (delta < 0) {
		delta = 0;
		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
	}

	BUG_ON(delta < 0);
	if (delta < sched_ravg_window)
		return old_window_start;

@@ -366,7 +395,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
	if (is_idle_task(curr)) {
		/* We're here without rq->lock held, IRQ disabled */
		raw_spin_lock(&rq->lock);
		update_task_cpu_cycles(curr, cpu, ktime_get_ns());
		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
		raw_spin_unlock(&rq->lock);
	}
}
@@ -427,7 +456,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
	cur_jiffies_ts = get_jiffies_64();

	if (is_idle_task(curr))
		update_task_ravg(curr, rq, IRQ_UPDATE, ktime_get_ns(),
		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
				 delta);

	nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -764,7 +793,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
	if (sched_disable_window_stats)
		goto done;

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();

	update_task_ravg(task_rq(p)->curr, task_rq(p),
			 TASK_UPDATE,
@@ -2051,7 +2080,7 @@ void mark_task_starting(struct task_struct *p)
		return;
	}

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();
	p->ravg.mark_start = p->last_wake_ts = wallclock;
	p->last_enqueued_ts = wallclock;
	p->last_switch_out_ts = 0;
@@ -2453,7 +2482,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,

				raw_spin_lock_irqsave(&rq->lock, flags);
				update_task_ravg(rq->curr, rq, TASK_UPDATE,
						 ktime_get_ns(), 0);
						 sched_ktime_clock(), 0);
				raw_spin_unlock_irqrestore(&rq->lock, flags);
			}
		}
@@ -2603,7 +2632,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
		return;
	}

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();

	/*
	 * wakeup of two or more related tasks could race with each other and
@@ -2630,7 +2659,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)

	grp->preferred_cluster = best_cluster(grp,
			combined_demand, group_boost);
	grp->last_update = ktime_get_ns();
	grp->last_update = sched_ktime_clock();
	trace_sched_set_preferred_cluster(grp, combined_demand);
}

@@ -2654,7 +2683,7 @@ int update_preferred_cluster(struct related_thread_group *grp,
	 * has passed since we last updated preference
	 */
	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
		ktime_get_ns() - grp->last_update > sched_ravg_window)
		sched_ktime_clock() - grp->last_update > sched_ravg_window)
		return 1;

	return 0;
@@ -3037,7 +3066,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
	bool new_task;
	int i;

	wallclock = ktime_get_ns();
	wallclock = sched_ktime_clock();

	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
@@ -3145,8 +3174,9 @@ void walt_irq_work(struct irq_work *irq_work)
	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_lock(&cpu_rq(cpu)->lock);

	wc = ktime_get_ns();
	wc = sched_ktime_clock();
	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);

	for_each_sched_cluster(cluster) {
		u64 aggr_grp_load = 0;

Loading