Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24c18127 authored by Stephen Boyd
Browse files

sched: Remove sched_ktime_clock()



The timekeeping subsystem is suspended shortly before suspending
and resumed shortly after resuming. During this time, interrupts
are disabled and only one CPU is online. The scheduler shouldn't
really be involved at this point in suspend/resume, so having
this API doesn't really make any sense. Remove it.

Change-Id: Iad94c7a3b8f7cbad530d9ddbeb858d759119ab6e
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
parent 51257b78
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -740,7 +740,7 @@ DECLARE_EVENT_CLASS(sched_task_util,
		__entry->ediff			= ediff;
		__entry->need_idle		= need_idle;
		__entry->latency		= p->ravg.mark_start ?
						  sched_ktime_clock() -
						  ktime_get_ns() -
						  p->ravg.mark_start : 0;
	),

+5 −5
Original line number Diff line number Diff line
@@ -2165,7 +2165,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	rq = cpu_rq(task_cpu(p));
	raw_spin_lock(&rq->lock);
	old_load = task_load(p);
	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	cpufreq_update_util(rq, 0);
@@ -2253,7 +2253,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
	trace_sched_waking(p);

	if (!task_on_rq_queued(p)) {
		u64 wallclock = sched_ktime_clock();
		u64 wallclock = ktime_get_ns();

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
@@ -3312,7 +3312,7 @@ void scheduler_tick(void)
	old_load = task_load(curr);
	set_window_start(rq);

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);

	update_rq_clock(rq);
@@ -3648,7 +3648,7 @@ static void __sched notrace __schedule(bool preempt)
	clear_preempt_need_resched();
	rq->clock_skip_update = 0;

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();
	if (likely(prev != next)) {
		if (!prev->on_rq)
			prev->last_sleep_ts = wallclock;
@@ -9571,7 +9571,7 @@ void sched_exit(struct task_struct *p)
	rq = task_rq_lock(p, &rf);

	/* rq->curr == p */
	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	/*
+2 −2
Original line number Diff line number Diff line
@@ -419,7 +419,7 @@ static void sugov_work(struct kthread_work *work)
	mutex_lock(&sg_policy->work_lock);
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   sched_ktime_clock());
			   ktime_get_ns());
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
@@ -802,7 +802,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
		mutex_lock(&sg_policy->work_lock);
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
				   sched_ktime_clock());
				   ktime_get_ns());
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
+1 −6
Original line number Diff line number Diff line
@@ -2205,13 +2205,8 @@ static inline u64 irq_time_read(int cpu)
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
void note_task_waking(struct task_struct *p, u64 wallclock);
#else /* CONFIG_SCHED_WALT */
/*
 * !CONFIG_SCHED_WALT stub: without WALT there is no suspend-aware
 * scheduler wallclock, so report 0.
 * NOTE(review): this stub is deleted by this commit along with the
 * WALT declaration above it; callers switch to ktime_get_ns().
 */
static inline u64 sched_ktime_clock(void)
{
	return 0;
}
static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
#endif /* CONFIG_SCHED_WALT */

@@ -2264,7 +2259,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
					cpu_of(rq)));
	if (data)
		data->func(data, sched_ktime_clock(), flags);
		data->func(data, ktime_get_ns(), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+8 −42
Original line number Diff line number Diff line
@@ -19,7 +19,6 @@
 *             and Todd Kjos
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <linux/list_sort.h>
#include <linux/jiffies.h>
@@ -42,8 +41,6 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",

#define EARLY_DETECTION_DURATION 9500000

static ktime_t ktime_last;
static bool sched_ktime_suspended;
static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
static bool use_cycle_counter;
DEFINE_MUTEX(cluster_lock);
@@ -52,37 +49,6 @@ static atomic64_t walt_irq_work_lastq_ws;
static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;

/*
 * Scheduler wallclock in nanoseconds. While timekeeping is suspended
 * (sched_ktime_suspended set by the syscore suspend hook) this returns
 * the value snapshotted in ktime_last instead of calling into the
 * suspended timekeeping core; otherwise it is plain ktime_get_ns().
 * Removed by this commit: during suspend/resume interrupts are off and
 * only one CPU is online, so the scheduler never needs this fallback.
 */
u64 sched_ktime_clock(void)
{
	if (unlikely(sched_ktime_suspended))
		return ktime_to_ns(ktime_last);
	return ktime_get_ns();
}

/* syscore resume hook: timekeeping is back, stop using the cached time. */
static void sched_resume(void)
{
	sched_ktime_suspended = false;
}

/*
 * syscore suspend hook: snapshot the current time and flag suspension so
 * sched_ktime_clock() serves a frozen value while timekeeping is down.
 * Always returns 0 (suspend cannot be vetoed here).
 */
static int sched_suspend(void)
{
	ktime_last = ktime_get();
	sched_ktime_suspended = true;
	return 0;
}

/* Hook table wiring the suspend/resume callbacks above into syscore. */
static struct syscore_ops sched_syscore_ops = {
	.resume	= sched_resume,
	.suspend = sched_suspend
};

/*
 * Registers sched_syscore_ops at late_initcall time so the suspend/resume
 * time-caching hooks are active before the first system suspend.
 */
static int __init sched_init_ops(void)
{
	register_syscore_ops(&sched_syscore_ops);
	return 0;
}
late_initcall(sched_init_ops);

static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
				     unsigned long *flags)
{
@@ -421,7 +387,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
	cur_jiffies_ts = get_jiffies_64();

	if (is_idle_task(curr))
		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
		update_task_ravg(curr, rq, IRQ_UPDATE, ktime_get_ns(),
				 delta);

	nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -758,7 +724,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
	if (sched_disable_window_stats)
		goto done;

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();

	update_task_ravg(task_rq(p)->curr, task_rq(p),
			 TASK_UPDATE,
@@ -2028,7 +1994,7 @@ void mark_task_starting(struct task_struct *p)
		return;
	}

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();
	p->ravg.mark_start = p->last_wake_ts = wallclock;
	p->last_cpu_selected_ts = wallclock;
	p->last_switch_out_ts = 0;
@@ -2474,7 +2440,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
	if (list_empty(&grp->tasks))
		return;

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();

	/*
	 * wakeup of two or more related tasks could race with each other and
@@ -2501,7 +2467,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)

	grp->preferred_cluster = best_cluster(grp,
			combined_demand, group_boost);
	grp->last_update = sched_ktime_clock();
	grp->last_update = ktime_get_ns();
	trace_sched_set_preferred_cluster(grp, combined_demand);
}

@@ -2525,7 +2491,7 @@ int update_preferred_cluster(struct related_thread_group *grp,
	 * has passed since we last updated preference
	 */
	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
		sched_ktime_clock() - grp->last_update > sched_ravg_window)
		ktime_get_ns() - grp->last_update > sched_ravg_window)
		return 1;

	return 0;
@@ -2911,7 +2877,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
	bool new_task;
	int i;

	wallclock = sched_ktime_clock();
	wallclock = ktime_get_ns();

	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
@@ -3018,7 +2984,7 @@ void walt_irq_work(struct irq_work *irq_work)
	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_lock(&cpu_rq(cpu)->lock);

	wc = sched_ktime_clock();
	wc = ktime_get_ns();

	for_each_sched_cluster(cluster) {
		u64 aggr_grp_load = 0;