
Commit f17fe85b authored by Steve Muckle, committed by Syed Rameez Mustafa

sched: tighten up jiffy to sched_clock mapping



The tick code already tracks the exact time a tick is
expected to arrive. This can be used to eliminate slack in
the jiffy to sched_clock mapping that aligns windows between
a caller of sched_set_window and the scheduler itself.

Change-Id: I9d47466658d01e6857d7457405459436d504a2ca
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
parent 4fe162d7
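In essence, the patch replaces a boot-time anchor pair (sched_init_jiffy plus a sched_clock sample taken at an arbitrary point within that jiffy) with a snapshot anchored at the last jiffy boundary that the tick code already tracks in last_jiffies_update. Paraphrasing the sched_set_window hunk below, the jiffy-to-nanosecond translation changes roughly as follows (illustration only, not literal patch text):

	/* before: anchor sampled once at init, so the pairing of
	 * sched_init_jiffy with sched_clock_at_init_jiffy carries up to
	 * a tick of slack */
	ws = (window_start - sched_init_jiffy) * TICK_NSEC
		+ sched_clock_at_init_jiffy;

	/* after: anchor taken at the most recent jiffy boundary */
	ws = (window_start - cur_jiffies) * TICK_NSEC + jiffy_sched_clock;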
+1 −0
@@ -72,6 +72,7 @@ struct tick_sched {

extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
+extern u64 jiffy_to_sched_clock(u64 *now, u64 *jiffy_sched_clock);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
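The new helper returns the current 64-bit jiffy count and fills in two sched_clock values: *now (sched_clock at the moment of the call) and *jiffy_sched_clock (the sched_clock value at, or just before by a small safety margin, the most recent jiffy boundary). A minimal usage sketch, assuming a hypothetical caller and target jiffy count; example_jiffy_to_ns is not part of the patch:

	/* Hypothetical illustration of the snapshot's intended use:
	 * translate an arbitrary jiffy count into a sched_clock value. */
	static u64 example_jiffy_to_ns(u64 target_jiffy)
	{
		u64 now, jiffy_sched_clock, cur_jiffies;
		unsigned long flags;
		s64 delta;

		/* mirror the patch: take the snapshot with irqs disabled */
		local_irq_save(flags);
		cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock);
		local_irq_restore(flags);

		delta = (s64)(target_jiffy - cur_jiffies); /* may be negative */
		return jiffy_sched_clock + delta * TICK_NSEC;
	}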
+15 −15
@@ -1224,8 +1224,6 @@ __read_mostly unsigned int sched_ravg_window = 10000000;
unsigned int __read_mostly sched_disable_window_stats;

static unsigned int sync_cpu;
-static u64 sched_init_jiffy;
-static u64 sched_clock_at_init_jiffy;

#define EXITING_TASK_MARKER	0xdeaddead

@@ -1897,23 +1895,16 @@ static inline void mark_task_starting(struct task_struct *p)
	p->ravg.mark_start = wallclock;
}

-static int update_alignment;

static inline void set_window_start(struct rq *rq)
{
	int cpu = cpu_of(rq);
	struct rq *sync_rq = cpu_rq(sync_cpu);

-	if (cpu == sync_cpu && !update_alignment) {
-		sched_init_jiffy = get_jiffies_64();
-		sched_clock_at_init_jiffy = sched_clock();
-	}

	if (rq->window_start || !sched_enable_hmp)
		return;

	if (cpu == sync_cpu) {
-		rq->window_start = sched_clock_at_init_jiffy;
+		rq->window_start = sched_clock();
	} else {
		raw_spin_unlock(&rq->lock);
		double_rq_lock(rq, sync_rq);
@@ -2140,20 +2131,29 @@ void sched_set_io_is_busy(int val)

int sched_set_window(u64 window_start, unsigned int window_size)
{
-	u64 ws, now;
+	u64 now, cur_jiffies, jiffy_sched_clock;
+	s64 ws;
	unsigned long flags;

	if (sched_use_pelt ||
		 (window_size * TICK_NSEC <  MIN_SCHED_RAVG_WINDOW))
			return -EINVAL;

	mutex_lock(&policy_mutex);
-	update_alignment = 1;

-	ws = (window_start - sched_init_jiffy); /* jiffy difference */
+	/* Get a consistent view of sched_clock, jiffies, and the time
+	 * since the last jiffy (based on last_jiffies_update). */
+	local_irq_save(flags);
+	cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock);
+	local_irq_restore(flags);
+
+	/* translate window_start from jiffies to nanoseconds */
+	ws = (window_start - cur_jiffies); /* jiffy difference */
	ws *= TICK_NSEC;
-	ws += sched_clock_at_init_jiffy;
+	ws += jiffy_sched_clock;

-	now = sched_clock();
	/* roll back calculated window start so that it is in
	 * the past (window stats must have a current window) */
	while (ws > now)
		ws -= (window_size * TICK_NSEC);
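
The roll-back loop keeps the computed window start in the past relative to now, which comes from the same snapshot, since the window statistics code requires a window that has already started. A worked example with hypothetical numbers (HZ = 100, so TICK_NSEC = 10,000,000 ns, and window_size = 2 ticks):

	/*
	 * Hypothetical values, for illustration only:
	 *   cur_jiffies       = 1000
	 *   jiffy_sched_clock = 50,000,000,000 ns
	 *   now               = 50,004,000,000 ns
	 *   window_start      = 1001 (one jiffy in the future)
	 *
	 *   ws = (1001 - 1000) * 10,000,000 + 50,000,000,000
	 *      = 50,010,000,000 ns          (> now, so roll back)
	 *   ws = 50,010,000,000 - 2 * 10,000,000
	 *      = 49,990,000,000 ns          (<= now, loop exits)
	 */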

+32 −0
@@ -45,6 +45,38 @@ DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 */
static ktime_t last_jiffies_update;

/*
 * Conversion from ktime to sched_clock is error prone. Use this
 * as a safety margin when calculating the sched_clock value at
 * a particular jiffy as last_jiffies_update uses ktime.
 */
#define SCHED_CLOCK_MARGIN 100000

static u64 ns_since_jiffy(void)
{
	ktime_t delta;

	delta = ktime_sub(ktime_get(), last_jiffies_update);

	return ktime_to_ns(delta);
}

u64 jiffy_to_sched_clock(u64 *now, u64 *jiffy_sched_clock)
{
	u64 cur_jiffies;
	unsigned long seq;

	do {
		seq = read_seqbegin(&jiffies_lock);
		*now = sched_clock();
		*jiffy_sched_clock = *now -
			(ns_since_jiffy() + SCHED_CLOCK_MARGIN);
		cur_jiffies = get_jiffies_64();
	} while (read_seqretry(&jiffies_lock, seq));

	return cur_jiffies;
}
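
The read_seqbegin()/read_seqretry() loop on jiffies_lock ensures that *now, *jiffy_sched_clock and the returned jiffy count all describe the same tick period: if the tick advances jiffies while the snapshot is being taken, the sequence count changes and the reader retries. For context, the writer it pairs with is the tick code's jiffy update path, sketched roughly below (paraphrased from tick_do_update_jiffies64(), not part of this patch):

	/* Paraphrased sketch of the jiffies writer this loop guards
	 * against; the real function also handles multiple missed ticks. */
	write_seqlock(&jiffies_lock);
	last_jiffies_update = ktime_add(last_jiffies_update, tick_period);
	do_timer(1);		/* advances jiffies_64 */
	write_sequnlock(&jiffies_lock);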

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);