
Commit 19c3205c authored by John Stultz

Merge branch 'fortglx/3.12/sched-clock64-base' into fortglx/3.13/time



Merge in 64-bit sched_clock support that missed 3.12.

Conflicts:
	kernel/time/sched_clock.c

Signed-off-by: John Stultz <john.stultz@linaro.org>
parents 272b98c6 e7e3ff1b
include/linux/clocksource.h +2 −0
@@ -292,6 +292,8 @@ extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

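The two declarations above export the clocksource mult/shift math so the sched_clock code can reuse it. The conversion scheme is ns = (cycles * mult) >> shift; clocks_calc_max_nsecs() reports how many nanoseconds can be converted before the 64-bit product overflows. A minimal userspace sketch of the conversion, using illustrative values for a hypothetical 24 MHz counter rather than any real clocksource:

#include <stdint.h>
#include <stdio.h>

/* Same scheme as clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 24 MHz counter: mult/shift approximates 1e9 / 24e6. */
	uint32_t mult = 2796202667u, shift = 26;

	/* 24,000,000 cycles at 24 MHz comes out to one second. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(24000000, mult, shift));
	return 0;
}
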
include/linux/sched_clock.h +2 −0
@@ -15,6 +15,8 @@ static inline void sched_clock_postinit(void) { }
#endif

extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
extern void sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate);

extern unsigned long long (*sched_clock_func)(void);

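sched_clock_register() is the new 64-bit entry point; setup_sched_clock() stays for existing 32-bit callers. A hedged sketch of how a timer driver might adopt it — the 56-bit width, 24 MHz rate, and example_counter_read() accessor are invented for illustration:

#include <linux/init.h>
#include <linux/sched_clock.h>

u64 example_counter_read(void);	/* hypothetical stand-in for an MMIO counter read */

static u64 notrace example_read_sched_clock(void)
{
	/* Return the raw value; the core masks it to the registered width. */
	return example_counter_read();
}

static void __init example_timer_init(void)
{
	/* Hypothetical free-running 56-bit counter at 24 MHz. */
	sched_clock_register(example_read_sched_clock, 56, 24000000);
}
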
kernel/time/clocksource.c +30 −15
@@ -537,40 +537,55 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
}

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:         Pointer to clocksource
 *
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
	 * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
	 * which is equivalent to the below.
	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
	 * max_cycles < (2^63)/(mult + maxadj)
	 * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
	 * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
	 * max_cycles < 2^(63 - log2(mult + maxadj))
	 * max_cycles < 1 << (63 - log2(mult + maxadj))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
	max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
					cs->shift);
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	return max_nsecs;
}

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:         Pointer to clocksource
 *
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs;

	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
					  cs->mask);
	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by 12.5%. Please
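Worked numbers make the bound above concrete. Re-running the clocks_calc_max_nsecs() arithmetic in plain C, with the same illustrative 24 MHz values as before and maxadj = 0 (which is what the sched_clock caller below passes):

#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)), standing in for the kernel's ilog2(). */
static int ilog2_u32(uint32_t v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint32_t mult = 2796202667u, shift = 26, maxadj = 0;	/* illustrative */
	uint64_t mask = 0xffffffffull;				/* 32-bit counter */

	/* The +1 on the log2 keeps cycles * (mult + maxadj) below 2^63. */
	uint64_t max_cycles = 1ull << (63 - (ilog2_u32(mult + maxadj) + 1));

	if (max_cycles > mask)
		max_cycles = mask;

	uint64_t max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	/* Prints max_cycles=2147483648 max_nsecs=89478485344, i.e. ~89.5 s. */
	printf("max_cycles=%llu max_nsecs=%llu\n",
	       (unsigned long long)max_cycles, (unsigned long long)max_nsecs);
	return 0;
}

So a 32-bit counter at 24 MHz can be deferred roughly 89.5 seconds before the multiply would overflow; clocksource_max_deferment() then trims a further 12.5% off that, as the comment above describes.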
kernel/time/sched_clock.c +61 −50
@@ -8,25 +8,28 @@
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);
@@ -35,14 +38,25 @@ static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;
static u64 __read_mostly sched_clock_mask;

static u32 notrace jiffy_sched_clock_read(void)
static u64 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
	return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
@@ -52,25 +66,18 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
static unsigned long long notrace sched_clock_32(void)
{
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 cyc;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		seq = read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -83,49 +90,46 @@ static unsigned long long notrace sched_clock_32(void)
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock().
	 */

	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

static void sched_clock_poll(unsigned long wrap_ticks)
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	unsigned long r, w;
	unsigned long r;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1ULL << bits) - 1;
	sched_clock_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);

	r = rate;
	if (r >= 4000000) {
@@ -138,20 +142,14 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;
	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * sets the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
@@ -166,6 +164,12 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
	pr_debug("Registered %pF as sched_clock source\n", read);
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	read_sched_clock_32 = read;
	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
@@ -180,14 +184,22 @@ void __init sched_clock_postinit(void)
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	sched_clock_poll(sched_clock_timer.data);
	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * sets the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}
@@ -195,7 +207,6 @@ static int sched_clock_suspend(void)
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}
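
The timer side of the patch is the standard self-rearming hrtimer pattern: the callback forwards its own expiry by the wrap period and returns HRTIMER_RESTART, replacing the old mod_timer() round-trip. In isolation, with illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer poll_timer;
static ktime_t poll_period;

static enum hrtimer_restart poll_fn(struct hrtimer *hrt)
{
	/* ...periodic work, e.g. the update_sched_clock() call above... */
	hrtimer_forward_now(hrt, poll_period);	/* push expiry one period ahead */
	return HRTIMER_RESTART;			/* keep the timer armed */
}

static void poll_start(u64 period_ns)
{
	poll_period = ns_to_ktime(period_ns);
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_fn;
	hrtimer_start(&poll_timer, poll_period, HRTIMER_MODE_REL);
}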