
Commit 5d0859ce authored by Ingo Molnar

Merge branch 'sched/clock' into tracing/ftrace

Conflicts:
	kernel/sched_clock.c
parents 14131f2f 83ce4009
arch/x86/kernel/cpu/intel.c  +7 −1
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 
 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-	 * with P/T states and does not stop in deep C-states
+	 * with P/T states and does not stop in deep C-states.
+	 *
+	 * It is also reliable across cores and sockets. (but not across
+	 * cabinets - we turn it off in that case explicitly.)
 	 */
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+		sched_clock_stable = 1;
 	}
 
 }
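The bit being tested comes straight from CPUID leaf 0x80000007 ("Advanced Power Management"), whose EDX value the kernel caches in c->x86_power. A minimal userspace sketch of the same probe, using GCC/clang's <cpuid.h> helper (the leaf and bit position are from the comment above; everything else is illustrative):

#include <cpuid.h>	/* GCC/clang helper for the CPUID instruction */
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000007 returns the power-management flags in EDX;
	 * this is the value the kernel caches in c->x86_power. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
		return 1;

	/* Bit 8: TSC runs at a constant rate across P-/T-states and
	 * does not stop in deep C-states. */
	printf("invariant TSC: %s\n", (edx & (1 << 8)) ? "yes" : "no");
	return 0;
}

On CPUs that advertise this bit, the TSC keeps counting at a constant rate through frequency scaling and deep sleep, which is what makes setting sched_clock_stable = 1 safe here.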
include/linux/sched.h  +10 −0
@@ -1672,6 +1672,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
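Per this comment, the contract has two halves: the architecture selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in its Kconfig, and its boot code flips the flag once the hardware proves trustworthy, exactly as the intel.c hunk above does for the constant-TSC case. A standalone mock of that flow (every name below is hypothetical):

#include <stdio.h>

static int sched_clock_stable;	/* stand-in for the flag declared above */

static int cpu_has_invariant_counter(void)
{
	return 1;	/* pretend the CPU advertises a constant-rate counter */
}

/* Boot-time hook, modeled on the intel.c change above: the arch opted
 * into CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, but the hardware turns out
 * to be reliable after all. */
static void early_init_myarch(void)
{
	if (cpu_has_invariant_counter())
		sched_clock_stable = 1;
}

int main(void)
{
	early_init_myarch();
	printf("sched_clock_stable = %d\n", sched_clock_stable);
	return 0;
}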
kernel/sched_clock.c  +22 −24
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
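The #else branch is a small but deliberate trick: when the architecture never claimed an unstable clock, sched_clock_stable becomes a static const 1, so every `if (sched_clock_stable)` test folds to an unconditional fast path and the slow path drops out as dead code. A minimal sketch of the same pattern outside the kernel (names are illustrative):

#include <stdio.h>

/* Toggle this to mimic an arch that selects the Kconfig option: */
/* #define HAVE_UNSTABLE_SCHED_CLOCK 1 */

#ifdef HAVE_UNSTABLE_SCHED_CLOCK
int clock_stable;			/* real variable, set at boot */
#else
static const int clock_stable = 1;	/* compile-time constant */
#endif

static unsigned long long raw_clock(void)     { return 1000; }
static unsigned long long careful_clock(void) { return 999; }

/* With the const 1, the compiler folds the branch and careful_clock()
 * is eliminated; with the variable, both paths survive. */
static unsigned long long read_clock(void)
{
	if (clock_stable)
		return raw_clock();
	return careful_clock();
}

int main(void)
{
	printf("%llu\n", read_clock());
	return 0;
}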
@@ -117,6 +121,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
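The (truncated) comment describes the windowing that keeps the per-cpu clock sane: the raw delta since the last tick is anchored to that tick's GTOD timestamp, forced monotonic with respect to the previous reading, and capped a bounded distance past the tick. A runnable sketch with made-up numbers (the one-tick upper bound is an assumption modeled on the kernel code; TICK_NSEC here is illustrative):

#include <stdio.h>

typedef unsigned long long u64;

#define TICK_NSEC 1000000ULL	/* illustrative 1 ms tick */

static u64 max_u64(u64 a, u64 b) { return a > b ? a : b; }
static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }

/* The clamp from the comment: never go backwards past the previous
 * reading, never run more than about one tick ahead of the tick's
 * GTOD stamp. */
static u64 update_clock(u64 tick_gtod, u64 prev_clock, u64 delta)
{
	u64 clock = tick_gtod + delta;

	clock = max_u64(clock, max_u64(tick_gtod, prev_clock));
	clock = min_u64(clock, tick_gtod + TICK_NSEC);
	return clock;
}

int main(void)
{
	/* The TSC claims 5 ms passed since a tick stamped at t = 10 ms;
	 * the window caps the result at 11 ms (tick + one tick). */
	printf("%llu\n", update_clock(10000000ULL, 10200000ULL, 5000000ULL));
	return 0;
}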
@@ -149,8 +156,11 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
+
+	if (sched_clock_stable)
+		return sched_clock();
 
 	/*
 	 * Normally this is not called in NMI context - but if it is,
@@ -162,6 +172,7 @@ u64 sched_clock_cpu(int cpu)
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
 
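Read together, this hunk and the previous one give sched_clock_cpu() its new shape: the stable fast path short-circuits to raw sched_clock(), and the per-cpu lookup moves below the early returns so it is only paid on the slow path. Reassembled from the hunks above (kernel context, not a standalone program; elisions marked):

u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	if (sched_clock_stable)
		return sched_clock();	/* the raw clock is trusted */

	/* (NMI-context note elided; see the hunk above) */

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);		/* per-cpu data, slow path only */
	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	/* ... per-cpu / remote clock reconciliation computes clock ... */

	return clock;
}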
@@ -201,6 +212,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -243,22 +256,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {