Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6a369583 authored by Thomas Gleixner
Browse files

x86/tsc: Validate TSC_ADJUST after resume



Some 'feature' BIOSes fiddle with the TSC_ADJUST register during
suspend/resume which renders the TSC unusable.

Add sanity checks into the resume path and restore the
original value if it was adjusted.

Reported-and-tested-by: Roland Scheidegger <rscheidegger_lists@hispeed.ch>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Bruce Schlobohm <bruce.schlobohm@intel.com>
Cc: Kevin Stanton <kevin.b.stanton@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Allen Hung <allen_hung@dell.com>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20161213131211.317654500@linutronix.de


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 31f8a651
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -47,12 +47,12 @@ extern int tsc_clocksource_reliable;
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(void);
extern void tsc_verify_tsc_adjust(void);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
#else
static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
static inline void tsc_verify_tsc_adjust(void) { }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_source(int cpu) { }
static inline void check_tsc_sync_target(void) { }
#endif
+1 −1
Original line number Diff line number Diff line
@@ -277,7 +277,7 @@ void exit_idle(void)

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust();
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
	enter_idle();
}
+6 −0
Original line number Diff line number Diff line
@@ -1080,6 +1080,11 @@ static void detect_art(void)

static struct clocksource clocksource_tsc;

/*
 * Clocksource resume callback: re-validate the TSC_ADJUST MSR after
 * suspend/resume, because some BIOSes modify TSC_ADJUST during S3 and
 * would otherwise render the TSC unusable. Passing 'true' makes
 * tsc_verify_tsc_adjust() skip its jiffies-based rate limit so the
 * check runs unconditionally on this path.
 */
static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
@@ -1112,6 +1117,7 @@ static struct clocksource clocksource_tsc = {
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata               = { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
};

void mark_tsc_unstable(char *reason)
+3 −3
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@ struct tsc_adjust {

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);

void tsc_verify_tsc_adjust(void)
void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;
@@ -39,7 +39,7 @@ void tsc_verify_tsc_adjust(void)
		return;

	/* Rate limit the MSR check */
	if (time_before(jiffies, adj->nextcheck))
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;
@@ -51,7 +51,7 @@ void tsc_verify_tsc_adjust(void)
	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned) {
	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
+1 −0
Original line number Diff line number Diff line
@@ -252,6 +252,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
	fix_processor_context();

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();