
Commit fe3cd484 authored by Feng Tang, committed by Greg Kroah-Hartman

x86/tsc: Add a timer to make sure TSC_adjust is always checked

commit c7719e79347803b8e3b6b50da8c6db410a3012b5 upstream.

The TSC_ADJUST register is checked every time a CPU enters idle state, but
as Thomas Gleixner pointed out [1], there is still a caveat: a system may
never enter idle, either because it is too busy or because it is purposely
configured not to enter idle.

Set up a periodic timer (every 10 minutes) to make sure the check happens
on a regular basis.

[1] https://lore.kernel.org/lkml/875z286xtk.fsf@nanos.tec.linutronix.de/
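
As a quick arithmetic aside (not part of the patch): HZ is the number of
timer ticks per second, so the HZ * 600 interval used in the diff below
always works out to 600 seconds, i.e. 10 minutes, regardless of the
kernel's CONFIG_HZ setting:

	#include <linux/jiffies.h>

	/* HZ jiffies elapse per second, so this is 600 s == 10 min on any CONFIG_HZ. */
	#define SYNC_CHECK_INTERVAL	(HZ * 600)

	/* A hypothetical equivalent spelling using the conversion helper: */
	/* #define SYNC_CHECK_INTERVAL	msecs_to_jiffies(600 * 1000) */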



Fixes: 6e3cd95234dc ("x86/hpet: Use another crystalball to evaluate HPET usability")
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211117023751.24190-1-feng.tang@intel.com


Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 957a203f
arch/x86/kernel/tsc_sync.c  +41 −0
@@ -30,6 +30,7 @@ struct tsc_adjust {
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
static struct timer_list tsc_sync_check_timer;

/*
 * TSCs on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
	}
}

/*
 * Normally the TSC sync check runs every time the system enters idle
 * state, but there is still a caveat: a system won't enter idle,
 * either because it's too busy or purposely configured not to enter
 * idle.
 *
 * So set up a periodic timer (every 10 minutes) to make sure the check
 * is always on.
 */

#define SYNC_CHECK_INTERVAL		(HZ * 600)

static void tsc_sync_check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	tsc_verify_tsc_adjust(false);

	/* Run the check for all onlined CPUs in turn */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
	add_timer_on(&tsc_sync_check_timer, next_cpu);
}

static int __init start_sync_check_timer(void)
{
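	/*
	 * Skip the periodic check if the CPU lacks TSC_ADJUST or the
	 * TSC has been flagged reliable (e.g. via tsc=reliable).
	 */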
	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
		return 0;

	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
	add_timer(&tsc_sync_check_timer);

	return 0;
}
late_initcall(start_sync_check_timer);

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
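
For readers who want to poke at the rotation logic in isolation, here is a
minimal sketch of the same self-rearming add_timer_on() round-robin pattern,
packaged as a hypothetical out-of-tree module; the "timer-demo" name, the
shortened 10-second interval, and the pr_info() message are illustrative
assumptions, not part of this patch.

	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/cpumask.h>
	#include <linux/smp.h>
	#include <linux/jiffies.h>

	#define DEMO_INTERVAL	(HZ * 10)	/* 10 s for observation; the patch uses HZ * 600 */

	static struct timer_list demo_timer;

	static void demo_timer_fn(struct timer_list *unused)
	{
		int next_cpu;

		pr_info("timer-demo: check ran on CPU %d\n", raw_smp_processor_id());

		/* Pick the next online CPU, wrapping around at the end of
		 * the mask, exactly as tsc_sync_check_timer_fn() does above. */
		next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(cpu_online_mask);

		demo_timer.expires += DEMO_INTERVAL;
		add_timer_on(&demo_timer, next_cpu);
	}

	static int __init demo_init(void)
	{
		timer_setup(&demo_timer, demo_timer_fn, 0);
		demo_timer.expires = jiffies + DEMO_INTERVAL;
		add_timer(&demo_timer);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);	/* wait out a running callback before unload */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The essential design choice, shared with the patch, is re-arming from inside
the callback with add_timer_on(), so successive expirations land on
successive online CPUs rather than always sampling the same one.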