author    Thomas Gleixner <tglx@linutronix.de>  2016-11-19 13:47:37 +0000
committer Thomas Gleixner <tglx@linutronix.de>  2016-11-29 19:23:16 +0100
commit    1d0095feea591bbd94f35d8a98aed746319783e1 (patch)
tree      6ebcaa4abfcc9a31ae7c968059ce0ed3f60d2ae7 /arch/x86/kernel/tsc_sync.c
parent    8b223bc7abe0e30e8d297a24ee6c6c07ef8d0bb9 (diff)
download  linux-1d0095feea591bbd94f35d8a98aed746319783e1.tar.gz
x86/tsc: Verify TSC_ADJUST from idle
When entering idle, it's a good opportunity to verify that the TSC_ADJUST
MSR has not been tampered with (BIOS hiding SMM cycles). If tampering is
detected, emit a warning and restore it to the previous value.
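
The check itself is the tsc_verify_tsc_adjust() function added below; the
call site on idle entry lives outside this file (the diffstat here is
limited to tsc_sync.c). A minimal sketch of such a call site, assuming the
x86 arch_cpu_idle_enter() hook is used, which is not confirmed by the diff
shown here:

	/* Sketch only: wiring the check into idle entry. The exact hook
	 * is an assumption; it is not part of the diff below. */
	void arch_cpu_idle_enter(void)
	{
		tsc_verify_tsc_adjust();
	}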

This is especially important for machines which mark the TSC reliable
because there is no watchdog clocksource available (SoCs).

This is not sufficient for HPC (NOHZ_FULL) situations where a CPU never
goes idle, but adding a timer to do the check periodically is not an option
either. On a machine which has this issue, the check triggers right during
boot, so there is a decent chance that the sysadmin will notice.
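
A sysadmin can also inspect the raw MSR from userspace via the msr driver;
a minimal sketch, assuming CONFIG_X86_MSR is enabled, root privileges, and
that IA32_TSC_ADJUST (MSR 0x3b) is read on CPU 0:

	/* Hypothetical userspace reader, not part of this patch. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/cpu/0/msr");
			return 1;
		}
		/* The msr driver maps the file offset to the MSR number. */
		if (pread(fd, &val, sizeof(val), 0x3b) != sizeof(val)) {
			perror("read IA32_TSC_ADJUST");
			close(fd);
			return 1;
		}
		printf("CPU0 TSC_ADJUST: %lld\n", (long long)val);
		close(fd);
		return 0;
	}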

Rate limit the check to once per second and warn only once per CPU.
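
The rate limit follows the common jiffies idiom: store a per-CPU "next
allowed check" timestamp and compare it with time_before(), which is safe
across jiffies wrap-around. Excerpt of the pattern used below, with
explanatory comments added here for illustration:

	if (time_before(jiffies, adj->nextcheck))	/* wrap-safe compare */
		return;					/* checked < 1s ago */
	adj->nextcheck = jiffies + HZ;			/* HZ jiffies == 1 second */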

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20161119134017.732180441@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--	arch/x86/kernel/tsc_sync.c	37
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index bd2bd5e89d96..f713e84d1cb4 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -22,12 +22,42 @@
 #include <asm/tsc.h>
 
 struct tsc_adjust {
-	s64	bootval;
-	s64	adjusted;
+	s64		bootval;
+	s64		adjusted;
+	unsigned long	nextcheck;
+	bool		warned;
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
 
+void tsc_verify_tsc_adjust(void)
+{
+	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
+	s64 curval;
+
+	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+		return;
+
+	/* Rate limit the MSR check */
+	if (time_before(jiffies, adj->nextcheck))
+		return;
+
+	adj->nextcheck = jiffies + HZ;
+
+	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+	if (adj->adjusted == curval)
+		return;
+
+	/* Restore the original value */
+	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
+
+	if (!adj->warned) {
+		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
+			smp_processor_id(), adj->adjusted, curval);
+		adj->warned = true;
+	}
+}
+
 #ifndef CONFIG_SMP
 void __init tsc_store_and_check_tsc_adjust(void)
 {
@@ -40,6 +70,7 @@ void __init tsc_store_and_check_tsc_adjust(void)
 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
 	cur->adjusted = bootval;
+	cur->nextcheck = jiffies + HZ;
 	pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
 }
 
@@ -59,6 +90,8 @@ void tsc_store_and_check_tsc_adjust(void)
 
 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
+	cur->nextcheck = jiffies + HZ;
+	cur->warned = false;
 
 	/*
 	 * Check whether this CPU is the first in a package to come up. In