__use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
it as __ro_after_init.
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
arch/x86/kernel/tsc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 15f97c0abc9d0..f19b42ea40573 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
-static DEFINE_STATIC_KEY_FALSE(__use_tsc);
+static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
int tsc_clocksource_reliable;
--
2.41.0
On Mon, Nov 20, 2023 at 11:55:28AM +0100, Valentin Schneider wrote:
> __use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
> it as __ro_after_init.
>
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
> arch/x86/kernel/tsc.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> index 15f97c0abc9d0..f19b42ea40573 100644
> --- a/arch/x86/kernel/tsc.c
> +++ b/arch/x86/kernel/tsc.c
> @@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
> static int __read_mostly tsc_unstable;
> static unsigned int __initdata tsc_early_khz;
>
> -static DEFINE_STATIC_KEY_FALSE(__use_tsc);
> +static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
So sure, we can absolutely do that, but do we want to take this one
further perhaps? "notsc" on x86_64 makes no sense whatsoever. Let's
drag things into this millennium.
---
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 15f97c0abc9d..4cfcf5946162 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -44,7 +44,9 @@ EXPORT_SYMBOL(tsc_khz);
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
+#ifndef CONFIG_X86_64
static DEFINE_STATIC_KEY_FALSE(__use_tsc);
+#endif
int tsc_clocksource_reliable;
@@ -230,24 +232,26 @@ static void __init cyc2ns_init_secondary_cpus(void)
*/
noinstr u64 native_sched_clock(void)
{
- if (static_branch_likely(&__use_tsc)) {
- u64 tsc_now = rdtsc();
+#ifndef CONFIG_X86_64
+ if (!static_branch_unlikely(&__use_tsc)) {
+ /*
+ * Fall back to jiffies if there's no TSC available:
+ * ( But note that we still use it if the TSC is marked
+ * unstable. We do this because unlike Time Of Day,
+ * the scheduler clock tolerates small errors and it's
+ * very important for it to be as fast as the platform
+ * can achieve it. )
+ */
- /* return the value in ns */
- return __cycles_2_ns(tsc_now);
+ /* No locking but a rare wrong value is not a big deal: */
+ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
+#endif
- /*
- * Fall back to jiffies if there's no TSC available:
- * ( But note that we still use it if the TSC is marked
- * unstable. We do this because unlike Time Of Day,
- * the scheduler clock tolerates small errors and it's
- * very important for it to be as fast as the platform
- * can achieve it. )
- */
+ u64 tsc_now = rdtsc();
- /* No locking but a rare wrong value is not a big deal: */
- return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+ /* return the value in ns */
+ return __cycles_2_ns(tsc_now);
}
/*
@@ -291,7 +295,8 @@ int check_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
-#ifdef CONFIG_X86_TSC
+#ifndef CONFIG_X86_64
+#if defined(CONFIG_X86_TSC)
int __init notsc_setup(char *str)
{
mark_tsc_unstable("boot parameter notsc");
@@ -310,6 +315,7 @@ int __init notsc_setup(char *str)
#endif
__setup("notsc", notsc_setup);
+#endif
static int no_sched_irq_time;
static int no_tsc_watchdog;
@@ -1556,7 +1562,9 @@ static void __init tsc_enable_sched_clock(void)
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
cyc2ns_init_boot_cpu();
+#ifndef CONFIG_X86_64
static_branch_enable(&__use_tsc);
+#endif
}
void __init tsc_early_init(void)
On 20/11/23 13:05, Peter Zijlstra wrote:
> On Mon, Nov 20, 2023 at 11:55:28AM +0100, Valentin Schneider wrote:
>> __use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
>> it as __ro_after_init.
>>
>> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
>> ---
>>  arch/x86/kernel/tsc.c | 2 +-
>>  1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
>> index 15f97c0abc9d0..f19b42ea40573 100644
>> --- a/arch/x86/kernel/tsc.c
>> +++ b/arch/x86/kernel/tsc.c
>> @@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
>>  static int __read_mostly tsc_unstable;
>>  static unsigned int __initdata tsc_early_khz;
>>
>> -static DEFINE_STATIC_KEY_FALSE(__use_tsc);
>> +static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
>
> So sure, we can absolutely do that, but do we want to take this one
> further perhaps? "notsc" on x86_64 makes no sense what so ever. Lets
> drag things into this millennium.
>

Just to make sure I follow: currently, for the static key to be enabled, we
(mostly) need:
o X86_FEATURE_TSC is in CPUID
o determine_cpu_tsc_frequencies()->pit_hpet_ptimer_calibrate_cpu() passes

IIUC all X86_64 systems have a TSC, so the CPUID feature should be a given.

AFAICT pit_hpet_ptimer_calibrate_cpu() relies on having either HPET or the
ACPI PM timer, the latter should be widely available, though X86_PM_TIMER
can be disabled via EXPERT - is that a fringe case we don't care about, or
did I miss something? I don't really know this stuff, and I'm trying to
write a changelog...
On Mon, Dec 04, 2023 at 05:51:49PM +0100, Valentin Schneider wrote:
> On 20/11/23 13:05, Peter Zijlstra wrote:
> > On Mon, Nov 20, 2023 at 11:55:28AM +0100, Valentin Schneider wrote:
> >> __use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
> >> it as __ro_after_init.
> >>
> >> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> >> ---
> >>  arch/x86/kernel/tsc.c | 2 +-
> >>  1 file changed, 1 insertion(+), 1 deletion(-)
> >>
> >> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> >> index 15f97c0abc9d0..f19b42ea40573 100644
> >> --- a/arch/x86/kernel/tsc.c
> >> +++ b/arch/x86/kernel/tsc.c
> >> @@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
> >>  static int __read_mostly tsc_unstable;
> >>  static unsigned int __initdata tsc_early_khz;
> >>
> >> -static DEFINE_STATIC_KEY_FALSE(__use_tsc);
> >> +static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
> >
> > So sure, we can absolutely do that, but do we want to take this one
> > further perhaps? "notsc" on x86_64 makes no sense what so ever. Lets
> > drag things into this millennium.
> >
>
> Just to make sure I follow: currently, for the static key to be enabled, we
> (mostly) need:
> o X86_FEATURE_TSC is in CPUID
> o determine_cpu_tsc_frequencies()->pit_hpet_ptimer_calibrate_cpu() passes
>
> IIUC all X86_64 systems have a TSC, so the CPUID feature should be a given.
>
> AFAICT pit_hpet_ptimer_calibrate_cpu() relies on having either HPET or the
> ACPI PM timer, the latter should be widely available, though X86_PM_TIMER
> can be disabled via EXPERT - is that a fringe case we don't care about, or
> did I miss something? I don't really know this stuff, and I'm trying to
> write a changelog...

Ah, I was mostly just going by the fact that all of x86_64 have TSC and
disabling it makes no sense.

TSC calibration is always 'fun', but I don't know of a system where its
failure causes us to not use TSC, Thomas?
On 04/12/23 19:20, Peter Zijlstra wrote:
> On Mon, Dec 04, 2023 at 05:51:49PM +0100, Valentin Schneider wrote:
>> On 20/11/23 13:05, Peter Zijlstra wrote:
>> > On Mon, Nov 20, 2023 at 11:55:28AM +0100, Valentin Schneider wrote:
>> >> __use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
>> >> it as __ro_after_init.
>> >>
>> >> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
>> >> ---
>> >> arch/x86/kernel/tsc.c | 2 +-
>> >> 1 file changed, 1 insertion(+), 1 deletion(-)
>> >>
>> >> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
>> >> index 15f97c0abc9d0..f19b42ea40573 100644
>> >> --- a/arch/x86/kernel/tsc.c
>> >> +++ b/arch/x86/kernel/tsc.c
>> >> @@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
>> >> static int __read_mostly tsc_unstable;
>> >> static unsigned int __initdata tsc_early_khz;
>> >>
>> >> -static DEFINE_STATIC_KEY_FALSE(__use_tsc);
>> >> +static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
>> >
>> > So sure, we can absolutely do that, but do we want to take this one
>> > further perhaps? "notsc" on x86_64 makes no sense what so ever. Lets
>> > drag things into this millennium.
>> >
>>
>> Just to make sure I follow: currently, for the static key to be enabled, we
>> (mostly) need:
>> o X86_FEATURE_TSC is in CPUID
>> o determine_cpu_tsc_frequencies()->pit_hpet_ptimer_calibrate_cpu() passes
>>
>> IIUC all X86_64 systems have a TSC, so the CPUID feature should be a given.
>>
>> AFAICT pit_hpet_ptimer_calibrate_cpu() relies on having either HPET or the
>> ACPI PM timer, the latter should be widely available, though X86_PM_TIMER
>> can be disabled via EXPERT - is that a fringe case we don't care about, or
>> did I miss something? I don't really know this stuff, and I'm trying to
>> write a changelog...
>
> Ah, I was mostly just going by the fact that all of x86_64 have TSC and
> disabling it makes no sense.
>
> TSC calibration is always 'fun', but I don't know of a system where its
> failure causes us to not use TSC, Thomas?
Having another look at this, it looks like the actual requirements for the
TSC being used are either of:
o CPUID accepting 0x16 as eax input (cf. cpu_khz_from_cpuid())
o MSR_FSB_FREQ being available (cf. cpu_khz_from_msr())
o pit_hpet_ptimer_calibrate_cpu() doesn't mess up
I couldn't find any guarantees for x86_64 on having the processor frequency
information CPUID leaf, nor for the FSB_FREQ MSR (both tsc_msr_cpu_ids and
the SDM seem to point at only a handful of models).
Also for x86_64 there is this "apicpmtimer" cmdline arg which currently
disables the TSC. The commit that introduced it [1] suggests there are x86_64
systems out there with calibration issues, so now I'm not sure whether we
can kill the static key for x86_64 :(
[1]: 0c3749c41f5e ("[PATCH] x86_64: Calibrate APIC timer using PM timer")
followed by: 7fd67843b96f ("[PATCH] x86_64: Disable tsc when apicpmtimer is active")
© 2016 - 2025 Red Hat, Inc.