Several functions used in unaligned access probing are only run at
init time. Annotate them appropriately.
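For reference, __init places a function in the .init.text section, which the
kernel frees once boot completes, so an annotated function must never be
called afterwards. A simplified sketch of the annotation and its use
(hypothetical function name; the real macro in include/linux/init.h carries
extra attributes):

  /* Simplified; see include/linux/init.h for the full definition. */
  #define __init __section(".init.text") __cold

  static int __init probe_widget_init(void) /* hypothetical */
  {
          /* Runs once during boot; its memory is released afterwards. */
          return 0;
  }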
Fixes: f413aae96cda ("riscv: Set unaligned access speed at compile time")
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
---
arch/riscv/include/asm/cpufeature.h | 4 ++--
arch/riscv/kernel/traps_misaligned.c | 8 ++++----
arch/riscv/kernel/unaligned_access_speed.c | 14 +++++++-------
3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 569140d6e639..19defdc2002d 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -63,7 +63,7 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)

-bool check_unaligned_access_emulated_all_cpus(void);
+bool __init check_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
@@ -76,7 +76,7 @@ static inline bool unaligned_ctl_available(void)
}
#endif

-bool check_vector_unaligned_access_emulated_all_cpus(void);
+bool __init check_vector_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
DECLARE_PER_CPU(long, vector_misaligned_access);
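Annotating the prototypes keeps the declarations in step with the definitions
and matches the style this header already uses for riscv_user_isa_enable()
above. A minimal sketch of the pairing, with a hypothetical name:

  /* header */
  bool __init probe_all_cpus(void);

  /* C file: the definition carries the same annotation */
  bool __init probe_all_cpus(void)
  {
          return true;
  }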
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..aacbd9d7196e 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -605,7 +605,7 @@ void check_vector_unaligned_access_emulated(struct work_struct *work __always_un
kernel_vector_end();
}

-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;

@@ -625,7 +625,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
return true;
}
#else
-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
return false;
}
@@ -659,7 +659,7 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
}
}

-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
int cpu;

@@ -684,7 +684,7 @@ bool unaligned_ctl_available(void)
return unaligned_ctl;
}
#else
-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
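Note that the #else stubs are annotated as well, so both configurations land
in .init.text. That matters because a call from regular .text into .init.text
becomes a use-after-free once init memory is released, and modpost flags such
references at build time. A sketch of the misuse it catches (hypothetical
names):

  static int __init early_probe(void)
  {
          return 0;
  }

  int late_caller(void) /* stays in .text */
  {
          /* modpost: section mismatch, .text -> .init.text */
          return early_probe();
  }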
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 91f189cf1611..b7a8ff7ba6df 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -121,7 +121,7 @@ static int check_unaligned_access(void *param)
return 0;
}

-static void check_unaligned_access_nonboot_cpu(void *param)
+static void __init check_unaligned_access_nonboot_cpu(void *param)
{
unsigned int cpu = smp_processor_id();
struct page **pages = param;
@@ -175,7 +175,7 @@ static void set_unaligned_access_static_branches(void)
modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
}

-static int lock_and_set_unaligned_access_static_branch(void)
+static int __init lock_and_set_unaligned_access_static_branch(void)
{
cpus_read_lock();
set_unaligned_access_static_branches();
@@ -218,7 +218,7 @@ static int riscv_offline_cpu(unsigned int cpu)
}

/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int check_unaligned_access_speed_all_cpus(void)
+static int __init check_unaligned_access_speed_all_cpus(void)
{
unsigned int cpu;
unsigned int cpu_count = num_possible_cpus();
@@ -264,7 +264,7 @@ static int check_unaligned_access_speed_all_cpus(void)
return 0;
}
#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
-static int check_unaligned_access_speed_all_cpus(void)
+static int __init check_unaligned_access_speed_all_cpus(void)
{
return 0;
}
@@ -379,7 +379,7 @@ static int riscv_online_cpu_vec(unsigned int cpu)
}

/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);

@@ -393,13 +393,13 @@ static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unuse
return 0;
}
#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
return 0;
}
#endif

-static int check_unaligned_access_all_cpus(void)
+static int __init check_unaligned_access_all_cpus(void)
{
bool all_cpus_emulated, all_cpus_vec_unsupported;
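check_unaligned_access_all_cpus() is the entry point for everything annotated
above, and it runs from an initcall, which is what confines the whole call
graph to boot. The registration pattern, sketched with a hypothetical body
(mainline registers this function with arch_initcall()):

  static int __init check_unaligned_access_all_cpus(void)
  {
          /* probe for emulation, then measure speed where supported */
          return 0;
  }
  arch_initcall(check_unaligned_access_all_cpus);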
--
2.48.1

Hi Drew,
On 07/02/2025 17:19, Andrew Jones wrote:
> Several functions used in unaligned access probing are only run at
> init time. Annotate them appropriately.
>
> Fixes: f413aae96cda ("riscv: Set unaligned access speed at compile time")
> Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
> [...]
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Thanks,
Alex