Split the code that checks for the uniformity of misaligned access
performance on all CPUs out of check_unaligned_access_emulated_all_cpus()
and into its own function, which will be used for the delegation check. No
functional changes intended.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
arch/riscv/kernel/traps_misaligned.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e551ba17f557..287ec37021c8 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -647,6 +647,18 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 }
 #endif
 
+static bool all_cpus_unaligned_scalar_access_emulated(void)
+{
+        int cpu;
+
+        for_each_online_cpu(cpu)
+                if (per_cpu(misaligned_access_speed, cpu) !=
+                    RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
+                        return false;
+
+        return true;
+}
+
 #ifdef CONFIG_RISCV_SCALAR_MISALIGNED
 
 static bool unaligned_ctl __read_mostly;
@@ -685,8 +697,6 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
 
 bool __init check_unaligned_access_emulated_all_cpus(void)
 {
-        int cpu;
-
         /*
          * We can only support PR_UNALIGN controls if all CPUs have misaligned
          * accesses emulated since tasks requesting such control can run on any
@@ -694,10 +704,8 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
         */
        on_each_cpu(check_unaligned_access_emulated, NULL, 1);
 
-        for_each_online_cpu(cpu)
-                if (per_cpu(misaligned_access_speed, cpu)
-                    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
-                        return false;
+        if (!all_cpus_unaligned_scalar_access_emulated())
+                return false;
 
        unaligned_ctl = true;
        return true;
--
2.49.0
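
[For context, the delegation check mentioned in the commit message would
consume the new helper roughly as below; misaligned_traps_can_delegate() is
a hypothetical name used purely for illustration, not code from the
follow-up patch.]

static bool misaligned_traps_can_delegate(void)
{
        /*
         * Hypothetical sketch only: a delegation check could key off
         * whether every online CPU currently takes (and emulates)
         * misaligned scalar access traps.
         */
        return all_cpus_unaligned_scalar_access_emulated();
}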
On Thu, May 15, 2025 at 10:22:10AM +0200, Clément Léger wrote:
> Split the code that check for the uniformity of misaligned accesses
> performance on all cpus from check_unaligned_access_emulated_all_cpus()
> to its own function which will be used for delegation check. No
> functional changes intended.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>
> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
> ---
> arch/riscv/kernel/traps_misaligned.c | 20 ++++++++++++++------
> 1 file changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index e551ba17f557..287ec37021c8 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -647,6 +647,18 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
> }
> #endif
>
> +static bool all_cpus_unaligned_scalar_access_emulated(void)
> +{
> + int cpu;
> +
> + for_each_online_cpu(cpu)
> + if (per_cpu(misaligned_access_speed, cpu) !=
misaligned_access_speed is only defined when
CONFIG_RISCV_SCALAR_MISALIGNED. This function should return false when
!CONFIG_RISCV_SCALAR_MISALIGNED and only use this logic otherwise.
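E.g. a stub along these lines (untested sketch) in the
!CONFIG_RISCV_SCALAR_MISALIGNED branch:

static bool all_cpus_unaligned_scalar_access_emulated(void)
{
        /* No scalar emulation probing here, so don't claim all CPUs emulate */
        return false;
}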
- Charlie
> + RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
> + return false;
> +
> + return true;
> +}
> +
> #ifdef CONFIG_RISCV_SCALAR_MISALIGNED
>
> static bool unaligned_ctl __read_mostly;
> @@ -685,8 +697,6 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
>
> bool __init check_unaligned_access_emulated_all_cpus(void)
> {
> - int cpu;
> -
> /*
> * We can only support PR_UNALIGN controls if all CPUs have misaligned
> * accesses emulated since tasks requesting such control can run on any
> @@ -694,10 +704,8 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
> */
> on_each_cpu(check_unaligned_access_emulated, NULL, 1);
>
> - for_each_online_cpu(cpu)
> - if (per_cpu(misaligned_access_speed, cpu)
> - != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
> - return false;
> + if (!all_cpus_unaligned_scalar_access_emulated())
> + return false;
>
> unaligned_ctl = true;
> return true;
> --
> 2.49.0
>
>
On 20/05/2025 01:32, Charlie Jenkins wrote:
> On Thu, May 15, 2025 at 10:22:10AM +0200, Clément Léger wrote:
>> Split the code that check for the uniformity of misaligned accesses
>> performance on all cpus from check_unaligned_access_emulated_all_cpus()
>> to its own function which will be used for delegation check. No
>> functional changes intended.
>>
>> Signed-off-by: Clément Léger <cleger@rivosinc.com>
>> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
>> ---
>> arch/riscv/kernel/traps_misaligned.c | 20 ++++++++++++++------
>> 1 file changed, 14 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
>> index e551ba17f557..287ec37021c8 100644
>> --- a/arch/riscv/kernel/traps_misaligned.c
>> +++ b/arch/riscv/kernel/traps_misaligned.c
>> @@ -647,6 +647,18 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
>> }
>> #endif
>>
>> +static bool all_cpus_unaligned_scalar_access_emulated(void)
>> +{
>> + int cpu;
>> +
>> + for_each_online_cpu(cpu)
>> + if (per_cpu(misaligned_access_speed, cpu) !=
>
> misaligned_access_speed is only defined when
> CONFIG_RISCV_SCALAR_MISALIGNED. This function should return false when
> !CONFIG_RISCV_SCALAR_MISALIGNED and only use this logic otherwise.
Hi Charlie,
misaligned_access_speed is defined in unaligned_access_speed.c, which is
compiled based on CONFIG_RISCV_MISALIGNED (ditto for traps_misaligned.c):
obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
However, the declaration for it in the header cpufeature.h is under a
CONFIG_RISCV_SCALAR_MISALIGNED ifdef. So either the declaration or the
definition is wrong, but the ifdefery soup makes it quite difficult to
understand what's going on.
I would suggest moving the DECLARE_PER_CPU under CONFIG_RISCV_MISALIGNED,
which would also reduce the ifdefs in traps_misaligned.c.
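
Untested sketch of what that could look like in asm/cpufeature.h
(surrounding context assumed, not copied from the tree):

#ifdef CONFIG_RISCV_MISALIGNED
/* Probed by unaligned_access_speed.c, read by traps_misaligned.c */
DECLARE_PER_CPU(long, misaligned_access_speed);
#endif

i.e. the declaration follows the same config as the objects that define
and use it, while the scalar-only bits stay under
CONFIG_RISCV_SCALAR_MISALIGNED.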
Thanks,
Clément
>
> - Charlie
>
>> + RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
>> + return false;
>> +
>> + return true;
>> +}
>> +
>> #ifdef CONFIG_RISCV_SCALAR_MISALIGNED
>>
>> static bool unaligned_ctl __read_mostly;
>> @@ -685,8 +697,6 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
>>
>> bool __init check_unaligned_access_emulated_all_cpus(void)
>> {
>> - int cpu;
>> -
>> /*
>> * We can only support PR_UNALIGN controls if all CPUs have misaligned
>> * accesses emulated since tasks requesting such control can run on any
>> @@ -694,10 +704,8 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
>> */
>> on_each_cpu(check_unaligned_access_emulated, NULL, 1);
>>
>> - for_each_online_cpu(cpu)
>> - if (per_cpu(misaligned_access_speed, cpu)
>> - != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
>> - return false;
>> + if (!all_cpus_unaligned_scalar_access_emulated())
>> + return false;
>>
>> unaligned_ctl = true;
>> return true;
>> --
>> 2.49.0
>>
>>
On Tue, May 20, 2025 at 10:19:47AM +0200, Clément Léger wrote:
>
>
> On 20/05/2025 01:32, Charlie Jenkins wrote:
> > On Thu, May 15, 2025 at 10:22:10AM +0200, Clément Léger wrote:
> >> Split the code that check for the uniformity of misaligned accesses
> >> performance on all cpus from check_unaligned_access_emulated_all_cpus()
> >> to its own function which will be used for delegation check. No
> >> functional changes intended.
> >>
> >> Signed-off-by: Clément Léger <cleger@rivosinc.com>
> >> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
> >> ---
> >> arch/riscv/kernel/traps_misaligned.c | 20 ++++++++++++++------
> >> 1 file changed, 14 insertions(+), 6 deletions(-)
> >>
> >> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> >> index e551ba17f557..287ec37021c8 100644
> >> --- a/arch/riscv/kernel/traps_misaligned.c
> >> +++ b/arch/riscv/kernel/traps_misaligned.c
> >> @@ -647,6 +647,18 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
> >> }
> >> #endif
> >>
> >> +static bool all_cpus_unaligned_scalar_access_emulated(void)
> >> +{
> >> + int cpu;
> >> +
> >> + for_each_online_cpu(cpu)
> >> + if (per_cpu(misaligned_access_speed, cpu) !=
> >
> > misaligned_access_speed is only defined when
> > CONFIG_RISCV_SCALAR_MISALIGNED. This function should return false when
> > !CONFIG_RISCV_SCALAR_MISALIGNED and only use this logic otherwise.
>
> Hi Charlie,
>
> misaligned_access_speed is defined in unaligned_access_speed.c which is
> compiled based on CONFIG_RISCV_MISALIGNED (ditto for trap_misaligned.c)
>
> obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
>
> However, the declaration for it in the header cpu-feature.h however is
> under a CONFIG_RISCV_SCALAR_MISALIGNED ifdef. So either the declaration
> or the definition is wrong but the ifdefery soup makes it quite
> difficult to understand what's going on.
>
> I would suggest to move the DECLARE_PER_CPU under
> CONFIG_RISCV_MISALIGNED so that it reduces ifdef in traps_misaligned as
> well.
Here is the patch I am using locally for testing purposes, but if there is
a way to reduce the number of ifdefs, that is probably the better way to go:
From 18f9a056d3b597934c931abdf72fb6e775ccb714 Mon Sep 17 00:00:00 2001
From: Charlie Jenkins <charlie@rivosinc.com>
Date: Mon, 19 May 2025 16:35:51 -0700
Subject: [PATCH] fixup! riscv: misaligned: move emulated access uniformity
check in a function
---
arch/riscv/kernel/traps_misaligned.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index f3ab84bc4632..1449c6a4ac21 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -647,6 +647,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 }
 #endif
 
+#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
+
+static bool unaligned_ctl __read_mostly;
+
 static bool all_cpus_unaligned_scalar_access_emulated(void)
 {
        int cpu;
@@ -659,10 +663,6 @@ static bool all_cpus_unaligned_scalar_access_emulated(void)
        return true;
 }
 
-#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
-
-static bool unaligned_ctl __read_mostly;
-
 static void check_unaligned_access_emulated(void *arg __always_unused)
 {
        int cpu = smp_processor_id();
@@ -716,6 +716,10 @@ bool unaligned_ctl_available(void)
        return unaligned_ctl;
 }
 #else
+static bool all_cpus_unaligned_scalar_access_emulated(void)
+{
+        return false;
+}
 bool __init check_unaligned_access_emulated_all_cpus(void)
 {
        return false;
--
2.43.0
- Charlie
>
> Thanks,
>
> Clément
>
> >
> > - Charlie
> >
> >> + RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
> >> + return false;
> >> +
> >> + return true;
> >> +}
> >> +
> >> #ifdef CONFIG_RISCV_SCALAR_MISALIGNED
> >>
> >> static bool unaligned_ctl __read_mostly;
> >> @@ -685,8 +697,6 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
> >>
> >> bool __init check_unaligned_access_emulated_all_cpus(void)
> >> {
> >> - int cpu;
> >> -
> >> /*
> >> * We can only support PR_UNALIGN controls if all CPUs have misaligned
> >> * accesses emulated since tasks requesting such control can run on any
> >> @@ -694,10 +704,8 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
> >> */
> >> on_each_cpu(check_unaligned_access_emulated, NULL, 1);
> >>
> >> - for_each_online_cpu(cpu)
> >> - if (per_cpu(misaligned_access_speed, cpu)
> >> - != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
> >> - return false;
> >> + if (!all_cpus_unaligned_scalar_access_emulated())
> >> + return false;
> >>
> >> unaligned_ctl = true;
> >> return true;
> >> --
> >> 2.49.0
> >>
> >>
>
On 20/05/2025 19:08, Charlie Jenkins wrote:
> On Tue, May 20, 2025 at 10:19:47AM +0200, Clément Léger wrote:
>>
>>
>> On 20/05/2025 01:32, Charlie Jenkins wrote:
>>> On Thu, May 15, 2025 at 10:22:10AM +0200, Clément Léger wrote:
>>>> Split the code that check for the uniformity of misaligned accesses
>>>> performance on all cpus from check_unaligned_access_emulated_all_cpus()
>>>> to its own function which will be used for delegation check. No
>>>> functional changes intended.
>>>>
>>>> Signed-off-by: Clément Léger <cleger@rivosinc.com>
>>>> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
>>>> ---
>>>> arch/riscv/kernel/traps_misaligned.c | 20 ++++++++++++++------
>>>> 1 file changed, 14 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
>>>> index e551ba17f557..287ec37021c8 100644
>>>> --- a/arch/riscv/kernel/traps_misaligned.c
>>>> +++ b/arch/riscv/kernel/traps_misaligned.c
>>>> @@ -647,6 +647,18 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
>>>> }
>>>> #endif
>>>>
>>>> +static bool all_cpus_unaligned_scalar_access_emulated(void)
>>>> +{
>>>> + int cpu;
>>>> +
>>>> + for_each_online_cpu(cpu)
>>>> + if (per_cpu(misaligned_access_speed, cpu) !=
>>>
>>> misaligned_access_speed is only defined when
>>> CONFIG_RISCV_SCALAR_MISALIGNED. This function should return false when
>>> !CONFIG_RISCV_SCALAR_MISALIGNED and only use this logic otherwise.
>>
>> Hi Charlie,
>>
>> misaligned_access_speed is defined in unaligned_access_speed.c which is
>> compiled based on CONFIG_RISCV_MISALIGNED (ditto for trap_misaligned.c)
>>
>> obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
>>
>> However, the declaration for it in the header cpu-feature.h however is
>> under a CONFIG_RISCV_SCALAR_MISALIGNED ifdef. So either the declaration
>> or the definition is wrong but the ifdefery soup makes it quite
>> difficult to understand what's going on.
>>
>> I would suggest to move the DECLARE_PER_CPU under
>> CONFIG_RISCV_MISALIGNED so that it reduces ifdef in traps_misaligned as
>> well.
>
> Here is the patch I am using locally for testing purposes, but if there
> is a way to reduce the number of ifdefs that is probably the better way to go:
>
Hi Charlie,
I have another way to do this that indeed reduces the number of
ifdefs/duplicated functions. I'll submit that.
Thanks,
Clément
> From 18f9a056d3b597934c931abdf72fb6e775ccb714 Mon Sep 17 00:00:00 2001
> From: Charlie Jenkins <charlie@rivosinc.com>
> Date: Mon, 19 May 2025 16:35:51 -0700
> Subject: [PATCH] fixup! riscv: misaligned: move emulated access uniformity
> check in a function
>
> ---
> arch/riscv/kernel/traps_misaligned.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index f3ab84bc4632..1449c6a4ac21 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -647,6 +647,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
> }
> #endif
>
> +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
> +
> +static bool unaligned_ctl __read_mostly;
> +
> static bool all_cpus_unaligned_scalar_access_emulated(void)
> {
> int cpu;
> @@ -659,10 +663,6 @@ static bool all_cpus_unaligned_scalar_access_emulated(void)
> return true;
> }
>
> -#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
> -
> -static bool unaligned_ctl __read_mostly;
> -
> static void check_unaligned_access_emulated(void *arg __always_unused)
> {
> int cpu = smp_processor_id();
> @@ -716,6 +716,10 @@ bool unaligned_ctl_available(void)
> return unaligned_ctl;
> }
> #else
> +static bool all_cpus_unaligned_scalar_access_emulated(void)
> +{
> + return false;
> +}
> bool __init check_unaligned_access_emulated_all_cpus(void)
> {
> return false;