Support static branches depending on the value of misaligned accesses.
This will be used by a later patch in the series. All cpus must be
considered "fast" for this static branch to be flipped.
Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
arch/riscv/include/asm/cpufeature.h | 2 ++
arch/riscv/kernel/cpufeature.c | 30 ++++++++++++++++++++++++++++++
2 files changed, 32 insertions(+)

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index a418c3112cd6..7b129e5e2f07 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -133,4 +133,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}

+DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
#endif
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b3785ffc1570..095eb6ebdcaa 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -10,6 +10,7 @@
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
+#include <linux/jump_label.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
@@ -728,6 +729,35 @@ void riscv_user_isa_enable(void)
csr_set(CSR_SENVCFG, ENVCFG_CBZE);
}

+DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
+static int set_unaligned_access_static_branches(void)
+{
+ /*
+ * This will be called after check_unaligned_access_all_cpus so the
+ * result of unaligned access speed for all cpus will be available.
+ */
+
+ int cpu;
+ bool fast_misaligned_access_speed = true;
+
+ for_each_online_cpu(cpu) {
+ int this_perf = per_cpu(misaligned_access_speed, cpu);
+
+ if (this_perf != RISCV_HWPROBE_MISALIGNED_FAST) {
+ fast_misaligned_access_speed = false;
+ break;
+ }
+ }
+
+ if (fast_misaligned_access_speed)
+ static_branch_enable(&fast_misaligned_access_speed_key);
+
+ return 0;
+}
+
+arch_initcall_sync(set_unaligned_access_static_branches);
+
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch
--
2.43.0
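For context, a consumer of this key would look roughly like the sketch
below. This is illustrative only; the later patch in the series is not
shown here, and misaligned_ok() is a made-up name:

#include <linux/jump_label.h>
#include <asm/cpufeature.h>

/*
 * The branch is patched in place when the key flips, so the fast path
 * carries no memory load or conditional test at runtime.
 */
static bool misaligned_ok(void)
{
	return static_branch_likely(&fast_misaligned_access_speed_key);
}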
On Thu, Dec 21, 2023 at 7:38 AM Charlie Jenkins <charlie@rivosinc.com> wrote:
>
> Support static branches depending on the value of misaligned accesses.
> This will be used by a later patch in the series. All cpus must be
> considered "fast" for this static branch to be flipped.
>
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
> arch/riscv/include/asm/cpufeature.h | 2 ++
> arch/riscv/kernel/cpufeature.c | 30 ++++++++++++++++++++++++++++++
> 2 files changed, 32 insertions(+)
>
> diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> index a418c3112cd6..7b129e5e2f07 100644
> --- a/arch/riscv/include/asm/cpufeature.h
> +++ b/arch/riscv/include/asm/cpufeature.h
> @@ -133,4 +133,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
> return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
> }
>
> +DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> +
> #endif
> diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
> index b3785ffc1570..095eb6ebdcaa 100644
> --- a/arch/riscv/kernel/cpufeature.c
> +++ b/arch/riscv/kernel/cpufeature.c
> @@ -10,6 +10,7 @@
> #include <linux/bitmap.h>
> #include <linux/cpuhotplug.h>
> #include <linux/ctype.h>
> +#include <linux/jump_label.h>
> #include <linux/log2.h>
> #include <linux/memory.h>
> #include <linux/module.h>
> @@ -728,6 +729,35 @@ void riscv_user_isa_enable(void)
> csr_set(CSR_SENVCFG, ENVCFG_CBZE);
> }
>
> +DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> +
> +static int set_unaligned_access_static_branches(void)
> +{
> + /*
> + * This will be called after check_unaligned_access_all_cpus so the
> + * result of unaligned access speed for all cpus will be available.
> + */
> +
> + int cpu;
> + bool fast_misaligned_access_speed = true;
> +
> + for_each_online_cpu(cpu) {
Each online_cpu? Is there any offline_cpu that is not
fast_misaligned_access_speed?
Move into your riscv_online_cpu for each CPU, and use stop_machine for
synchronization.
> + int this_perf = per_cpu(misaligned_access_speed, cpu);
> +
> + if (this_perf != RISCV_HWPROBE_MISALIGNED_FAST) {
> + fast_misaligned_access_speed = false;
> + break;
> + }
> + }
> +
> + if (fast_misaligned_access_speed)
> + static_branch_enable(&fast_misaligned_access_speed_key);
> +
> + return 0;
> +}
> +
> +arch_initcall_sync(set_unaligned_access_static_branches);
> +
> #ifdef CONFIG_RISCV_ALTERNATIVE
> /*
> * Alternative patch sites consider 48 bits when determining when to patch
>
> --
> 2.43.0
>
>
--
Best Regards
Guo Ren
On Fri, Dec 22, 2023 at 08:33:18AM +0800, Guo Ren wrote:
> On Thu, Dec 21, 2023 at 7:38 AM Charlie Jenkins <charlie@rivosinc.com> wrote:
> >
> > Support static branches depending on the value of misaligned accesses.
> > This will be used by a later patch in the series. All cpus must be
> > considered "fast" for this static branch to be flipped.
> >
> > Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> > ---
> > arch/riscv/include/asm/cpufeature.h | 2 ++
> > arch/riscv/kernel/cpufeature.c | 30 ++++++++++++++++++++++++++++++
> > 2 files changed, 32 insertions(+)
> >
> > diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> > index a418c3112cd6..7b129e5e2f07 100644
> > --- a/arch/riscv/include/asm/cpufeature.h
> > +++ b/arch/riscv/include/asm/cpufeature.h
> > @@ -133,4 +133,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
> > return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
> > }
> >
> > +DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> > +
> > #endif
> > diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
> > index b3785ffc1570..095eb6ebdcaa 100644
> > --- a/arch/riscv/kernel/cpufeature.c
> > +++ b/arch/riscv/kernel/cpufeature.c
> > @@ -10,6 +10,7 @@
> > #include <linux/bitmap.h>
> > #include <linux/cpuhotplug.h>
> > #include <linux/ctype.h>
> > +#include <linux/jump_label.h>
> > #include <linux/log2.h>
> > #include <linux/memory.h>
> > #include <linux/module.h>
> > @@ -728,6 +729,35 @@ void riscv_user_isa_enable(void)
> > csr_set(CSR_SENVCFG, ENVCFG_CBZE);
> > }
> >
> > +DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> > +
> > +static int set_unaligned_access_static_branches(void)
> > +{
> > + /*
> > + * This will be called after check_unaligned_access_all_cpus so the
> > + * result of unaligned access speed for all cpus will be available.
> > + */
> > +
> > + int cpu;
> > + bool fast_misaligned_access_speed = true;
> > +
> > + for_each_online_cpu(cpu) {
> Each online_cpu? Is there any offline_cpu that is not
> fast_misaligned_access_speed?
I think instead of checking offline cpus, it would make more sense to
adjust the static branch when offline cpus come online. Since
riscv_online_cpu is called when a new CPU comes online, I can update the
static branch inside of that function.
>
> Move into your riscv_online_cpu for each CPU, and use stop_machine for
> synchronization.
>
I do not understand what you mean by "Move into your riscv_online_cpu
for each CPU", but I am assuming you are referring to updating the
static branch inside of riscv_online_cpu.
I believe any race condition that could be solved by stop_machine will
become irrelevant by ensuring that the static branch is updated when a
new cpu comes online.
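Something like this untested sketch, perhaps (the helper name is made
up, and the _cpuslocked variants assume this runs from the hotplug path
with the CPU hotplug lock already held):

/* Sketch only: re-evaluate the key whenever a CPU comes online. */
static void update_fast_misaligned_access_key(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(misaligned_access_speed, cpu) !=
		    RISCV_HWPROBE_MISALIGNED_FAST) {
			static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
			return;
		}
	}

	static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
}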
- Charlie
> > + int this_perf = per_cpu(misaligned_access_speed, cpu);
> > +
> > + if (this_perf != RISCV_HWPROBE_MISALIGNED_FAST) {
> > + fast_misaligned_access_speed = false;
> > + break;
> > + }
> > + }
> > +
> > + if (fast_misaligned_access_speed)
> > + static_branch_enable(&fast_misaligned_access_speed_key);
> > +
> > + return 0;
> > +}
> > +
> > +arch_initcall_sync(set_unaligned_access_static_branches);
> > +
> > #ifdef CONFIG_RISCV_ALTERNATIVE
> > /*
> > * Alternative patch sites consider 48 bits when determining when to patch
> >
> > --
> > 2.43.0
> >
> >
>
>
> --
> Best Regards
> Guo Ren
On Fri, Dec 22, 2023 at 9:37 AM Charlie Jenkins <charlie@rivosinc.com> wrote:
>
> On Fri, Dec 22, 2023 at 08:33:18AM +0800, Guo Ren wrote:
> > On Thu, Dec 21, 2023 at 7:38 AM Charlie Jenkins <charlie@rivosinc.com> wrote:
> > >
> > > Support static branches depending on the value of misaligned accesses.
> > > This will be used by a later patch in the series. All cpus must be
> > > considered "fast" for this static branch to be flipped.
> > >
> > > Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> > > ---
> > > arch/riscv/include/asm/cpufeature.h | 2 ++
> > > arch/riscv/kernel/cpufeature.c | 30 ++++++++++++++++++++++++++++++
> > > 2 files changed, 32 insertions(+)
> > >
> > > diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> > > index a418c3112cd6..7b129e5e2f07 100644
> > > --- a/arch/riscv/include/asm/cpufeature.h
> > > +++ b/arch/riscv/include/asm/cpufeature.h
> > > @@ -133,4 +133,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
> > > return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
> > > }
> > >
> > > +DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> > > +
> > > #endif
> > > diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
> > > index b3785ffc1570..095eb6ebdcaa 100644
> > > --- a/arch/riscv/kernel/cpufeature.c
> > > +++ b/arch/riscv/kernel/cpufeature.c
> > > @@ -10,6 +10,7 @@
> > > #include <linux/bitmap.h>
> > > #include <linux/cpuhotplug.h>
> > > #include <linux/ctype.h>
> > > +#include <linux/jump_label.h>
> > > #include <linux/log2.h>
> > > #include <linux/memory.h>
> > > #include <linux/module.h>
> > > @@ -728,6 +729,35 @@ void riscv_user_isa_enable(void)
> > > csr_set(CSR_SENVCFG, ENVCFG_CBZE);
> > > }
> > >
> > > +DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
> > > +
> > > +static int set_unaligned_access_static_branches(void)
> > > +{
> > > + /*
> > > + * This will be called after check_unaligned_access_all_cpus so the
> > > + * result of unaligned access speed for all cpus will be available.
> > > + */
> > > +
> > > + int cpu;
> > > + bool fast_misaligned_access_speed = true;
> > > +
> > > + for_each_online_cpu(cpu) {
> > Each online_cpu? Is there any offline_cpu that is not
> > fast_misaligned_access_speed?
>
> I think instead of checking offline cpus, it would make more sense to
> adjust the static branch when offline cpus come online. Since
> riscv_online_cpu is called when a new CPU comes online, I can update the
> static branch inside of that function.
>
> >
> > Move into your riscv_online_cpu for each CPU, and use stop_machine for
> > synchronization.
> >
>
> I do not understand what you mean by "Move into your riscv_online_cpu
> for each CPU", but I am assuming you are referring to updating the
> static branch inside of riscv_online_cpu.
I mean in:
arch/riscv/kernel/cpufeature.c: riscv_online_cpu()
Yes,"adjust the static branch when offline cpus come online ..."
>
> I believe any race condition that could be solved by stop_machine will
> become irrelevant by ensuring that the static branch is updated when a
> new cpu comes online.
Hmm... stop_machine may not be necessary.
>
> - Charlie
>
> > > + int this_perf = per_cpu(misaligned_access_speed, cpu);
> > > +
> > > + if (this_perf != RISCV_HWPROBE_MISALIGNED_FAST) {
> > > + fast_misaligned_access_speed = false;
> > > + break;
> > > + }
> > > + }
> > > +
> > > + if (fast_misaligned_access_speed)
> > > + static_branch_enable(&fast_misaligned_access_speed_key);
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +arch_initcall_sync(set_unaligned_access_static_branches);
> > > +
> > > #ifdef CONFIG_RISCV_ALTERNATIVE
> > > /*
> > > * Alternative patch sites consider 48 bits when determining when to patch
> > >
> > > --
> > > 2.43.0
> > >
> > >
> >
> >
> > --
> > Best Regards
> > Guo Ren
--
Best Regards
Guo Ren
On Wed, Dec 20, 2023 at 3:37 PM Charlie Jenkins <charlie@rivosinc.com> wrote:
>
> Support static branches depending on the value of misaligned accesses.
> This will be used by a later patch in the series. All cpus must be
> considered "fast" for this static branch to be flipped.
>
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>

You didn't pick up my tag from the last spin, so here it is again:

Reviewed-by: Evan Green <evan@rivosinc.com>