When KCOV is enabled, all functions get instrumented unless the
__no_sanitize_coverage attribute is used. To prepare for
__no_sanitize_coverage being applied to __init functions, we have to
handle differences in how GCC's inline optimizations get resolved. For
loongarch this exposed several places where __init annotations were
missing but ended up being "accidentally correct". Fix these cases and
force one function to be inlined with __always_inline.
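
As a userspace-compilable sketch of the failure mode (hypothetical
names; not part of the patch): once __init functions carry
__no_sanitize_coverage, GCC may decline to inline an instrumented
static inline helper into them, emitting it out of line in .text while
it still references .init.text, which modpost reports as a section
mismatch.

#define __init __attribute__((section(".init.text")))

static void __init real_init_work(void) { }

/* If instrumentation keeps this out of line, it lands in .text but
 * references .init.text, which becomes freed memory after boot. */
static inline void helper(void)
{
	real_init_work();
}

void __init boot_path(void)
{
	helper();	/* lone caller; inlining used to hide the issue */
}
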
Signed-off-by: Kees Cook <kees@kernel.org>
---
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tianyang Zhang <zhangtianyang@loongson.cn>
Cc: Bibo Mao <maobibo@loongson.cn>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: <loongarch@lists.linux.dev>
---
 arch/loongarch/include/asm/smp.h | 2 +-
 arch/loongarch/kernel/time.c     | 2 +-
 arch/loongarch/mm/ioremap.c      | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index ad0bd234a0f1..88e19d8a11f4 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -39,7 +39,7 @@ int loongson_cpu_disable(void);
 void loongson_cpu_die(unsigned int cpu);
 #endif
 
-static inline void plat_smp_setup(void)
+static __always_inline void plat_smp_setup(void)
 {
 	loongson_smp_setup();
 }
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index bc75a3a69fc8..367906b10f81 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -102,7 +102,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
 	return 0;
 }
 
-static unsigned long __init get_loops_per_jiffy(void)
+static unsigned long get_loops_per_jiffy(void)
 {
 	unsigned long lpj = (unsigned long)const_clock_freq;
 
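The time.c hunk goes the other direction: get_loops_per_jiffy loses
__init, presumably because it is reachable from paths that run after
boot (e.g. CPU hotplug), where only guaranteed inlining had kept the
old annotation "accidentally correct". A sketch of that hazard
(hypothetical names; not part of the patch):

#define __init __attribute__((section(".init.text")))

static unsigned long const_clock_freq = 100000000UL;

/* If this were __init and emitted out of line, the post-boot caller
 * below would jump into .init.text after that memory was freed. */
static unsigned long get_lpj(void)
{
	return const_clock_freq / 250;	/* 250 stands in for HZ */
}

/* Stand-in for a CPU-hotplug/resume path that runs after boot. */
unsigned long hotplug_path(void)
{
	return get_lpj();
}
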
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
index 70ca73019811..df949a3d0f34 100644
--- a/arch/loongarch/mm/ioremap.c
+++ b/arch/loongarch/mm/ioremap.c
@@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
 
 }
 
-void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size)
 {
 	return early_memremap(phys_addr, size);
 }
 
-void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size,
			  unsigned long prot_val)
 {
 	return early_memremap(phys_addr, size);
--
2.34.1
Hi, Kees,
On Fri, May 23, 2025 at 12:39 PM Kees Cook <kees@kernel.org> wrote:
> [...]
>
> diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
> index ad0bd234a0f1..88e19d8a11f4 100644
> --- a/arch/loongarch/include/asm/smp.h
> +++ b/arch/loongarch/include/asm/smp.h
> @@ -39,7 +39,7 @@ int loongson_cpu_disable(void);
> void loongson_cpu_die(unsigned int cpu);
> #endif
>
> -static inline void plat_smp_setup(void)
> +static __always_inline void plat_smp_setup(void)
Similar to x86 and arm, I prefer to mark it as __init rather than
__always_inline.
Huacai
> {
> loongson_smp_setup();
> }
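Concretely, the variant being suggested here would look something like
this (a sketch of the proposed modification, not the applied commit):

static inline void __init plat_smp_setup(void)
{
	loongson_smp_setup();
}

With __init, any out-of-line copy of the wrapper is placed in
.init.text alongside its only caller and callee, avoiding the mismatch
without forcing inlining.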
Hi, Kees,
On Thu, Jun 19, 2025 at 4:55 PM Huacai Chen <chenhuacai@kernel.org> wrote:
>
> Hi, Kees,
>
> On Fri, May 23, 2025 at 12:39 PM Kees Cook <kees@kernel.org> wrote:
> > [...]
> >
> > @@ -39,7 +39,7 @@ int loongson_cpu_disable(void);
> > void loongson_cpu_die(unsigned int cpu);
> > #endif
> >
> > -static inline void plat_smp_setup(void)
> > +static __always_inline void plat_smp_setup(void)
> Similar to x86 and arm, I prefer to mark it as __init rather than
> __always_inline.
If you have no objections, I will apply this patch with the above modification.
Huacai
On June 24, 2025 5:31:12 AM PDT, Huacai Chen <chenhuacai@kernel.org> wrote:
>Hi, Kees,
>
>On Thu, Jun 19, 2025 at 4:55 PM Huacai Chen <chenhuacai@kernel.org> wrote:
>> [...]
>> > -static inline void plat_smp_setup(void)
>> > +static __always_inline void plat_smp_setup(void)
>> Similar to x86 and arm, I prefer to mark it as __init rather than
>> __always_inline.
>If you have no objections, I will apply this patch with the above modification.
That's fine by me; thank you! I haven't had a chance yet to verify that it actually fixes the mismatches I saw, but if it looks good to you, yes please. :)
-Kees
--
Kees Cook