This is the first half of CONFIG_KCOV_EXT_RECORDS.
Set the appropriate compiler flags to call separate hooks for function
entry/exit, and provide these hooks, but don't make it visible in the KCOV
UAPI yet.
With -fsanitize-coverage=trace-pc-entry-exit, the compiler behavior changes
as follows:
- The __sanitizer_cov_trace_pc() call on function entry is replaced with a
call to __sanitizer_cov_trace_pc_entry(); so for now,
__sanitizer_cov_trace_pc_entry() must be treated the same way as
__sanitizer_cov_trace_pc().
- On function exit, an extra call to __sanitizer_cov_trace_pc_exit()
happens; since function exit produced no coverage in the old UAPI,
__sanitizer_cov_trace_pc_exit() should do nothing for now.
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jann Horn <jannh@google.com>
---
include/linux/kcov.h | 2 ++
kernel/kcov.c | 30 +++++++++++++++++++++++-------
lib/Kconfig.debug | 14 ++++++++++++++
scripts/Makefile.kcov | 2 ++
tools/objtool/check.c | 2 ++
5 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 0143358874b0..e5502d674029 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -81,6 +81,8 @@ typedef unsigned long long kcov_u64;
#endif
void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_pc_entry(void);
+void __sanitizer_cov_trace_pc_exit(void);
void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 0b369e88c7c9..2cc48b65384b 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -202,15 +202,10 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
return ip;
}
-/*
- * Entry point from instrumented code.
- * This is called once per basic-block/edge.
- */
-void notrace __sanitizer_cov_trace_pc(void)
+static void notrace kcov_add_pc_record(unsigned long record)
{
struct task_struct *t;
unsigned long *area;
- unsigned long ip = canonicalize_ip(_RET_IP_);
unsigned long pos;
t = current;
@@ -230,11 +225,32 @@ void notrace __sanitizer_cov_trace_pc(void)
*/
WRITE_ONCE(area[0], pos);
barrier();
- area[pos] = ip;
+ area[pos] = record;
}
}
+
+/*
+ * Entry point from instrumented code.
+ * This is called once per basic-block/edge.
+ */
+void notrace __sanitizer_cov_trace_pc(void)
+{
+ kcov_add_pc_record(canonicalize_ip(_RET_IP_));
+}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
+#ifdef CONFIG_KCOV_EXT_RECORDS
+void notrace __sanitizer_cov_trace_pc_entry(void)
+{
+ unsigned long record = canonicalize_ip(_RET_IP_);
+
+ kcov_add_pc_record(record);
+}
+void notrace __sanitizer_cov_trace_pc_exit(void)
+{
+}
+#endif
+
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 93f356d2b3d9..dddc330ad3ca 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2215,6 +2215,20 @@ config KCOV
For more details, see Documentation/dev-tools/kcov.rst.
+config KCOV_EXT_RECORDS
+ bool "Support extended KCOV records with function entry/exit records"
+ depends on KCOV
+ depends on 64BIT
+ # TODO: check CLANG_VERSION instead once this has landed in an LLVM
+ # release
+ depends on $(cc-option,-fsanitize-coverage=trace-pc-entry-exit)
+ help
+ Extended KCOV records allow distinguishing between multiple types of
+ records: Normal edge coverage, function entry, and function exit.
+
+ This will likely cause a small additional slowdown compared to normal
+ KCOV.
+
config KCOV_ENABLE_COMPARISONS
bool "Enable comparison operands collection by KCOV"
depends on KCOV
diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
index 78305a84ba9d..aa0be904268f 100644
--- a/scripts/Makefile.kcov
+++ b/scripts/Makefile.kcov
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
kcov-flags-y += -fsanitize-coverage=trace-pc
+kcov-flags-$(CONFIG_KCOV_EXT_RECORDS) += -fsanitize-coverage=trace-pc-entry-exit
kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -fsanitize-coverage=trace-cmp
kcov-rflags-y += -Cpasses=sancov-module
kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-level=3
kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-trace-pc
+kcov-rflags-$(CONFIG_KCOV_EXT_RECORDS) += -Cllvm-args=-sanitizer-coverage-trace-pc-entry-exit
kcov-rflags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -Cllvm-args=-sanitizer-coverage-trace-compares
export CFLAGS_KCOV := $(kcov-flags-y)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a30379e4ff97..ae3127227621 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1251,6 +1251,8 @@ static const char *uaccess_safe_builtin[] = {
"write_comp_data",
"check_kcov_mode",
"__sanitizer_cov_trace_pc",
+ "__sanitizer_cov_trace_pc_entry",
+ "__sanitizer_cov_trace_pc_exit",
"__sanitizer_cov_trace_const_cmp1",
"__sanitizer_cov_trace_const_cmp2",
"__sanitizer_cov_trace_const_cmp4",
--
2.53.0.473.g4a7958ca14-goog
On Wed, 11 Mar 2026 at 22:06, Jann Horn <jannh@google.com> wrote:
>
> This is the first half of CONFIG_KCOV_EXT_RECORDS.
>
> Set the appropriate compiler flags to call separate hooks for function
> entry/exit, and provide these hooks, but don't make it visible in the KCOV
> UAPI yet.
>
> With -fsanitize-coverage=trace-pc-entry-exit, the compiler behavior changes
> as follows:
>
> - The __sanitizer_cov_trace_pc() call on function entry is replaced with a
> call to __sanitizer_cov_trace_pc_entry(); so for now,
> __sanitizer_cov_trace_pc_entry() must be treated the same way as
> __sanitizer_cov_trace_pc().
> - On function exit, an extra call to __sanitizer_cov_trace_pc_exit()
> happens; since function exit produced no coverage in the old UAPI,
> __sanitizer_cov_trace_pc_exit() should do nothing for now.
>
> Cc: Josh Poimboeuf <jpoimboe@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Jann Horn <jannh@google.com>
> ---
> include/linux/kcov.h | 2 ++
> kernel/kcov.c | 30 +++++++++++++++++++++++-------
> lib/Kconfig.debug | 14 ++++++++++++++
> scripts/Makefile.kcov | 2 ++
> tools/objtool/check.c | 2 ++
> 5 files changed, 43 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/kcov.h b/include/linux/kcov.h
> index 0143358874b0..e5502d674029 100644
> --- a/include/linux/kcov.h
> +++ b/include/linux/kcov.h
> @@ -81,6 +81,8 @@ typedef unsigned long long kcov_u64;
> #endif
>
> void __sanitizer_cov_trace_pc(void);
> +void __sanitizer_cov_trace_pc_entry(void);
> +void __sanitizer_cov_trace_pc_exit(void);
> void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
> void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
> void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
> diff --git a/kernel/kcov.c b/kernel/kcov.c
> index 0b369e88c7c9..2cc48b65384b 100644
> --- a/kernel/kcov.c
> +++ b/kernel/kcov.c
> @@ -202,15 +202,10 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
> return ip;
> }
>
> -/*
> - * Entry point from instrumented code.
> - * This is called once per basic-block/edge.
> - */
> -void notrace __sanitizer_cov_trace_pc(void)
> +static void notrace kcov_add_pc_record(unsigned long record)
> {
> struct task_struct *t;
> unsigned long *area;
> - unsigned long ip = canonicalize_ip(_RET_IP_);
> unsigned long pos;
>
> t = current;
> @@ -230,11 +225,32 @@ void notrace __sanitizer_cov_trace_pc(void)
> */
> WRITE_ONCE(area[0], pos);
> barrier();
> - area[pos] = ip;
> + area[pos] = record;
> }
> }
> +
> +/*
> + * Entry point from instrumented code.
> + * This is called once per basic-block/edge.
> + */
> +void notrace __sanitizer_cov_trace_pc(void)
> +{
> + kcov_add_pc_record(canonicalize_ip(_RET_IP_));
> +}
> EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
>
> +#ifdef CONFIG_KCOV_EXT_RECORDS
> +void notrace __sanitizer_cov_trace_pc_entry(void)
> +{
> + unsigned long record = canonicalize_ip(_RET_IP_);
> +
> + kcov_add_pc_record(record);
> +}
> +void notrace __sanitizer_cov_trace_pc_exit(void)
> +{
> +}
> +#endif
> +
> #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
> static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> {
> diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> index 93f356d2b3d9..dddc330ad3ca 100644
> --- a/lib/Kconfig.debug
> +++ b/lib/Kconfig.debug
> @@ -2215,6 +2215,20 @@ config KCOV
>
> For more details, see Documentation/dev-tools/kcov.rst.
>
> +config KCOV_EXT_RECORDS
> + bool "Support extended KCOV records with function entry/exit records"
> + depends on KCOV
> + depends on 64BIT
> + # TODO: check CLANG_VERSION instead once this has landed in an LLVM
> + # release
I think the compiler option check is actually better, since it will
allow us to test earlier and supports compiler backports.
But it may be good to add a reference to the compiler patch in the
commit description.
Otherwise:
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
> + depends on $(cc-option,-fsanitize-coverage=trace-pc-entry-exit)
> + help
> + Extended KCOV records allow distinguishing between multiple types of
> + records: Normal edge coverage, function entry, and function exit.
> +
> + This will likely cause a small additional slowdown compared to normal
> + KCOV.
> +
> config KCOV_ENABLE_COMPARISONS
> bool "Enable comparison operands collection by KCOV"
> depends on KCOV
> diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
> index 78305a84ba9d..aa0be904268f 100644
> --- a/scripts/Makefile.kcov
> +++ b/scripts/Makefile.kcov
> @@ -1,10 +1,12 @@
> # SPDX-License-Identifier: GPL-2.0-only
> kcov-flags-y += -fsanitize-coverage=trace-pc
> +kcov-flags-$(CONFIG_KCOV_EXT_RECORDS) += -fsanitize-coverage=trace-pc-entry-exit
> kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -fsanitize-coverage=trace-cmp
>
> kcov-rflags-y += -Cpasses=sancov-module
> kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-level=3
> kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-trace-pc
> +kcov-rflags-$(CONFIG_KCOV_EXT_RECORDS) += -Cllvm-args=-sanitizer-coverage-trace-pc-entry-exit
> kcov-rflags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -Cllvm-args=-sanitizer-coverage-trace-compares
>
> export CFLAGS_KCOV := $(kcov-flags-y)
> diff --git a/tools/objtool/check.c b/tools/objtool/check.c
> index a30379e4ff97..ae3127227621 100644
> --- a/tools/objtool/check.c
> +++ b/tools/objtool/check.c
> @@ -1251,6 +1251,8 @@ static const char *uaccess_safe_builtin[] = {
> "write_comp_data",
> "check_kcov_mode",
> "__sanitizer_cov_trace_pc",
> + "__sanitizer_cov_trace_pc_entry",
> + "__sanitizer_cov_trace_pc_exit",
> "__sanitizer_cov_trace_const_cmp1",
> "__sanitizer_cov_trace_const_cmp2",
> "__sanitizer_cov_trace_const_cmp4",
>
> --
> 2.53.0.473.g4a7958ca14-goog
>
On Fri, Mar 13, 2026 at 8:54 AM Dmitry Vyukov <dvyukov@google.com> wrote:
> On Wed, 11 Mar 2026 at 22:06, Jann Horn <jannh@google.com> wrote:
> > diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> > index 93f356d2b3d9..dddc330ad3ca 100644
> > --- a/lib/Kconfig.debug
> > +++ b/lib/Kconfig.debug
> > @@ -2215,6 +2215,20 @@ config KCOV
> >
> > For more details, see Documentation/dev-tools/kcov.rst.
> >
> > +config KCOV_EXT_RECORDS
> > + bool "Support extended KCOV records with function entry/exit records"
> > + depends on KCOV
> > + depends on 64BIT
> > + # TODO: check CLANG_VERSION instead once this has landed in an LLVM
> > + # release
>
> I think the compiler option check is actually better, since it will
> allow us to test earlier and supports compiler backports.

Makes sense, I'll remove the TODO.
(I was thinking about it from the perspective that every compiler flag
test increases the kernel build time a tiny bit, because it causes an
extra compiler invocation at the start of the build.)

> But it may be good to add a reference to the compiler patch in the
> commit description.

Ack, will add that.

> Otherwise:
>
> Reviewed-by: Dmitry Vyukov <dvyukov@google.com>

Thanks!
On Fri, 13 Mar 2026 at 08:53, Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Wed, 11 Mar 2026 at 22:06, Jann Horn <jannh@google.com> wrote:
> >
> > This is the first half of CONFIG_KCOV_EXT_RECORDS.
> >
> > Set the appropriate compiler flags to call separate hooks for function
> > entry/exit, and provide these hooks, but don't make it visible in the KCOV
> > UAPI yet.
> >
> > With -fsanitize-coverage=trace-pc-entry-exit, the compiler behavior changes
> > as follows:
> >
> > - The __sanitizer_cov_trace_pc() call on function entry is replaced with a
> > call to __sanitizer_cov_trace_pc_entry(); so for now,
> > __sanitizer_cov_trace_pc_entry() must be treated the same way as
> > __sanitizer_cov_trace_pc().
> > - On function exit, an extra call to __sanitizer_cov_trace_pc_exit()
> > happens; since function exit produced no coverage in the old UAPI,
> > __sanitizer_cov_trace_pc_exit() should do nothing for now.
> >
> > Cc: Josh Poimboeuf <jpoimboe@kernel.org>
> > Cc: Peter Zijlstra <peterz@infradead.org>
> > Signed-off-by: Jann Horn <jannh@google.com>
> > ---
> > include/linux/kcov.h | 2 ++
> > kernel/kcov.c | 30 +++++++++++++++++++++++-------
> > lib/Kconfig.debug | 14 ++++++++++++++
> > scripts/Makefile.kcov | 2 ++
> > tools/objtool/check.c | 2 ++
> > 5 files changed, 43 insertions(+), 7 deletions(-)
> >
> > diff --git a/include/linux/kcov.h b/include/linux/kcov.h
> > index 0143358874b0..e5502d674029 100644
> > --- a/include/linux/kcov.h
> > +++ b/include/linux/kcov.h
> > @@ -81,6 +81,8 @@ typedef unsigned long long kcov_u64;
> > #endif
> >
> > void __sanitizer_cov_trace_pc(void);
> > +void __sanitizer_cov_trace_pc_entry(void);
> > +void __sanitizer_cov_trace_pc_exit(void);
> > void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
> > void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
> > void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
> > diff --git a/kernel/kcov.c b/kernel/kcov.c
> > index 0b369e88c7c9..2cc48b65384b 100644
> > --- a/kernel/kcov.c
> > +++ b/kernel/kcov.c
> > @@ -202,15 +202,10 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
> > return ip;
> > }
> >
> > -/*
> > - * Entry point from instrumented code.
> > - * This is called once per basic-block/edge.
> > - */
> > -void notrace __sanitizer_cov_trace_pc(void)
> > +static void notrace kcov_add_pc_record(unsigned long record)
__always_inline just in case
> > {
> > struct task_struct *t;
> > unsigned long *area;
> > - unsigned long ip = canonicalize_ip(_RET_IP_);
> > unsigned long pos;
> >
> > t = current;
> > @@ -230,11 +225,32 @@ void notrace __sanitizer_cov_trace_pc(void)
> > */
> > WRITE_ONCE(area[0], pos);
> > barrier();
> > - area[pos] = ip;
> > + area[pos] = record;
> > }
> > }
> > +
> > +/*
> > + * Entry point from instrumented code.
> > + * This is called once per basic-block/edge.
> > + */
> > +void notrace __sanitizer_cov_trace_pc(void)
> > +{
> > + kcov_add_pc_record(canonicalize_ip(_RET_IP_));
> > +}
> > EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
> >
> > +#ifdef CONFIG_KCOV_EXT_RECORDS
> > +void notrace __sanitizer_cov_trace_pc_entry(void)
> > +{
> > + unsigned long record = canonicalize_ip(_RET_IP_);
> > +
> > + kcov_add_pc_record(record);
> > +}
> > +void notrace __sanitizer_cov_trace_pc_exit(void)
> > +{
> > +}
> > +#endif
> > +
> > #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
> > static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> > {
> > diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> > index 93f356d2b3d9..dddc330ad3ca 100644
> > --- a/lib/Kconfig.debug
> > +++ b/lib/Kconfig.debug
> > @@ -2215,6 +2215,20 @@ config KCOV
> >
> > For more details, see Documentation/dev-tools/kcov.rst.
> >
> > +config KCOV_EXT_RECORDS
> > + bool "Support extended KCOV records with function entry/exit records"
> > + depends on KCOV
> > + depends on 64BIT
> > + # TODO: check CLANG_VERSION instead once this has landed in an LLVM
> > + # release
>
> I think the compiler option check is actually better, since it will
> allow us to test earlier and supports compiler backports.
> But it may be good to add a reference to the compiler patch in the
> commit description.
>
> Otherwise:
>
> Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
>
> > + depends on $(cc-option,-fsanitize-coverage=trace-pc-entry-exit)
> > + help
> > + Extended KCOV records allow distinguishing between multiple types of
> > + records: Normal edge coverage, function entry, and function exit.
> > +
> > + This will likely cause a small additional slowdown compared to normal
> > + KCOV.
> > +
> > config KCOV_ENABLE_COMPARISONS
> > bool "Enable comparison operands collection by KCOV"
> > depends on KCOV
> > diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
> > index 78305a84ba9d..aa0be904268f 100644
> > --- a/scripts/Makefile.kcov
> > +++ b/scripts/Makefile.kcov
> > @@ -1,10 +1,12 @@
> > # SPDX-License-Identifier: GPL-2.0-only
> > kcov-flags-y += -fsanitize-coverage=trace-pc
> > +kcov-flags-$(CONFIG_KCOV_EXT_RECORDS) += -fsanitize-coverage=trace-pc-entry-exit
> > kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -fsanitize-coverage=trace-cmp
> >
> > kcov-rflags-y += -Cpasses=sancov-module
> > kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-level=3
> > kcov-rflags-y += -Cllvm-args=-sanitizer-coverage-trace-pc
> > +kcov-rflags-$(CONFIG_KCOV_EXT_RECORDS) += -Cllvm-args=-sanitizer-coverage-trace-pc-entry-exit
> > kcov-rflags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -Cllvm-args=-sanitizer-coverage-trace-compares
> >
> > export CFLAGS_KCOV := $(kcov-flags-y)
> > diff --git a/tools/objtool/check.c b/tools/objtool/check.c
> > index a30379e4ff97..ae3127227621 100644
> > --- a/tools/objtool/check.c
> > +++ b/tools/objtool/check.c
> > @@ -1251,6 +1251,8 @@ static const char *uaccess_safe_builtin[] = {
> > "write_comp_data",
> > "check_kcov_mode",
> > "__sanitizer_cov_trace_pc",
> > + "__sanitizer_cov_trace_pc_entry",
> > + "__sanitizer_cov_trace_pc_exit",
> > "__sanitizer_cov_trace_const_cmp1",
> > "__sanitizer_cov_trace_const_cmp2",
> > "__sanitizer_cov_trace_const_cmp4",
> >
> > --
> > 2.53.0.473.g4a7958ca14-goog
> >
On Fri, Mar 13, 2026 at 8:59 AM Dmitry Vyukov <dvyukov@google.com> wrote:
> On Fri, 13 Mar 2026 at 08:53, Dmitry Vyukov <dvyukov@google.com> wrote:
> > On Wed, 11 Mar 2026 at 22:06, Jann Horn <jannh@google.com> wrote:
> > > @@ -202,15 +202,10 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
> > > return ip;
> > > }
> > >
> > > -/*
> > > - * Entry point from instrumented code.
> > > - * This is called once per basic-block/edge.
> > > - */
> > > -void notrace __sanitizer_cov_trace_pc(void)
> > > +static void notrace kcov_add_pc_record(unsigned long record)
> >
> > __alwaysinline just in case

Ack, will add in v2.
© 2016 - 2026 Red Hat, Inc.