Keep kcov_state.area as the pointer to the memory buffer used by
kcov and shared with the userspace. Store the pointer to the trace
(part of the buffer holding sequential events) separately, as we will
be splitting that buffer into multiple parts.
No functional changes so far.
Signed-off-by: Alexander Potapenko <glider@google.com>
---
Change-Id: I50b5589ef0e0b6726aa0579334093c648f76790a
v2:
- Address comments by Dmitry Vyukov:
- tweak commit description
- Address comments by Marco Elver:
- rename sanitizer_cov_write_subsequent() to kcov_append_to_buffer()
- Update code to match the new description of struct kcov_state
---
include/linux/kcov_types.h | 9 ++++++-
kernel/kcov.c | 54 ++++++++++++++++++++++----------------
2 files changed, 39 insertions(+), 24 deletions(-)
diff --git a/include/linux/kcov_types.h b/include/linux/kcov_types.h
index 53b25b6f0addd..233e7a682654b 100644
--- a/include/linux/kcov_types.h
+++ b/include/linux/kcov_types.h
@@ -7,9 +7,16 @@
struct kcov_state {
/* Size of the area (in long's). */
unsigned int size;
+ /*
+ * Pointer to user-provided memory used by kcov. This memory may
+ * contain multiple buffers.
+ */
+ void *area;
+ /* Size of the trace (in long's). */
+ unsigned int trace_size;
/* Buffer for coverage collection, shared with the userspace. */
- void *area;
+ unsigned long *trace;
/*
* KCOV sequence number: incremented each time kcov is reenabled, used
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8e98ca8d52743..038261145cf93 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -195,11 +195,11 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
return ip;
}
-static notrace void kcov_append_to_buffer(unsigned long *area, int size,
+static notrace void kcov_append_to_buffer(unsigned long *trace, int size,
unsigned long ip)
{
/* The first 64-bit word is the number of subsequent PCs. */
- unsigned long pos = READ_ONCE(area[0]) + 1;
+ unsigned long pos = READ_ONCE(trace[0]) + 1;
if (likely(pos < size)) {
/*
@@ -209,9 +209,9 @@ static notrace void kcov_append_to_buffer(unsigned long *area, int size,
* overitten by the recursive __sanitizer_cov_trace_pc().
* Update pos before writing pc to avoid such interleaving.
*/
- WRITE_ONCE(area[0], pos);
+ WRITE_ONCE(trace[0], pos);
barrier();
- area[pos] = ip;
+ trace[pos] = ip;
}
}
@@ -225,8 +225,8 @@ void notrace __sanitizer_cov_trace_pc_guard(u32 *guard)
if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
return;
- kcov_append_to_buffer(current->kcov_state.area,
- current->kcov_state.size,
+ kcov_append_to_buffer(current->kcov_state.trace,
+ current->kcov_state.trace_size,
canonicalize_ip(_RET_IP_));
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard);
@@ -242,8 +242,8 @@ void notrace __sanitizer_cov_trace_pc(void)
if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
return;
- kcov_append_to_buffer(current->kcov_state.area,
- current->kcov_state.size,
+ kcov_append_to_buffer(current->kcov_state.trace,
+ current->kcov_state.trace_size,
canonicalize_ip(_RET_IP_));
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
@@ -252,9 +252,9 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
- struct task_struct *t;
- u64 *area;
u64 count, start_index, end_pos, max_pos;
+ struct task_struct *t;
+ u64 *trace;
t = current;
if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
@@ -266,22 +266,22 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
* We write all comparison arguments and types as u64.
* The buffer was allocated for t->kcov_state.size unsigned longs.
*/
- area = (u64 *)t->kcov_state.area;
+ trace = (u64 *)t->kcov_state.trace;
max_pos = t->kcov_state.size * sizeof(unsigned long);
- count = READ_ONCE(area[0]);
+ count = READ_ONCE(trace[0]);
/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
start_index = 1 + count * KCOV_WORDS_PER_CMP;
end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
if (likely(end_pos <= max_pos)) {
/* See comment in kcov_append_to_buffer(). */
- WRITE_ONCE(area[0], count + 1);
+ WRITE_ONCE(trace[0], count + 1);
barrier();
- area[start_index] = type;
- area[start_index + 1] = arg1;
- area[start_index + 2] = arg2;
- area[start_index + 3] = ip;
+ trace[start_index] = type;
+ trace[start_index + 1] = arg1;
+ trace[start_index + 2] = arg2;
+ trace[start_index + 3] = ip;
}
}
@@ -382,11 +382,13 @@ static void kcov_start(struct task_struct *t, struct kcov *kcov,
static void kcov_stop(struct task_struct *t)
{
+ int saved_sequence = t->kcov_state.sequence;
+
WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
barrier();
t->kcov = NULL;
- t->kcov_state.size = 0;
- t->kcov_state.area = NULL;
+ t->kcov_state = (typeof(t->kcov_state)){ 0 };
+ t->kcov_state.sequence = saved_sequence;
}
static void kcov_task_reset(struct task_struct *t)
@@ -736,6 +738,8 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
kcov->state.area = area;
kcov->state.size = size;
+ kcov->state.trace = area;
+ kcov->state.trace_size = size;
kcov->mode = KCOV_MODE_INIT;
spin_unlock_irqrestore(&kcov->lock, flags);
return 0;
@@ -928,10 +932,12 @@ void kcov_remote_start(u64 handle)
local_lock_irqsave(&kcov_percpu_data.lock, flags);
}
- /* Reset coverage size. */
- *(u64 *)area = 0;
state.area = area;
state.size = size;
+ state.trace = area;
+ state.trace_size = size;
+ /* Reset coverage size. */
+ state.trace[0] = 0;
if (in_serving_softirq()) {
kcov_remote_softirq_start(t);
@@ -1004,8 +1010,8 @@ void kcov_remote_stop(void)
struct task_struct *t = current;
struct kcov *kcov;
unsigned int mode;
- void *area;
- unsigned int size;
+ void *area, *trace;
+ unsigned int size, trace_size;
int sequence;
unsigned long flags;
@@ -1037,6 +1043,8 @@ void kcov_remote_stop(void)
kcov = t->kcov;
area = t->kcov_state.area;
size = t->kcov_state.size;
+ trace = t->kcov_state.trace;
+ trace_size = t->kcov_state.trace_size;
sequence = t->kcov_state.sequence;
kcov_stop(t);
--
2.50.0.727.gbf7dc18ff4-goog
On Thu, 26 Jun 2025 at 15:42, Alexander Potapenko <glider@google.com> wrote: > > Keep kcov_state.area as the pointer to the memory buffer used by > kcov and shared with the userspace. Store the pointer to the trace > (part of the buffer holding sequential events) separately, as we will > be splitting that buffer in multiple parts. > No functional changes so far. > > Signed-off-by: Alexander Potapenko <glider@google.com> > > --- > Change-Id: I50b5589ef0e0b6726aa0579334093c648f76790a > > v2: > - Address comments by Dmitry Vyukov: > - tweak commit description > - Address comments by Marco Elver: > - rename sanitizer_cov_write_subsequent() to kcov_append_to_buffer() > - Update code to match the new description of struct kcov_state > --- > include/linux/kcov_types.h | 9 ++++++- > kernel/kcov.c | 54 ++++++++++++++++++++++---------------- > 2 files changed, 39 insertions(+), 24 deletions(-) > > diff --git a/include/linux/kcov_types.h b/include/linux/kcov_types.h > index 53b25b6f0addd..233e7a682654b 100644 > --- a/include/linux/kcov_types.h > +++ b/include/linux/kcov_types.h > @@ -7,9 +7,16 @@ > struct kcov_state { > /* Size of the area (in long's). */ > unsigned int size; > + /* > + * Pointer to user-provided memory used by kcov. This memory may s/kcov/KCOV/ for consistency > + * contain multiple buffers. > + */ > + void *area; > > + /* Size of the trace (in long's). */ > + unsigned int trace_size; > /* Buffer for coverage collection, shared with the userspace. 
*/ > - void *area; > + unsigned long *trace; > > /* > * KCOV sequence number: incremented each time kcov is reenabled, used > diff --git a/kernel/kcov.c b/kernel/kcov.c > index 8e98ca8d52743..038261145cf93 100644 > --- a/kernel/kcov.c > +++ b/kernel/kcov.c > @@ -195,11 +195,11 @@ static notrace unsigned long canonicalize_ip(unsigned long ip) > return ip; > } > > -static notrace void kcov_append_to_buffer(unsigned long *area, int size, > +static notrace void kcov_append_to_buffer(unsigned long *trace, int size, > unsigned long ip) > { > /* The first 64-bit word is the number of subsequent PCs. */ > - unsigned long pos = READ_ONCE(area[0]) + 1; > + unsigned long pos = READ_ONCE(trace[0]) + 1; > > if (likely(pos < size)) { > /* > @@ -209,9 +209,9 @@ static notrace void kcov_append_to_buffer(unsigned long *area, int size, > * overitten by the recursive __sanitizer_cov_trace_pc(). > * Update pos before writing pc to avoid such interleaving. > */ > - WRITE_ONCE(area[0], pos); > + WRITE_ONCE(trace[0], pos); > barrier(); > - area[pos] = ip; > + trace[pos] = ip; > } > } > > @@ -225,8 +225,8 @@ void notrace __sanitizer_cov_trace_pc_guard(u32 *guard) > if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current)) > return; > > - kcov_append_to_buffer(current->kcov_state.area, > - current->kcov_state.size, > + kcov_append_to_buffer(current->kcov_state.trace, > + current->kcov_state.trace_size, > canonicalize_ip(_RET_IP_)); > } > EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard); > @@ -242,8 +242,8 @@ void notrace __sanitizer_cov_trace_pc(void) > if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current)) > return; > > - kcov_append_to_buffer(current->kcov_state.area, > - current->kcov_state.size, > + kcov_append_to_buffer(current->kcov_state.trace, > + current->kcov_state.trace_size, > canonicalize_ip(_RET_IP_)); > } > EXPORT_SYMBOL(__sanitizer_cov_trace_pc); > @@ -252,9 +252,9 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_pc); > #ifdef CONFIG_KCOV_ENABLE_COMPARISONS > static void notrace 
write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) > { > - struct task_struct *t; > - u64 *area; > u64 count, start_index, end_pos, max_pos; > + struct task_struct *t; > + u64 *trace; > > t = current; > if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t)) > @@ -266,22 +266,22 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) > * We write all comparison arguments and types as u64. > * The buffer was allocated for t->kcov_state.size unsigned longs. > */ > - area = (u64 *)t->kcov_state.area; > + trace = (u64 *)t->kcov_state.trace; > max_pos = t->kcov_state.size * sizeof(unsigned long); > > - count = READ_ONCE(area[0]); > + count = READ_ONCE(trace[0]); > > /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */ > start_index = 1 + count * KCOV_WORDS_PER_CMP; > end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64); > if (likely(end_pos <= max_pos)) { > /* See comment in kcov_append_to_buffer(). */ > - WRITE_ONCE(area[0], count + 1); > + WRITE_ONCE(trace[0], count + 1); > barrier(); > - area[start_index] = type; > - area[start_index + 1] = arg1; > - area[start_index + 2] = arg2; > - area[start_index + 3] = ip; > + trace[start_index] = type; > + trace[start_index + 1] = arg1; > + trace[start_index + 2] = arg2; > + trace[start_index + 3] = ip; > } > } > > @@ -382,11 +382,13 @@ static void kcov_start(struct task_struct *t, struct kcov *kcov, > > static void kcov_stop(struct task_struct *t) > { > + int saved_sequence = t->kcov_state.sequence; > + > WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED); > barrier(); > t->kcov = NULL; > - t->kcov_state.size = 0; > - t->kcov_state.area = NULL; > + t->kcov_state = (typeof(t->kcov_state)){ 0 }; In a previous patch you used the following syntax, let's stick to one of these forms: data->saved_state = (struct kcov_state){}; > + t->kcov_state.sequence = saved_sequence; > } > > static void kcov_task_reset(struct task_struct *t) > @@ -736,6 +738,8 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, 
unsigned long arg) > } > kcov->state.area = area; > kcov->state.size = size; > + kcov->state.trace = area; > + kcov->state.trace_size = size; > kcov->mode = KCOV_MODE_INIT; > spin_unlock_irqrestore(&kcov->lock, flags); > return 0; > @@ -928,10 +932,12 @@ void kcov_remote_start(u64 handle) > local_lock_irqsave(&kcov_percpu_data.lock, flags); > } > > - /* Reset coverage size. */ > - *(u64 *)area = 0; > state.area = area; > state.size = size; > + state.trace = area; > + state.trace_size = size; > + /* Reset coverage size. */ > + state.trace[0] = 0; > > if (in_serving_softirq()) { > kcov_remote_softirq_start(t); > @@ -1004,8 +1010,8 @@ void kcov_remote_stop(void) > struct task_struct *t = current; > struct kcov *kcov; > unsigned int mode; > - void *area; > - unsigned int size; > + void *area, *trace; > + unsigned int size, trace_size; > int sequence; > unsigned long flags; > > @@ -1037,6 +1043,8 @@ void kcov_remote_stop(void) > kcov = t->kcov; > area = t->kcov_state.area; > size = t->kcov_state.size; > + trace = t->kcov_state.trace; > + trace_size = t->kcov_state.trace_size; > sequence = t->kcov_state.sequence; > > kcov_stop(t); > -- > 2.50.0.727.gbf7dc18ff4-goog >
> > + * Pointer to user-provided memory used by kcov. This memory may > > s/kcov/KCOV/ for consistency Ack. > > @@ -382,11 +382,13 @@ static void kcov_start(struct task_struct *t, struct kcov *kcov, > > > > static void kcov_stop(struct task_struct *t) > > { > > + int saved_sequence = t->kcov_state.sequence; > > + > > WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED); > > barrier(); > > t->kcov = NULL; > > - t->kcov_state.size = 0; > > - t->kcov_state.area = NULL; > > + t->kcov_state = (typeof(t->kcov_state)){ 0 }; > > In a previous patch you used the following syntax, let's stick to one > of these forms: > > data->saved_state = (struct kcov_state){}; Yeah, I did some research recently and figured out {} is more preferred.
Hi Alexander, kernel test robot noticed the following build warnings: [auto build test WARNING on tip/x86/core] [cannot apply to akpm-mm/mm-everything tip/sched/core arnd-asm-generic/master akpm-mm/mm-nonmm-unstable masahiroy-kbuild/for-next masahiroy-kbuild/fixes shuah-kselftest/next shuah-kselftest/fixes linus/master mcgrof/modules-next v6.16-rc3 next-20250627] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Alexander-Potapenko/x86-kcov-disable-instrumentation-of-arch-x86-kernel-tsc-c/20250626-214703 base: tip/x86/core patch link: https://lore.kernel.org/r/20250626134158.3385080-8-glider%40google.com patch subject: [PATCH v2 07/11] kcov: add trace and trace_size to struct kcov_state config: x86_64-buildonly-randconfig-004-20250627 (https://download.01.org/0day-ci/archive/20250627/202506271946.HACEE9U0-lkp@intel.com/config) compiler: clang version 20.1.7 (https://github.com/llvm/llvm-project 6146a88f60492b520a36f8f8f3231e15f3cc6082) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250627/202506271946.HACEE9U0-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202506271946.HACEE9U0-lkp@intel.com/ All warnings (new ones prefixed by >>): >> kernel/kcov.c:1013:15: warning: variable 'trace' set but not used [-Wunused-but-set-variable] 1013 | void *area, *trace; | ^ >> kernel/kcov.c:1014:21: warning: variable 'trace_size' set but not used [-Wunused-but-set-variable] 1014 | unsigned int size, trace_size; | ^ 2 warnings generated. vim +/trace +1013 kernel/kcov.c 1006 1007 /* See the comment before kcov_remote_start() for usage details. 
*/ 1008 void kcov_remote_stop(void) 1009 { 1010 struct task_struct *t = current; 1011 struct kcov *kcov; 1012 unsigned int mode; > 1013 void *area, *trace; > 1014 unsigned int size, trace_size; 1015 int sequence; 1016 unsigned long flags; 1017 1018 if (!in_task() && !in_softirq_really()) 1019 return; 1020 1021 local_lock_irqsave(&kcov_percpu_data.lock, flags); 1022 1023 mode = READ_ONCE(t->kcov_mode); 1024 barrier(); 1025 if (!kcov_mode_enabled(mode)) { 1026 local_unlock_irqrestore(&kcov_percpu_data.lock, flags); 1027 return; 1028 } 1029 /* 1030 * When in softirq, check if the corresponding kcov_remote_start() 1031 * actually found the remote handle and started collecting coverage. 1032 */ 1033 if (in_serving_softirq() && !t->kcov_softirq) { 1034 local_unlock_irqrestore(&kcov_percpu_data.lock, flags); 1035 return; 1036 } 1037 /* Make sure that kcov_softirq is only set when in softirq. */ 1038 if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) { 1039 local_unlock_irqrestore(&kcov_percpu_data.lock, flags); 1040 return; 1041 } 1042 1043 kcov = t->kcov; 1044 area = t->kcov_state.area; 1045 size = t->kcov_state.size; 1046 trace = t->kcov_state.trace; 1047 trace_size = t->kcov_state.trace_size; 1048 sequence = t->kcov_state.sequence; 1049 1050 kcov_stop(t); 1051 if (in_serving_softirq()) { 1052 t->kcov_softirq = 0; 1053 kcov_remote_softirq_stop(t); 1054 } 1055 1056 spin_lock(&kcov->lock); 1057 /* 1058 * KCOV_DISABLE could have been called between kcov_remote_start() 1059 * and kcov_remote_stop(), hence the sequence check. 1060 */ 1061 if (sequence == kcov->state.sequence && kcov->remote) 1062 kcov_move_area(kcov->mode, kcov->state.area, kcov->state.size, 1063 area); 1064 spin_unlock(&kcov->lock); 1065 1066 if (in_task()) { 1067 spin_lock(&kcov_remote_lock); 1068 kcov_remote_area_put(area, size); 1069 spin_unlock(&kcov_remote_lock); 1070 } 1071 1072 local_unlock_irqrestore(&kcov_percpu_data.lock, flags); 1073 1074 /* Get in kcov_remote_start(). 
*/ 1075 kcov_put(kcov); 1076 } 1077 EXPORT_SYMBOL(kcov_remote_stop); 1078 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
© 2016 - 2025 Red Hat, Inc.