include/linux/ftrace.h | 2 ++ kernel/trace/trace.h | 3 +-- kernel/trace/trace_entries.h | 8 +++---- kernel/trace/trace_functions_graph.c | 31 +++++++++++++--------------- kernel/trace/trace_irqsoff.c | 5 +++-- kernel/trace/trace_sched_wakeup.c | 6 ++++-- 6 files changed, 28 insertions(+), 27 deletions(-)
Commit 66611c047570 ("fgraph: Remove calltime and rettime from generic")
incorrectly modified the offset values for calltime and rettime fields
in the funcgraph_exit trace event on 32-bit ARM, which are used to parse
the corresponding values from trace raw data. The actual memory offset of
calltime is 20 (not 24), and rettime is 28 (not 32) for the
funcgraph_exit event.
Before the fix, the funcgraph_exit format was:
~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
name: funcgraph_exit
ID: 10
format:
...
field:unsigned long long calltime; offset:24; size:8; signed:0;
field:unsigned long long rettime; offset:32; size:8; signed:0;
After the fix, the correct funcgraph_exit format is:
name: funcgraph_exit
ID: 10
format:
...
field:unsigned long long calltime; offset:20; size:8; signed:0;
field:unsigned long long rettime; offset:28; size:8; signed:0;
Signed-off-by: jempty.liang <imntjempty@163.com>
---
include/linux/ftrace.h | 2 ++
kernel/trace/trace.h | 3 +--
kernel/trace/trace_entries.h | 8 +++----
kernel/trace/trace_functions_graph.c | 31 +++++++++++++---------------
kernel/trace/trace_irqsoff.c | 5 +++--
kernel/trace/trace_sched_wakeup.c | 6 ++++--
6 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a3a8989e3268..52727a342273 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1191,6 +1191,8 @@ struct ftrace_graph_ret {
int depth;
/* Number of functions that overran the depth limit for current task */
unsigned int overrun;
+ unsigned long long calltime;
+ unsigned long long rettime;
} __packed;
struct fgraph_ops;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 69e7defba6c6..18c8a0b1ecd5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -968,8 +968,7 @@ extern int __trace_graph_retaddr_entry(struct trace_array *tr,
struct ftrace_regs *fregs);
extern void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned int trace_ctx,
- u64 calltime, u64 rettime);
+ unsigned int trace_ctx);
extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index f6a8d29c0d76..362a757e65a2 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -127,8 +127,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__field_packed( unsigned long, ret, retval )
__field_packed( unsigned int, ret, depth )
__field_packed( unsigned int, ret, overrun )
- __field(unsigned long long, calltime )
- __field(unsigned long long, rettime )
+ __field_packed(unsigned long long, ret, calltime)
+ __field_packed(unsigned long long, ret, rettime)
),
F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u retval: %lx",
@@ -149,8 +149,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__field_packed( unsigned long, ret, func )
__field_packed( unsigned int, ret, depth )
__field_packed( unsigned int, ret, overrun )
- __field(unsigned long long, calltime )
- __field(unsigned long long, rettime )
+ __field_packed(unsigned long long, ret, calltime)
+ __field_packed(unsigned long long, ret, rettime)
),
F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u",
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 1de6f1573621..0d2266ec67a4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -317,10 +317,12 @@ __trace_graph_function(struct trace_array *tr,
struct ftrace_graph_ret ret = {
.func = ip,
.depth = 0,
+ .calltime = time,
+ .rettime = time,
};
__trace_graph_entry(tr, &ent, trace_ctx);
- __trace_graph_return(tr, &ret, trace_ctx, time, time);
+ __trace_graph_return(tr, &ret, trace_ctx);
}
void
@@ -333,8 +335,7 @@ trace_graph_function(struct trace_array *tr,
void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned int trace_ctx,
- u64 calltime, u64 rettime)
+ unsigned int trace_ctx)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -346,8 +347,6 @@ void __trace_graph_return(struct trace_array *tr,
return;
entry = ring_buffer_event_data(event);
entry->ret = *trace;
- entry->calltime = calltime;
- entry->rettime = rettime;
trace_buffer_unlock_commit_nostack(buffer, event);
}
@@ -372,10 +371,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
struct trace_array *tr = gops->private;
struct fgraph_times *ftimes;
unsigned int trace_ctx;
- u64 calltime, rettime;
int size;
- rettime = trace_clock_local();
+ trace->rettime = trace_clock_local();
ftrace_graph_addr_finish(gops, trace);
@@ -390,10 +388,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
handle_nosleeptime(tr, trace, ftimes, size);
- calltime = ftimes->calltime;
+ trace->calltime = ftimes->calltime;
trace_ctx = tracing_gen_ctx();
- __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
+ __trace_graph_return(tr, trace, trace_ctx);
}
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
@@ -418,8 +416,10 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
tr = gops->private;
handle_nosleeptime(tr, trace, ftimes, size);
+ trace->calltime = ftimes->calltime;
+
if (tracing_thresh &&
- (trace_clock_local() - ftimes->calltime < tracing_thresh))
+ (trace->rettime - ftimes->calltime < tracing_thresh))
return;
else
trace_graph_return(trace, gops, fregs);
@@ -956,7 +956,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
graph_ret = &ret_entry->ret;
call = &entry->graph_ent;
- duration = ret_entry->rettime - ret_entry->calltime;
+ duration = graph_ret->rettime - graph_ret->calltime;
if (data) {
struct fgraph_cpu_data *cpu_data;
@@ -1275,14 +1275,11 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
}
static enum print_line_t
-print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
struct trace_entry *ent, struct trace_iterator *iter,
u32 flags)
{
- struct ftrace_graph_ret *trace = &retentry->ret;
- u64 calltime = retentry->calltime;
- u64 rettime = retentry->rettime;
- unsigned long long duration = rettime - calltime;
+ unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
unsigned long func;
@@ -1482,7 +1479,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
- return print_graph_return(field, s, entry, iter, flags);
+ return print_graph_return(&field->ret, s, entry, iter, flags);
}
case TRACE_STACK:
case TRACE_FN:
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 17673905907c..946be462a211 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -229,11 +229,12 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
if (!func_prolog_dec(tr, &data, &flags))
return;
- rettime = trace_clock_local();
+ trace->rettime = trace_clock_local();
calltime = fgraph_retrieve_data(gops->idx, &size);
if (calltime) {
+ trace->calltime = *calltime;
trace_ctx = tracing_gen_ctx_flags(flags);
- __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+ __trace_graph_return(tr, trace, trace_ctx);
}
local_dec(&data->disabled);
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 8faa73d3bba1..3bcfd1bf60ad 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -164,11 +164,13 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
- rettime = trace_clock_local();
+ trace->rettime = trace_clock_local();
calltime = fgraph_retrieve_data(gops->idx, &size);
- if (calltime)
+ if (calltime) {
+ trace->calltime = *calltime;
__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+ }
local_dec(&data->disabled);
preempt_enable_notrace();
--
2.25.1
Hi jempty.liang,
kernel test robot noticed the following build errors:
[auto build test ERROR on trace/for-next]
[also build test ERROR on next-20260202]
[cannot apply to linus/master v6.16-rc1]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/jempty-liang/tracing-Fix-funcgraph_exit-calltime-rettime-offset-for-32-bit-ARM/20260202-203926
base: https://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace for-next
patch link: https://lore.kernel.org/r/20260202123342.2544795-1-imntjempty%40163.com
patch subject: [PATCH v2] tracing: Fix funcgraph_exit calltime/rettime offset for 32-bit ARM
config: i386-allnoconfig-bpf (https://download.01.org/0day-ci/archive/20260203/202602030843.MZJLItcH-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260203/202602030843.MZJLItcH-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602030843.MZJLItcH-lkp@intel.com/
All error/warnings (new ones prefixed by >>):
>> kernel/trace/trace_irqsoff.c:224:6: warning: unused variable 'rettime' [-Wunused-variable]
224 | u64 rettime;
| ^~~~~~~
1 warning generated.
--
>> kernel/trace/trace_sched_wakeup.c:172:46: error: too many arguments to function call, expected 3, have 5
172 | __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
| ~~~~~~~~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~
kernel/trace/trace.h:969:13: note: '__trace_graph_return' declared here
969 | extern void __trace_graph_return(struct trace_array *tr,
| ^ ~~~~~~~~~~~~~~~~~~~~~~~
970 | struct ftrace_graph_ret *trace,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
971 | unsigned int trace_ctx);
| ~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
vim +172 kernel/trace/trace_sched_wakeup.c
7495a5beaa22f1 Jiri Olsa 2010-09-23 150
37238abe3cb47b Steven Rostedt (VMware 2024-06-03 151) static void wakeup_graph_return(struct ftrace_graph_ret *trace,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 152) struct fgraph_ops *gops,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 153) struct ftrace_regs *fregs)
7495a5beaa22f1 Jiri Olsa 2010-09-23 154 {
7495a5beaa22f1 Jiri Olsa 2010-09-23 155 struct trace_array *tr = wakeup_trace;
7495a5beaa22f1 Jiri Olsa 2010-09-23 156 struct trace_array_cpu *data;
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 157 unsigned int trace_ctx;
a485ea9e3ef31a Steven Rostedt 2025-01-13 158 u64 *calltime;
66611c04757096 Steven Rostedt 2025-01-21 159 u64 rettime;
a485ea9e3ef31a Steven Rostedt 2025-01-13 160 int size;
7495a5beaa22f1 Jiri Olsa 2010-09-23 161
12117f3307b63f Steven Rostedt (VMware 2024-06-03 162) ftrace_graph_addr_finish(gops, trace);
5cf99a0f3161bc Steven Rostedt (VMware 2018-11-29 163)
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 164 if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
7495a5beaa22f1 Jiri Olsa 2010-09-23 165 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 166
77a67833e830fd jempty.liang 2026-02-02 167 trace->rettime = trace_clock_local();
66611c04757096 Steven Rostedt 2025-01-21 168
a485ea9e3ef31a Steven Rostedt 2025-01-13 169 calltime = fgraph_retrieve_data(gops->idx, &size);
77a67833e830fd jempty.liang 2026-02-02 170 if (calltime) {
77a67833e830fd jempty.liang 2026-02-02 171 trace->calltime = *calltime;
66611c04757096 Steven Rostedt 2025-01-21 @172 __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
77a67833e830fd jempty.liang 2026-02-02 173 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 174
4f7bf54b07e5ac Steven Rostedt 2025-10-08 175 local_dec(&data->disabled);
7495a5beaa22f1 Jiri Olsa 2010-09-23 176 preempt_enable_notrace();
7495a5beaa22f1 Jiri Olsa 2010-09-23 177 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 178 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 179
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi jempty.liang,
kernel test robot noticed the following build errors:
[auto build test ERROR on trace/for-next]
[also build test ERROR on linus/master v6.19-rc8 next-20260202]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/jempty-liang/tracing-Fix-funcgraph_exit-calltime-rettime-offset-for-32-bit-ARM/20260202-203926
base: https://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace for-next
patch link: https://lore.kernel.org/r/20260202123342.2544795-1-imntjempty%40163.com
patch subject: [PATCH v2] tracing: Fix funcgraph_exit calltime/rettime offset for 32-bit ARM
config: x86_64-rhel-9.4-rust (https://download.01.org/0day-ci/archive/20260203/202602031332.eQ1Jm69n-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
rustc: rustc 1.88.0 (6b00bc388 2025-06-23)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260203/202602031332.eQ1Jm69n-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602031332.eQ1Jm69n-lkp@intel.com/
All errors (new ones prefixed by >>):
>> kernel/trace/trace_sched_wakeup.c:172:46: error: too many arguments to function call, expected 3, have 5
172 | __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
| ~~~~~~~~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~
kernel/trace/trace.h:969:13: note: '__trace_graph_return' declared here
969 | extern void __trace_graph_return(struct trace_array *tr,
| ^ ~~~~~~~~~~~~~~~~~~~~~~~
970 | struct ftrace_graph_ret *trace,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
971 | unsigned int trace_ctx);
| ~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
vim +172 kernel/trace/trace_sched_wakeup.c
7495a5beaa22f1 Jiri Olsa 2010-09-23 150
37238abe3cb47b Steven Rostedt (VMware 2024-06-03 151) static void wakeup_graph_return(struct ftrace_graph_ret *trace,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 152) struct fgraph_ops *gops,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 153) struct ftrace_regs *fregs)
7495a5beaa22f1 Jiri Olsa 2010-09-23 154 {
7495a5beaa22f1 Jiri Olsa 2010-09-23 155 struct trace_array *tr = wakeup_trace;
7495a5beaa22f1 Jiri Olsa 2010-09-23 156 struct trace_array_cpu *data;
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 157 unsigned int trace_ctx;
a485ea9e3ef31a Steven Rostedt 2025-01-13 158 u64 *calltime;
66611c04757096 Steven Rostedt 2025-01-21 159 u64 rettime;
a485ea9e3ef31a Steven Rostedt 2025-01-13 160 int size;
7495a5beaa22f1 Jiri Olsa 2010-09-23 161
12117f3307b63f Steven Rostedt (VMware 2024-06-03 162) ftrace_graph_addr_finish(gops, trace);
5cf99a0f3161bc Steven Rostedt (VMware 2018-11-29 163)
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 164 if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
7495a5beaa22f1 Jiri Olsa 2010-09-23 165 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 166
77a67833e830fd jempty.liang 2026-02-02 167 trace->rettime = trace_clock_local();
66611c04757096 Steven Rostedt 2025-01-21 168
a485ea9e3ef31a Steven Rostedt 2025-01-13 169 calltime = fgraph_retrieve_data(gops->idx, &size);
77a67833e830fd jempty.liang 2026-02-02 170 if (calltime) {
77a67833e830fd jempty.liang 2026-02-02 171 trace->calltime = *calltime;
66611c04757096 Steven Rostedt 2025-01-21 @172 __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
77a67833e830fd jempty.liang 2026-02-02 173 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 174
4f7bf54b07e5ac Steven Rostedt 2025-10-08 175 local_dec(&data->disabled);
7495a5beaa22f1 Jiri Olsa 2010-09-23 176 preempt_enable_notrace();
7495a5beaa22f1 Jiri Olsa 2010-09-23 177 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 178 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 179
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi jempty.liang,
kernel test robot noticed the following build errors:
[auto build test ERROR on trace/for-next]
[also build test ERROR on linus/master v6.19-rc8 next-20260202]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/jempty-liang/tracing-Fix-funcgraph_exit-calltime-rettime-offset-for-32-bit-ARM/20260202-203926
base: https://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace for-next
patch link: https://lore.kernel.org/r/20260202123342.2544795-1-imntjempty%40163.com
patch subject: [PATCH v2] tracing: Fix funcgraph_exit calltime/rettime offset for 32-bit ARM
config: sh-allmodconfig (https://download.01.org/0day-ci/archive/20260203/202602031258.iZkcSFA8-lkp@intel.com/config)
compiler: sh4-linux-gcc (GCC) 15.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260203/202602031258.iZkcSFA8-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602031258.iZkcSFA8-lkp@intel.com/
All errors (new ones prefixed by >>):
kernel/trace/trace_sched_wakeup.c: In function 'wakeup_graph_return':
>> kernel/trace/trace_sched_wakeup.c:172:17: error: too many arguments to function '__trace_graph_return'; expected 3, have 5
172 | __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
| ^~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~
In file included from kernel/trace/trace_sched_wakeup.c:20:
kernel/trace/trace.h:969:13: note: declared here
969 | extern void __trace_graph_return(struct trace_array *tr,
| ^~~~~~~~~~~~~~~~~~~~
vim +/__trace_graph_return +172 kernel/trace/trace_sched_wakeup.c
7495a5beaa22f1 Jiri Olsa 2010-09-23 150
37238abe3cb47b Steven Rostedt (VMware 2024-06-03 151) static void wakeup_graph_return(struct ftrace_graph_ret *trace,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 152) struct fgraph_ops *gops,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 153) struct ftrace_regs *fregs)
7495a5beaa22f1 Jiri Olsa 2010-09-23 154 {
7495a5beaa22f1 Jiri Olsa 2010-09-23 155 struct trace_array *tr = wakeup_trace;
7495a5beaa22f1 Jiri Olsa 2010-09-23 156 struct trace_array_cpu *data;
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 157 unsigned int trace_ctx;
a485ea9e3ef31a Steven Rostedt 2025-01-13 158 u64 *calltime;
66611c04757096 Steven Rostedt 2025-01-21 159 u64 rettime;
a485ea9e3ef31a Steven Rostedt 2025-01-13 160 int size;
7495a5beaa22f1 Jiri Olsa 2010-09-23 161
12117f3307b63f Steven Rostedt (VMware 2024-06-03 162) ftrace_graph_addr_finish(gops, trace);
5cf99a0f3161bc Steven Rostedt (VMware 2018-11-29 163)
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 164 if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
7495a5beaa22f1 Jiri Olsa 2010-09-23 165 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 166
77a67833e830fd jempty.liang 2026-02-02 167 trace->rettime = trace_clock_local();
66611c04757096 Steven Rostedt 2025-01-21 168
a485ea9e3ef31a Steven Rostedt 2025-01-13 169 calltime = fgraph_retrieve_data(gops->idx, &size);
77a67833e830fd jempty.liang 2026-02-02 170 if (calltime) {
77a67833e830fd jempty.liang 2026-02-02 171 trace->calltime = *calltime;
66611c04757096 Steven Rostedt 2025-01-21 @172 __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
77a67833e830fd jempty.liang 2026-02-02 173 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 174
4f7bf54b07e5ac Steven Rostedt 2025-10-08 175 local_dec(&data->disabled);
7495a5beaa22f1 Jiri Olsa 2010-09-23 176 preempt_enable_notrace();
7495a5beaa22f1 Jiri Olsa 2010-09-23 177 return;
7495a5beaa22f1 Jiri Olsa 2010-09-23 178 }
7495a5beaa22f1 Jiri Olsa 2010-09-23 179
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi jempty.liang,
kernel test robot noticed the following build warnings:
[auto build test WARNING on trace/for-next]
[also build test WARNING on linus/master v6.19-rc8 next-20260202]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/jempty-liang/tracing-Fix-funcgraph_exit-calltime-rettime-offset-for-32-bit-ARM/20260202-203926
base: https://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace for-next
patch link: https://lore.kernel.org/r/20260202123342.2544795-1-imntjempty%40163.com
patch subject: [PATCH v2] tracing: Fix funcgraph_exit calltime/rettime offset for 32-bit ARM
config: x86_64-buildonly-randconfig-004-20260203 (https://download.01.org/0day-ci/archive/20260203/202602031142.uHVfwxw3-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260203/202602031142.uHVfwxw3-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602031142.uHVfwxw3-lkp@intel.com/
All warnings (new ones prefixed by >>):
kernel/trace/trace_irqsoff.c: In function 'irqsoff_graph_return':
>> kernel/trace/trace_irqsoff.c:224:13: warning: unused variable 'rettime' [-Wunused-variable]
224 | u64 rettime;
| ^~~~~~~
vim +/rettime +224 kernel/trace/trace_irqsoff.c
62b915f1060996 Jiri Olsa 2010-04-02 214
37238abe3cb47b Steven Rostedt (VMware 2024-06-03 215) static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 216) struct fgraph_ops *gops,
2ca8c112c9676e Masami Hiramatsu (Google 2024-12-26 217) struct ftrace_regs *fregs)
62b915f1060996 Jiri Olsa 2010-04-02 218 {
62b915f1060996 Jiri Olsa 2010-04-02 219 struct trace_array *tr = irqsoff_trace;
62b915f1060996 Jiri Olsa 2010-04-02 220 struct trace_array_cpu *data;
62b915f1060996 Jiri Olsa 2010-04-02 221 unsigned long flags;
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 222 unsigned int trace_ctx;
a485ea9e3ef31a Steven Rostedt 2025-01-13 223 u64 *calltime;
66611c04757096 Steven Rostedt 2025-01-21 @224 u64 rettime;
a485ea9e3ef31a Steven Rostedt 2025-01-13 225 int size;
62b915f1060996 Jiri Olsa 2010-04-02 226
12117f3307b63f Steven Rostedt (VMware 2024-06-03 227) ftrace_graph_addr_finish(gops, trace);
5cf99a0f3161bc Steven Rostedt (VMware 2018-11-29 228)
5e6d2b9cfa3a6e Steven Rostedt 2010-10-05 229 if (!func_prolog_dec(tr, &data, &flags))
62b915f1060996 Jiri Olsa 2010-04-02 230 return;
62b915f1060996 Jiri Olsa 2010-04-02 231
77a67833e830fd jempty.liang 2026-02-02 232 trace->rettime = trace_clock_local();
a485ea9e3ef31a Steven Rostedt 2025-01-13 233 calltime = fgraph_retrieve_data(gops->idx, &size);
c834a97962c708 Steven Rostedt 2025-10-08 234 if (calltime) {
77a67833e830fd jempty.liang 2026-02-02 235 trace->calltime = *calltime;
36590c50b2d072 Sebastian Andrzej Siewior 2021-01-25 236 trace_ctx = tracing_gen_ctx_flags(flags);
77a67833e830fd jempty.liang 2026-02-02 237 __trace_graph_return(tr, trace, trace_ctx);
c834a97962c708 Steven Rostedt 2025-10-08 238 }
90633c34c36d0c Steven Rostedt 2025-05-05 239 local_dec(&data->disabled);
62b915f1060996 Jiri Olsa 2010-04-02 240 }
62b915f1060996 Jiri Olsa 2010-04-02 241
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On Mon, 2 Feb 2026 12:33:42 +0000
"jempty.liang" <imntjempty@163.com> wrote:
> Commit <66611c0475709607f398e2a5d691b1fc72fe9dfc>
> (fgraph: Remove calltime and rettime from generic)
> incorrectly modified the offset values for calltime and rettime fields
> in the funcgraph_exit traceevent on 32-bit ARM, which are used to parse
> the corresponding values fromtrace rawdata. The actual memory offset of
> calltime is 20 (not 24), and rettime is 28 (not 32) for the
> funcgraph_exit event.
OK, so this is a 32bit issue and not an ARM one. I was able to reproduce it
on 32bit x86 too.
Basically the problem is that the structure used to output the field offset
is out of sync with the actual fields of the structure.
>
> Before the fix,the funcgraph_exit format was:
>
> ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
>
> name: funcgraph_exit
> ID: 10
> format:
> ...
> field:unsigned long long calltime; offset:24; size:8; signed:0;
> field:unsigned long long rettime; offset:32; size:8; signed:0;
>
> After the fix, the correct funcgraph_exit format is:
>
> name: funcgraph_exit
> ID: 10
> format:
> ...
> field:unsigned long long calltime; offset:20; size:8; signed:0;
> field:unsigned long long rettime; offset:28; size:8; signed:0;
>
Thus, the way the calltime and rettime are defined is via:
> --- a/kernel/trace/trace_entries.h
> +++ b/kernel/trace/trace_entries.h
> @@ -127,8 +127,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
> __field_packed( unsigned long, ret, retval )
> __field_packed( unsigned int, ret, depth )
> __field_packed( unsigned int, ret, overrun )
> - __field(unsigned long long, calltime )
> - __field(unsigned long long, rettime )
The __field() macro.
> + __field_packed(unsigned long long, ret, calltime)
> + __field_packed(unsigned long long, ret, rettime)
You converted it to a __field_packed() macro. The reason this worked is
because fields within a structure defined by __field_packed() has an
alignment of "1" to pack it.
Thus, your "fix" is simply hiding the real bug, which is that the alignment
algorithm is wrong.
Can you try this patch to see if it fixes the issue for you?
Thanks,
-- Steve
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 1698fc22afa0..68ef39cf0710 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -88,7 +88,9 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef __field_ext
#define __field_ext(_type, _item, _filter_type) { \
.type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
+ .size = sizeof(_type), \
+ .align = __alignof__(_type) > __alignof__(long) ? __alignof__(long) :\
+ __alignof__(_type), \
is_signed_type(_type), .filter_type = _filter_type },
At 2026-02-02 23:38:04, "Steven Rostedt" <rostedt@goodmis.org> wrote:
>On Mon, 2 Feb 2026 12:33:42 +0000
>"jempty.liang" <imntjempty@163.com> wrote:
>
>> Commit <66611c0475709607f398e2a5d691b1fc72fe9dfc>
>> (fgraph: Remove calltime and rettime from generic)
>> incorrectly modified the offset values for calltime and rettime fields
>> in the funcgraph_exit traceevent on 32-bit ARM, which are used to parse
>> the corresponding values fromtrace rawdata. The actual memory offset of
>> calltime is 20 (not 24), and rettime is 28 (not 32) for the
>> funcgraph_exit event.
>
>OK, so this is a 32bit issue and not an ARM one. I was able to reproduce it
>on 32bit x86 too.
>
>Basically the problem is that the structure used to output the field offset
>is out of sync with the actual fields of the structure.
>
>>
>> Before the fix,the funcgraph_exit format was:
>>
>> ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
>>
>> name: funcgraph_exit
>> ID: 10
>> format:
>> ...
>> field:unsigned long long calltime; offset:24; size:8; signed:0;
>> field:unsigned long long rettime; offset:32; size:8; signed:0;
>>
>> After the fix, the correct funcgraph_exit format is:
>>
>> name: funcgraph_exit
>> ID: 10
>> format:
>> ...
>> field:unsigned long long calltime; offset:20; size:8; signed:0;
>> field:unsigned long long rettime; offset:28; size:8; signed:0;
>>
>
>Thus, the way the calltime and rettime are defined is via:
>
>
>> --- a/kernel/trace/trace_entries.h
>> +++ b/kernel/trace/trace_entries.h
>> @@ -127,8 +127,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
>> __field_packed( unsigned long, ret, retval )
>> __field_packed( unsigned int, ret, depth )
>> __field_packed( unsigned int, ret, overrun )
>> - __field(unsigned long long, calltime )
>> - __field(unsigned long long, rettime )
>
>The __field() macro.
>
>> + __field_packed(unsigned long long, ret, calltime)
>> + __field_packed(unsigned long long, ret, rettime)
>
>You converted it to a __field_packed() macro. The reason this worked is
>because fields within a structure defined by __field_packed() has an
>alignment of "1" to pack it.
>
>Thus, your "fix" is simply hiding the real bug, which is that the alignment
>algorithm is wrong.
>
>Can you try this patch to see if it fixes the issue for you?
>
>Thanks,
>
>-- Steve
>
>diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
>index 1698fc22afa0..68ef39cf0710 100644
>--- a/kernel/trace/trace_export.c
>+++ b/kernel/trace/trace_export.c
>@@ -88,7 +88,9 @@ static void __always_unused ____ftrace_check_##name(void) \
> #undef __field_ext
> #define __field_ext(_type, _item, _filter_type) { \
> .type = #_type, .name = #_item, \
>- .size = sizeof(_type), .align = __alignof__(_type), \
>+ .size = sizeof(_type), \
>+ .align = __alignof__(_type) > __alignof__(long) ? __alignof__(long) :\
>+ __alignof__(_type), \
> is_signed_type(_type), .filter_type = _filter_type },
>
>
It works on the 32-bit ARM platform.
On Mon, 2 Feb 2026 10:38:04 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:
> Can you try this patch to see if it fixes the issue for you?
Ignore that patch, try this one instead. This was fixed for trace_events a
while ago, but the same fix wasn't done for ftrace events.
-- Steve
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 1698fc22afa0..5b96ac750049 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -14,6 +14,9 @@
#include "trace_output.h"
+/* The alignment of a type when in a structure */
+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
+
/* Stub function for events with triggers */
static int ftrace_event_register(struct trace_event_call *call,
enum trace_reg type, void *data)
@@ -88,7 +91,7 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef __field_ext
#define __field_ext(_type, _item, _filter_type) { \
.type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
is_signed_type(_type), .filter_type = _filter_type },
At 2026-02-03 00:08:28, "Steven Rostedt" <rostedt@goodmis.org> wrote:
>On Mon, 2 Feb 2026 10:38:04 -0500
>Steven Rostedt <rostedt@goodmis.org> wrote:
>
>> Can you try this patch to see if it fixes the issue for you?
>
>Ignore that patch, try this one instead. This was fixed for trace_events a
>while ago, but the same fix wasn't done for ftrace events.
>
>-- Steve
>
>diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
>index 1698fc22afa0..5b96ac750049 100644
>--- a/kernel/trace/trace_export.c
>+++ b/kernel/trace/trace_export.c
>@@ -14,6 +14,9 @@
>
> #include "trace_output.h"
>
>+/* The alignment of a type when in a structure */
>+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
>+
> /* Stub function for events with triggers */
> static int ftrace_event_register(struct trace_event_call *call,
> enum trace_reg type, void *data)
>@@ -88,7 +91,7 @@ static void __always_unused ____ftrace_check_##name(void) \
> #undef __field_ext
> #define __field_ext(_type, _item, _filter_type) { \
> .type = #_type, .name = #_item, \
>- .size = sizeof(_type), .align = __alignof__(_type), \
>+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
> is_signed_type(_type), .filter_type = _filter_type },
>
On the 32-bit ARM platform, when _type is unsigned long long, the resulting align value is 8 instead of the expected 4.
>
On Tue, 3 Feb 2026 18:04:21 +0800 (CST)
"jempty.liang" <imntjempty@163.com> wrote:
> > #include "trace_output.h"
> >
> >+/* The alignment of a type when in a structure */
> >+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
> >+
> > /* Stub function for events with triggers */
> > static int ftrace_event_register(struct trace_event_call *call,
> > enum trace_reg type, void *data)
> >@@ -88,7 +91,7 @@ static void __always_unused ____ftrace_check_##name(void) \
> > #undef __field_ext
> > #define __field_ext(_type, _item, _filter_type) { \
> > .type = #_type, .name = #_item, \
> >- .size = sizeof(_type), .align = __alignof__(_type), \
> >+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
> > is_signed_type(_type), .filter_type = _filter_type },
> >
> On the 32-bit ARM platform, when _type is unsigned long long, the resulting align value is 8 instead of the expected 4.
> >
Are you saying this still doesn't work?
That would be an issue, because then it would have the same bug as
generic trace events, which use the same solution.
-- Steve
On Tue, 3 Feb 2026 09:30:18 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:
> > On the 32-bit ARM platform, when _type is unsigned long long, the resulting align value is 8 instead of the expected 4.
> > >
>
> Are you saying this still doesn't work?
>
> That would be an issue, because then it would have the same bug as
> generic trace events, which use the same solution.
OK, so talking with others who know more about arm32 than I do, it can
indeed still produce an 8-byte alignment.
Anyway, I still want to fix it properly. Does this patch work for you?
-- Steve
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b6d42fe06115..c11edec5d8f5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -68,14 +68,17 @@ enum trace_type {
#undef __field_fn
#define __field_fn(type, item) type item;
+#undef __field_packed
+#define __field_packed(type, item) type item;
+
#undef __field_struct
#define __field_struct(type, item) __field(type, item)
#undef __field_desc
#define __field_desc(type, container, item)
-#undef __field_packed
-#define __field_packed(type, container, item)
+#undef __field_desc_packed
+#define __field_desc_packed(type, container, item)
#undef __array
#define __array(type, item, size) type item[size];
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index f6a8d29c0d76..54417468fdeb 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -79,8 +79,8 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ent, graph_ent )
- __field_packed( unsigned long, graph_ent, func )
- __field_packed( unsigned long, graph_ent, depth )
+ __field_desc_packed(unsigned long, graph_ent, func )
+ __field_desc_packed(unsigned long, graph_ent, depth )
__dynamic_array(unsigned long, args )
),
@@ -96,9 +96,9 @@ FTRACE_ENTRY_PACKED(fgraph_retaddr_entry, fgraph_retaddr_ent_entry,
F_STRUCT(
__field_struct( struct fgraph_retaddr_ent, graph_rent )
- __field_packed( unsigned long, graph_rent.ent, func )
- __field_packed( unsigned long, graph_rent.ent, depth )
- __field_packed( unsigned long, graph_rent, retaddr )
+ __field_desc_packed( unsigned long, graph_rent.ent, func )
+ __field_desc_packed( unsigned long, graph_rent.ent, depth )
+ __field_desc_packed( unsigned long, graph_rent, retaddr )
__dynamic_array(unsigned long, args )
),
@@ -123,12 +123,12 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ret, ret )
- __field_packed( unsigned long, ret, func )
- __field_packed( unsigned long, ret, retval )
- __field_packed( unsigned int, ret, depth )
- __field_packed( unsigned int, ret, overrun )
- __field(unsigned long long, calltime )
- __field(unsigned long long, rettime )
+ __field_desc_packed( unsigned long, ret, func )
+ __field_desc_packed( unsigned long, ret, retval )
+ __field_desc_packed( unsigned int, ret, depth )
+ __field_desc_packed( unsigned int, ret, overrun )
+ __field_packed(unsigned long long, calltime)
+ __field_packed(unsigned long long, rettime )
),
F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u retval: %lx",
@@ -146,11 +146,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ret, ret )
- __field_packed( unsigned long, ret, func )
- __field_packed( unsigned int, ret, depth )
- __field_packed( unsigned int, ret, overrun )
- __field(unsigned long long, calltime )
- __field(unsigned long long, rettime )
+ __field_desc_packed( unsigned long, ret, func )
+ __field_desc_packed( unsigned int, ret, depth )
+ __field_desc_packed( unsigned int, ret, overrun )
+ __field_packed(unsigned long long, calltime )
+ __field_packed(unsigned long long, rettime )
),
F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u",
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 1698fc22afa0..32a42ef31855 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -42,11 +42,14 @@ static int ftrace_event_register(struct trace_event_call *call,
#undef __field_fn
#define __field_fn(type, item) type item;
+#undef __field_packed
+#define __field_packed(type, item) type item;
+
#undef __field_desc
#define __field_desc(type, container, item) type item;
-#undef __field_packed
-#define __field_packed(type, container, item) type item;
+#undef __field_desc_packed
+#define __field_desc_packed(type, container, item) type item;
#undef __array
#define __array(type, item, size) type item[size];
@@ -104,11 +107,14 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef __field_fn
#define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN)
+#undef __field_packed
+#define __field_packed(_type, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+
#undef __field_desc
#define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
-#undef __field_packed
-#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+#undef __field_desc_packed
+#define __field_desc_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
#undef __array
#define __array(_type, _item, _len) { \
@@ -146,11 +152,14 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
#undef __field_fn
#define __field_fn(type, item)
+#undef __field_packed
+#define __field_packed(type, item)
+
#undef __field_desc
#define __field_desc(type, container, item)
-#undef __field_packed
-#define __field_packed(type, container, item)
+#undef __field_desc_packed
+#define __field_desc_packed(type, container, item)
#undef __array
#define __array(type, item, len)
At 2026-02-04 00:20:40, "Steven Rostedt" <rostedt@goodmis.org> wrote:
>On Tue, 3 Feb 2026 09:30:18 -0500
>Steven Rostedt <rostedt@goodmis.org> wrote:
>
>> > On the 32-bit ARM platform, when _type is unsigned long long, the resulting align value is 8 instead of the expected 4.
>> > >
>>
>> Are you saying this still doesn't work?
>>
>> That would be an issue because then it would have the same bugs with
>> generic trace events which uses the same solution.
>
>OK, so talking with others who know more about arm32 than I do, it can
>indeed still produce an 8-byte alignment.
>
>Anyway, I still want to fix it properly. Does this patch work for you?
>
>-- Steve
Yes, I've verified it and this patch works correctly on the 32-bit ARM platform.
>
>diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
>index b6d42fe06115..c11edec5d8f5 100644
>--- a/kernel/trace/trace.h
>+++ b/kernel/trace/trace.h
>@@ -68,14 +68,17 @@ enum trace_type {
> #undef __field_fn
> #define __field_fn(type, item) type item;
>
>+#undef __field_packed
>+#define __field_packed(type, item) type item;
>+
> #undef __field_struct
> #define __field_struct(type, item) __field(type, item)
>
> #undef __field_desc
> #define __field_desc(type, container, item)
>
>-#undef __field_packed
>-#define __field_packed(type, container, item)
>+#undef __field_desc_packed
>+#define __field_desc_packed(type, container, item)
>
> #undef __array
> #define __array(type, item, size) type item[size];
>diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
>index f6a8d29c0d76..54417468fdeb 100644
>--- a/kernel/trace/trace_entries.h
>+++ b/kernel/trace/trace_entries.h
>@@ -79,8 +79,8 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
>
> F_STRUCT(
> __field_struct( struct ftrace_graph_ent, graph_ent )
>- __field_packed( unsigned long, graph_ent, func )
>- __field_packed( unsigned long, graph_ent, depth )
>+ __field_desc_packed(unsigned long, graph_ent, func )
>+ __field_desc_packed(unsigned long, graph_ent, depth )
> __dynamic_array(unsigned long, args )
> ),
>
>@@ -96,9 +96,9 @@ FTRACE_ENTRY_PACKED(fgraph_retaddr_entry, fgraph_retaddr_ent_entry,
>
> F_STRUCT(
> __field_struct( struct fgraph_retaddr_ent, graph_rent )
>- __field_packed( unsigned long, graph_rent.ent, func )
>- __field_packed( unsigned long, graph_rent.ent, depth )
>- __field_packed( unsigned long, graph_rent, retaddr )
>+ __field_desc_packed( unsigned long, graph_rent.ent, func )
>+ __field_desc_packed( unsigned long, graph_rent.ent, depth )
>+ __field_desc_packed( unsigned long, graph_rent, retaddr )
> __dynamic_array(unsigned long, args )
> ),
>
>@@ -123,12 +123,12 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
>
> F_STRUCT(
> __field_struct( struct ftrace_graph_ret, ret )
>- __field_packed( unsigned long, ret, func )
>- __field_packed( unsigned long, ret, retval )
>- __field_packed( unsigned int, ret, depth )
>- __field_packed( unsigned int, ret, overrun )
>- __field(unsigned long long, calltime )
>- __field(unsigned long long, rettime )
>+ __field_desc_packed( unsigned long, ret, func )
>+ __field_desc_packed( unsigned long, ret, retval )
>+ __field_desc_packed( unsigned int, ret, depth )
>+ __field_desc_packed( unsigned int, ret, overrun )
>+ __field_packed(unsigned long long, calltime)
>+ __field_packed(unsigned long long, rettime )
> ),
>
> F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u retval: %lx",
>@@ -146,11 +146,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
>
> F_STRUCT(
> __field_struct( struct ftrace_graph_ret, ret )
>- __field_packed( unsigned long, ret, func )
>- __field_packed( unsigned int, ret, depth )
>- __field_packed( unsigned int, ret, overrun )
>- __field(unsigned long long, calltime )
>- __field(unsigned long long, rettime )
>+ __field_desc_packed( unsigned long, ret, func )
>+ __field_desc_packed( unsigned int, ret, depth )
>+ __field_desc_packed( unsigned int, ret, overrun )
>+ __field_packed(unsigned long long, calltime )
>+ __field_packed(unsigned long long, rettime )
> ),
>
> F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u",
>diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
>index 1698fc22afa0..32a42ef31855 100644
>--- a/kernel/trace/trace_export.c
>+++ b/kernel/trace/trace_export.c
>@@ -42,11 +42,14 @@ static int ftrace_event_register(struct trace_event_call *call,
> #undef __field_fn
> #define __field_fn(type, item) type item;
>
>+#undef __field_packed
>+#define __field_packed(type, item) type item;
>+
> #undef __field_desc
> #define __field_desc(type, container, item) type item;
>
>-#undef __field_packed
>-#define __field_packed(type, container, item) type item;
>+#undef __field_desc_packed
>+#define __field_desc_packed(type, container, item) type item;
>
> #undef __array
> #define __array(type, item, size) type item[size];
>@@ -104,11 +107,14 @@ static void __always_unused ____ftrace_check_##name(void) \
> #undef __field_fn
> #define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN)
>
>+#undef __field_packed
>+#define __field_packed(_type, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
>+
> #undef __field_desc
> #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
>
>-#undef __field_packed
>-#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
>+#undef __field_desc_packed
>+#define __field_desc_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
>
> #undef __array
> #define __array(_type, _item, _len) { \
>@@ -146,11 +152,14 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
> #undef __field_fn
> #define __field_fn(type, item)
>
>+#undef __field_packed
>+#define __field_packed(type, item)
>+
> #undef __field_desc
> #define __field_desc(type, container, item)
>
>-#undef __field_packed
>-#define __field_packed(type, container, item)
>+#undef __field_desc_packed
>+#define __field_desc_packed(type, container, item)
>
> #undef __array
> #define __array(type, item, len)
© 2016 - 2026 Red Hat, Inc.