Unlike the sched_skip_vma_numa tracepoint, which tracks skipped VMAs, this
one tracks the task subjected to cpuset.mems pinning and prints out its
allowed memory node mask.
Signed-off-by: Libo Chen <libo.chen@oracle.com>
---
include/trace/events/sched.h | 31 +++++++++++++++++++++++++++++++
kernel/sched/fair.c | 4 +++-
2 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8994e97d86c1..91f9dc177dad 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -745,6 +745,37 @@ TRACE_EVENT(sched_skip_vma_numa,
__entry->vm_end,
__print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
+
+TRACE_EVENT(sched_skip_cpuset_numa,
+
+ TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
+
+ TP_ARGS(tsk, mem_allowed_ptr),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, tgid )
+ __field( pid_t, ngid )
+ __array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = task_pid_nr(tsk);
+ __entry->tgid = task_tgid_nr(tsk);
+ __entry->ngid = task_numa_group_id(tsk);
+ memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
+ sizeof(__entry->mem_allowed));
+ ),
+
+ TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
+ __entry->comm,
+ __entry->pid,
+ __entry->tgid,
+ __entry->ngid,
+ MAX_NUMNODES, __entry->mem_allowed)
+);
#endif /* CONFIG_NUMA_BALANCING */
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c9903b1b3948..cc892961ce15 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3333,8 +3333,10 @@ static void task_numa_work(struct callback_head *work)
* Memory is pinned to only one NUMA node via cpuset.mems, naturally
* no page can be migrated.
*/
- if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1)
+ if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1) {
+ trace_sched_skip_cpuset_numa(current, &cpuset_current_mems_allowed);
return;
+ }
if (!mm->numa_next_scan) {
mm->numa_next_scan = now +
--
2.43.5
On Wed, 23 Apr 2025 17:01:46 -0700
Libo Chen <libo.chen@oracle.com> wrote:
> +++ b/include/trace/events/sched.h
> @@ -745,6 +745,37 @@ TRACE_EVENT(sched_skip_vma_numa,
> __entry->vm_end,
> __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
> );
> +
> +TRACE_EVENT(sched_skip_cpuset_numa,
> +
> + TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
> +
> + TP_ARGS(tsk, mem_allowed_ptr),
> +
> + TP_STRUCT__entry(
> + __array( char, comm, TASK_COMM_LEN )
> + __field( pid_t, pid )
> + __field( pid_t, tgid )
> + __field( pid_t, ngid )
> + __array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
> + ),
> +
> + TP_fast_assign(
> + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
> + __entry->pid = task_pid_nr(tsk);
> + __entry->tgid = task_tgid_nr(tsk);
> + __entry->ngid = task_numa_group_id(tsk);
> + memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
> + sizeof(__entry->mem_allowed));
Is mem_allowed_ptr->bits guaranteed to be BITS_TO_LONGS(MAX_NUMNODES) longs
in size? If not, then the memcpy will read beyond the end of the source.
-- Steve
> + ),
> +
> + TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
> + __entry->comm,
> + __entry->pid,
> + __entry->tgid,
> + __entry->ngid,
> + MAX_NUMNODES, __entry->mem_allowed)
> +);
On 4/23/25 17:18, Steven Rostedt wrote:
> On Wed, 23 Apr 2025 17:01:46 -0700
> Libo Chen <libo.chen@oracle.com> wrote:
>
>> +++ b/include/trace/events/sched.h
>> @@ -745,6 +745,37 @@ TRACE_EVENT(sched_skip_vma_numa,
>> __entry->vm_end,
>> __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
>> );
>> +
>> +TRACE_EVENT(sched_skip_cpuset_numa,
>> +
>> + TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
>> +
>> + TP_ARGS(tsk, mem_allowed_ptr),
>> +
>> + TP_STRUCT__entry(
>> + __array( char, comm, TASK_COMM_LEN )
>> + __field( pid_t, pid )
>> + __field( pid_t, tgid )
>> + __field( pid_t, ngid )
>> + __array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
>> + ),
>> +
>> + TP_fast_assign(
>> + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
>> + __entry->pid = task_pid_nr(tsk);
>> + __entry->tgid = task_tgid_nr(tsk);
>> + __entry->ngid = task_numa_group_id(tsk);
>> + memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
>> + sizeof(__entry->mem_allowed));
>
> Is mem_allowed_ptr->bits guaranteed to be BITS_TO_LONGS(MAX_NUMNODES) longs
> in size? If not, then the memcpy will read beyond the end of the source.
>
Yes, evidence can be found in the definitions of nodemask_t and DECLARE_BITMAP:
// include/linux/nodemask_types.h
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
// include/linux/types.h
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
Thanks,
Libo
> -- Steve
>
>
>> + ),
>> +
>> + TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
>> + __entry->comm,
>> + __entry->pid,
>> + __entry->tgid,
>> + __entry->ngid,
>> + MAX_NUMNODES, __entry->mem_allowed)
>> +);
>
On Wed, 23 Apr 2025 17:36:30 -0700
Libo Chen <libo.chen@oracle.com> wrote:
> >> + TP_fast_assign(
> >> + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
> >> + __entry->pid = task_pid_nr(tsk);
> >> + __entry->tgid = task_tgid_nr(tsk);
> >> + __entry->ngid = task_numa_group_id(tsk);
> >> + memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
> >> + sizeof(__entry->mem_allowed));
> >
> > Is mem_allowed_ptr->bits guaranteed to be BITS_TO_LONGS(MAX_NUMNODES) longs
> > in size? If not, then the memcpy will read beyond the end of the source.
> >
>
> Yes, evidence can be found in the definitions of nodemask_t and DECLARE_BITMAP:
>
> // include/linux/nodemask_types.h
> typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
>
> // include/linux/types.h
> #define DECLARE_BITMAP(name,bits) \
> unsigned long name[BITS_TO_LONGS(bits)]
>
Hmm, I wonder then if we should add in TP_fast_assign():
BUILD_BUG_ON(sizeof(nodemask_t) != BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
-- Steve
On 4/23/25 18:01, Steven Rostedt wrote:
> On Wed, 23 Apr 2025 17:36:30 -0700
> Libo Chen <libo.chen@oracle.com> wrote:
>
>>>> + TP_fast_assign(
>>>> + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
>>>> + __entry->pid = task_pid_nr(tsk);
>>>> + __entry->tgid = task_tgid_nr(tsk);
>>>> + __entry->ngid = task_numa_group_id(tsk);
>>>> + memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
>>>> + sizeof(__entry->mem_allowed));
>>>
>>> Is mem_allowed_ptr->bits guaranteed to be BITS_TO_LONGS(MAX_NUMNODES) longs
>>> in size? If not, then the memcpy will read beyond the end of the source.
>>>
>>
>> Yes, evidence can be found in the definitions of nodemask_t and DECLARE_BITMAP:
>>
>> // include/linux/nodemask_types.h
>> typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
>>
>> // include/linux/types.h
>> #define DECLARE_BITMAP(name,bits) \
>> unsigned long name[BITS_TO_LONGS(bits)]
>>
>
> Hmm, I wonder then if we should add in TP_fast_assign():
>
> BUILD_BUG_ON(sizeof(nodemask_t) != BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
>
to guard against potential changes in the nodemask_t definition?
> -- Steve
On Wed, 23 Apr 2025 18:12:55 -0700 Libo Chen <libo.chen@oracle.com> wrote: > > Hmm, I wonder then if we should add in TP_fast_assign(): > > > > BUILD_BUG_ON(sizeof(nodemask_t) != BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long)); > > > > to guard against potential changes in nodemask_t definition? Correct. Whenever there's an implicit dependency like this, where if something were to change it can cause a bug in the kernel, it's always better to have a build time check to catch it before it becomes an issue. -- Steve
On 4/23/25 18:33, Steven Rostedt wrote: > On Wed, 23 Apr 2025 18:12:55 -0700 > Libo Chen <libo.chen@oracle.com> wrote: > >>> Hmm, I wonder then if we should add in TP_fast_assign(): >>> >>> BUILD_BUG_ON(sizeof(nodemask_t) != BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long)); >>> >> >> to guard against potential changes in nodemask_t definition? > > Correct. > > Whenever there's an implicit dependency like this, where if something were > to change it can cause a bug in the kernel, it's always better to have a > build time check to catch it before it becomes an issue. > Okay that's reasonable. I will add it~
© 2016 - 2026 Red Hat, Inc.