As part of the effort to move mm->flags over to a bitmap field, convert
existing users to the mm_flags_*() accessors, which will, when the
conversion is complete, be the only means of accessing mm_struct flags.
No functional change intended.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
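For reviewers unfamiliar with the new helpers: roughly speaking, the accessors
wrap a bitmap along the lines of the sketch below. The type, field, and
constant names here are illustrative assumptions, not the exact definitions
introduced earlier in this series.

/*
 * Illustrative sketch only -- the type/field/constant names are assumptions,
 * not the exact kernel definitions. The point is that callers go through
 * mm_flags_*() accessors rather than touching the flags word directly, so the
 * underlying storage can later grow beyond a single unsigned long.
 */
typedef struct {
	DECLARE_BITMAP(__mm_flags, NUM_MM_FLAGS);
} mm_flags_t;

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, mm->flags.__mm_flags);
}

static inline void mm_flags_set(int flag, struct mm_struct *mm)
{
	set_bit(flag, mm->flags.__mm_flags);
}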
 fs/proc/array.c    | 2 +-
 fs/proc/base.c     | 4 ++--
 fs/proc/task_mmu.c | 2 +-
 kernel/fork.c      | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/proc/array.c b/fs/proc/array.c
index d6a0369caa93..c286dc12325e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -422,7 +422,7 @@ static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)
 	bool thp_enabled = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE);
 
 	if (thp_enabled)
-		thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags);
+		thp_enabled = !mm_flags_test(MMF_DISABLE_THP, mm);
 	seq_printf(m, "THP_enabled:\t%d\n", thp_enabled);
 }
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f0c093c58aaf..b997ceef9135 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1163,7 +1163,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 		struct task_struct *p = find_lock_task_mm(task);
 
 		if (p) {
-			if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
+			if (mm_flags_test(MMF_MULTIPROCESS, p->mm)) {
 				mm = p->mm;
 				mmgrab(mm);
 			}
@@ -3276,7 +3276,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
 		seq_printf(m, "ksm_merge_any: %s\n",
-			   test_bit(MMF_VM_MERGE_ANY, &mm->flags) ? "yes" : "no");
+			   mm_flags_test(MMF_VM_MERGE_ANY, mm) ? "yes" : "no");
 		ret = mmap_read_lock_killable(mm);
 		if (ret) {
 			mmput(mm);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e64cf40ce9c4..e8e7bef34531 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1592,7 +1592,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
 		return false;
 	if (!is_cow_mapping(vma->vm_flags))
 		return false;
-	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
+	if (likely(!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm)))
 		return false;
 	folio = vm_normal_folio(vma, addr, pte);
 	if (!folio)
diff --git a/kernel/fork.c b/kernel/fork.c
index b311caec6419..68c81539193d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1887,7 +1887,7 @@ static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
 
 	/* We need to synchronize with __set_oom_adj */
 	mutex_lock(&oom_adj_mutex);
-	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
+	mm_flags_set(MMF_MULTIPROCESS, tsk->mm);
 	/* Update the values in case they were changed after copy_signal */
 	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
 	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
--
2.50.1
On 12.08.25 17:44, Lorenzo Stoakes wrote:
> As part of the effort to move mm->flags over to a bitmap field, convert
> existing users to the mm_flags_*() accessors, which will, when the
> conversion is complete, be the only means of accessing mm_struct flags.
>
> No functional change intended.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> ---

Acked-by: David Hildenbrand <david@redhat.com>

--
Cheers

David / dhildenb
On Tue, Aug 12, 2025 at 04:44:18PM +0100, Lorenzo Stoakes wrote:
> As part of the effort to move mm->flags over to a bitmap field, convert
> existing users to the mm_flags_*() accessors, which will, when the
> conversion is complete, be the only means of accessing mm_struct flags.
>
> No functional change intended.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

--
Sincerely yours,
Mike.
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250812 11:48]:
> As part of the effort to move mm->flags over to a bitmap field, convert
> existing users to the mm_flags_*() accessors, which will, when the
> conversion is complete, be the only means of accessing mm_struct flags.
>
> No functional change intended.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>