 mm/filemap.c    | 2 +-
 mm/mmu_gather.c | 4 ++--
 mm/rmap.c       | 2 +-
 mm/vmalloc.c    | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)
Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
GFP_NOWAIT implicitly include __GFP_NOWARN.
Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
`GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
redundant flags across subsystems.
No functional changes.
Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
---
mm/filemap.c | 2 +-
mm/mmu_gather.c | 4 ++--
mm/rmap.c | 2 +-
mm/vmalloc.c | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 4e5c9544fee4..c21e98657e0b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1961,7 +1961,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
gfp &= ~__GFP_FS;
if (fgp_flags & FGP_NOWAIT) {
gfp &= ~GFP_KERNEL;
- gfp |= GFP_NOWAIT | __GFP_NOWARN;
+ gfp |= GFP_NOWAIT;
}
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index b49cc6385f1f..374aa6f021c6 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
return false;
- batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ batch = (void *)__get_free_page(GFP_NOWAIT);
if (!batch)
return false;
@@ -364,7 +364,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
struct mmu_table_batch **batch = &tlb->batch;
if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
if (*batch == NULL) {
tlb_table_invalidate(tlb);
tlb_remove_table_one(table);
diff --git a/mm/rmap.c b/mm/rmap.c
index 568198e9efc2..7baa7385e1ce 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -285,7 +285,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
- avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+ avc = anon_vma_chain_alloc(GFP_NOWAIT);
if (unlikely(!avc)) {
unlock_anon_vma_root(root);
root = NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae1..90c3de1a0417 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5177,7 +5177,7 @@ static void vmap_init_nodes(void)
int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
if (n > 1) {
- vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
+ vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
if (vn) {
/* Node partition is 16 pages. */
vmap_zone_size = (1 << 4) * PAGE_SIZE;
--
2.34.1
On Tue, Aug 12, 2025 at 05:57:46PM +0800, Qianfeng Rong wrote: > Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made > GFP_NOWAIT implicitly include __GFP_NOWARN. > > Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g., > `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these > redundant flags across subsystems. > > No functional changes. > > Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com> > --- Maybe .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT, in mm/damon/paddr.c also can be cleaned up to .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | | __GFP_NOMEMALLOC | GFP_NOWAIT, ? With or without that: Reviewed-by: Harry Yoo <harry.yoo@oracle.com> -- Cheers, Harry / Hyeonggon > mm/filemap.c | 2 +- > mm/mmu_gather.c | 4 ++-- > mm/rmap.c | 2 +- > mm/vmalloc.c | 2 +- > 4 files changed, 5 insertions(+), 5 deletions(-) > > diff --git a/mm/filemap.c b/mm/filemap.c > index 4e5c9544fee4..c21e98657e0b 100644 > --- a/mm/filemap.c > +++ b/mm/filemap.c > @@ -1961,7 +1961,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, > gfp &= ~__GFP_FS; > if (fgp_flags & FGP_NOWAIT) { > gfp &= ~GFP_KERNEL; > - gfp |= GFP_NOWAIT | __GFP_NOWARN; > + gfp |= GFP_NOWAIT; > } > if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) > fgp_flags |= FGP_LOCK; > diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c > index b49cc6385f1f..374aa6f021c6 100644 > --- a/mm/mmu_gather.c > +++ b/mm/mmu_gather.c > @@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb) > if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) > return false; > > - batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); > + batch = (void *)__get_free_page(GFP_NOWAIT); > if (!batch) > return false; > > @@ -364,7 +364,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) > struct mmu_table_batch **batch = &tlb->batch; > > if (*batch == NULL) { > - *batch = (struct mmu_table_batch 
*)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); > + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT); > if (*batch == NULL) { > tlb_table_invalidate(tlb); > tlb_remove_table_one(table); > diff --git a/mm/rmap.c b/mm/rmap.c > index 568198e9efc2..7baa7385e1ce 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -285,7 +285,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) > list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { > struct anon_vma *anon_vma; > > - avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); > + avc = anon_vma_chain_alloc(GFP_NOWAIT); > if (unlikely(!avc)) { > unlock_anon_vma_root(root); > root = NULL; > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 6dbcdceecae1..90c3de1a0417 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -5177,7 +5177,7 @@ static void vmap_init_nodes(void) > int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); > > if (n > 1) { > - vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); > + vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT); > if (vn) { > /* Node partition is 16 pages. */ > vmap_zone_size = (1 << 4) * PAGE_SIZE; > -- > 2.34.1 >
On Tue, 12 Aug 2025 22:11:16 +0900 Harry Yoo <harry.yoo@oracle.com> wrote: > On Tue, Aug 12, 2025 at 05:57:46PM +0800, Qianfeng Rong wrote: > > Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made > > GFP_NOWAIT implicitly include __GFP_NOWARN. > > > > Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g., > > `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these > > redundant flags across subsystems. > > > > No functional changes. > > > > Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com> > > --- > > Maybe > > .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | > __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT, > > in mm/damon/paddr.c also can be cleaned up to > > .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | > | __GFP_NOMEMALLOC | GFP_NOWAIT, > > ? Thank you for catching this, Harry! FYI, the code has moved into mm/damon/ops-common.c by commit 13dde31db71f ("mm/damon: move migration helpers from paddr to ops-common"). Please feel free to make the cleanup if anyone willing to. Thanks, SJ [...]
在 2025/8/12 21:11, Harry Yoo 写道: > [You don't often get email from harry.yoo@oracle.com. Learn why this is important at https://aka.ms/LearnAboutSenderIdentification ] > > On Tue, Aug 12, 2025 at 05:57:46PM +0800, Qianfeng Rong wrote: >> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made >> GFP_NOWAIT implicitly include __GFP_NOWARN. >> >> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g., >> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these >> redundant flags across subsystems. >> >> No functional changes. >> >> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com> >> --- > Maybe > > .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | > __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT, > > in mm/damon/paddr.c also can be cleaned up to > > .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | > | __GFP_NOMEMALLOC | GFP_NOWAIT, > > ? Thanks for the reminder. I did miss one modification spot. After checking, I found this code section was moved to mm/damon/ops-common.c. I'll submit v2 immediately. Best regards, Qianfeng > > With or without that: > > Reviewed-by: Harry Yoo <harry.yoo@oracle.com> > > -- > Cheers, > Harry / Hyeonggon >
© 2016 - 2025 Red Hat, Inc.