Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
GFP_NOWAIT implicitly include __GFP_NOWARN.
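With that change in place, the definition in include/linux/gfp_types.h
reads roughly as follows (quoted for reference, modulo surrounding
comments):

	/* __GFP_NOWARN is folded in, so callers need not OR it in */
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)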
Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
`GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
redundant flags across subsystems.
No functional changes.
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
---
v1->v2:
- Added a modification to remove redundant __GFP_NOWARN in
mm/damon/ops-common.c
---
 mm/damon/ops-common.c | 2 +-
 mm/filemap.c          | 2 +-
 mm/mmu_gather.c       | 4 ++--
 mm/rmap.c             | 2 +-
 mm/vmalloc.c          | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 99321ff5cb92..b43595730f08 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -303,7 +303,7 @@ static unsigned int __damon_migrate_folio_list(
* instead of migrated.
*/
.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
- __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+ __GFP_NOMEMALLOC | GFP_NOWAIT,
.nid = target_nid,
};
diff --git a/mm/filemap.c b/mm/filemap.c
index 4e5c9544fee4..c21e98657e0b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1961,7 +1961,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
gfp &= ~__GFP_FS;
if (fgp_flags & FGP_NOWAIT) {
gfp &= ~GFP_KERNEL;
- gfp |= GFP_NOWAIT | __GFP_NOWARN;
+ gfp |= GFP_NOWAIT;
}
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index b49cc6385f1f..374aa6f021c6 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
return false;
- batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ batch = (void *)__get_free_page(GFP_NOWAIT);
if (!batch)
return false;
@@ -364,7 +364,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
struct mmu_table_batch **batch = &tlb->batch;
if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
if (*batch == NULL) {
tlb_table_invalidate(tlb);
tlb_remove_table_one(table);
diff --git a/mm/rmap.c b/mm/rmap.c
index 568198e9efc2..7baa7385e1ce 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -285,7 +285,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
- avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+ avc = anon_vma_chain_alloc(GFP_NOWAIT);
if (unlikely(!avc)) {
unlock_anon_vma_root(root);
root = NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae1..90c3de1a0417 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5177,7 +5177,7 @@ static void vmap_init_nodes(void)
int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
if (n > 1) {
- vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
+ vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
if (vn) {
/* Node partition is 16 pages. */
vmap_zone_size = (1 << 4) * PAGE_SIZE;
--
2.34.1
On 12.08.25 15:52, Qianfeng Rong wrote:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
> ---
Acked-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb
On Tue, 12 Aug 2025 21:52:25 +0800 Qianfeng Rong <rongqianfeng@vivo.com> wrote:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Thanks,
SJ
[...]
On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
LGTM, I wonder if there are other such redundancies in the kernel?
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> [...]
On 2025/8/13 0:46, Lorenzo Stoakes wrote:
> On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
>> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
>> GFP_NOWAIT implicitly include __GFP_NOWARN.
>>
>> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
>> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
>> redundant flags across subsystems.
>>
>> No functional changes.
>>
>> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
>> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
> LGTM, I wonder if there are other such redundancies in the kernel?
For similar redundancies in other subsystems, I submitted separate patches
to minimize potential merge conflicts.
Best regards,
Qianfeng
>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
>
On Tue, Aug 12, 2025 at 05:46:47PM +0100, Lorenzo Stoakes wrote:
> On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
> > Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> > GFP_NOWAIT implicitly include __GFP_NOWARN.
> >
> > Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> > `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> > redundant flags across subsystems.
> >
> > No functional changes.
> >
> > Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> > Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
>
> LGTM, I wonder if there are other such redundancies in the kernel?
>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> > [...]
Reviewed-by: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
--
Uladzislau Rezki
On Tue, Aug 12, 2025 at 05:46:47PM +0100, Lorenzo Stoakes wrote:
> On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
> > Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> > GFP_NOWAIT implicitly include __GFP_NOWARN.
> >
> > Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> > `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> > redundant flags across subsystems.
> >
> > No functional changes.
> >
> > Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> > Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
>
> LGTM, I wonder if there are other such redundancies in the kernel?
Looks like there's a lot left for this specific case. At least 48 hits
show up, spread across subsystems, when running 'git grep
"GFP_NOWAIT.*GFP_NOWARN"'.
I think they should be cleaned up in per-subsystem sets to minimize
merge conflicts, as suggested in the commit mentioned above (16f5dfbc851b).
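For instance, a rough per-directory tally with the same pattern could
look something like this:

	# rough count of remaining call sites per top-level directory
	git grep -l "GFP_NOWAIT.*GFP_NOWARN" | cut -d/ -f1 | sort | uniq -c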
On 2025/8/13 1:55, Vishal Moola (Oracle) wrote:
> On Tue, Aug 12, 2025 at 05:46:47PM +0100, Lorenzo Stoakes wrote:
>> On Tue, Aug 12, 2025 at 09:52:25PM +0800, Qianfeng Rong wrote:
>>> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
>>> GFP_NOWAIT implicitly include __GFP_NOWARN.
>>>
>>> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
>>> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
>>> redundant flags across subsystems.
>>>
>>> No functional changes.
>>>
>>> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
>>> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
>> LGTM, I wonder if there are other such redundancies in the kernel?
> Looks like there's a lot left for this specific case. At least 48 hits
> show up, spread across subsystems, when running 'git grep
> "GFP_NOWAIT.*GFP_NOWARN"'.
>
> I think they should be cleaned up in per-subsystem sets to minimize
> merge conflicts, as suggested in the commit mentioned above (16f5dfbc851b).
I agree. I submitted similar patches for each subsystem separately.
It may take some time before they are merged into linux-next.
Best regards,
Qianfeng
* Qianfeng Rong <rongqianfeng@vivo.com> [250812 09:52]:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> [...]