Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and
init_on_free=1 boot options") forces allocated pages to be zeroed in
post_alloc_hook() when init_on_alloc=1.
For order-0 folios, if the arch does not define
vma_alloc_zeroed_movable_folio(), the default implementation again zeros
the page returned from the buddy allocator, so the page is zeroed twice.
Fix it by passing __GFP_ZERO instead to avoid double page zeroing.
At the moment, s390, arm64, x86, alpha, m68k are not impacted since they
define their own vma_alloc_zeroed_movable_folio().
For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to
zero the folio again. Fix it by calling folio_zero_user() only if
init_on_alloc is not set. All arches are impacted.
Add an alloc_zeroed() helper to encapsulate the init_on_alloc check.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/highmem.h | 8 +-------
mm/huge_memory.c | 3 ++-
mm/internal.h | 6 ++++++
mm/memory.c | 3 ++-
4 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bec9bd715acf..6e452bd8e7e3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -224,13 +224,7 @@ static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr)
{
- struct folio *folio;
-
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
- if (folio)
- clear_user_highpage(&folio->page, vaddr);
-
- return folio;
+ return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr);
}
#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82f464865570..5dcbea96edb7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
}
folio_throttle_swaprate(folio, gfp);
- folio_zero_user(folio, addr);
+ if (!alloc_zeroed())
+ folio_zero_user(folio, addr);
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
* folio_zero_user writes become visible before the set_pmd_at()
diff --git a/mm/internal.h b/mm/internal.h
index 906da6280c2d..508f7802dd2b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1233,6 +1233,12 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr,
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
+static inline bool alloc_zeroed(void)
+{
+ return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc);
+}
+
enum {
/* mark page accessed */
FOLL_TOUCH = 1 << 16,
diff --git a/mm/memory.c b/mm/memory.c
index c67359ddb61a..88252f0e06d0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4719,7 +4719,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
goto next;
}
folio_throttle_swaprate(folio, gfp);
- folio_zero_user(folio, vmf->address);
+ if (!alloc_zeroed())
+ folio_zero_user(folio, vmf->address);
return folio;
}
next:
--
2.45.2
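For context, the allocator-side zeroing that the patch avoids duplicating comes from the init_on_alloc path in the page allocator. Below is a condensed, non-authoritative sketch of that path; the function names follow the upstream kernel, but the bodies are simplified for illustration and omit poisoning, KASAN and other handling:

static inline bool want_init_on_alloc(gfp_t flags)
{
	/* init_on_alloc=1 flips this static key at boot */
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc))
		return true;
	return flags & __GFP_ZERO;
}

/* mm/page_alloc.c, condensed: every page leaving the buddy allocator is
 * already zeroed here when init_on_alloc=1, whether or not the caller
 * passed __GFP_ZERO. */
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
{
	if (want_init_on_alloc(gfp_flags)) {
		int i;

		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);	/* zeroing #1 */
	}
}

With that in place, the pre-patch generic vma_alloc_zeroed_movable_folio() shown in the diff above performed zeroing #2 on the same order-0 page, and folio_zero_user() did the same for >0 order folios.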
Am 11.10.24 um 17:03 schrieb Zi Yan: > Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and > init_on_free=1 boot options") forces allocated page to be zeroed in > post_alloc_hook() when init_on_alloc=1. > > For order-0 folios, if arch does not define > vma_alloc_zeroed_movable_folio(), the default implementation again zeros > the page return from the buddy allocator. So the page is zeroed twice. > Fix it by passing __GFP_ZERO instead to avoid double page zeroing. > At the moment, s390,arm64,x86,alpha,m68k are not impacted since they > define their own vma_alloc_zeroed_movable_folio(). > > For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to > zero the folio again. Fix it by calling folio_zero_user() only if > init_on_alloc is set. All arch are impacted. > > Added alloc_zeroed() helper to encapsulate the init_on_alloc check. > > Signed-off-by: Zi Yan <ziy@nvidia.com> > --- > include/linux/highmem.h | 8 +------- > mm/huge_memory.c | 3 ++- > mm/internal.h | 6 ++++++ > mm/memory.c | 3 ++- > 4 files changed, 11 insertions(+), 9 deletions(-) > > diff --git a/include/linux/highmem.h b/include/linux/highmem.h > index bec9bd715acf..6e452bd8e7e3 100644 > --- a/include/linux/highmem.h > +++ b/include/linux/highmem.h > @@ -224,13 +224,7 @@ static inline > struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, > unsigned long vaddr) > { > - struct folio *folio; > - > - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); > - if (folio) > - clear_user_highpage(&folio->page, vaddr); > - > - return folio; > + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); > } > #endif > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 82f464865570..5dcbea96edb7 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, > } > folio_throttle_swaprate(folio, gfp); > > - folio_zero_user(folio, addr); > + if (!alloc_zeroed()) > + folio_zero_user(folio, addr); It might be reasonable to spell out why we are not using GFP_ZERO somewhere, something like /* * We are not using __GFP_ZERO because folio_zero_user() will make sure that the * page corresponding to the faulting address will be hot in the cache. */ Sth. like that maybe. Acked-by: David Hildenbrand <david@redhat.com> -- Cheers, David / dhildenb
On 21 Oct 2024, at 8:23, David Hildenbrand wrote: > Am 11.10.24 um 17:03 schrieb Zi Yan: >> Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and >> init_on_free=1 boot options") forces allocated page to be zeroed in >> post_alloc_hook() when init_on_alloc=1. >> >> For order-0 folios, if arch does not define >> vma_alloc_zeroed_movable_folio(), the default implementation again zeros >> the page return from the buddy allocator. So the page is zeroed twice. >> Fix it by passing __GFP_ZERO instead to avoid double page zeroing. >> At the moment, s390,arm64,x86,alpha,m68k are not impacted since they >> define their own vma_alloc_zeroed_movable_folio(). >> >> For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to >> zero the folio again. Fix it by calling folio_zero_user() only if >> init_on_alloc is set. All arch are impacted. >> >> Added alloc_zeroed() helper to encapsulate the init_on_alloc check. >> >> Signed-off-by: Zi Yan <ziy@nvidia.com> >> --- >> include/linux/highmem.h | 8 +------- >> mm/huge_memory.c | 3 ++- >> mm/internal.h | 6 ++++++ >> mm/memory.c | 3 ++- >> 4 files changed, 11 insertions(+), 9 deletions(-) >> >> diff --git a/include/linux/highmem.h b/include/linux/highmem.h >> index bec9bd715acf..6e452bd8e7e3 100644 >> --- a/include/linux/highmem.h >> +++ b/include/linux/highmem.h >> @@ -224,13 +224,7 @@ static inline >> struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, >> unsigned long vaddr) >> { >> - struct folio *folio; >> - >> - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); >> - if (folio) >> - clear_user_highpage(&folio->page, vaddr); >> - >> - return folio; >> + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); >> } >> #endif >> diff --git a/mm/huge_memory.c b/mm/huge_memory.c >> index 82f464865570..5dcbea96edb7 100644 >> --- a/mm/huge_memory.c >> +++ b/mm/huge_memory.c >> @@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, >> } >> folio_throttle_swaprate(folio, gfp); >> - folio_zero_user(folio, addr); >> + if (!alloc_zeroed()) >> + folio_zero_user(folio, addr); > > > > It might be reasonable to spell out why we are not using GFP_ZERO somewhere, something like > > /* > * We are not using __GFP_ZERO because folio_zero_user() will make sure that the > * page corresponding to the faulting address will be hot in the cache. > */ > > Sth. like that maybe. I changed the wording a bit to fit the if statement and put the comment in both call sites. Let me know how it looks. Thanks. diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 830d6aa5bf97..b304bb3ffcef 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1180,6 +1180,11 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, } folio_throttle_swaprate(folio, gfp); + /* + * When a folio is not zeroed during allocation (__GFP_ZERO not used), + * folio_zero_user() is used to make sure that the page corresponding + * to the faulting address will be hot in the cache after zeroing. 
+ */ if (!alloc_zeroed()) folio_zero_user(folio, addr); /* diff --git a/mm/memory.c b/mm/memory.c index 0f614523b9f4..42c8bb5fcd8e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4719,6 +4719,13 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) goto next; } folio_throttle_swaprate(folio, gfp); + /* + * When a folio is not zeroed during allocation + * (__GFP_ZERO not used), folio_zero_user() is used + * to make sure that the page corresponding to the + * faulting address will be hot in the cache after + * zeroing. + */ if (!alloc_zeroed()) folio_zero_user(folio, vmf->address); return folio; > > Acked-by: David Hildenbrand <david@redhat.com> Thanks. -- Best Regards, Yan, Zi
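The cache-hotness rationale in the new comment can be made concrete: for a large folio, folio_zero_user() arranges for the subpage containing the faulting address to be cleared last, so it is still warm in the cache when the fault returns to userspace. The sketch below illustrates only that ordering and is not the upstream implementation, which is more elaborate (it sweeps toward the target subpage from both ends and reschedules between subpages):

/* Illustration only: zero every subpage of a large folio, leaving the
 * subpage that contains the faulting address for last. */
static void zero_folio_target_last(struct folio *folio, unsigned long addr_hint)
{
	unsigned long base = ALIGN_DOWN(addr_hint, folio_size(folio));
	unsigned int nr = folio_nr_pages(folio);
	unsigned int target = (addr_hint - base) >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (i == target)
			continue;	/* defer the faulting subpage */
		clear_user_highpage(folio_page(folio, i),
				    base + i * PAGE_SIZE);
	}
	/* cleared last so it stays hot in the cache */
	clear_user_highpage(folio_page(folio, target), addr_hint);
}

This is why the patch keeps folio_zero_user() for large folios and only skips it when the allocator has already zeroed the pages, instead of switching those call sites to __GFP_ZERO.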
On 21 Oct 2024, at 10:21, Zi Yan wrote: > On 21 Oct 2024, at 8:23, David Hildenbrand wrote: > >> Am 11.10.24 um 17:03 schrieb Zi Yan: >>> Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and >>> init_on_free=1 boot options") forces allocated page to be zeroed in >>> post_alloc_hook() when init_on_alloc=1. >>> >>> For order-0 folios, if arch does not define >>> vma_alloc_zeroed_movable_folio(), the default implementation again zeros >>> the page return from the buddy allocator. So the page is zeroed twice. >>> Fix it by passing __GFP_ZERO instead to avoid double page zeroing. >>> At the moment, s390,arm64,x86,alpha,m68k are not impacted since they >>> define their own vma_alloc_zeroed_movable_folio(). >>> >>> For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to >>> zero the folio again. Fix it by calling folio_zero_user() only if >>> init_on_alloc is set. All arch are impacted. >>> >>> Added alloc_zeroed() helper to encapsulate the init_on_alloc check. >>> >>> Signed-off-by: Zi Yan <ziy@nvidia.com> >>> --- >>> include/linux/highmem.h | 8 +------- >>> mm/huge_memory.c | 3 ++- >>> mm/internal.h | 6 ++++++ >>> mm/memory.c | 3 ++- >>> 4 files changed, 11 insertions(+), 9 deletions(-) >>> >>> diff --git a/include/linux/highmem.h b/include/linux/highmem.h >>> index bec9bd715acf..6e452bd8e7e3 100644 >>> --- a/include/linux/highmem.h >>> +++ b/include/linux/highmem.h >>> @@ -224,13 +224,7 @@ static inline >>> struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, >>> unsigned long vaddr) >>> { >>> - struct folio *folio; >>> - >>> - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); >>> - if (folio) >>> - clear_user_highpage(&folio->page, vaddr); >>> - >>> - return folio; >>> + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); >>> } >>> #endif >>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c >>> index 82f464865570..5dcbea96edb7 100644 >>> --- a/mm/huge_memory.c >>> +++ b/mm/huge_memory.c >>> @@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, >>> } >>> folio_throttle_swaprate(folio, gfp); >>> - folio_zero_user(folio, addr); >>> + if (!alloc_zeroed()) >>> + folio_zero_user(folio, addr); >> >> >> >> It might be reasonable to spell out why we are not using GFP_ZERO somewhere, something like >> >> /* >> * We are not using __GFP_ZERO because folio_zero_user() will make sure that the >> * page corresponding to the faulting address will be hot in the cache. >> */ >> >> Sth. like that maybe. > > I changed the wording a bit to fit the if statement and put the comment in both > call sites. Let me know how it looks. Thanks. Hi Andrew, Do you mind folding the changes below into the original patch? Thanks. diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 830d6aa5bf97..b304bb3ffcef 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1180,6 +1180,11 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, } folio_throttle_swaprate(folio, gfp); + /* + * When a folio is not zeroed during allocation (__GFP_ZERO not used), + * folio_zero_user() is used to make sure that the page corresponding + * to the faulting address will be hot in the cache after zeroing. 
+ */ if (!alloc_zeroed()) folio_zero_user(folio, addr); /* diff --git a/mm/memory.c b/mm/memory.c index 0f614523b9f4..42c8bb5fcd8e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4719,6 +4719,13 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) goto next; } folio_throttle_swaprate(folio, gfp); + /* + * When a folio is not zeroed during allocation + * (__GFP_ZERO not used), folio_zero_user() is used + * to make sure that the page corresponding to the + * faulting address will be hot in the cache after + * zeroing. + */ if (!alloc_zeroed()) folio_zero_user(folio, vmf->address); return folio; >> >> Acked-by: David Hildenbrand <david@redhat.com> > > Thanks. -- Best Regards, Yan, Zi
On 10/11/24 17:03, Zi Yan wrote: > Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and > init_on_free=1 boot options") forces allocated page to be zeroed in > post_alloc_hook() when init_on_alloc=1. > > For order-0 folios, if arch does not define > vma_alloc_zeroed_movable_folio(), the default implementation again zeros > the page return from the buddy allocator. So the page is zeroed twice. > Fix it by passing __GFP_ZERO instead to avoid double page zeroing. > At the moment, s390,arm64,x86,alpha,m68k are not impacted since they > define their own vma_alloc_zeroed_movable_folio(). > > For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to > zero the folio again. Fix it by calling folio_zero_user() only if > init_on_alloc is set. All arch are impacted. ^ not set? > > Added alloc_zeroed() helper to encapsulate the init_on_alloc check. > > Signed-off-by: Zi Yan <ziy@nvidia.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> > --- > include/linux/highmem.h | 8 +------- > mm/huge_memory.c | 3 ++- > mm/internal.h | 6 ++++++ > mm/memory.c | 3 ++- > 4 files changed, 11 insertions(+), 9 deletions(-) > > diff --git a/include/linux/highmem.h b/include/linux/highmem.h > index bec9bd715acf..6e452bd8e7e3 100644 > --- a/include/linux/highmem.h > +++ b/include/linux/highmem.h > @@ -224,13 +224,7 @@ static inline > struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, > unsigned long vaddr) > { > - struct folio *folio; > - > - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); > - if (folio) > - clear_user_highpage(&folio->page, vaddr); > - > - return folio; > + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); > } > #endif > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 82f464865570..5dcbea96edb7 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, > } > folio_throttle_swaprate(folio, gfp); > > - folio_zero_user(folio, addr); > + if (!alloc_zeroed()) > + folio_zero_user(folio, addr); > /* > * The memory barrier inside __folio_mark_uptodate makes sure that > * folio_zero_user writes become visible before the set_pmd_at() > diff --git a/mm/internal.h b/mm/internal.h > index 906da6280c2d..508f7802dd2b 100644 > --- a/mm/internal.h > +++ b/mm/internal.h > @@ -1233,6 +1233,12 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr, > void touch_pmd(struct vm_area_struct *vma, unsigned long addr, > pmd_t *pmd, bool write); > > +static inline bool alloc_zeroed(void) > +{ > + return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, > + &init_on_alloc); > +} > + > enum { > /* mark page accessed */ > FOLL_TOUCH = 1 << 16, > diff --git a/mm/memory.c b/mm/memory.c > index c67359ddb61a..88252f0e06d0 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -4719,7 +4719,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) > goto next; > } > folio_throttle_swaprate(folio, gfp); > - folio_zero_user(folio, vmf->address); > + if (!alloc_zeroed()) > + folio_zero_user(folio, vmf->address); > return folio; > } > next:
On 16 Oct 2024, at 8:53, Vlastimil Babka wrote: > On 10/11/24 17:03, Zi Yan wrote: >> Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and >> init_on_free=1 boot options") forces allocated page to be zeroed in >> post_alloc_hook() when init_on_alloc=1. >> >> For order-0 folios, if arch does not define >> vma_alloc_zeroed_movable_folio(), the default implementation again zeros >> the page return from the buddy allocator. So the page is zeroed twice. >> Fix it by passing __GFP_ZERO instead to avoid double page zeroing. >> At the moment, s390,arm64,x86,alpha,m68k are not impacted since they >> define their own vma_alloc_zeroed_movable_folio(). >> >> For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to >> zero the folio again. Fix it by calling folio_zero_user() only if >> init_on_alloc is set. All arch are impacted. > > ^ not set? You are right. The sentence should be: "Fix it by calling folio_zero_user() only if init_on_alloc is not set." Hi Andrew, Do you want me to resend this with fixed commit log? > >> >> Added alloc_zeroed() helper to encapsulate the init_on_alloc check. >> >> Signed-off-by: Zi Yan <ziy@nvidia.com> > > Acked-by: Vlastimil Babka <vbabka@suse.cz> Thanks. -- Best Regards, Yan, Zi
+Vlastimil On 11 Oct 2024, at 11:03, Zi Yan wrote: > Commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and > init_on_free=1 boot options") forces allocated page to be zeroed in > post_alloc_hook() when init_on_alloc=1. > > For order-0 folios, if arch does not define > vma_alloc_zeroed_movable_folio(), the default implementation again zeros > the page return from the buddy allocator. So the page is zeroed twice. > Fix it by passing __GFP_ZERO instead to avoid double page zeroing. > At the moment, s390,arm64,x86,alpha,m68k are not impacted since they > define their own vma_alloc_zeroed_movable_folio(). > > For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to > zero the folio again. Fix it by calling folio_zero_user() only if > init_on_alloc is set. All arch are impacted. > > Added alloc_zeroed() helper to encapsulate the init_on_alloc check. > > Signed-off-by: Zi Yan <ziy@nvidia.com> > --- > include/linux/highmem.h | 8 +------- > mm/huge_memory.c | 3 ++- > mm/internal.h | 6 ++++++ > mm/memory.c | 3 ++- > 4 files changed, 11 insertions(+), 9 deletions(-) > > diff --git a/include/linux/highmem.h b/include/linux/highmem.h > index bec9bd715acf..6e452bd8e7e3 100644 > --- a/include/linux/highmem.h > +++ b/include/linux/highmem.h > @@ -224,13 +224,7 @@ static inline > struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, > unsigned long vaddr) > { > - struct folio *folio; > - > - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); > - if (folio) > - clear_user_highpage(&folio->page, vaddr); > - > - return folio; > + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); > } > #endif > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 82f464865570..5dcbea96edb7 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -1176,7 +1176,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, > } > folio_throttle_swaprate(folio, gfp); > > - folio_zero_user(folio, addr); > + if (!alloc_zeroed()) > + folio_zero_user(folio, addr); > /* > * The memory barrier inside __folio_mark_uptodate makes sure that > * folio_zero_user writes become visible before the set_pmd_at() > diff --git a/mm/internal.h b/mm/internal.h > index 906da6280c2d..508f7802dd2b 100644 > --- a/mm/internal.h > +++ b/mm/internal.h > @@ -1233,6 +1233,12 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr, > void touch_pmd(struct vm_area_struct *vma, unsigned long addr, > pmd_t *pmd, bool write); > > +static inline bool alloc_zeroed(void) > +{ > + return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, > + &init_on_alloc); > +} > + > enum { > /* mark page accessed */ > FOLL_TOUCH = 1 << 16, > diff --git a/mm/memory.c b/mm/memory.c > index c67359ddb61a..88252f0e06d0 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -4719,7 +4719,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) > goto next; > } > folio_throttle_swaprate(folio, gfp); > - folio_zero_user(folio, vmf->address); > + if (!alloc_zeroed()) > + folio_zero_user(folio, vmf->address); > return folio; > } > next: > -- > 2.45.2 Best Regards, Yan, Zi