In preparation for using vm_flags to ensure guard pages for shadow stacks,
supply them as an argument to generic_get_unmapped_area(). The only user
outside of the core code is the PowerPC book3s64 implementation which is
trivially wrapping the generic implementation in the radix_enabled() case.
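
As a rough illustration of where this is heading (not part of this patch; the
helper below is only an assumed sketch, and the extra space is presumed to be
carried via the start_gap field of struct vm_unmapped_area_info), the generic
allocators can then widen the gap they search for when the caller's vm_flags
mark the mapping as a shadow stack:

	/*
	 * Sketch only: how generic_get_unmapped_area() might consume the
	 * vm_flags it now receives.  stack_guard_placement() is an assumed
	 * helper for this illustration, not something added by this patch.
	 */
	static unsigned long stack_guard_placement(vm_flags_t vm_flags)
	{
		if (vm_flags & VM_SHADOW_STACK)
			return PAGE_SIZE;

		return 0;
	}

	/* ...and in generic_get_unmapped_area(), before vm_unmapped_area(): */
	info.start_gap = stack_guard_placement(vm_flags);
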
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/powerpc/mm/book3s64/slice.c | 4 ++--
include/linux/sched/mm.h | 4 ++--
mm/mmap.c | 10 ++++++----
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index ada6bf896ef8..87307d0fc3b8 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -641,7 +641,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr, len, flags,
mm_ctx_user_psize(&current->mm->context), 0);
@@ -655,7 +655,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr0, len, flags,
mm_ctx_user_psize(&current->mm->context), 1);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index c4d34abc45d4..07bb8d4181d7 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -204,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
diff --git a/mm/mmap.c b/mm/mmap.c
index 7528146f886f..b06ba847c96e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1789,7 +1789,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -1823,7 +1823,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
@@ -1834,7 +1835,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
@@ -1887,7 +1888,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
--
2.39.2
On Mon, Sep 02, 2024 at 08:08:14PM +0100, Mark Brown wrote:
>In preparation for using vm_flags to ensure guard pages for shadow stacks,
>supply them as an argument to generic_get_unmapped_area(). The only user
>outside of the core code is the PowerPC book3s64 implementation which is
>trivially wrapping the generic implementation in the radix_enabled() case.
>
>Signed-off-by: Mark Brown <broonie@kernel.org>

Reviewed-by: Deepak Gupta <debug@rivosinc.com>
Mark Brown <broonie@kernel.org> writes:
> In preparation for using vm_flags to ensure guard pages for shadow stacks,
> supply them as an argument to generic_get_unmapped_area(). The only user
> outside of the core code is the PowerPC book3s64 implementation which is
> trivially wrapping the generic implementation in the radix_enabled() case.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
> arch/powerpc/mm/book3s64/slice.c | 4 ++--
> include/linux/sched/mm.h | 4 ++--
> mm/mmap.c | 10 ++++++----
> 3 files changed, 10 insertions(+), 8 deletions(-)
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
cheers
> diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
> index ada6bf896ef8..87307d0fc3b8 100644
> --- a/arch/powerpc/mm/book3s64/slice.c
> +++ b/arch/powerpc/mm/book3s64/slice.c
> @@ -641,7 +641,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr, len, flags,
> mm_ctx_user_psize(&current->mm->context), 0);
> @@ -655,7 +655,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr0, len, flags,
> mm_ctx_user_psize(&current->mm->context), 1);
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index c4d34abc45d4..07bb8d4181d7 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -204,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> #else
> static inline void arch_pick_mmap_layout(struct mm_struct *mm,
> struct rlimit *rlim_stack) {}
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 7528146f886f..b06ba847c96e 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1789,7 +1789,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct mm_struct *mm = current->mm;
> struct vm_area_struct *vma, *prev;
> @@ -1823,7 +1823,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
> @@ -1834,7 +1835,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct vm_area_struct *vma, *prev;
> struct mm_struct *mm = current->mm;
> @@ -1887,7 +1888,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
>
> --
> 2.39.2
* Mark Brown <broonie@kernel.org> [240902 15:09]:
> In preparation for using vm_flags to ensure guard pages for shadow stacks,
> supply them as an argument to generic_get_unmapped_area(). The only user
> outside of the core code is the PowerPC book3s64 implementation which is
> trivially wrapping the generic implementation in the radix_enabled() case.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
It is interesting that book3s64 ppc is special in this regard.
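
(For anyone reading along without the powerpc background: with the hash MMU the
address space is managed in slices with per-slice page sizes, so book3s64 keeps
its own slice_get_unmapped_area() and only the radix case falls through to the
generic search.  Roughly, after this patch the hook has the shape sketched
below, not quoted verbatim from the file; note the slice path still takes no
vm_flags.)

	unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
	{
		/* Radix MMU: generic search, now vm_flags aware. */
		if (radix_enabled())
			return generic_get_unmapped_area(filp, addr, len, pgoff,
							 flags, vm_flags);

		/* Hash MMU: slice allocator does its own search. */
		return slice_get_unmapped_area(addr, len, flags,
					       mm_ctx_user_psize(&current->mm->context), 0);
	}
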
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> ---
> arch/powerpc/mm/book3s64/slice.c | 4 ++--
> include/linux/sched/mm.h | 4 ++--
> mm/mmap.c | 10 ++++++----
> 3 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
> index ada6bf896ef8..87307d0fc3b8 100644
> --- a/arch/powerpc/mm/book3s64/slice.c
> +++ b/arch/powerpc/mm/book3s64/slice.c
> @@ -641,7 +641,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr, len, flags,
> mm_ctx_user_psize(&current->mm->context), 0);
> @@ -655,7 +655,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr0, len, flags,
> mm_ctx_user_psize(&current->mm->context), 1);
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index c4d34abc45d4..07bb8d4181d7 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -204,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> #else
> static inline void arch_pick_mmap_layout(struct mm_struct *mm,
> struct rlimit *rlim_stack) {}
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 7528146f886f..b06ba847c96e 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1789,7 +1789,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct mm_struct *mm = current->mm;
> struct vm_area_struct *vma, *prev;
> @@ -1823,7 +1823,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
> @@ -1834,7 +1835,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct vm_area_struct *vma, *prev;
> struct mm_struct *mm = current->mm;
> @@ -1887,7 +1888,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
>
> --
> 2.39.2
>
On Mon, Sep 02, 2024 at 08:08:14PM GMT, Mark Brown wrote:
> In preparation for using vm_flags to ensure guard pages for shadow stacks,
> supply them as an argument to generic_get_unmapped_area(). The only user
> outside of the core code is the PowerPC book3s64 implementation which is
> trivially wrapping the generic implementation in the radix_enabled() case.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> (for mm/mmap.c part)
> ---
> arch/powerpc/mm/book3s64/slice.c | 4 ++--
> include/linux/sched/mm.h | 4 ++--
> mm/mmap.c | 10 ++++++----
> 3 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
> index ada6bf896ef8..87307d0fc3b8 100644
> --- a/arch/powerpc/mm/book3s64/slice.c
> +++ b/arch/powerpc/mm/book3s64/slice.c
> @@ -641,7 +641,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr, len, flags,
> mm_ctx_user_psize(&current->mm->context), 0);
> @@ -655,7 +655,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> vm_flags_t vm_flags)
> {
> if (radix_enabled())
> - return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
>
> return slice_get_unmapped_area(addr0, len, flags,
> mm_ctx_user_psize(&current->mm->context), 1);
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index c4d34abc45d4..07bb8d4181d7 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -204,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags);
> + unsigned long flags, vm_flags_t vm_flags);
> #else
> static inline void arch_pick_mmap_layout(struct mm_struct *mm,
> struct rlimit *rlim_stack) {}
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 7528146f886f..b06ba847c96e 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1789,7 +1789,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
> unsigned long
> generic_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct mm_struct *mm = current->mm;
> struct vm_area_struct *vma, *prev;
> @@ -1823,7 +1823,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
> @@ -1834,7 +1835,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
> unsigned long
> generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> - unsigned long flags)
> + unsigned long flags, vm_flags_t vm_flags)
> {
> struct vm_area_struct *vma, *prev;
> struct mm_struct *mm = current->mm;
> @@ -1887,7 +1888,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
> unsigned long len, unsigned long pgoff,
> unsigned long flags, vm_flags_t vm_flags)
> {
> - return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
> + return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
> + vm_flags);
> }
> #endif
>
>
> --
> 2.39.2
>