[PATCH v3 08/33] linux-user: Remove qemu_host_page_{size, mask} from mmap.c

Richard Henderson posted 33 patches 10 months, 4 weeks ago
Maintainers: Richard Henderson <richard.henderson@linaro.org>, Paolo Bonzini <pbonzini@redhat.com>, Riku Voipio <riku.voipio@iki.fi>, Warner Losh <imp@bsdimp.com>, Kyle Evans <kevans@freebsd.org>, Stefan Berger <stefanb@linux.vnet.ibm.com>, Eduardo Habkost <eduardo@habkost.net>, Marcel Apfelbaum <marcel.apfelbaum@gmail.com>, "Philippe Mathieu-Daudé" <philmd@linaro.org>, Yanan Wang <wangyanan55@huawei.com>, Laurent Vivier <laurent@vivier.eu>, Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>, David Hildenbrand <david@redhat.com>, Peter Maydell <peter.maydell@linaro.org>, Nicholas Piggin <npiggin@gmail.com>, Daniel Henrique Barboza <danielhb413@gmail.com>, "Cédric Le Goater" <clg@kaod.org>, "Alex Bennée" <alex.bennee@linaro.org>, Yoshinori Sato <ysato@users.sourceforge.jp>
[PATCH v3 08/33] linux-user: Remove qemu_host_page_{size, mask} from mmap.c
Posted by Richard Henderson 10 months, 4 weeks ago
Use qemu_real_host_page_size instead.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/mmap.c | 66 +++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 96c9433e27..4d3c8717b9 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -165,6 +165,7 @@ static int target_to_host_prot(int prot)
 /* NOTE: all the constants are the HOST ones, but addresses are target. */
 int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong starts[3];
     abi_ulong lens[3];
     int prots[3];
@@ -189,13 +190,13 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
     }
 
     last = start + len - 1;
-    host_start = start & qemu_host_page_mask;
+    host_start = start & -host_page_size;
     host_last = HOST_PAGE_ALIGN(last) - 1;
     nranges = 0;
 
     mmap_lock();
 
-    if (host_last - host_start < qemu_host_page_size) {
+    if (host_last - host_start < host_page_size) {
         /* Single host page contains all guest pages: sum the prot. */
         prot1 = target_prot;
         for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
@@ -205,7 +206,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
             prot1 |= page_get_flags(a + 1);
         }
         starts[nranges] = host_start;
-        lens[nranges] = qemu_host_page_size;
+        lens[nranges] = host_page_size;
         prots[nranges] = prot1;
         nranges++;
     } else {
@@ -218,10 +219,10 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
             /* If the resulting sum differs, create a new range. */
             if (prot1 != target_prot) {
                 starts[nranges] = host_start;
-                lens[nranges] = qemu_host_page_size;
+                lens[nranges] = host_page_size;
                 prots[nranges] = prot1;
                 nranges++;
-                host_start += qemu_host_page_size;
+                host_start += host_page_size;
             }
         }
 
@@ -233,9 +234,9 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
             }
             /* If the resulting sum differs, create a new range. */
             if (prot1 != target_prot) {
-                host_last -= qemu_host_page_size;
+                host_last -= host_page_size;
                 starts[nranges] = host_last + 1;
-                lens[nranges] = qemu_host_page_size;
+                lens[nranges] = host_page_size;
                 prots[nranges] = prot1;
                 nranges++;
             }
@@ -270,6 +271,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
 static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                       int prot, int flags, int fd, off_t offset)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong real_last;
     void *host_start;
     int prot_old, prot_new;
@@ -286,7 +288,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
         return false;
     }
 
-    real_last = real_start + qemu_host_page_size - 1;
+    real_last = real_start + host_page_size - 1;
     host_start = g2h_untagged(real_start);
 
     /* Get the protection of the target pages outside the mapping. */
@@ -304,12 +306,12 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
          * outside of the fragment we need to map.  Allocate a new host
          * page to cover, discarding whatever else may have been present.
          */
-        void *p = mmap(host_start, qemu_host_page_size,
+        void *p = mmap(host_start, host_page_size,
                        target_to_host_prot(prot),
                        flags | MAP_ANONYMOUS, -1, 0);
         if (p != host_start) {
             if (p != MAP_FAILED) {
-                munmap(p, qemu_host_page_size);
+                munmap(p, host_page_size);
                 errno = EEXIST;
             }
             return false;
@@ -324,7 +326,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
     /* Adjust protection to be able to write. */
     if (!(host_prot_old & PROT_WRITE)) {
         host_prot_old |= PROT_WRITE;
-        mprotect(host_start, qemu_host_page_size, host_prot_old);
+        mprotect(host_start, host_page_size, host_prot_old);
     }
 
     /* Read or zero the new guest pages. */
@@ -338,7 +340,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
 
     /* Put final protection */
     if (host_prot_new != host_prot_old) {
-        mprotect(host_start, qemu_host_page_size, host_prot_new);
+        mprotect(host_start, host_page_size, host_prot_new);
     }
     return true;
 }
@@ -373,17 +375,18 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
  */
 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 {
+    int host_page_size = qemu_real_host_page_size();
     void *ptr, *prev;
     abi_ulong addr;
     int wrapped, repeat;
 
-    align = MAX(align, qemu_host_page_size);
+    align = MAX(align, host_page_size);
 
     /* If 'start' == 0, then a default start address is used. */
     if (start == 0) {
         start = mmap_next_start;
     } else {
-        start &= qemu_host_page_mask;
+        start &= -host_page_size;
     }
     start = ROUND_UP(start, align);
 
@@ -492,6 +495,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                      int flags, int fd, off_t offset)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong ret, last, real_start, real_last, retaddr, host_len;
     abi_ulong passthrough_start = -1, passthrough_last = 0;
     int page_flags;
@@ -537,8 +541,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         }
     }
 
-    real_start = start & qemu_host_page_mask;
-    host_offset = offset & qemu_host_page_mask;
+    real_start = start & -host_page_size;
+    host_offset = offset & -host_page_size;
 
     /*
      * If the user is asking for the kernel to find a location, do that
@@ -567,8 +571,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
      * may need to truncate file maps at EOF and add extra anonymous pages
      * up to the targets page boundary.
      */
-    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
-        !(flags & MAP_ANONYMOUS)) {
+    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
         struct stat sb;
 
         if (fstat(fd, &sb) == -1) {
@@ -595,11 +598,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         host_len = HOST_PAGE_ALIGN(host_len);
         host_prot = target_to_host_prot(target_prot);
 
-        /*
-         * Note: we prefer to control the mapping address. It is
-         * especially important if qemu_host_page_size >
-         * qemu_real_host_page_size.
-         */
+        /* Note: we prefer to control the mapping address. */
         p = mmap(g2h_untagged(start), host_len, host_prot,
                  flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
         if (p == MAP_FAILED) {
@@ -665,7 +664,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
          * aligned, so we read it
          */
         if (!(flags & MAP_ANONYMOUS) &&
-            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
+            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
             /*
              * msync() won't work here, so we return an error if write is
              * possible while it is a shared mapping
@@ -694,7 +693,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
 
         /* handle the start of the mapping */
         if (start > real_start) {
-            if (real_last == real_start + qemu_host_page_size - 1) {
+            if (real_last == real_start + host_page_size - 1) {
                 /* one single host page */
                 if (!mmap_frag(real_start, start, last,
                                target_prot, flags, fd, offset)) {
@@ -703,21 +702,21 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                 goto the_end1;
             }
             if (!mmap_frag(real_start, start,
-                           real_start + qemu_host_page_size - 1,
+                           real_start + host_page_size - 1,
                            target_prot, flags, fd, offset)) {
                 goto fail;
             }
-            real_start += qemu_host_page_size;
+            real_start += host_page_size;
         }
         /* handle the end of the mapping */
         if (last < real_last) {
-            abi_ulong real_page = real_last - qemu_host_page_size + 1;
+            abi_ulong real_page = real_last - host_page_size + 1;
             if (!mmap_frag(real_page, real_page, last,
                            target_prot, flags, fd,
                            offset + real_page - start)) {
                 goto fail;
             }
-            real_last -= qemu_host_page_size;
+            real_last -= host_page_size;
         }
 
         /* map the middle (easier) */
@@ -784,6 +783,7 @@ fail:
 
 static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong real_start;
     abi_ulong real_last;
     abi_ulong real_len;
@@ -793,7 +793,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
     int prot;
 
     last = start + len - 1;
-    real_start = start & qemu_host_page_mask;
+    real_start = start & -host_page_size;
     real_last = HOST_PAGE_ALIGN(last) - 1;
 
     /*
@@ -802,7 +802,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
      * The single page special case is required for the last page,
      * lest real_start overflow to zero.
      */
-    if (real_last - real_start < qemu_host_page_size) {
+    if (real_last - real_start < host_page_size) {
         prot = 0;
         for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(a);
@@ -818,14 +818,14 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
             prot |= page_get_flags(a);
         }
         if (prot != 0) {
-            real_start += qemu_host_page_size;
+            real_start += host_page_size;
         }
 
         for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(a + 1);
         }
         if (prot != 0) {
-            real_last -= qemu_host_page_size;
+            real_last -= host_page_size;
         }
 
         if (real_last < real_start) {
-- 
2.34.1
Re: [PATCH v3 08/33] linux-user: Remove qemu_host_page_{size, mask} from mmap.c
Posted by Ilya Leoshkevich 10 months ago
On Tue, Jan 02, 2024 at 12:57:43PM +1100, Richard Henderson wrote:
> Use qemu_real_host_page_size instead.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  linux-user/mmap.c | 66 +++++++++++++++++++++++------------------------
>  1 file changed, 33 insertions(+), 33 deletions(-)

Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Re: [PATCH v3 08/33] linux-user: Remove qemu_host_page_{size, mask} from mmap.c
Posted by Pierrick Bouvier 10 months, 3 weeks ago
On 1/2/24 05:57, Richard Henderson wrote:
> Use qemu_real_host_page_size instead.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   linux-user/mmap.c | 66 +++++++++++++++++++++++------------------------
>   1 file changed, 33 insertions(+), 33 deletions(-)
> 
> diff --git a/linux-user/mmap.c b/linux-user/mmap.c
> index 96c9433e27..4d3c8717b9 100644
> --- a/linux-user/mmap.c
> +++ b/linux-user/mmap.c
> @@ -165,6 +165,7 @@ static int target_to_host_prot(int prot)
>   /* NOTE: all the constants are the HOST ones, but addresses are target. */
>   int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>   {
> +    int host_page_size = qemu_real_host_page_size();
>       abi_ulong starts[3];
>       abi_ulong lens[3];
>       int prots[3];
> @@ -189,13 +190,13 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>       }
>   
>       last = start + len - 1;
> -    host_start = start & qemu_host_page_mask;
> +    host_start = start & -host_page_size;
>       host_last = HOST_PAGE_ALIGN(last) - 1;
>       nranges = 0;
>   
>       mmap_lock();
>   
> -    if (host_last - host_start < qemu_host_page_size) {
> +    if (host_last - host_start < host_page_size) {
>           /* Single host page contains all guest pages: sum the prot. */
>           prot1 = target_prot;
>           for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
> @@ -205,7 +206,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>               prot1 |= page_get_flags(a + 1);
>           }
>           starts[nranges] = host_start;
> -        lens[nranges] = qemu_host_page_size;
> +        lens[nranges] = host_page_size;
>           prots[nranges] = prot1;
>           nranges++;
>       } else {
> @@ -218,10 +219,10 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>               /* If the resulting sum differs, create a new range. */
>               if (prot1 != target_prot) {
>                   starts[nranges] = host_start;
> -                lens[nranges] = qemu_host_page_size;
> +                lens[nranges] = host_page_size;
>                   prots[nranges] = prot1;
>                   nranges++;
> -                host_start += qemu_host_page_size;
> +                host_start += host_page_size;
>               }
>           }
>   
> @@ -233,9 +234,9 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>               }
>               /* If the resulting sum differs, create a new range. */
>               if (prot1 != target_prot) {
> -                host_last -= qemu_host_page_size;
> +                host_last -= host_page_size;
>                   starts[nranges] = host_last + 1;
> -                lens[nranges] = qemu_host_page_size;
> +                lens[nranges] = host_page_size;
>                   prots[nranges] = prot1;
>                   nranges++;
>               }
> @@ -270,6 +271,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>   static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
>                         int prot, int flags, int fd, off_t offset)
>   {
> +    int host_page_size = qemu_real_host_page_size();
>       abi_ulong real_last;
>       void *host_start;
>       int prot_old, prot_new;
> @@ -286,7 +288,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
>           return false;
>       }
>   
> -    real_last = real_start + qemu_host_page_size - 1;
> +    real_last = real_start + host_page_size - 1;
>       host_start = g2h_untagged(real_start);
>   
>       /* Get the protection of the target pages outside the mapping. */
> @@ -304,12 +306,12 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
>            * outside of the fragment we need to map.  Allocate a new host
>            * page to cover, discarding whatever else may have been present.
>            */
> -        void *p = mmap(host_start, qemu_host_page_size,
> +        void *p = mmap(host_start, host_page_size,
>                          target_to_host_prot(prot),
>                          flags | MAP_ANONYMOUS, -1, 0);
>           if (p != host_start) {
>               if (p != MAP_FAILED) {
> -                munmap(p, qemu_host_page_size);
> +                munmap(p, host_page_size);
>                   errno = EEXIST;
>               }
>               return false;
> @@ -324,7 +326,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
>       /* Adjust protection to be able to write. */
>       if (!(host_prot_old & PROT_WRITE)) {
>           host_prot_old |= PROT_WRITE;
> -        mprotect(host_start, qemu_host_page_size, host_prot_old);
> +        mprotect(host_start, host_page_size, host_prot_old);
>       }
>   
>       /* Read or zero the new guest pages. */
> @@ -338,7 +340,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
>   
>       /* Put final protection */
>       if (host_prot_new != host_prot_old) {
> -        mprotect(host_start, qemu_host_page_size, host_prot_new);
> +        mprotect(host_start, host_page_size, host_prot_new);
>       }
>       return true;
>   }
> @@ -373,17 +375,18 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
>    */
>   abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
>   {
> +    int host_page_size = qemu_real_host_page_size();
>       void *ptr, *prev;
>       abi_ulong addr;
>       int wrapped, repeat;
>   
> -    align = MAX(align, qemu_host_page_size);
> +    align = MAX(align, host_page_size);
>   
>       /* If 'start' == 0, then a default start address is used. */
>       if (start == 0) {
>           start = mmap_next_start;
>       } else {
> -        start &= qemu_host_page_mask;
> +        start &= -host_page_size;
>       }
>       start = ROUND_UP(start, align);
>   
> @@ -492,6 +495,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
>   abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>                        int flags, int fd, off_t offset)
>   {
> +    int host_page_size = qemu_real_host_page_size();
>       abi_ulong ret, last, real_start, real_last, retaddr, host_len;
>       abi_ulong passthrough_start = -1, passthrough_last = 0;
>       int page_flags;
> @@ -537,8 +541,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>           }
>       }
>   
> -    real_start = start & qemu_host_page_mask;
> -    host_offset = offset & qemu_host_page_mask;
> +    real_start = start & -host_page_size;
> +    host_offset = offset & -host_page_size;
>   
>       /*
>        * If the user is asking for the kernel to find a location, do that
> @@ -567,8 +571,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>        * may need to truncate file maps at EOF and add extra anonymous pages
>        * up to the targets page boundary.
>        */
> -    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
> -        !(flags & MAP_ANONYMOUS)) {
> +    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
>           struct stat sb;
>   
>           if (fstat(fd, &sb) == -1) {
> @@ -595,11 +598,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>           host_len = HOST_PAGE_ALIGN(host_len);
>           host_prot = target_to_host_prot(target_prot);
>   
> -        /*
> -         * Note: we prefer to control the mapping address. It is
> -         * especially important if qemu_host_page_size >
> -         * qemu_real_host_page_size.
> -         */
> +        /* Note: we prefer to control the mapping address. */
>           p = mmap(g2h_untagged(start), host_len, host_prot,
>                    flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
>           if (p == MAP_FAILED) {
> @@ -665,7 +664,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>            * aligned, so we read it
>            */
>           if (!(flags & MAP_ANONYMOUS) &&
> -            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
> +            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
>               /*
>                * msync() won't work here, so we return an error if write is
>                * possible while it is a shared mapping
> @@ -694,7 +693,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>   
>           /* handle the start of the mapping */
>           if (start > real_start) {
> -            if (real_last == real_start + qemu_host_page_size - 1) {
> +            if (real_last == real_start + host_page_size - 1) {
>                   /* one single host page */
>                   if (!mmap_frag(real_start, start, last,
>                                  target_prot, flags, fd, offset)) {
> @@ -703,21 +702,21 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>                   goto the_end1;
>               }
>               if (!mmap_frag(real_start, start,
> -                           real_start + qemu_host_page_size - 1,
> +                           real_start + host_page_size - 1,
>                              target_prot, flags, fd, offset)) {
>                   goto fail;
>               }
> -            real_start += qemu_host_page_size;
> +            real_start += host_page_size;
>           }
>           /* handle the end of the mapping */
>           if (last < real_last) {
> -            abi_ulong real_page = real_last - qemu_host_page_size + 1;
> +            abi_ulong real_page = real_last - host_page_size + 1;
>               if (!mmap_frag(real_page, real_page, last,
>                              target_prot, flags, fd,
>                              offset + real_page - start)) {
>                   goto fail;
>               }
> -            real_last -= qemu_host_page_size;
> +            real_last -= host_page_size;
>           }
>   
>           /* map the middle (easier) */
> @@ -784,6 +783,7 @@ fail:
>   
>   static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
>   {
> +    int host_page_size = qemu_real_host_page_size();
>       abi_ulong real_start;
>       abi_ulong real_last;
>       abi_ulong real_len;
> @@ -793,7 +793,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
>       int prot;
>   
>       last = start + len - 1;
> -    real_start = start & qemu_host_page_mask;
> +    real_start = start & -host_page_size;
>       real_last = HOST_PAGE_ALIGN(last) - 1;
>   
>       /*
> @@ -802,7 +802,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
>        * The single page special case is required for the last page,
>        * lest real_start overflow to zero.
>        */
> -    if (real_last - real_start < qemu_host_page_size) {
> +    if (real_last - real_start < host_page_size) {
>           prot = 0;
>           for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
>               prot |= page_get_flags(a);
> @@ -818,14 +818,14 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
>               prot |= page_get_flags(a);
>           }
>           if (prot != 0) {
> -            real_start += qemu_host_page_size;
> +            real_start += host_page_size;
>           }
>   
>           for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
>               prot |= page_get_flags(a + 1);
>           }
>           if (prot != 0) {
> -            real_last -= qemu_host_page_size;
> +            real_last -= host_page_size;
>           }
>   
>           if (real_last < real_start) {

For me, the transformations ~page_mask == page_size - 1 and page_mask ==
-page_size were not so intuitive until I tried them on paper.

Could it be useful to have macros PAGE_BITS(page_size) and
PAGE_MASK(page_size) for this? Both would be derived from page_size, so
there is no duplicated data, and it would avoid a subtle error from a
forgotten operator while helping to read the code.
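
For illustration, a minimal standalone sketch of the identities and of what
such helpers could look like (the PAGE_BITS/PAGE_MASK names and this demo
program are hypothetical, only meant to illustrate the suggestion):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Low bits that vary within a page: page_size - 1 == ~(-page_size). */
#define PAGE_BITS(page_size)  ((page_size) - 1)
/* High bits that select the page: -page_size == ~(page_size - 1). */
#define PAGE_MASK(page_size)  (-(page_size))

int main(void)
{
    uint64_t page_size = 4096;      /* must be a power of two */
    uint64_t addr = 0x12345678;

    /* The two spellings used in the patch are equivalent. */
    assert((addr & -page_size) == (addr & ~(page_size - 1)));
    assert((addr & (page_size - 1)) == (addr & ~-page_size));

    printf("page start:     0x%" PRIx64 "\n", addr & PAGE_MASK(page_size));
    printf("in-page offset: 0x%" PRIx64 "\n", addr & PAGE_BITS(page_size));
    return 0;
}

Both spellings expand to the same bit pattern for a power-of-two page size;
the macros would only make the intent explicit at the call sites.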

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Re: [PATCH v3 08/33] linux-user: Remove qemu_host_page_{size, mask} from mmap.c
Posted by Philippe Mathieu-Daudé 10 months, 3 weeks ago
On 8/1/24 10:47, Pierrick Bouvier wrote:
> On 1/2/24 05:57, Richard Henderson wrote:
>> Use qemu_real_host_page_size instead.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   linux-user/mmap.c | 66 +++++++++++++++++++++++------------------------
>>   1 file changed, 33 insertions(+), 33 deletions(-)
>>
>> diff --git a/linux-user/mmap.c b/linux-user/mmap.c
>> index 96c9433e27..4d3c8717b9 100644
>> --- a/linux-user/mmap.c
>> +++ b/linux-user/mmap.c
>> @@ -165,6 +165,7 @@ static int target_to_host_prot(int prot)
>>   /* NOTE: all the constants are the HOST ones, but addresses are 
>> target. */
>>   int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
>>   {
>> +    int host_page_size = qemu_real_host_page_size();
>>       abi_ulong starts[3];
>>       abi_ulong lens[3];
>>       int prots[3];
>> @@ -189,13 +190,13 @@ int target_mprotect(abi_ulong start, abi_ulong 
>> len, int target_prot)
>>       }
>>       last = start + len - 1;
>> -    host_start = start & qemu_host_page_mask;
>> +    host_start = start & -host_page_size;
>>       host_last = HOST_PAGE_ALIGN(last) - 1;
>>       nranges = 0;
>>       mmap_lock();
>> -    if (host_last - host_start < qemu_host_page_size) {
>> +    if (host_last - host_start < host_page_size) {
>>           /* Single host page contains all guest pages: sum the prot. */
>>           prot1 = target_prot;
>>           for (abi_ulong a = host_start; a < start; a += 
>> TARGET_PAGE_SIZE) {
>> @@ -205,7 +206,7 @@ int target_mprotect(abi_ulong start, abi_ulong 
>> len, int target_prot)
>>               prot1 |= page_get_flags(a + 1);
>>           }
>>           starts[nranges] = host_start;
>> -        lens[nranges] = qemu_host_page_size;
>> +        lens[nranges] = host_page_size;
>>           prots[nranges] = prot1;
>>           nranges++;
>>       } else {
>> @@ -218,10 +219,10 @@ int target_mprotect(abi_ulong start, abi_ulong 
>> len, int target_prot)
>>               /* If the resulting sum differs, create a new range. */
>>               if (prot1 != target_prot) {
>>                   starts[nranges] = host_start;
>> -                lens[nranges] = qemu_host_page_size;
>> +                lens[nranges] = host_page_size;
>>                   prots[nranges] = prot1;
>>                   nranges++;
>> -                host_start += qemu_host_page_size;
>> +                host_start += host_page_size;
>>               }
>>           }
>> @@ -233,9 +234,9 @@ int target_mprotect(abi_ulong start, abi_ulong 
>> len, int target_prot)
>>               }
>>               /* If the resulting sum differs, create a new range. */
>>               if (prot1 != target_prot) {
>> -                host_last -= qemu_host_page_size;
>> +                host_last -= host_page_size;
>>                   starts[nranges] = host_last + 1;
>> -                lens[nranges] = qemu_host_page_size;
>> +                lens[nranges] = host_page_size;
>>                   prots[nranges] = prot1;
>>                   nranges++;
>>               }
>> @@ -270,6 +271,7 @@ int target_mprotect(abi_ulong start, abi_ulong 
>> len, int target_prot)
>>   static bool mmap_frag(abi_ulong real_start, abi_ulong start, 
>> abi_ulong last,
>>                         int prot, int flags, int fd, off_t offset)
>>   {
>> +    int host_page_size = qemu_real_host_page_size();
>>       abi_ulong real_last;
>>       void *host_start;
>>       int prot_old, prot_new;
>> @@ -286,7 +288,7 @@ static bool mmap_frag(abi_ulong real_start, 
>> abi_ulong start, abi_ulong last,
>>           return false;
>>       }
>> -    real_last = real_start + qemu_host_page_size - 1;
>> +    real_last = real_start + host_page_size - 1;
>>       host_start = g2h_untagged(real_start);
>>       /* Get the protection of the target pages outside the mapping. */
>> @@ -304,12 +306,12 @@ static bool mmap_frag(abi_ulong real_start, 
>> abi_ulong start, abi_ulong last,
>>            * outside of the fragment we need to map.  Allocate a new host
>>            * page to cover, discarding whatever else may have been 
>> present.
>>            */
>> -        void *p = mmap(host_start, qemu_host_page_size,
>> +        void *p = mmap(host_start, host_page_size,
>>                          target_to_host_prot(prot),
>>                          flags | MAP_ANONYMOUS, -1, 0);
>>           if (p != host_start) {
>>               if (p != MAP_FAILED) {
>> -                munmap(p, qemu_host_page_size);
>> +                munmap(p, host_page_size);
>>                   errno = EEXIST;
>>               }
>>               return false;
>> @@ -324,7 +326,7 @@ static bool mmap_frag(abi_ulong real_start, 
>> abi_ulong start, abi_ulong last,
>>       /* Adjust protection to be able to write. */
>>       if (!(host_prot_old & PROT_WRITE)) {
>>           host_prot_old |= PROT_WRITE;
>> -        mprotect(host_start, qemu_host_page_size, host_prot_old);
>> +        mprotect(host_start, host_page_size, host_prot_old);
>>       }
>>       /* Read or zero the new guest pages. */
>> @@ -338,7 +340,7 @@ static bool mmap_frag(abi_ulong real_start, 
>> abi_ulong start, abi_ulong last,
>>       /* Put final protection */
>>       if (host_prot_new != host_prot_old) {
>> -        mprotect(host_start, qemu_host_page_size, host_prot_new);
>> +        mprotect(host_start, host_page_size, host_prot_new);
>>       }
>>       return true;
>>   }
>> @@ -373,17 +375,18 @@ static abi_ulong 
>> mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
>>    */
>>   abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong 
>> align)
>>   {
>> +    int host_page_size = qemu_real_host_page_size();
>>       void *ptr, *prev;
>>       abi_ulong addr;
>>       int wrapped, repeat;
>> -    align = MAX(align, qemu_host_page_size);
>> +    align = MAX(align, host_page_size);
>>       /* If 'start' == 0, then a default start address is used. */
>>       if (start == 0) {
>>           start = mmap_next_start;
>>       } else {
>> -        start &= qemu_host_page_mask;
>> +        start &= -host_page_size;
>>       }
>>       start = ROUND_UP(start, align);
>> @@ -492,6 +495,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong 
>> size, abi_ulong align)
>>   abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>>                        int flags, int fd, off_t offset)
>>   {
>> +    int host_page_size = qemu_real_host_page_size();
>>       abi_ulong ret, last, real_start, real_last, retaddr, host_len;
>>       abi_ulong passthrough_start = -1, passthrough_last = 0;
>>       int page_flags;
>> @@ -537,8 +541,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>           }
>>       }
>> -    real_start = start & qemu_host_page_mask;
>> -    host_offset = offset & qemu_host_page_mask;
>> +    real_start = start & -host_page_size;
>> +    host_offset = offset & -host_page_size;
>>       /*
>>        * If the user is asking for the kernel to find a location, do that
>> @@ -567,8 +571,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>        * may need to truncate file maps at EOF and add extra anonymous 
>> pages
>>        * up to the targets page boundary.
>>        */
>> -    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
>> -        !(flags & MAP_ANONYMOUS)) {
>> +    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
>>           struct stat sb;
>>           if (fstat(fd, &sb) == -1) {
>> @@ -595,11 +598,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>           host_len = HOST_PAGE_ALIGN(host_len);
>>           host_prot = target_to_host_prot(target_prot);
>> -        /*
>> -         * Note: we prefer to control the mapping address. It is
>> -         * especially important if qemu_host_page_size >
>> -         * qemu_real_host_page_size.
>> -         */
>> +        /* Note: we prefer to control the mapping address. */
>>           p = mmap(g2h_untagged(start), host_len, host_prot,
>>                    flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
>>           if (p == MAP_FAILED) {
>> @@ -665,7 +664,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>            * aligned, so we read it
>>            */
>>           if (!(flags & MAP_ANONYMOUS) &&
>> -            (offset & ~qemu_host_page_mask) != (start & 
>> ~qemu_host_page_mask)) {
>> +            (offset & (host_page_size - 1)) != (start & 
>> (host_page_size - 1))) {
>>               /*
>>                * msync() won't work here, so we return an error if 
>> write is
>>                * possible while it is a shared mapping
>> @@ -694,7 +693,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>           /* handle the start of the mapping */
>>           if (start > real_start) {
>> -            if (real_last == real_start + qemu_host_page_size - 1) {
>> +            if (real_last == real_start + host_page_size - 1) {
>>                   /* one single host page */
>>                   if (!mmap_frag(real_start, start, last,
>>                                  target_prot, flags, fd, offset)) {
>> @@ -703,21 +702,21 @@ abi_long target_mmap(abi_ulong start, abi_ulong 
>> len, int target_prot,
>>                   goto the_end1;
>>               }
>>               if (!mmap_frag(real_start, start,
>> -                           real_start + qemu_host_page_size - 1,
>> +                           real_start + host_page_size - 1,
>>                              target_prot, flags, fd, offset)) {
>>                   goto fail;
>>               }
>> -            real_start += qemu_host_page_size;
>> +            real_start += host_page_size;
>>           }
>>           /* handle the end of the mapping */
>>           if (last < real_last) {
>> -            abi_ulong real_page = real_last - qemu_host_page_size + 1;
>> +            abi_ulong real_page = real_last - host_page_size + 1;
>>               if (!mmap_frag(real_page, real_page, last,
>>                              target_prot, flags, fd,
>>                              offset + real_page - start)) {
>>                   goto fail;
>>               }
>> -            real_last -= qemu_host_page_size;
>> +            real_last -= host_page_size;
>>           }
>>           /* map the middle (easier) */
>> @@ -784,6 +783,7 @@ fail:
>>   static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
>>   {
>> +    int host_page_size = qemu_real_host_page_size();
>>       abi_ulong real_start;
>>       abi_ulong real_last;
>>       abi_ulong real_len;
>> @@ -793,7 +793,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, 
>> abi_ulong len)
>>       int prot;
>>       last = start + len - 1;
>> -    real_start = start & qemu_host_page_mask;
>> +    real_start = start & -host_page_size;
>>       real_last = HOST_PAGE_ALIGN(last) - 1;
>>       /*
>> @@ -802,7 +802,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, 
>> abi_ulong len)
>>        * The single page special case is required for the last page,
>>        * lest real_start overflow to zero.
>>        */
>> -    if (real_last - real_start < qemu_host_page_size) {
>> +    if (real_last - real_start < host_page_size) {
>>           prot = 0;
>>           for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
>>               prot |= page_get_flags(a);
>> @@ -818,14 +818,14 @@ static int mmap_reserve_or_unmap(abi_ulong 
>> start, abi_ulong len)
>>               prot |= page_get_flags(a);
>>           }
>>           if (prot != 0) {
>> -            real_start += qemu_host_page_size;
>> +            real_start += host_page_size;
>>           }
>>           for (prot = 0, a = last; a < real_last; a += 
>> TARGET_PAGE_SIZE) {
>>               prot |= page_get_flags(a + 1);
>>           }
>>           if (prot != 0) {
>> -            real_last -= qemu_host_page_size;
>> +            real_last -= host_page_size;
>>           }
>>           if (real_last < real_start) {
> 
> For me, the transformations ~page_mask == page_size - 1 and page_mask == 
> -page_size were not so intuitive until I tried them on paper.

I have been there ;) Then when you have to deal with code related
to pages, these become natural page-related operations.

> Could it be useful to have macros PAGE_BITS(page_size) and 
> PAGE_MASK(page_size) for this? Both would be derived from page_size, so 
> there is no duplicated data, and it would avoid a subtle error from a 
> forgotten operator while helping to read the code.
> 
> Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>