[PATCH v2] xen: Replace arch_mfn_in_directmap() with arch_mfns_in_directmap()

Julien Grall posted 1 patch 2 years, 2 months ago
Test gitlab-ci failed
Patches applied successfully (tree, apply log)
git fetch https://gitlab.com/xen-project/patchew/xen tags/patchew/20220126155919.36706-1-julien@xen.org
xen/arch/arm/include/asm/arm32/mm.h | 2 +-
xen/arch/arm/include/asm/arm64/mm.h | 2 +-
xen/arch/x86/include/asm/mm.h       | 6 +++---
xen/common/page_alloc.c             | 4 ++--
4 files changed, 7 insertions(+), 7 deletions(-)
[PATCH v2] xen: Replace arch_mfn_in_directmap() with arch_mfns_in_directmap()
Posted by Julien Grall 2 years, 2 months ago
From: Julien Grall <jgrall@amazon.com>

The name of arch_mfn_in_directmap() suggests that it will check
whether the passed MFN is in the directmap.

However, the current callers are passing the next MFN and the
implementation will return true for up to one MFN past the directmap.

It would be more meaningful to test the exact MFN rather than the
next one.

That said, the current expectation is the memory will be direct-mapped
from 0 up to a given MFN. This may not be a valid assumption on all
the architectures.

For instance, on Arm32 only the xenheap will be direct-mapped.
This may not be allocated at the beginning of the RAM.

So take the opportunity to rework the parameters and pass the
number of pages we want to check. This also requires renaming
the helper to better match the implementation.

Note that the implementation of the helper on arm32 is left as-is
for now.

Signed-off-by: Julien Grall <jgrall@amazon.com>

----

Changes in v2:
    - Pass a region instead of the last MFN.
    - Rename the helper to arch_mfns_in_directmap()
    - This was https://lore.kernel.org/xen-devel/20211216152220.3317-1-julien@xen.org/

Looking at the history, it looks like the check in init_node_heap()
was <= and it was simply moved to a new helper without any adjustment
as part of c6fdc9696a "boot allocator: use arch helper for virt_to_mfn
on DIRECTMAP_VIRT region".
---
 xen/arch/arm/include/asm/arm32/mm.h | 2 +-
 xen/arch/arm/include/asm/arm64/mm.h | 2 +-
 xen/arch/x86/include/asm/mm.h       | 6 +++---
 xen/common/page_alloc.c             | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
index 68612499bf6c..6b039d9ceaa2 100644
--- a/xen/arch/arm/include/asm/arm32/mm.h
+++ b/xen/arch/arm/include/asm/arm32/mm.h
@@ -5,7 +5,7 @@
  * Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
  * For convenience always return false.
  */
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
 {
     return false;
 }
diff --git a/xen/arch/arm/include/asm/arm64/mm.h b/xen/arch/arm/include/asm/arm64/mm.h
index d0a3be7e15a4..aa2adac63189 100644
--- a/xen/arch/arm/include/asm/arm64/mm.h
+++ b/xen/arch/arm/include/asm/arm64/mm.h
@@ -5,7 +5,7 @@
  * On ARM64, all the RAM is currently direct mapped in Xen.
  * Hence return always true.
  */
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
 {
     return true;
 }
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 5dbcee869624..9b9de4c6bef7 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -635,13 +635,13 @@ void write_32bit_pse_identmap(uint32_t *l2);
 
 /*
  * x86 maps part of physical memory via the directmap region.
- * Return whether the input MFN falls in that range.
+ * Return whether the range of MFN falls in the directmap region.
  */
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
 {
     unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
 
-    return mfn <= (virt_to_mfn(eva - 1) + 1);
+    return (mfn + nr) <= (virt_to_mfn(eva - 1) + 1);
 }
 
 #endif /* __ASM_X86_MM_H__ */
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 38eea879c04a..f8749b0787a6 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -588,7 +588,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
         needed = 0;
     }
     else if ( *use_tail && nr >= needed &&
-              arch_mfn_in_directmap(mfn + nr) &&
+              arch_mfns_in_directmap(mfn + nr - needed, needed) &&
               (!xenheap_bits ||
                !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
@@ -597,7 +597,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              arch_mfn_in_directmap(mfn + needed) &&
+              arch_mfns_in_directmap(mfn, needed) &&
               (!xenheap_bits ||
                !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
-- 
2.32.0


Re: [PATCH v2] xen: Replace arch_mfn_in_directmap() with arch_mfns_in_directmap()
Posted by Jan Beulich 2 years, 2 months ago
On 26.01.2022 16:59, Julien Grall wrote:
> From: Julien Grall <jgrall@amazon.com>
> 
> The name of arch_mfn_in_directmap() suggests that it will check against
> that the passed MFN should be in the directmap.
> 
> However, the current callers are passing the next MFN and the
> implementation will return true for up to one MFN past the directmap.
> 
> It would be more meaningful to test the exact MFN rather than the
> next one.
> 
> That said, the current expectation is the memory will be direct-mapped
> from 0 up to a given MFN. This may not be a valid assumption on all
> the architectures.
> 
> For instance, on Arm32 only the xenheap that will be direct-mapped.
> This may not be allocated a the beginning of the RAM.
> 
> So take the opportunity to rework the parameters and pass the
> number of pages we want to check. This also requires to rename
> the helper to better match the implementation.
> 
> Note that the implementation of the helper on arm32 is left as-is
> for now.
> 
> Signed-off-by: Julien Grall <jgrall@amazon.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>


Re: [PATCH v2] xen: Replace arch_mfn_in_directmap() with arch_mfns_in_directmap()
Posted by Stefano Stabellini 2 years, 2 months ago
On Wed, 26 Jan 2022, Julien Grall wrote:
> From: Julien Grall <jgrall@amazon.com>
> 
> The name of arch_mfn_in_directmap() suggests that it will check against
> that the passed MFN should be in the directmap.
> 
> However, the current callers are passing the next MFN and the
> implementation will return true for up to one MFN past the directmap.
> 
> It would be more meaningful to test the exact MFN rather than the
> next one.
> 
> That said, the current expectation is the memory will be direct-mapped
> from 0 up to a given MFN. This may not be a valid assumption on all
> the architectures.
> 
> For instance, on Arm32 only the xenheap that will be direct-mapped.
> This may not be allocated a the beginning of the RAM.
> 
> So take the opportunity to rework the parameters and pass the
> number of pages we want to check. This also requires to rename
> the helper to better match the implementation.
> 
> Note that the implementation of the helper on arm32 is left as-is
> for now.
> 
> Signed-off-by: Julien Grall <jgrall@amazon.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>


> ----
> 
> Changes in v2:
>     - Pass a region instead the last MFN.
>     - Rename the helper to arch_mfns_in_directmap()
>     - This was https://lore.kernel.org/xen-devel/20211216152220.3317-1-julien@xen.org/
> 
> Looking at the history, it looks like the check in init_node_heap()
> was <= and it was simply moved to a new helper without any adjustment
> as part of c6fdc9696a "boot allocator: use arch helper for virt_to_mfn
> on DIRECTMAP_VIRT region".
> ---
>  xen/arch/arm/include/asm/arm32/mm.h | 2 +-
>  xen/arch/arm/include/asm/arm64/mm.h | 2 +-
>  xen/arch/x86/include/asm/mm.h       | 6 +++---
>  xen/common/page_alloc.c             | 4 ++--
>  4 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
> index 68612499bf6c..6b039d9ceaa2 100644
> --- a/xen/arch/arm/include/asm/arm32/mm.h
> +++ b/xen/arch/arm/include/asm/arm32/mm.h
> @@ -5,7 +5,7 @@
>   * Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
>   * For convenience always return false.
>   */
> -static inline bool arch_mfn_in_directmap(unsigned long mfn)
> +static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
>  {
>      return false;
>  }
> diff --git a/xen/arch/arm/include/asm/arm64/mm.h b/xen/arch/arm/include/asm/arm64/mm.h
> index d0a3be7e15a4..aa2adac63189 100644
> --- a/xen/arch/arm/include/asm/arm64/mm.h
> +++ b/xen/arch/arm/include/asm/arm64/mm.h
> @@ -5,7 +5,7 @@
>   * On ARM64, all the RAM is currently direct mapped in Xen.
>   * Hence return always true.
>   */
> -static inline bool arch_mfn_in_directmap(unsigned long mfn)
> +static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
>  {
>      return true;
>  }
> diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
> index 5dbcee869624..9b9de4c6bef7 100644
> --- a/xen/arch/x86/include/asm/mm.h
> +++ b/xen/arch/x86/include/asm/mm.h
> @@ -635,13 +635,13 @@ void write_32bit_pse_identmap(uint32_t *l2);
>  
>  /*
>   * x86 maps part of physical memory via the directmap region.
> - * Return whether the input MFN falls in that range.
> + * Return whether the range of MFN falls in the directmap region.
>   */
> -static inline bool arch_mfn_in_directmap(unsigned long mfn)
> +static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
>  {
>      unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
>  
> -    return mfn <= (virt_to_mfn(eva - 1) + 1);
> +    return (mfn + nr) <= (virt_to_mfn(eva - 1) + 1);
>  }
>  
>  #endif /* __ASM_X86_MM_H__ */
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 38eea879c04a..f8749b0787a6 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -588,7 +588,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>          needed = 0;
>      }
>      else if ( *use_tail && nr >= needed &&
> -              arch_mfn_in_directmap(mfn + nr) &&
> +              arch_mfns_in_directmap(mfn + nr - needed, needed) &&
>                (!xenheap_bits ||
>                 !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>      {
> @@ -597,7 +597,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>                        PAGE_SIZE - sizeof(**avail) * NR_ZONES;
>      }
>      else if ( nr >= needed &&
> -              arch_mfn_in_directmap(mfn + needed) &&
> +              arch_mfns_in_directmap(mfn, needed) &&
>                (!xenheap_bits ||
>                 !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>      {
> -- 
> 2.32.0
>