From: Penny Zheng <Penny.Zheng@arm.com>

Introduce map_pages_to_xen(), implemented using a new helper,
xen_mpumap_update(), which is responsible for updating the Xen MPU memory
mapping table (xen_mpumap): creating a new entry, and updating or destroying
an existing one. It is the equivalent of xen_pt_update() in the MMU code.

This commit only implements populating a new entry in the Xen MPU memory
mapping table (xen_mpumap).
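
For illustration only (not part of this patch), a hypothetical caller mapping a
single page of normal read/write hypervisor memory could look as follows,
assuming the usual maddr_to_mfn() helper and PAGE_HYPERVISOR attributes are
available in the MPU build:

    /* Illustrative helper: map one page at a page-aligned physical address. */
    static int map_one_page(paddr_t addr)
    {
        return map_pages_to_xen(addr, maddr_to_mfn(addr), 1, PAGE_HYPERVISOR);
    }

Note that there is no address translation on an MPU system: the mapped range is
derived from the mfn and nr_mfns arguments, and the virt parameter is
effectively unused by this implementation.
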
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
Signed-off-by: Hari Limaye <hari.limaye@arm.com>
---
 xen/arch/arm/include/asm/mpu/mm.h | 12 ++++
 xen/arch/arm/mpu/mm.c             | 96 +++++++++++++++++++++++++++++++
 2 files changed, 108 insertions(+)
diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
index a0f0d86d4a..f0f41db210 100644
--- a/xen/arch/arm/include/asm/mpu/mm.h
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -64,6 +64,7 @@ static inline void context_sync_mpu(void)
  * The following API requires context_sync_mpu() after being used to modify MPU
  * regions:
  *  - write_protection_region
+ *  - xen_mpumap_update
  */
 
 /* Reads the MPU region (into @pr_read) with index @sel from the HW */
@@ -72,6 +73,17 @@ void read_protection_region(pr_t *pr_read, uint8_t sel);
 /* Writes the MPU region (from @pr_write) with index @sel to the HW */
 void write_protection_region(const pr_t *pr_write, uint8_t sel);
 
+/*
+ * Maps an address range into the MPU data structure and updates the HW.
+ * Equivalent to xen_pt_update in an MMU system.
+ *
+ * @param base      Base address of the range to map (inclusive).
+ * @param limit     Limit address of the range to map (exclusive).
+ * @param flags     Flags for the memory range to map.
+ * @return          0 on success, negative on error.
+ */
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags);
+
 /*
  * Creates a pr_t structure describing a protection region.
  *
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 15197339b1..1de28d2120 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -6,6 +6,7 @@
 #include <xen/lib.h>
 #include <xen/mm.h>
 #include <xen/sizes.h>
+#include <xen/spinlock.h>
 #include <xen/types.h>
 #include <asm/mpu.h>
 #include <asm/mpu/mm.h>
@@ -41,6 +42,8 @@ DECLARE_BITMAP(xen_mpumap_mask, MAX_MPU_REGION_NR) \
 /* EL2 Xen MPU memory region mapping table. */
 pr_t __cacheline_aligned __section(".data") xen_mpumap[MAX_MPU_REGION_NR];
 
+static DEFINE_SPINLOCK(xen_mpumap_lock);
+
 static void __init __maybe_unused build_assertions(void)
 {
     /*
@@ -176,6 +179,99 @@ int mpumap_contain_region(pr_t *table, uint8_t nr_regions, paddr_t base,
     return MPUMAP_REGION_NOTFOUND;
 }
 
+/*
+ * Allocate a new free EL2 MPU memory region, based on bitmap xen_mpumap_mask.
+ * @param idx   Set to the index of the allocated EL2 MPU region on success.
+ * @return      0 on success, otherwise -ENOENT on failure.
+ */
+static int xen_mpumap_alloc_entry(uint8_t *idx)
+{
+    ASSERT(spin_is_locked(&xen_mpumap_lock));
+
+    *idx = find_first_zero_bit(xen_mpumap_mask, max_mpu_regions);
+    if ( *idx == max_mpu_regions )
+    {
+        printk(XENLOG_ERR "mpu: EL2 MPU memory region mapping pool exhausted\n");
+        return -ENOENT;
+    }
+
+    set_bit(*idx, xen_mpumap_mask);
+    return 0;
+}
+
+/*
+ * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
+ * given memory range and flags, creating one if none exists.
+ *
+ * @param base  Base address (inclusive).
+ * @param limit Limit address (exclusive).
+ * @param flags Region attributes (a combination of PAGE_HYPERVISOR_XXX)
+ * @return      0 on success, otherwise negative on error.
+ */
+static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
+                                   unsigned int flags)
+{
+    uint8_t idx;
+    int rc;
+
+    ASSERT(spin_is_locked(&xen_mpumap_lock));
+
+    rc = mpumap_contain_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
+    if ( (rc < 0) || (rc > MPUMAP_REGION_NOTFOUND) )
+        return -EINVAL;
+
+    /* We are inserting a mapping => Create new region. */
+    if ( flags & _PAGE_PRESENT )
+    {
+        rc = xen_mpumap_alloc_entry(&idx);
+        if ( rc )
+            return -ENOENT;
+
+        xen_mpumap[idx] = pr_of_addr(base, limit, flags);
+
+        write_protection_region(&xen_mpumap[idx], idx);
+    }
+
+    return 0;
+}
+
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
+{
+    int rc;
+
+    if ( flags_has_rwx(flags) )
+    {
+        region_printk("Mappings should not be both Writeable and Executable\n");
+        return -EINVAL;
+    }
+
+    if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
+    {
+        region_printk("base address 0x%"PRIpaddr", or limit address 0x%"PRIpaddr" is not page aligned\n",
+                      base, limit);
+        return -EINVAL;
+    }
+
+    spin_lock(&xen_mpumap_lock);
+
+    rc = xen_mpumap_update_entry(base, limit, flags);
+
+    spin_unlock(&xen_mpumap_lock);
+
+    return rc;
+}
+
+int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
+                     unsigned int flags)
+{
+    int rc = xen_mpumap_update(mfn_to_maddr(mfn),
+                               mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags);
+    if ( !rc )
+        context_sync_mpu();
+
+    return rc;
+}
+
 void __init setup_mm(void)
 {
     BUG_ON("unimplemented");
-- 
2.34.1

Hi Hari,
Some questions.
On 20/06/2025 10:49, Hari Limaye wrote:
> From: Penny Zheng <Penny.Zheng@arm.com>
>
> Introduce map_pages_to_xen() that is implemented using a new helper,
> xen_mpumap_update(), which is responsible for updating Xen MPU memory
> mapping table(xen_mpumap), including creating a new entry, updating
> or destroying an existing one, it is equivalent to xen_pt_update in MMU.
>
> This commit only implements populating a new entry in Xen MPU memory mapping
> table(xen_mpumap).
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
> Signed-off-by: Hari Limaye <hari.limaye@arm.com>
> ---
>   xen/arch/arm/include/asm/mpu/mm.h | 12 ++++
>   xen/arch/arm/mpu/mm.c             | 96 +++++++++++++++++++++++++++++++
>   2 files changed, 108 insertions(+)
>
> diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
> index a0f0d86d4a..f0f41db210 100644
> --- a/xen/arch/arm/include/asm/mpu/mm.h
> +++ b/xen/arch/arm/include/asm/mpu/mm.h
> @@ -64,6 +64,7 @@ static inline void context_sync_mpu(void)
>    * The following API requires context_sync_mpu() after being used to modify MPU
>    * regions:
>    *  - write_protection_region
> + *  - xen_mpumap_update
>    */
>
>   /* Reads the MPU region (into @pr_read) with index @sel from the HW */
> @@ -72,6 +73,17 @@ void read_protection_region(pr_t *pr_read, uint8_t sel);
>   /* Writes the MPU region (from @pr_write) with index @sel to the HW */
>   void write_protection_region(const pr_t *pr_write, uint8_t sel);
>
> +/*
> + * Maps an address range into the MPU data structure and updates the HW.
> + * Equivalent to xen_pt_update in an MMU system.
> + *
> + * @param base      Base address of the range to map (inclusive).
> + * @param limit     Limit address of the range to map (exclusive).
> + * @param flags     Flags for the memory range to map.
> + * @return          0 on success, negative on error.
> + */
> +int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags);
> +
>   /*
>    * Creates a pr_t structure describing a protection region.
>    *
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 15197339b1..1de28d2120 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -6,6 +6,7 @@
>   #include <xen/lib.h>
>   #include <xen/mm.h>
>   #include <xen/sizes.h>
> +#include <xen/spinlock.h>
>   #include <xen/types.h>
>   #include <asm/mpu.h>
>   #include <asm/mpu/mm.h>
> @@ -41,6 +42,8 @@ DECLARE_BITMAP(xen_mpumap_mask, MAX_MPU_REGION_NR) \
>   /* EL2 Xen MPU memory region mapping table. */
>   pr_t __cacheline_aligned __section(".data") xen_mpumap[MAX_MPU_REGION_NR];
>
> +static DEFINE_SPINLOCK(xen_mpumap_lock);
> +
>   static void __init __maybe_unused build_assertions(void)
>   {
>       /*
> @@ -176,6 +179,99 @@ int mpumap_contain_region(pr_t *table, uint8_t nr_regions, paddr_t base,
>       return MPUMAP_REGION_NOTFOUND;
>   }
>
> +/*
> + * Allocate a new free EL2 MPU memory region, based on bitmap xen_mpumap_mask.
> + * @param idx   Set to the index of the allocated EL2 MPU region on success.
> + * @return      0 on success, otherwise -ENOENT on failure.
> + */
> +static int xen_mpumap_alloc_entry(uint8_t *idx)
> +{
> +    ASSERT(spin_is_locked(&xen_mpumap_lock));
> +
> +    *idx = find_first_zero_bit(xen_mpumap_mask, max_mpu_regions);
> +    if ( *idx == max_mpu_regions )
> +    {
> +        printk(XENLOG_ERR "mpu: EL2 MPU memory region mapping pool exhausted\n");
> +        return -ENOENT;
> +    }
> +
> +    set_bit(*idx, xen_mpumap_mask);
> +    return 0;
> +}
> +
> +/*
> + * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
> + * given memory range and flags, creating one if none exists.
> + *
> + * @param base  Base address (inclusive).
> + * @param limit Limit address (exclusive).
> + * @param flags Region attributes (a combination of PAGE_HYPERVISOR_XXX)
> + * @return      0 on success, otherwise negative on error.
> + */
> +static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
> +                                   unsigned int flags)
> +{
> +    uint8_t idx;
> +    int rc;
> +
> +    ASSERT(spin_is_locked(&xen_mpumap_lock));
> +
> +    rc = mpumap_contain_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
> +    if ( (rc < 0) || (rc > MPUMAP_REGION_NOTFOUND) )
if ( !(rc == MPUMAP_REGION_NOTFOUND) ) <<-- Does it read better?
> +        return -EINVAL;
> +
> +    /* We are inserting a mapping => Create new region. */
> +    if ( flags & _PAGE_PRESENT )
Why do we need to check for this flag? And where do we set it?
If we have reached here, doesn't it mean that the region does not exist, so we
need to create one?
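
Just to sketch what I have in mind (purely illustrative, and assuming the check
really is redundant once no containing region has been found), the body could
reduce to:

    /* No containing region was found above, so unconditionally create one. */
    rc = xen_mpumap_alloc_entry(&idx);
    if ( rc )
        return rc;

    xen_mpumap[idx] = pr_of_addr(base, limit, flags);
    write_protection_region(&xen_mpumap[idx], idx);

    return 0;
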
> +    {
> +        rc = xen_mpumap_alloc_entry(&idx);
> +        if ( rc )
> +            return -ENOENT;
> +
> +        xen_mpumap[idx] = pr_of_addr(base, limit, flags);
> +
> +        write_protection_region(&xen_mpumap[idx], idx);
> +    }
> +
> +    return 0;
> +}
> +
> +int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
> +{
> +    int rc;
> +
> +    if ( flags_has_rwx(flags) )
> +    {
> +        region_printk("Mappings should not be both Writeable and Executable\n");
> +        return -EINVAL;
> +    }
> +
> +    if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
> +    {
> +        region_printk("base address 0x%"PRIpaddr", or limit address 0x%"PRIpaddr" is not page aligned\n",
> +                      base, limit);
> +        return -EINVAL;
> +    }
> +
> +    spin_lock(&xen_mpumap_lock);
> +
> +    rc = xen_mpumap_update_entry(base, limit, flags);
> +
> +    spin_unlock(&xen_mpumap_lock);
> +
> +    return rc;
> +}
> +
> +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
> +                     unsigned int flags)
> +{
> +    int rc = xen_mpumap_update(mfn_to_maddr(mfn),
> +                               mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags);
> +    if ( !rc )
> +        context_sync_mpu();
> +
> +    return rc;
> +}
> +
>   void __init setup_mm(void)
>   {
>       BUG_ON("unimplemented");
The rest looks good to me.
- Ayan
> --
> 2.34.1
>