From: Penny Zheng <Penny.Zheng@arm.com>
Introduce map_pages_to_xen(), which is implemented using a new helper,
xen_mpumap_update(), responsible for updating the Xen MPU memory
mapping table (xen_mpumap), including creating a new entry, and updating
or destroying an existing one. It is equivalent to xen_pt_update in the MMU
code.
This commit only implements populating a new entry in the Xen MPU memory
mapping table (xen_mpumap).
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
Signed-off-by: Hari Limaye <hari.limaye@arm.com>
---
Changes from v1:
- Simplify if condition
- Use normal printk
- Use %# over 0x%
- Add same asserts as in Patch 4
---
xen/arch/arm/include/asm/mpu/mm.h | 12 ++++
xen/arch/arm/mpu/mm.c | 100 ++++++++++++++++++++++++++++++
2 files changed, 112 insertions(+)
diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
index 81e47b9d0b..101002f7d4 100644
--- a/xen/arch/arm/include/asm/mpu/mm.h
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -64,6 +64,7 @@ static inline void context_sync_mpu(void)
* The following API requires context_sync_mpu() after being used to modify MPU
* regions:
* - write_protection_region
+ * - xen_mpumap_update
*/
/* Reads the MPU region (into @pr_read) with index @sel from the HW */
@@ -72,6 +73,17 @@ void read_protection_region(pr_t *pr_read, uint8_t sel);
/* Writes the MPU region (from @pr_write) with index @sel to the HW */
void write_protection_region(const pr_t *pr_write, uint8_t sel);
+/*
+ * Maps an address range into the MPU data structure and updates the HW.
+ * Equivalent to xen_pt_update in an MMU system.
+ *
+ * @param base Base address of the range to map (inclusive).
+ * @param limit Limit address of the range to map (exclusive).
+ * @param flags Flags for the memory range to map.
+ * @return 0 on success, negative on error.
+ */
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags);
+
/*
* Creates a pr_t structure describing a protection region.
*
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 25e7f36c1e..dd54b66901 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -6,6 +6,7 @@
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sizes.h>
+#include <xen/spinlock.h>
#include <xen/types.h>
#include <asm/mpu.h>
#include <asm/mpu/mm.h>
@@ -29,6 +30,8 @@ DECLARE_BITMAP(xen_mpumap_mask, MAX_MPU_REGION_NR) \
/* EL2 Xen MPU memory region mapping table. */
pr_t __cacheline_aligned __section(".data") xen_mpumap[MAX_MPU_REGION_NR];
+static DEFINE_SPINLOCK(xen_mpumap_lock);
+
static void __init __maybe_unused build_assertions(void)
{
/*
@@ -162,6 +165,103 @@ int mpumap_contains_region(pr_t *table, uint8_t nr_regions, paddr_t base,
return MPUMAP_REGION_NOTFOUND;
}
+/*
+ * Allocate a new free EL2 MPU memory region, based on bitmap xen_mpumap_mask.
+ * @param idx Set to the index of the allocated EL2 MPU region on success.
+ * @return 0 on success, otherwise -ENOENT on failure.
+ */
+static int xen_mpumap_alloc_entry(uint8_t *idx)
+{
+ ASSERT(spin_is_locked(&xen_mpumap_lock));
+
+ *idx = find_first_zero_bit(xen_mpumap_mask, max_mpu_regions);
+ if ( *idx == max_mpu_regions )
+ {
+ printk(XENLOG_ERR "mpu: EL2 MPU memory region mapping pool exhausted\n");
+ return -ENOENT;
+ }
+
+ set_bit(*idx, xen_mpumap_mask);
+ return 0;
+}
+
+/*
+ * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
+ * given memory range and flags, creating one if none exists.
+ *
+ * @param base Base address (inclusive).
+ * @param limit Limit address (exclusive).
+ * @param flags Region attributes (a combination of PAGE_HYPERVISOR_XXX)
+ * @return 0 on success, otherwise negative on error.
+ */
+static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
+ unsigned int flags)
+{
+ uint8_t idx;
+ int rc;
+
+ ASSERT(spin_is_locked(&xen_mpumap_lock));
+
+ rc = mpumap_contains_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
+ if ( !(rc == MPUMAP_REGION_NOTFOUND) )
+ return -EINVAL;
+
+ /* We are inserting a mapping => Create new region. */
+ if ( flags & _PAGE_PRESENT )
+ {
+ rc = xen_mpumap_alloc_entry(&idx);
+ if ( rc )
+ return -ENOENT;
+
+ xen_mpumap[idx] = pr_of_addr(base, limit, flags);
+
+ write_protection_region(&xen_mpumap[idx], idx);
+ }
+
+ return 0;
+}
+
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
+{
+ int rc;
+
+ ASSERT(IS_ALIGNED(base, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(limit, PAGE_SIZE));
+ ASSERT(base <= limit);
+
+ if ( flags_has_rwx(flags) )
+ {
+ printk("Mappings should not be both Writeable and Executable\n");
+ return -EINVAL;
+ }
+
+ if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
+ {
+ printk("base address %#"PRIpaddr", or limit address %#"PRIpaddr" is not page aligned\n",
+ base, limit);
+ return -EINVAL;
+ }
+
+ spin_lock(&xen_mpumap_lock);
+
+ rc = xen_mpumap_update_entry(base, limit, flags);
+
+ spin_unlock(&xen_mpumap_lock);
+
+ return rc;
+}
+
+int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
+ unsigned int flags)
+{
+ int rc = xen_mpumap_update(mfn_to_maddr(mfn),
+ mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags);
+ if ( !rc )
+ context_sync_mpu();
+
+ return rc;
+}
+
void __init setup_mm(void)
{
BUG_ON("unimplemented");
--
2.34.1
On 02/07/2025 16:13, Hari Limaye wrote:
> From: Penny Zheng <Penny.Zheng@arm.com>
>
> Introduce map_pages_to_xen() that is implemented using a new helper,
> xen_mpumap_update(), which is responsible for updating Xen MPU memory
> mapping table(xen_mpumap), including creating a new entry, updating
> or destroying an existing one, it is equivalent to xen_pt_update in MMU.
>
> This commit only implements populating a new entry in Xen MPU memory mapping
> table(xen_mpumap).
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
> Signed-off-by: Hari Limaye <hari.limaye@arm.com>
> ---
> Changes from v1:
> - Simplify if condition
> - Use normal printk
> - Use %# over 0x%
> - Add same asserts as in Patch 4
> ---
> xen/arch/arm/include/asm/mpu/mm.h | 12 ++++
> xen/arch/arm/mpu/mm.c | 100 ++++++++++++++++++++++++++++++
> 2 files changed, 112 insertions(+)
>
> diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
> index 81e47b9d0b..101002f7d4 100644
> --- a/xen/arch/arm/include/asm/mpu/mm.h
> +++ b/xen/arch/arm/include/asm/mpu/mm.h
> @@ -64,6 +64,7 @@ static inline void context_sync_mpu(void)
> * The following API requires context_sync_mpu() after being used to modify MPU
> * regions:
> * - write_protection_region
> + * - xen_mpumap_update
> */
>
> /* Reads the MPU region (into @pr_read) with index @sel from the HW */
> @@ -72,6 +73,17 @@ void read_protection_region(pr_t *pr_read, uint8_t sel);
> /* Writes the MPU region (from @pr_write) with index @sel to the HW */
> void write_protection_region(const pr_t *pr_write, uint8_t sel);
>
> +/*
> + * Maps an address range into the MPU data structure and updates the HW.
> + * Equivalent to xen_pt_update in an MMU system.
> + *
> + * @param base Base address of the range to map (inclusive).
> + * @param limit Limit address of the range to map (exclusive).
> + * @param flags Flags for the memory range to map.
> + * @return 0 on success, negative on error.
> + */
> +int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags);
> +
> /*
> * Creates a pr_t structure describing a protection region.
> *
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 25e7f36c1e..dd54b66901 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -6,6 +6,7 @@
> #include <xen/lib.h>
> #include <xen/mm.h>
> #include <xen/sizes.h>
> +#include <xen/spinlock.h>
> #include <xen/types.h>
> #include <asm/mpu.h>
> #include <asm/mpu/mm.h>
> @@ -29,6 +30,8 @@ DECLARE_BITMAP(xen_mpumap_mask, MAX_MPU_REGION_NR) \
> /* EL2 Xen MPU memory region mapping table. */
> pr_t __cacheline_aligned __section(".data") xen_mpumap[MAX_MPU_REGION_NR];
>
> +static DEFINE_SPINLOCK(xen_mpumap_lock);
> +
> static void __init __maybe_unused build_assertions(void)
> {
> /*
> @@ -162,6 +165,103 @@ int mpumap_contains_region(pr_t *table, uint8_t nr_regions, paddr_t base,
> return MPUMAP_REGION_NOTFOUND;
> }
>
> +/*
> + * Allocate a new free EL2 MPU memory region, based on bitmap xen_mpumap_mask.
I would clarify that you allocate an entry in the bitmap, not a region.
> + * @param idx Set to the index of the allocated EL2 MPU region on success.
> + * @return 0 on success, otherwise -ENOENT on failure.
> + */
> +static int xen_mpumap_alloc_entry(uint8_t *idx)
> +{
> + ASSERT(spin_is_locked(&xen_mpumap_lock));
> +
> + *idx = find_first_zero_bit(xen_mpumap_mask, max_mpu_regions);
> + if ( *idx == max_mpu_regions )
> + {
> + printk(XENLOG_ERR "mpu: EL2 MPU memory region mapping pool exhausted\n");
> + return -ENOENT;
> + }
> +
> + set_bit(*idx, xen_mpumap_mask);
Empty line here please.
> + return 0;
> +}
> +
> +/*
> + * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
> + * given memory range and flags, creating one if none exists.
> + *
> + * @param base Base address (inclusive).
> + * @param limit Limit address (exclusive).
> + * @param flags Region attributes (a combination of PAGE_HYPERVISOR_XXX)
> + * @return 0 on success, otherwise negative on error.
> + */
> +static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
> + unsigned int flags)
> +{
> + uint8_t idx;
> + int rc;
> +
> + ASSERT(spin_is_locked(&xen_mpumap_lock));
> +
> + rc = mpumap_contains_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
> + if ( !(rc == MPUMAP_REGION_NOTFOUND) )
Why not ( rc != MPUMAP_REGION_NOTFOUND )?
> + return -EINVAL;
> +
> + /* We are inserting a mapping => Create new region. */
> + if ( flags & _PAGE_PRESENT )
Wouldn't it make more sense to have this check before calling
mpumap_contains_region()?
> + {
> + rc = xen_mpumap_alloc_entry(&idx);
> + if ( rc )
> + return -ENOENT;
> +
> + xen_mpumap[idx] = pr_of_addr(base, limit, flags);
> +
> + write_protection_region(&xen_mpumap[idx], idx);
> + }
> +
> + return 0;
> +}
> +
> +int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
> +{
> + int rc;
> +
> + ASSERT(IS_ALIGNED(base, PAGE_SIZE));
> + ASSERT(IS_ALIGNED(limit, PAGE_SIZE));
What's the purpose of these asserts given the exact same check a few lines below?
> + ASSERT(base <= limit);
Hmm, given that limit is exclusive and is later decremented to become inclusive,
if we pass base == limit, you will end up with limit smaller than base. Also,
if limit == 0, the decrement will underflow.
> +
> + if ( flags_has_rwx(flags) )
> + {
> + printk("Mappings should not be both Writeable and Executable\n");
> + return -EINVAL;
> + }
> +
> + if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
> + {
> + printk("base address %#"PRIpaddr", or limit address %#"PRIpaddr" is not page aligned\n",
> + base, limit);
> + return -EINVAL;
> + }
> +
> + spin_lock(&xen_mpumap_lock);
> +
> + rc = xen_mpumap_update_entry(base, limit, flags);
> +
> + spin_unlock(&xen_mpumap_lock);
> +
> + return rc;
> +}
> +
> +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
> + unsigned int flags)
> +{
> + int rc = xen_mpumap_update(mfn_to_maddr(mfn),
What do you expect to be passed as virt? I would expect maddr which could save
you the conversion here.
> + mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags);
> + if ( !rc )
> + context_sync_mpu();
Shouldn't this be called inside xen_mpumap_update() and within the locked section?
> +
> + return rc;
> +}
> +
> void __init setup_mm(void)
> {
> BUG_ON("unimplemented");
~Michal
Hi Michal,
>> +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
>> + unsigned int flags)
>> +{
>> + int rc = xen_mpumap_update(mfn_to_maddr(mfn),
> What do you expect to be passed as virt? I would expect maddr which could save
> you the conversion here.
I think you are correct here, and it should be fine to use virt directly. However, the one place in common code where I can see this path being exercised is `xen/common/vmap.c`, and there the virtual address is not the same as the machine address — but it looks like that is only used for LIVEPATCH, and we perhaps don’t care about it on MPU systems?
Cheers,
Hari
On 11/07/2025 17:10, Hari Limaye wrote:
> Hi Michal,
>
>>> +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
>>> + unsigned int flags)
>>> +{
>>> + int rc = xen_mpumap_update(mfn_to_maddr(mfn),
>> What do you expect to be passed as virt? I would expect maddr which could save
>> you the conversion here.
>
> I think you should be correct here and it should be fine to use virt directly. However, the one place in common code that I can see xen_mpumap_update is called is in `xen/common/vmap.c`; and here the virtual address is not the same as the machine address, but it looks like this is only used for LIVEPATCH and we perhaps don’t care about this on MPU systems?
AFAICT you will not enable VMAP on MPU.
~Michal
© 2016 - 2025 Red Hat, Inc.