From: Penny Zheng <Penny.Zheng@arm.com>
This commit expands xen_mpumap_update/xen_mpumap_update_entry to include
destroying an existing entry.
We define a new helper "disable_mpu_region_from_index" to disable the MPU
region at a given index. If the index is within [0, 31], we can quickly
disable the MPU region through PRENR_EL2, which provides direct access to
the PRLAR_EL2.EN bits of the EL2 MPU regions.
Right now, we only support destroying a *WHOLE* MPU memory region;
part-region removal is not supported, as in the worst case it would
leave two fragments behind.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
Signed-off-by: Hari Limaye <hari.limaye@arm.com>
---
Changes from v1:
- Move check for part-region removal outside if condition
- Use normal printk
---
xen/arch/arm/include/asm/mpu.h | 2 +
xen/arch/arm/include/asm/mpu/cpregs.h | 4 ++
xen/arch/arm/mpu/mm.c | 69 ++++++++++++++++++++++++++-
3 files changed, 73 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
index 63560c613b..5053edaf63 100644
--- a/xen/arch/arm/include/asm/mpu.h
+++ b/xen/arch/arm/include/asm/mpu.h
@@ -23,6 +23,8 @@
#define NUM_MPU_REGIONS_MASK (NUM_MPU_REGIONS - 1)
#define MAX_MPU_REGION_NR NUM_MPU_REGIONS_MASK
+#define PRENR_MASK GENMASK(31, 0)
+
#ifndef __ASSEMBLY__
/*
diff --git a/xen/arch/arm/include/asm/mpu/cpregs.h b/xen/arch/arm/include/asm/mpu/cpregs.h
index bb15e02df6..9f3b32acd7 100644
--- a/xen/arch/arm/include/asm/mpu/cpregs.h
+++ b/xen/arch/arm/include/asm/mpu/cpregs.h
@@ -6,6 +6,9 @@
/* CP15 CR0: MPU Type Register */
#define HMPUIR p15,4,c0,c0,4
+/* CP15 CR6: Protection Region Enable Register */
+#define HPRENR p15,4,c6,c1,1
+
/* CP15 CR6: MPU Protection Region Base/Limit/Select Address Register */
#define HPRSELR p15,4,c6,c2,1
#define HPRBAR p15,4,c6,c3,0
@@ -82,6 +85,7 @@
/* Alphabetically... */
#define MPUIR_EL2 HMPUIR
#define PRBAR_EL2 HPRBAR
+#define PRENR_EL2 HPRENR
#define PRLAR_EL2 HPRLAR
#define PRSELR_EL2 HPRSELR
#endif /* CONFIG_ARM_32 */
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index dd54b66901..2e88c467d5 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -185,6 +185,42 @@ static int xen_mpumap_alloc_entry(uint8_t *idx)
return 0;
}
+/*
+ * Disable and remove an MPU region from the data structure and MPU registers.
+ *
+ * @param index Index of the MPU region to be disabled.
+ */
+static void disable_mpu_region_from_index(uint8_t index)
+{
+ ASSERT(spin_is_locked(&xen_mpumap_lock));
+ ASSERT(index != INVALID_REGION_IDX);
+
+ if ( !region_is_valid(&xen_mpumap[index]) )
+ {
+ printk(XENLOG_WARNING
+ "mpu: MPU memory region[%u] is already disabled\n", index);
+ return;
+ }
+
+ /* Zeroing the region will also zero the region enable */
+ memset(&xen_mpumap[index], 0, sizeof(pr_t));
+ clear_bit(index, xen_mpumap_mask);
+
+ /*
+ * Both Armv8-R AArch64 and AArch32 have direct access to the enable bit for
+ * MPU regions numbered from 0 to 31.
+ */
+ if ( index < 32 )
+ {
+ /* Clear respective bit */
+ uint64_t val = READ_SYSREG(PRENR_EL2) & (~(1UL << index));
+
+ WRITE_SYSREG(val, PRENR_EL2);
+ }
+ else
+ write_protection_region(&xen_mpumap[index], index);
+}
+
/*
* Update the entry in the MPU memory region mapping table (xen_mpumap) for the
* given memory range and flags, creating one if none exists.
@@ -203,11 +239,11 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
ASSERT(spin_is_locked(&xen_mpumap_lock));
rc = mpumap_contains_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
- if ( !(rc == MPUMAP_REGION_NOTFOUND) )
+ if ( rc < 0 )
return -EINVAL;
/* We are inserting a mapping => Create new region. */
- if ( flags & _PAGE_PRESENT )
+ if ( (flags & _PAGE_PRESENT) && (MPUMAP_REGION_NOTFOUND == rc) )
{
rc = xen_mpumap_alloc_entry(&idx);
if ( rc )
@@ -218,6 +254,20 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
write_protection_region(&xen_mpumap[idx], idx);
}
+ /*
+ * Currently, we only support destroying a *WHOLE* MPU memory region.
+ * Part-region removal is not supported as in the worst case it will leave
+ * two fragments behind.
+ */
+ if ( rc == MPUMAP_REGION_INCLUSIVE )
+ {
+ printk("mpu: part-region removal is not supported\n");
+ return -EINVAL;
+ }
+
+ if ( !(flags & _PAGE_PRESENT) && (rc >= MPUMAP_REGION_FOUND) )
+ disable_mpu_region_from_index(idx);
+
return 0;
}
@@ -251,6 +301,21 @@ int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
return rc;
}
+int destroy_xen_mappings(unsigned long s, unsigned long e)
+{
+ int rc;
+
+ ASSERT(IS_ALIGNED(s, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(e, PAGE_SIZE));
+ ASSERT(s <= e);
+
+ rc = xen_mpumap_update(virt_to_maddr(s), virt_to_maddr(e), 0);
+ if ( !rc )
+ context_sync_mpu();
+
+ return rc;
+}
+
int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
unsigned int flags)
{
--
2.34.1
On 02/07/2025 16:13, Hari Limaye wrote:
> From: Penny Zheng <Penny.Zheng@arm.com>
>
> This commit expands xen_mpumap_update/xen_mpumap_update_entry to include
> destroying an existing entry.
>
> We define a new helper "disable_mpu_region_from_index" to disable the MPU
> region at a given index. If the index is within [0, 31], we can quickly
> disable the MPU region through PRENR_EL2, which provides direct access to
> the PRLAR_EL2.EN bits of the EL2 MPU regions.
>
> Right now, we only support destroying a *WHOLE* MPU memory region;
> part-region removal is not supported, as in the worst case it would
> leave two fragments behind.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
> Signed-off-by: Hari Limaye <hari.limaye@arm.com>
> ---
> Changes from v1:
> - Move check for part-region removal outside if condition
> - Use normal printk
> ---
> xen/arch/arm/include/asm/mpu.h | 2 +
> xen/arch/arm/include/asm/mpu/cpregs.h | 4 ++
> xen/arch/arm/mpu/mm.c | 69 ++++++++++++++++++++++++++-
> 3 files changed, 73 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
> index 63560c613b..5053edaf63 100644
> --- a/xen/arch/arm/include/asm/mpu.h
> +++ b/xen/arch/arm/include/asm/mpu.h
> @@ -23,6 +23,8 @@
> #define NUM_MPU_REGIONS_MASK (NUM_MPU_REGIONS - 1)
> #define MAX_MPU_REGION_NR NUM_MPU_REGIONS_MASK
>
> +#define PRENR_MASK GENMASK(31, 0)
> +
> #ifndef __ASSEMBLY__
>
> /*
> diff --git a/xen/arch/arm/include/asm/mpu/cpregs.h b/xen/arch/arm/include/asm/mpu/cpregs.h
> index bb15e02df6..9f3b32acd7 100644
> --- a/xen/arch/arm/include/asm/mpu/cpregs.h
> +++ b/xen/arch/arm/include/asm/mpu/cpregs.h
> @@ -6,6 +6,9 @@
> /* CP15 CR0: MPU Type Register */
> #define HMPUIR p15,4,c0,c0,4
>
> +/* CP15 CR6: Protection Region Enable Register */
> +#define HPRENR p15,4,c6,c1,1
> +
> /* CP15 CR6: MPU Protection Region Base/Limit/Select Address Register */
> #define HPRSELR p15,4,c6,c2,1
> #define HPRBAR p15,4,c6,c3,0
> @@ -82,6 +85,7 @@
> /* Alphabetically... */
> #define MPUIR_EL2 HMPUIR
> #define PRBAR_EL2 HPRBAR
> +#define PRENR_EL2 HPRENR
> #define PRLAR_EL2 HPRLAR
> #define PRSELR_EL2 HPRSELR
> #endif /* CONFIG_ARM_32 */
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index dd54b66901..2e88c467d5 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -185,6 +185,42 @@ static int xen_mpumap_alloc_entry(uint8_t *idx)
> return 0;
> }
>
> +/*
> + * Disable and remove an MPU region from the data structure and MPU registers.
> + *
> + * @param index Index of the MPU region to be disabled.
> + */
> +static void disable_mpu_region_from_index(uint8_t index)
> +{
> + ASSERT(spin_is_locked(&xen_mpumap_lock));
> + ASSERT(index != INVALID_REGION_IDX);
> +
> + if ( !region_is_valid(&xen_mpumap[index]) )
> + {
> + printk(XENLOG_WARNING
> + "mpu: MPU memory region[%u] is already disabled\n", index);
> + return;
> + }
> +
> + /* Zeroing the region will also zero the region enable */
> + memset(&xen_mpumap[index], 0, sizeof(pr_t));
Is it ok that for a fast case (i.e. 0-31) our representation of prbar/prlar will
be different from the HW i.e. xen_mpumap[index] is 0 vs only .en bit of prlar
being 0 in HW?
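For concreteness, keeping xen_mpumap in sync with HW in the fast case could look
roughly like the sketch below (untested; it assumes the enable bit of pr_t is
reachable as prlar.reg.en, as used by the existing region_is_valid() helper):

    if ( index < 32 )
    {
        /* Clear only the enable bit, so xen_mpumap matches what HW sees. */
        xen_mpumap[index].prlar.reg.en = 0;
        WRITE_SYSREG(READ_SYSREG(PRENR_EL2) & ~(1UL << index), PRENR_EL2);
    }
    else
    {
        /* Slow path: zero the whole entry and write it back. */
        memset(&xen_mpumap[index], 0, sizeof(pr_t));
        write_protection_region(&xen_mpumap[index], index);
    }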
> + clear_bit(index, xen_mpumap_mask);
> +
> + /*
> + * Both Armv8-R AArch64 and AArch32 have direct access to the enable bit for
> + * MPU regions numbered from 0 to 31.
> + */
> + if ( index < 32 )
> + {
> + /* Clear respective bit */
> + uint64_t val = READ_SYSREG(PRENR_EL2) & (~(1UL << index));
On AArch32 the register is 32bit, so I think you should use register_t type.
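Something along these lines (untested sketch), so the width matches the register
size on each architecture:

    register_t val = READ_SYSREG(PRENR_EL2) & ~((register_t)1 << index);

    WRITE_SYSREG(val, PRENR_EL2);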
> +
> + WRITE_SYSREG(val, PRENR_EL2);
> + }
> + else
> + write_protection_region(&xen_mpumap[index], index);
> +}
> +
> /*
> * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
> * given memory range and flags, creating one if none exists.
> @@ -203,11 +239,11 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
> ASSERT(spin_is_locked(&xen_mpumap_lock));
>
> rc = mpumap_contains_region(xen_mpumap, max_mpu_regions, base, limit, &idx);
> - if ( !(rc == MPUMAP_REGION_NOTFOUND) )
> + if ( rc < 0 )
> return -EINVAL;
>
> /* We are inserting a mapping => Create new region. */
> - if ( flags & _PAGE_PRESENT )
> + if ( (flags & _PAGE_PRESENT) && (MPUMAP_REGION_NOTFOUND == rc) )
I think we need more sanity checking. What if flags has _PAGE_PRESENT but rc !=
MPUMAP_REGION_NOTFOUND, e.g. function called to modify existing entry? You will
silently return success. Maybe a similar function as for MMU is needed to
perform some sanity checks depending on the reason of the call?
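For illustration only, such a helper could look roughly like this (the name and
the exact set of checks are hypothetical, loosely mirroring what the MMU side
does in its entry checks):

    static bool xen_mpumap_check_entry(int rc, unsigned int flags)
    {
        /* Creating a mapping: the range must not overlap an existing region. */
        if ( (flags & _PAGE_PRESENT) && (rc != MPUMAP_REGION_NOTFOUND) )
        {
            printk("mpu: range is already (partially) mapped\n");
            return false;
        }

        /* Removing a mapping: the range must match a whole existing region. */
        if ( !(flags & _PAGE_PRESENT) && (rc != MPUMAP_REGION_FOUND) )
        {
            printk("mpu: range does not match a whole existing region\n");
            return false;
        }

        return true;
    }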
> {
> rc = xen_mpumap_alloc_entry(&idx);
> if ( rc )
> @@ -218,6 +254,20 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
> write_protection_region(&xen_mpumap[idx], idx);
> }
>
> + /*
> + * Currently, we only support destroying a *WHOLE* MPU memory region.
> + * Part-region removal is not supported as in the worst case it will leave
> + * two fragments behind.
> + */
> + if ( rc == MPUMAP_REGION_INCLUSIVE )
> + {
> + printk("mpu: part-region removal is not supported\n");
You mention removal but why do you limit this place to removal only? You don't
have any checks making sure that flags is 0 at this point.
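E.g. something like (untested):

    if ( (rc == MPUMAP_REGION_INCLUSIVE) && !(flags & _PAGE_PRESENT) )
    {
        printk("mpu: part-region removal is not supported\n");
        return -EINVAL;
    }

though the _PAGE_PRESENT case hitting MPUMAP_REGION_INCLUSIVE would then still
need to be rejected explicitly, as per the sanity-check point above.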
> + return -EINVAL;
> + }
> +
> + if ( !(flags & _PAGE_PRESENT) && (rc >= MPUMAP_REGION_FOUND) )
> + disable_mpu_region_from_index(idx);
> +
> return 0;
> }
>
> @@ -251,6 +301,21 @@ int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
> return rc;
> }
>
> +int destroy_xen_mappings(unsigned long s, unsigned long e)
> +{
> + int rc;
> +
> + ASSERT(IS_ALIGNED(s, PAGE_SIZE));
> + ASSERT(IS_ALIGNED(e, PAGE_SIZE));
> + ASSERT(s <= e);
> +
> + rc = xen_mpumap_update(virt_to_maddr(s), virt_to_maddr(e), 0);
> + if ( !rc )
> + context_sync_mpu();
> +
> + return rc;
> +}
> +
> int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns,
> unsigned int flags)
> {
~Michal
Hi Michal,

Thank you for reviewing the patch.

>> + /* Zeroing the region will also zero the region enable */
>> + memset(&xen_mpumap[index], 0, sizeof(pr_t));
> Is it ok that for a fast case (i.e. 0-31) our representation of prbar/prlar will
> be different from the HW i.e. xen_mpumap[index] is 0 vs only .en bit of prlar
> being 0 in HW?

I think this should not matter - there is nothing reading registers directly and
all the decisions are taken looking into xen_mpumap.

However, if you would prefer, we could alter the logic here to only clear the .en
bit in xen_mpumap[index] in the fast case, so that things remain consistent?

Alternatively we could always directly zero the entirety of the registers, but
then we would remove the use of the fast case entirely.

Many thanks,
Hari
On 11/07/2025 08:04, Hari Limaye wrote:
> Hi Michal,
>
> Thank you for reviewing the patch.
>
>>> + /* Zeroing the region will also zero the region enable */
>>> + memset(&xen_mpumap[index], 0, sizeof(pr_t));
>> Is it ok that for a fast case (i.e. 0-31) our representation of prbar/prlar will
>> be different from the HW i.e. xen_mpumap[index] is 0 vs only .en bit of prlar
>> being 0 in HW?
>
> I think this should not matter - there is nothing reading registers directly and
> all the decisions are taken looking into xen_mpumap.
>
> However, if you would prefer, we could alter the logic here to only clear the .en
> bit in xen_mpumap[index] in the fast case, so that things remain consistent?
>
> Alternatively we could always directly zero the entirety of the registers, but
> then we would remove the use of the fast case entirely.

I don't have a strong preference here. I think we can keep your current solution.

~Michal