Define prepare_selector(), read_protection_region() and
write_protection_region() for arm32. Also, define
GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 254.
Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
Enable pr_of_addr() for arm32.
Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
---
Changes from :-
v1 - 1. Enable write_protection_region() for aarch32.
v2 - 1. Enable access to protection regions from 0 - 255.
xen/arch/arm/include/asm/mpu.h | 2 -
xen/arch/arm/mpu/arm32/Makefile | 1 +
xen/arch/arm/mpu/arm32/mm.c | 165 ++++++++++++++++++++++++++++++++
xen/arch/arm/mpu/mm.c | 2 -
4 files changed, 166 insertions(+), 4 deletions(-)
create mode 100644 xen/arch/arm/mpu/arm32/mm.c
diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
index 8f06ddac0f..63560c613b 100644
--- a/xen/arch/arm/include/asm/mpu.h
+++ b/xen/arch/arm/include/asm/mpu.h
@@ -25,7 +25,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_ARM_64
/*
* Set base address of MPU protection region.
*
@@ -85,7 +84,6 @@ static inline bool region_is_valid(const pr_t *pr)
{
return pr->prlar.reg.en;
}
-#endif /* CONFIG_ARM_64 */
#endif /* __ASSEMBLY__ */
diff --git a/xen/arch/arm/mpu/arm32/Makefile b/xen/arch/arm/mpu/arm32/Makefile
index e15ce2f7be..3da872322e 100644
--- a/xen/arch/arm/mpu/arm32/Makefile
+++ b/xen/arch/arm/mpu/arm32/Makefile
@@ -1 +1,2 @@
obj-y += domain-page.o
+obj-y += mm.o
diff --git a/xen/arch/arm/mpu/arm32/mm.c b/xen/arch/arm/mpu/arm32/mm.c
new file mode 100644
index 0000000000..5d3cb6dff7
--- /dev/null
+++ b/xen/arch/arm/mpu/arm32/mm.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <xen/bug.h>
+#include <xen/types.h>
+#include <asm/mpu.h>
+#include <asm/sysregs.h>
+#include <asm/system.h>
+
+#define PRBAR_EL2_(n) HPRBAR##n
+#define PRLAR_EL2_(n) HPRLAR##n
+
+#define GENERATE_WRITE_PR_REG_CASE(num, pr) \
+ case num: \
+ { \
+ WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2_(num)); \
+ WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2_(num)); \
+ break; \
+ }
+
+#define GENERATE_WRITE_PR_REG_OTHERS(num, pr) \
+ case num: \
+ { \
+ WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, HPRBAR); \
+ WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, HPRLAR); \
+ break; \
+ }
+
+#define GENERATE_READ_PR_REG_CASE(num, pr) \
+ case num: \
+ { \
+ pr->prbar.bits = READ_SYSREG(PRBAR_EL2_(num)); \
+ pr->prlar.bits = READ_SYSREG(PRLAR_EL2_(num)); \
+ break; \
+ }
+
+#define GENERATE_READ_PR_REG_OTHERS(num, pr) \
+ case num: \
+ { \
+ pr->prbar.bits = READ_SYSREG(HPRBAR); \
+ pr->prlar.bits = READ_SYSREG(HPRLAR); \
+ break; \
+ }
+
+/*
+ * Armv8-R supports direct access and indirect access to the MPU regions through
+ * registers:
+ * - indirect access involves changing the MPU region selector, issuing an isb
+ * barrier and accessing the selected region through specific registers
+ * - direct access involves accessing specific registers that point to
+ * specific MPU regions, without changing the selector, avoiding the use of
+ * a barrier.
+ * For Arm32 the HPR{B,L}AR<n> registers (for n=0..31) are used for direct access
+ * to the first 32 MPU regions.
+ * For MPU regions numbered 32..254, one needs to set the region number in
+ * PRSELR_EL2 (HPRSELR), followed by configuring HPR{B,L}AR.
+ */
+inline void prepare_selector(uint8_t *sel)
+{
+ uint8_t cur_sel = *sel;
+
+ if ( cur_sel > 0x1FU )
+ {
+ WRITE_SYSREG(cur_sel, PRSELR_EL2);
+ isb();
+ }
+}
+
+void read_protection_region(pr_t *pr_read, uint8_t sel)
+{
+ prepare_selector(&sel);
+
+ switch ( sel )
+ {
+ GENERATE_READ_PR_REG_CASE(0, pr_read);
+ GENERATE_READ_PR_REG_CASE(1, pr_read);
+ GENERATE_READ_PR_REG_CASE(2, pr_read);
+ GENERATE_READ_PR_REG_CASE(3, pr_read);
+ GENERATE_READ_PR_REG_CASE(4, pr_read);
+ GENERATE_READ_PR_REG_CASE(5, pr_read);
+ GENERATE_READ_PR_REG_CASE(6, pr_read);
+ GENERATE_READ_PR_REG_CASE(7, pr_read);
+ GENERATE_READ_PR_REG_CASE(8, pr_read);
+ GENERATE_READ_PR_REG_CASE(9, pr_read);
+ GENERATE_READ_PR_REG_CASE(10, pr_read);
+ GENERATE_READ_PR_REG_CASE(11, pr_read);
+ GENERATE_READ_PR_REG_CASE(12, pr_read);
+ GENERATE_READ_PR_REG_CASE(13, pr_read);
+ GENERATE_READ_PR_REG_CASE(14, pr_read);
+ GENERATE_READ_PR_REG_CASE(15, pr_read);
+ GENERATE_READ_PR_REG_CASE(16, pr_read);
+ GENERATE_READ_PR_REG_CASE(17, pr_read);
+ GENERATE_READ_PR_REG_CASE(18, pr_read);
+ GENERATE_READ_PR_REG_CASE(19, pr_read);
+ GENERATE_READ_PR_REG_CASE(20, pr_read);
+ GENERATE_READ_PR_REG_CASE(21, pr_read);
+ GENERATE_READ_PR_REG_CASE(22, pr_read);
+ GENERATE_READ_PR_REG_CASE(23, pr_read);
+ GENERATE_READ_PR_REG_CASE(24, pr_read);
+ GENERATE_READ_PR_REG_CASE(25, pr_read);
+ GENERATE_READ_PR_REG_CASE(26, pr_read);
+ GENERATE_READ_PR_REG_CASE(27, pr_read);
+ GENERATE_READ_PR_REG_CASE(28, pr_read);
+ GENERATE_READ_PR_REG_CASE(29, pr_read);
+ GENERATE_READ_PR_REG_CASE(30, pr_read);
+ GENERATE_READ_PR_REG_CASE(31, pr_read);
+ GENERATE_READ_PR_REG_OTHERS(32 ... 254, pr_read);
+ default:
+ BUG(); /* Can't happen */
+ break;
+ }
+}
+
+void write_protection_region(const pr_t *pr_write, uint8_t sel)
+{
+ prepare_selector(&sel);
+
+ switch ( sel )
+ {
+ GENERATE_WRITE_PR_REG_CASE(0, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(1, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(2, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(3, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(4, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(5, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(6, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(7, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(8, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(9, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(10, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(11, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(12, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(13, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(14, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(15, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(16, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(17, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(18, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(19, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(20, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(21, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(22, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(23, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(24, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(25, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(26, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(27, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(28, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(29, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(30, pr_write);
+ GENERATE_WRITE_PR_REG_CASE(31, pr_write);
+ GENERATE_WRITE_PR_REG_OTHERS(32 ... 254, pr_write);
+ default:
+ BUG(); /* Can't happen */
+ break;
+ }
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 7ab68fc8c7..ccfb37a67b 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -39,7 +39,6 @@ static void __init __maybe_unused build_assertions(void)
BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
}
-#ifdef CONFIG_ARM_64
pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
{
unsigned int attr_idx = PAGE_AI_MASK(flags);
@@ -110,7 +109,6 @@ pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
return region;
}
-#endif /* CONFIG_ARM_64 */
void __init setup_mm(void)
{
--
2.25.1
Hi Ayan,
> On 11 Jun 2025, at 15:35, Ayan Kumar Halder <ayan.kumar.halder@amd.com> wrote:
>
> Define prepare_selector(), read_protection_region() and
> write_protection_region() for arm32. Also, define
> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>
> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
> Enable pr_of_addr() for arm32.
>
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
> ---
> Changes from :-
>
> v1 - 1. Enable write_protection_region() for aarch32.
>
> v2 - 1. Enable access to protection regions from 0 - 255.
>
> xen/arch/arm/include/asm/mpu.h | 2 -
> xen/arch/arm/mpu/arm32/Makefile | 1 +
> xen/arch/arm/mpu/arm32/mm.c | 165 ++++++++++++++++++++++++++++++++
> xen/arch/arm/mpu/mm.c | 2 -
> 4 files changed, 166 insertions(+), 4 deletions(-)
> create mode 100644 xen/arch/arm/mpu/arm32/mm.c
>
> diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
> index 8f06ddac0f..63560c613b 100644
> --- a/xen/arch/arm/include/asm/mpu.h
> +++ b/xen/arch/arm/include/asm/mpu.h
> @@ -25,7 +25,6 @@
>
> #ifndef __ASSEMBLY__
>
> -#ifdef CONFIG_ARM_64
> /*
> * Set base address of MPU protection region.
> *
> @@ -85,7 +84,6 @@ static inline bool region_is_valid(const pr_t *pr)
> {
> return pr->prlar.reg.en;
> }
> -#endif /* CONFIG_ARM_64 */
>
> #endif /* __ASSEMBLY__ */
>
> diff --git a/xen/arch/arm/mpu/arm32/Makefile b/xen/arch/arm/mpu/arm32/Makefile
> index e15ce2f7be..3da872322e 100644
> --- a/xen/arch/arm/mpu/arm32/Makefile
> +++ b/xen/arch/arm/mpu/arm32/Makefile
> @@ -1 +1,2 @@
> obj-y += domain-page.o
> +obj-y += mm.o
> diff --git a/xen/arch/arm/mpu/arm32/mm.c b/xen/arch/arm/mpu/arm32/mm.c
> new file mode 100644
> index 0000000000..5d3cb6dff7
> --- /dev/null
> +++ b/xen/arch/arm/mpu/arm32/mm.c
> @@ -0,0 +1,165 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <xen/bug.h>
> +#include <xen/types.h>
> +#include <asm/mpu.h>
> +#include <asm/sysregs.h>
> +#include <asm/system.h>
> +
> +#define PRBAR_EL2_(n) HPRBAR##n
> +#define PRLAR_EL2_(n) HPRLAR##n
> +
> +#define GENERATE_WRITE_PR_REG_CASE(num, pr) \
> + case num: \
> + { \
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2_(num)); \
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2_(num)); \
I was also thinking that in this file now you can use directly HPR{B,L}AR<N> instead of PR{B,L}AR<N>_EL2
Cheers,
Luca
Hi Ayan,
> On 11 Jun 2025, at 15:35, Ayan Kumar Halder <ayan.kumar.halder@amd.com> wrote:
>
> Define prepare_selector(), read_protection_region() and
> write_protection_region() for arm32. Also, define
> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>
> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
> Enable pr_of_addr() for arm32.
>
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
> ---
> Changes from :-
>
> v1 - 1. Enable write_protection_region() for aarch32.
>
> v2 - 1. Enable access to protection regions from 0 - 255.
>
> xen/arch/arm/include/asm/mpu.h | 2 -
> xen/arch/arm/mpu/arm32/Makefile | 1 +
> xen/arch/arm/mpu/arm32/mm.c | 165 ++++++++++++++++++++++++++++++++
> xen/arch/arm/mpu/mm.c | 2 -
> 4 files changed, 166 insertions(+), 4 deletions(-)
> create mode 100644 xen/arch/arm/mpu/arm32/mm.c
>
> diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
> index 8f06ddac0f..63560c613b 100644
> --- a/xen/arch/arm/include/asm/mpu.h
> +++ b/xen/arch/arm/include/asm/mpu.h
> @@ -25,7 +25,6 @@
>
> #ifndef __ASSEMBLY__
>
> -#ifdef CONFIG_ARM_64
> /*
> * Set base address of MPU protection region.
> *
> @@ -85,7 +84,6 @@ static inline bool region_is_valid(const pr_t *pr)
> {
> return pr->prlar.reg.en;
> }
> -#endif /* CONFIG_ARM_64 */
>
> #endif /* __ASSEMBLY__ */
>
> diff --git a/xen/arch/arm/mpu/arm32/Makefile b/xen/arch/arm/mpu/arm32/Makefile
> index e15ce2f7be..3da872322e 100644
> --- a/xen/arch/arm/mpu/arm32/Makefile
> +++ b/xen/arch/arm/mpu/arm32/Makefile
> @@ -1 +1,2 @@
> obj-y += domain-page.o
> +obj-y += mm.o
> diff --git a/xen/arch/arm/mpu/arm32/mm.c b/xen/arch/arm/mpu/arm32/mm.c
> new file mode 100644
> index 0000000000..5d3cb6dff7
> --- /dev/null
> +++ b/xen/arch/arm/mpu/arm32/mm.c
> @@ -0,0 +1,165 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <xen/bug.h>
> +#include <xen/types.h>
> +#include <asm/mpu.h>
> +#include <asm/sysregs.h>
> +#include <asm/system.h>
> +
> +#define PRBAR_EL2_(n) HPRBAR##n
> +#define PRLAR_EL2_(n) HPRLAR##n
> +
> +#define GENERATE_WRITE_PR_REG_CASE(num, pr) \
> + case num: \
> + { \
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2_(num)); \
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2_(num)); \
Maybe you don’t need '& ~MPU_REGION_RES0’ since your MPU_REGION_RES0 is zero, here and below
> + break; \
> + }
> +
> +#define GENERATE_WRITE_PR_REG_OTHERS(num, pr) \
> + case num: \
> + { \
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, HPRBAR); \
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, HPRLAR); \
> + break; \
> + }
> +
> +#define GENERATE_READ_PR_REG_CASE(num, pr) \
> + case num: \
> + { \
> + pr->prbar.bits = READ_SYSREG(PRBAR_EL2_(num)); \
> + pr->prlar.bits = READ_SYSREG(PRLAR_EL2_(num)); \
> + break; \
> + }
> +
> +#define GENERATE_READ_PR_REG_OTHERS(num, pr) \
> + case num: \
> + { \
> + pr->prbar.bits = READ_SYSREG(HPRBAR); \
> + pr->prlar.bits = READ_SYSREG(HPRLAR); \
> + break; \
> + }
> +
> +/*
> + * Armv8-R supports direct access and indirect access to the MPU regions through
> + * registers:
> + * - indirect access involves changing the MPU region selector, issuing an isb
> + * barrier and accessing the selected region through specific registers
> + * - direct access involves accessing specific registers that point to
> + * specific MPU regions, without changing the selector, avoiding the use of
> + * a barrier.
> + * For Arm32 the PR{B,L}AR<n>_ELx (for n=0..31) are used for direct access to the
Arm32 have PR{B,L}AR but it’s not EL2, you mean HPRBAR here and below
> + * first 32 MPU regions.
> + * For MPU region numbered 32..255, one need to set the region number in PRSELR_ELx,
32..254
and also maybe you can use HPRSELR instead of PRSELR_ELx
> + * followed by configuring PR{B,L}AR_ELx.
> + */
> +inline void prepare_selector(uint8_t *sel)
> +{
> + uint8_t cur_sel = *sel;
> +
> + if ( cur_sel > 0x1FU )
can we use 31 here? instead of the hex? It would be quicker to be read by a developer.
> + {
> + WRITE_SYSREG(cur_sel, PRSELR_EL2);
> + isb();
> + }
> +}
> +
> +void read_protection_region(pr_t *pr_read, uint8_t sel)
> +{
> + prepare_selector(&sel);
> +
> + switch ( sel )
> + {
> + GENERATE_READ_PR_REG_CASE(0, pr_read);
> + GENERATE_READ_PR_REG_CASE(1, pr_read);
> + GENERATE_READ_PR_REG_CASE(2, pr_read);
> + GENERATE_READ_PR_REG_CASE(3, pr_read);
> + GENERATE_READ_PR_REG_CASE(4, pr_read);
> + GENERATE_READ_PR_REG_CASE(5, pr_read);
> + GENERATE_READ_PR_REG_CASE(6, pr_read);
> + GENERATE_READ_PR_REG_CASE(7, pr_read);
> + GENERATE_READ_PR_REG_CASE(8, pr_read);
> + GENERATE_READ_PR_REG_CASE(9, pr_read);
> + GENERATE_READ_PR_REG_CASE(10, pr_read);
> + GENERATE_READ_PR_REG_CASE(11, pr_read);
> + GENERATE_READ_PR_REG_CASE(12, pr_read);
> + GENERATE_READ_PR_REG_CASE(13, pr_read);
> + GENERATE_READ_PR_REG_CASE(14, pr_read);
> + GENERATE_READ_PR_REG_CASE(15, pr_read);
> + GENERATE_READ_PR_REG_CASE(16, pr_read);
> + GENERATE_READ_PR_REG_CASE(17, pr_read);
> + GENERATE_READ_PR_REG_CASE(18, pr_read);
> + GENERATE_READ_PR_REG_CASE(19, pr_read);
> + GENERATE_READ_PR_REG_CASE(20, pr_read);
> + GENERATE_READ_PR_REG_CASE(21, pr_read);
> + GENERATE_READ_PR_REG_CASE(22, pr_read);
> + GENERATE_READ_PR_REG_CASE(23, pr_read);
> + GENERATE_READ_PR_REG_CASE(24, pr_read);
> + GENERATE_READ_PR_REG_CASE(25, pr_read);
> + GENERATE_READ_PR_REG_CASE(26, pr_read);
> + GENERATE_READ_PR_REG_CASE(27, pr_read);
> + GENERATE_READ_PR_REG_CASE(28, pr_read);
> + GENERATE_READ_PR_REG_CASE(29, pr_read);
> + GENERATE_READ_PR_REG_CASE(30, pr_read);
> + GENERATE_READ_PR_REG_CASE(31, pr_read);
> + GENERATE_READ_PR_REG_OTHERS(32 ... 255, pr_read);
32 … 254 here and in the below.
> + default:
> + BUG(); /* Can't happen */
> + break;
> + }
> +}
> +
> +void write_protection_region(const pr_t *pr_write, uint8_t sel)
> +{
> + prepare_selector(&sel);
> +
> + switch ( sel )
> + {
> + GENERATE_WRITE_PR_REG_CASE(0, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(1, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(2, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(3, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(4, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(5, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(6, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(7, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(8, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(9, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(10, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(11, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(12, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(13, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(14, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(15, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(16, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(17, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(18, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(19, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(20, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(21, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(22, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(23, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(24, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(25, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(26, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(27, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(28, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(29, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(30, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(31, pr_write);
> + GENERATE_WRITE_PR_REG_OTHERS(32 ... 255, pr_write);
> + default:
> + BUG(); /* Can't happen */
> + break;
> + }
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 7ab68fc8c7..ccfb37a67b 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -39,7 +39,6 @@ static void __init __maybe_unused build_assertions(void)
> BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
> }
>
> -#ifdef CONFIG_ARM_64
> pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
> {
> unsigned int attr_idx = PAGE_AI_MASK(flags);
> @@ -110,7 +109,6 @@ pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
>
> return region;
> }
> -#endif /* CONFIG_ARM_64 */
>
> void __init setup_mm(void)
> {
> --
> 2.25.1
>
>
Cheers,
Luca
On 11/06/2025 15:35, Ayan Kumar Halder wrote:
> Define prepare_selector(), read_protection_region() and
> write_protection_region() for arm32. Also, define
> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>
> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
> Enable pr_of_addr() for arm32.
>
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
> ---
> Changes from :-
>
> v1 - 1. Enable write_protection_region() for aarch32.
>
> v2 - 1. Enable access to protection regions from 0 - 255.
>
> xen/arch/arm/include/asm/mpu.h | 2 -
> xen/arch/arm/mpu/arm32/Makefile | 1 +
> xen/arch/arm/mpu/arm32/mm.c | 165 ++++++++++++++++++++++++++++++++
> xen/arch/arm/mpu/mm.c | 2 -
> 4 files changed, 166 insertions(+), 4 deletions(-)
> create mode 100644 xen/arch/arm/mpu/arm32/mm.c
>
> diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
> index 8f06ddac0f..63560c613b 100644
> --- a/xen/arch/arm/include/asm/mpu.h
> +++ b/xen/arch/arm/include/asm/mpu.h
> @@ -25,7 +25,6 @@
>
> #ifndef __ASSEMBLY__
>
> -#ifdef CONFIG_ARM_64
> /*
> * Set base address of MPU protection region.
> *
> @@ -85,7 +84,6 @@ static inline bool region_is_valid(const pr_t *pr)
> {
> return pr->prlar.reg.en;
> }
> -#endif /* CONFIG_ARM_64 */
>
> #endif /* __ASSEMBLY__ */
>
> diff --git a/xen/arch/arm/mpu/arm32/Makefile b/xen/arch/arm/mpu/arm32/Makefile
> index e15ce2f7be..3da872322e 100644
> --- a/xen/arch/arm/mpu/arm32/Makefile
> +++ b/xen/arch/arm/mpu/arm32/Makefile
> @@ -1 +1,2 @@
> obj-y += domain-page.o
> +obj-y += mm.o
> diff --git a/xen/arch/arm/mpu/arm32/mm.c b/xen/arch/arm/mpu/arm32/mm.c
> new file mode 100644
> index 0000000000..5d3cb6dff7
> --- /dev/null
> +++ b/xen/arch/arm/mpu/arm32/mm.c
> @@ -0,0 +1,165 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <xen/bug.h>
> +#include <xen/types.h>
> +#include <asm/mpu.h>
> +#include <asm/sysregs.h>
> +#include <asm/system.h>
> +
> +#define PRBAR_EL2_(n) HPRBAR##n
> +#define PRLAR_EL2_(n) HPRLAR##n
> +
> +#define GENERATE_WRITE_PR_REG_CASE(num, pr) \
> + case num: \
> + { \
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2_(num)); \
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2_(num)); \
> + break; \
> + }
> +
> +#define GENERATE_WRITE_PR_REG_OTHERS(num, pr) \
> + case num: \
> + { \
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, HPRBAR); \
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, HPRLAR); \
> + break; \
> + }
> +
> +#define GENERATE_READ_PR_REG_CASE(num, pr) \
> + case num: \
> + { \
> + pr->prbar.bits = READ_SYSREG(PRBAR_EL2_(num)); \
> + pr->prlar.bits = READ_SYSREG(PRLAR_EL2_(num)); \
> + break; \
> + }
> +
> +#define GENERATE_READ_PR_REG_OTHERS(num, pr) \
> + case num: \
> + { \
> + pr->prbar.bits = READ_SYSREG(HPRBAR); \
> + pr->prlar.bits = READ_SYSREG(HPRLAR); \
> + break; \
> + }
> +
> +/*
> + * Armv8-R supports direct access and indirect access to the MPU regions through
> + * registers:
> + * - indirect access involves changing the MPU region selector, issuing an isb
> + * barrier and accessing the selected region through specific registers
> + * - direct access involves accessing specific registers that point to
> + * specific MPU regions, without changing the selector, avoiding the use of
> + * a barrier.
> + * For Arm32 the PR{B,L}AR<n>_ELx (for n=0..31) are used for direct access to the
> + * first 32 MPU regions.
> + * For MPU region numbered 32..255, one need to set the region number in PRSELR_ELx,
> + * followed by configuring PR{B,L}AR_ELx.
> + */
> +inline void prepare_selector(uint8_t *sel)
> +{
> + uint8_t cur_sel = *sel;
> +
> + if ( cur_sel > 0x1FU )
> + {
> + WRITE_SYSREG(cur_sel, PRSELR_EL2);
> + isb();
> + }
> +}
> +
> +void read_protection_region(pr_t *pr_read, uint8_t sel)
> +{
> + prepare_selector(&sel);
> +
> + switch ( sel )
> + {
> + GENERATE_READ_PR_REG_CASE(0, pr_read);
> + GENERATE_READ_PR_REG_CASE(1, pr_read);
> + GENERATE_READ_PR_REG_CASE(2, pr_read);
> + GENERATE_READ_PR_REG_CASE(3, pr_read);
> + GENERATE_READ_PR_REG_CASE(4, pr_read);
> + GENERATE_READ_PR_REG_CASE(5, pr_read);
> + GENERATE_READ_PR_REG_CASE(6, pr_read);
> + GENERATE_READ_PR_REG_CASE(7, pr_read);
> + GENERATE_READ_PR_REG_CASE(8, pr_read);
> + GENERATE_READ_PR_REG_CASE(9, pr_read);
> + GENERATE_READ_PR_REG_CASE(10, pr_read);
> + GENERATE_READ_PR_REG_CASE(11, pr_read);
> + GENERATE_READ_PR_REG_CASE(12, pr_read);
> + GENERATE_READ_PR_REG_CASE(13, pr_read);
> + GENERATE_READ_PR_REG_CASE(14, pr_read);
> + GENERATE_READ_PR_REG_CASE(15, pr_read);
> + GENERATE_READ_PR_REG_CASE(16, pr_read);
> + GENERATE_READ_PR_REG_CASE(17, pr_read);
> + GENERATE_READ_PR_REG_CASE(18, pr_read);
> + GENERATE_READ_PR_REG_CASE(19, pr_read);
> + GENERATE_READ_PR_REG_CASE(20, pr_read);
> + GENERATE_READ_PR_REG_CASE(21, pr_read);
> + GENERATE_READ_PR_REG_CASE(22, pr_read);
> + GENERATE_READ_PR_REG_CASE(23, pr_read);
> + GENERATE_READ_PR_REG_CASE(24, pr_read);
> + GENERATE_READ_PR_REG_CASE(25, pr_read);
> + GENERATE_READ_PR_REG_CASE(26, pr_read);
> + GENERATE_READ_PR_REG_CASE(27, pr_read);
> + GENERATE_READ_PR_REG_CASE(28, pr_read);
> + GENERATE_READ_PR_REG_CASE(29, pr_read);
> + GENERATE_READ_PR_REG_CASE(30, pr_read);
> + GENERATE_READ_PR_REG_CASE(31, pr_read);
> + GENERATE_READ_PR_REG_OTHERS(32 ... 255, pr_read);
This should be 32 ... 254 . Thanks Luca for pointing this out.
The max number of regions supported is 255 (not 256). It is the maximum
value of HMPUIR.
- Ayan
> + default:
> + BUG(); /* Can't happen */
> + break;
> + }
> +}
> +
> +void write_protection_region(const pr_t *pr_write, uint8_t sel)
> +{
> + prepare_selector(&sel);
> +
> + switch ( sel )
> + {
> + GENERATE_WRITE_PR_REG_CASE(0, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(1, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(2, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(3, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(4, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(5, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(6, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(7, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(8, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(9, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(10, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(11, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(12, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(13, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(14, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(15, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(16, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(17, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(18, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(19, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(20, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(21, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(22, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(23, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(24, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(25, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(26, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(27, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(28, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(29, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(30, pr_write);
> + GENERATE_WRITE_PR_REG_CASE(31, pr_write);
> + GENERATE_WRITE_PR_REG_OTHERS(32 ... 255, pr_write);
> + default:
> + BUG(); /* Can't happen */
> + break;
> + }
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 7ab68fc8c7..ccfb37a67b 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -39,7 +39,6 @@ static void __init __maybe_unused build_assertions(void)
> BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
> }
>
> -#ifdef CONFIG_ARM_64
> pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
> {
> unsigned int attr_idx = PAGE_AI_MASK(flags);
> @@ -110,7 +109,6 @@ pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags)
>
> return region;
> }
> -#endif /* CONFIG_ARM_64 */
>
> void __init setup_mm(void)
> {
Hi Ayan,
> On 11 Jun 2025, at 15:35, Ayan Kumar Halder <ayan.kumar.halder@amd.com> wrote:
>
> Define prepare_selector(), read_protection_region() and
> write_protection_region() for arm32. Also, define
> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>
> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
> Enable pr_of_addr() for arm32.
>
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
> ---
Based on your v2 (https://patchwork.kernel.org/project/xen-devel/patch/20250606164854.1551148-4-ayan.kumar.halder@amd.com/) I was imaging something like this:
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 74e96ca57137..5d324b2d4ca5 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -87,20 +87,28 @@ static void __init __maybe_unused build_assertions(void)
*/
static void prepare_selector(uint8_t *sel)
{
-#ifdef CONFIG_ARM_64
uint8_t cur_sel = *sel;
+#ifdef CONFIG_ARM_64
/*
- * {read,write}_protection_region works using the direct access to the 0..15
- * regions, so in order to save the isb() overhead, change the PRSELR_EL2
- * only when needed, so when the upper 4 bits of the selector will change.
+ * {read,write}_protection_region works using the Arm64 direct access to the
+ * 0..15 regions, so in order to save the isb() overhead, change the
+ * PRSELR_EL2 only when needed, so when the upper 4 bits of the selector
+ * will change.
*/
cur_sel &= 0xF0U;
+#else
+ /* Arm32 MPU can use direct access for 0-31 */
+ if ( cur_sel > 31 )
+ cur_sel = 0;
+#endif
if ( READ_SYSREG(PRSELR_EL2) != cur_sel )
{
WRITE_SYSREG(cur_sel, PRSELR_EL2);
isb();
}
+
+#ifdef CONFIG_ARM_64
*sel = *sel & 0xFU;
#endif
}
@@ -144,6 +152,12 @@ void read_protection_region(pr_t *pr_read, uint8_t sel)
GENERATE_READ_PR_REG_CASE(29, pr_read);
GENERATE_READ_PR_REG_CASE(30, pr_read);
GENERATE_READ_PR_REG_CASE(31, pr_read);
+ case 32 ... 255:
+ {
+ pr->prbar.bits = READ_SYSREG(PRBAR_EL2);
+ pr->prlar.bits = READ_SYSREG(PRLAR_EL2);
+ break;
+ }
#endif
default:
BUG(); /* Can't happen */
@@ -190,6 +204,12 @@ void write_protection_region(const pr_t *pr_write, uint8_t sel)
GENERATE_WRITE_PR_REG_CASE(29, pr_write);
GENERATE_WRITE_PR_REG_CASE(30, pr_write);
GENERATE_WRITE_PR_REG_CASE(31, pr_write);
+ case 32 ... 255:
+ {
+ WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2);
+ WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2);
+ break;
+ }
#endif
default:
BUG(); /* Can't happen */
Is it using too ifdefs in your opinion that would benefit the split you do in v3?
On 12/06/2025 10:35, Luca Fancellu wrote:
> Hi Ayan,
Hi Luca,
>
>> On 11 Jun 2025, at 15:35, Ayan Kumar Halder <ayan.kumar.halder@amd.com> wrote:
>>
>> Define prepare_selector(), read_protection_region() and
>> write_protection_region() for arm32. Also, define
>> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>>
>> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
>> Enable pr_of_addr() for arm32.
>>
>> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
>> ---
> Based on your v2 (https://patchwork.kernel.org/project/xen-devel/patch/20250606164854.1551148-4-ayan.kumar.halder@amd.com/) I was imaging something like this:
>
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 74e96ca57137..5d324b2d4ca5 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -87,20 +87,28 @@ static void __init __maybe_unused build_assertions(void)
> */
> static void prepare_selector(uint8_t *sel)
> {
> -#ifdef CONFIG_ARM_64
> uint8_t cur_sel = *sel;
>
> +#ifdef CONFIG_ARM_64
> /*
> - * {read,write}_protection_region works using the direct access to the 0..15
> - * regions, so in order to save the isb() overhead, change the PRSELR_EL2
> - * only when needed, so when the upper 4 bits of the selector will change.
> + * {read,write}_protection_region works using the Arm64 direct access to the
> + * 0..15 regions, so in order to save the isb() overhead, change the
> + * PRSELR_EL2 only when needed, so when the upper 4 bits of the selector
> + * will change.
> */
> cur_sel &= 0xF0U;
> +#else
> + /* Arm32 MPU can use direct access for 0-31 */
> + if ( cur_sel > 31 )
> + cur_sel = 0;
> +#endif
> if ( READ_SYSREG(PRSELR_EL2) != cur_sel )
> {
> WRITE_SYSREG(cur_sel, PRSELR_EL2);
> isb();
> }
> +
> +#ifdef CONFIG_ARM_64
> *sel = *sel & 0xFU;
> #endif
> }
> @@ -144,6 +152,12 @@ void read_protection_region(pr_t *pr_read, uint8_t sel)
> GENERATE_READ_PR_REG_CASE(29, pr_read);
> GENERATE_READ_PR_REG_CASE(30, pr_read);
> GENERATE_READ_PR_REG_CASE(31, pr_read);
> + case 32 ... 255:
> + {
> + pr->prbar.bits = READ_SYSREG(PRBAR_EL2);
> + pr->prlar.bits = READ_SYSREG(PRLAR_EL2);
> + break;
> + }
> #endif
> default:
> BUG(); /* Can't happen */
> @@ -190,6 +204,12 @@ void write_protection_region(const pr_t *pr_write, uint8_t sel)
> GENERATE_WRITE_PR_REG_CASE(29, pr_write);
> GENERATE_WRITE_PR_REG_CASE(30, pr_write);
> GENERATE_WRITE_PR_REG_CASE(31, pr_write);
> + case 32 ... 255:
> + {
> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2);
> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2);
> + break;
> + }
> #endif
> default:
> BUG(); /* Can't happen */
>
>
> Is it using too ifdefs in your opinion that would benefit the split you do in v3?
Yes. However, I understand that this is subjective. I need your and
Michal/Julien to have an opinion here whether to go with the split
(which means some amount of code duplication) or introduce if-defs. I
will be happy to proceed as per your opinions.
- Ayan
>
>
On 12/06/2025 12:37, Ayan Kumar Halder wrote:
>
> On 12/06/2025 10:35, Luca Fancellu wrote:
>> Hi Ayan,
> Hi Luca,
>>
>>> On 11 Jun 2025, at 15:35, Ayan Kumar Halder <ayan.kumar.halder@amd.com> wrote:
>>>
>>> Define prepare_selector(), read_protection_region() and
>>> write_protection_region() for arm32. Also, define
>>> GENERATE_{READ/WRITE}_PR_REG_OTHERS to access MPU regions from 32 to 255.
>>>
>>> Enable pr_{get/set}_{base/limit}(), region_is_valid() for arm32.
>>> Enable pr_of_addr() for arm32.
>>>
>>> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
>>> ---
>> Based on your v2 (https://patchwork.kernel.org/project/xen-devel/patch/20250606164854.1551148-4-ayan.kumar.halder@amd.com/) I was imaging something like this:
>>
>> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
>> index 74e96ca57137..5d324b2d4ca5 100644
>> --- a/xen/arch/arm/mpu/mm.c
>> +++ b/xen/arch/arm/mpu/mm.c
>> @@ -87,20 +87,28 @@ static void __init __maybe_unused build_assertions(void)
>> */
>> static void prepare_selector(uint8_t *sel)
>> {
>> -#ifdef CONFIG_ARM_64
>> uint8_t cur_sel = *sel;
>>
>> +#ifdef CONFIG_ARM_64
>> /*
>> - * {read,write}_protection_region works using the direct access to the 0..15
>> - * regions, so in order to save the isb() overhead, change the PRSELR_EL2
>> - * only when needed, so when the upper 4 bits of the selector will change.
>> + * {read,write}_protection_region works using the Arm64 direct access to the
>> + * 0..15 regions, so in order to save the isb() overhead, change the
>> + * PRSELR_EL2 only when needed, so when the upper 4 bits of the selector
>> + * will change.
>> */
>> cur_sel &= 0xF0U;
>> +#else
>> + /* Arm32 MPU can use direct access for 0-31 */
>> + if ( cur_sel > 31 )
>> + cur_sel = 0;
>> +#endif
>> if ( READ_SYSREG(PRSELR_EL2) != cur_sel )
>> {
>> WRITE_SYSREG(cur_sel, PRSELR_EL2);
>> isb();
>> }
>> +
>> +#ifdef CONFIG_ARM_64
>> *sel = *sel & 0xFU;
>> #endif
>> }
>> @@ -144,6 +152,12 @@ void read_protection_region(pr_t *pr_read, uint8_t sel)
>> GENERATE_READ_PR_REG_CASE(29, pr_read);
>> GENERATE_READ_PR_REG_CASE(30, pr_read);
>> GENERATE_READ_PR_REG_CASE(31, pr_read);
>> + case 32 ... 255:
>> + {
>> + pr->prbar.bits = READ_SYSREG(PRBAR_EL2);
>> + pr->prlar.bits = READ_SYSREG(PRLAR_EL2);
>> + break;
>> + }
>> #endif
>> default:
>> BUG(); /* Can't happen */
>> @@ -190,6 +204,12 @@ void write_protection_region(const pr_t *pr_write, uint8_t sel)
>> GENERATE_WRITE_PR_REG_CASE(29, pr_write);
>> GENERATE_WRITE_PR_REG_CASE(30, pr_write);
>> GENERATE_WRITE_PR_REG_CASE(31, pr_write);
>> + case 32 ... 255:
>> + {
>> + WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2);
>> + WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2);
>> + break;
>> + }
>> #endif
>> default:
>> BUG(); /* Can't happen */
>>
>>
>> Is it using too ifdefs in your opinion that would benefit the split you do in v3?
>
> Yes. However, I understand that this is subjective. I need your and
> Michal/Julien to have an opinion here whether to go with the split
> (which means some amount of code duplication) or introduce if-defs. I
> will be happy to proceed as per your opinions.
I don't have a strong opinion here. Maybe I slightly prefer the split to avoid
ifdefery.
~Michal
© 2016 - 2026 Red Hat, Inc.