HCR_EL2.TID2 mandates that accesses from EL1 to CTR_EL0, CCSIDR_EL1,
CCSIDR2_EL1, CLIDR_EL1 and CSSELR_EL1 are trapped to EL2, but QEMU
completely ignores it, making it impossible for hypervisors to
virtualize the cache hierarchy.
Do the right thing by trapping to EL2 if HCR_EL2.TID2 is set.
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
target/arm/helper.c | 31 +++++++++++++++++++++++++++----
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0bf8f53d4b..1e546096b8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1910,6 +1910,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     raw_write(env, ri, value);
 }
 
+static CPAccessResult access_aa64_tid2(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -2110,10 +2121,14 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .writefn = pmintenclr_write },
     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
-      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
+      .access = PL1_R,
+      .accessfn = access_aa64_tid2,
+      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
-      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
+      .access = PL1_RW,
+      .accessfn = access_aa64_tid2,
+      .writefn = csselr_write, .resetvalue = 0,
       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                              offsetof(CPUARMState, cp15.csselr_ns) } },
     /* Auxiliary ID register: this actually has an IMPDEF value but for now
@@ -5204,6 +5219,11 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
         return CP_ACCESS_TRAP;
     }
+
+    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
     return CP_ACCESS_OK;
 }
 
@@ -6184,7 +6204,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         ARMCPRegInfo clidr = {
             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
-            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
+            .access = PL1_R, .type = ARM_CP_CONST,
+            .accessfn = access_aa64_tid2,
+            .resetvalue = cpu->clidr
         };
         define_one_arm_cp_reg(cpu, &clidr);
         define_arm_cp_regs(cpu, v7_cp_reginfo);
@@ -6717,7 +6739,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         /* These are common to v8 and pre-v8 */
         { .name = "CTR",
           .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
-          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+          .access = PL1_R, .accessfn = ctr_el0_access,
+          .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
         { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
           .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
           .access = PL0_R, .accessfn = ctr_el0_access,
--
2.20.1
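
For context on why a hypervisor cares about this trap: with HCR_EL2.TID2 set,
an EL1 read of one of these registers arrives at EL2 as a trapped
system-register access (ESR_EL2.EC == 0x18), and the hypervisor can return its
own view of the cache hierarchy instead of the host's. The code below is only
a minimal, hypothetical sketch of that guest-side flow, not QEMU or KVM code:
the vcpu_regs structure, the handle_tid2_trap() name and the virt_clidr value
are invented for illustration, while the ESR_EL2 ISS field layout and the
CLIDR_EL1 encoding are architectural.

/*
 * Hypothetical EL2 trap-handler sketch: once TID2 traps are delivered,
 * an EL1 "MRS Xt, CLIDR_EL1" shows up here and the hypervisor supplies
 * a virtualized cache-hierarchy description.
 */
#include <stdint.h>
#include <stdbool.h>

#define ESR_EC_SHIFT    26
#define ESR_EC_SYS64    0x18        /* trapped AArch64 MSR/MRS/SYS */
#define ESR_SYS64_READ  (1u << 0)   /* ISS bit 0: 1 = MRS (read) */

struct vcpu_regs {                  /* invented for the example */
    uint64_t x[31];
    uint64_t pc;
};

static const uint64_t virt_clidr = 0x0a200023; /* example guest CLIDR */

bool handle_tid2_trap(struct vcpu_regs *regs, uint64_t esr_el2)
{
    if ((esr_el2 >> ESR_EC_SHIFT) != ESR_EC_SYS64 ||
        !(esr_el2 & ESR_SYS64_READ)) {
        return false;                      /* not a sysreg read trap */
    }

    /* ISS layout for EC 0x18: Op0[21:20] Op2[19:17] Op1[16:14] CRn[13:10]
     * Rt[9:5] CRm[4:1] Direction[0]. */
    unsigned int op0 = (esr_el2 >> 20) & 0x3;
    unsigned int op2 = (esr_el2 >> 17) & 0x7;
    unsigned int op1 = (esr_el2 >> 14) & 0x7;
    unsigned int crn = (esr_el2 >> 10) & 0xf;
    unsigned int rt  = (esr_el2 >>  5) & 0x1f;
    unsigned int crm = (esr_el2 >>  1) & 0xf;

    /* CLIDR_EL1 is op0=3, op1=1, CRn=0, CRm=0, op2=1 -- the same encoding
     * the patch uses for its "CLIDR" reginfo entry. */
    if (op0 == 3 && op1 == 1 && crn == 0 && crm == 0 && op2 == 1) {
        if (rt != 31) {                    /* Rt == 31 encodes XZR */
            regs->x[rt] = virt_clidr;
        }
        regs->pc += 4;                     /* step over the trapped MRS */
        return true;
    }
    return false;       /* other TID2 registers handled elsewhere */
}
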
On Sun, Dec 01, 2019 at 12:20:14PM +0000, Marc Zyngier wrote:
> HCR_EL2.TID2 mandates that access from EL1 to CTR_EL0, CCSIDR_EL1,
> CCSIDR2_EL1, CLIDR_EL1, CSSELR_EL1 are trapped to EL2, and QEMU
> completely ignores it, making it impossible for hypervisors to
> virtualize the cache hierarchy.
>
> Do the right thing by trapping to EL2 if HCR_EL2.TID2 is set.
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
On 12/1/19 12:20 PM, Marc Zyngier wrote:
> HCR_EL2.TID2 mandates that access from EL1 to CTR_EL0, CCSIDR_EL1,
> CCSIDR2_EL1, CLIDR_EL1, CSSELR_EL1 are trapped to EL2, and QEMU
> completely ignores it, making it impossible for hypervisors to
> virtualize the cache hierarchy.
>
> Do the right thing by trapping to EL2 if HCR_EL2.TID2 is set.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  target/arm/helper.c | 31 +++++++++++++++++++++++++++----
>  1 file changed, 27 insertions(+), 4 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~