ARMv8.4 adds the mandatory FEAT_TLBIRANGE. It provides TLBI
maintenance instructions that apply to a range of input addresses.
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
---
 target/arm/cpu.h    |   5 +
 target/arm/helper.c | 296 ++++++++++++++++++++
 2 files changed, 301 insertions(+)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 616b39325347..5802798c3069 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4071,6 +4071,11 @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
 }
 
+static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+}
+
 static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 9b1b98705f91..cb10851efda8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4759,6 +4759,219 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                                   ARMMMUIdxBit_SE3, bits);
 }
 
+#ifdef TARGET_AARCH64
+static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
+                                           uint64_t value)
+{
+    unsigned int page_shift;
+    unsigned int page_size_granule;
+    uint64_t num;
+    uint64_t scale;
+    uint64_t exponent;
+    uint64_t length;
+
+    num = extract64(value, 39, 5);
+    scale = extract64(value, 44, 2);
+    page_size_granule = extract64(value, 46, 2);
+
+    page_shift = page_size_granule * 2 + 10;
+
+    if (page_size_granule == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
+                      page_size_granule);
+        return 0;
+    }
+
+    exponent = (5 * scale) + 1;
+    length = (num + 1) << (exponent + page_shift);
+
+    return length;
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL1&0.
+     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+    ARMMMUIdx mmu_idx;
+    int mask;
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    mask = vae1_tlbmask(env);
+    mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
+    if (regime_has_2_ranges(mmu_idx)) {
+        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+    } else {
+        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    }
+    length = tlbi_aa64_range_get_length(env, value);
+    bits = tlbbits_for_regime(env, mmu_idx, pageaddr);
+
+    if (tlb_force_broadcast(env)) {
+        tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                            length, mask,
+                                                            bits);
+    } else {
+        tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length, mask,
+                                            bits);
+    }
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+     * shareable specific flushes.
+     */
+    ARMMMUIdx mmu_idx;
+    int mask;
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    mask = vae1_tlbmask(env);
+    mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
+    if (regime_has_2_ranges(mmu_idx)) {
+        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+    } else {
+        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    }
+    length = tlbi_aa64_range_get_length(env, value);
+    bits = tlbbits_for_regime(env, mmu_idx, pageaddr);
+
+    tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                        length, mask,
+                                                        bits);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL2.
+     * Currently handles all of RVAE2 and RVALE2,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+    bool secure;
+    int mask;
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    secure = arm_is_secure_below_el3(env);
+    pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    length = tlbi_aa64_range_get_length(env, value);
+    mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
+    bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
+                              pageaddr);
+
+    if (tlb_force_broadcast(env)) {
+        tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                            length, mask,
+                                                            bits);
+    } else {
+        tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length, mask,
+                                            bits);
+    }
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, Inner/Outer Shareable, EL2.
+     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+     * since we don't support flush-for-specific-ASID-only,
+     * flush-last-level-only or inner/outer shareable specific flushes.
+     */
+    bool secure;
+    int mask;
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    secure = arm_is_secure_below_el3(env);
+    pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    length = tlbi_aa64_range_get_length(env, value);
+    mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
+    bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
+                              pageaddr);
+
+    tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                        length, mask,
+                                                        bits);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL3.
+     * Currently handles all of RVAE3 and RVALE3,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    length = tlbi_aa64_range_get_length(env, value);
+    bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+
+    if (tlb_force_broadcast(env)) {
+        tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                            length,
+                                                            ARMMMUIdxBit_SE3,
+                                                            bits);
+    } else {
+        tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length,
+                                            ARMMMUIdxBit_SE3, bits);
+    }
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL3, Inner/Outer Shareable.
+     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+     * since we don't support flush-for-specific-ASID-only,
+     * flush-last-level-only or inner/outer specific flushes.
+     */
+    int bits;
+    uint64_t pageaddr;
+    uint64_t length;
+
+    CPUState *cs = env_cpu(env);
+    pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    length = tlbi_aa64_range_get_length(env, value);
+    bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+
+    tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                        length,
+                                                        ARMMMUIdxBit_SE3,
+                                                        bits);
+}
+#endif
+
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
 {
@@ -6920,6 +7133,86 @@ static const ARMCPRegInfo pauth_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+static const ARMCPRegInfo tlbirange_reginfo[] = {
+    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2_write },
+    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2_write },
+    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3_write },
+    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3_write },
+    REGINFO_SENTINEL
+};
+
 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     Error *err = NULL;
@@ -8289,6 +8582,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_rndr, cpu)) {
         define_arm_cp_regs(cpu, rndr_reginfo);
     }
+    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+        define_arm_cp_regs(cpu, tlbirange_reginfo);
+    }
 #ifndef CONFIG_USER_ONLY
     /* Data Cache clean instructions up to PoP */
     if (cpu_isar_feature(aa64_dcpop, cpu)) {
--
2.26.2
On 5/4/21 8:04 PM, Rebecca Cran wrote:
> +static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
> +                                           uint64_t value)
> +{
> +    unsigned int page_shift;
> +    unsigned int page_size_granule;
> +    uint64_t num;
> +    uint64_t scale;
> +    uint64_t exponent;
> +    uint64_t length;
> +
> +    num = extract64(value, 39, 5);
> +    scale = extract64(value, 44, 2);
> +    page_size_granule = extract64(value, 46, 2);
> +
> +    page_shift = page_size_granule * 2 + 10;
With TG encoded as 1, 2 and 3 this does yield the wanted sequence 12, 14, 16 (4K, 16K, 64K), but it's clearer written as (page_size_granule - 1) * 2 + 12, computed after the reserved page_size_granule == 0 case has been rejected.
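As a sanity check on the decode, here's a self-contained sketch (not QEMU code -- extract64 is a local stand-in, and the field values are just an example) of that length computation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract64(). */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* TG = 1 (4K), SCALE = 1, NUM = 3, per the TLBI RVA* field layout. */
    uint64_t value = (1ULL << 46) | (1ULL << 44) | (3ULL << 39);

    uint64_t num = extract64(value, 39, 5);    /* bits [43:39] */
    uint64_t scale = extract64(value, 44, 2);  /* bits [45:44] */
    uint64_t tg = extract64(value, 46, 2);     /* bits [47:46] */

    unsigned page_shift = (tg - 1) * 2 + 12;   /* 12, 14, 16 */
    uint64_t length = (num + 1) << ((5 * scale) + 1 + page_shift);

    /* (3 + 1) << (5 + 1 + 12) = 4 << 18 = 0x100000, i.e. 256 4K pages. */
    printf("length = 0x%" PRIx64 "\n", length);
    return 0;
}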
> +static void tlbi_aa64_rvae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
> +                                  uint64_t value)
> +{
> +    /*
> +     * Invalidate by VA range, EL1&0.
> +     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
> +     * since we don't support flush-for-specific-ASID-only or
> +     * flush-last-level-only.
> +     */
> +    ARMMMUIdx mmu_idx;
> +    int mask;
> +    int bits;
> +    uint64_t pageaddr;
> +    uint64_t length;
> +
> +    CPUState *cs = env_cpu(env);
> +    mask = vae1_tlbmask(env);
> +    mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
> +    if (regime_has_2_ranges(mmu_idx)) {
> +        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
> +    } else {
> +        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
> +    }
Let's extract the base address via a helper as well. Add
/* TODO: ARMv8.7 FEAT_LPA2 */
as that will change the extracted base address.
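Concretely, such a helper might look like this (a sketch only; the name and the two_ranges parameter are simply the ones used in the outline below):

static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
                                         bool two_ranges)
{
    /* TODO: ARMv8.7 FEAT_LPA2 */
    uint64_t pageaddr;

    if (two_ranges) {
        /* EL1&0 regimes: BaseADDR is sign-extended from bit 36. */
        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
    } else {
        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
    }

    return pageaddr;
}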
I think there's enough replicated between these functions to want a common
function. Something like
static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    bool two_ranges = regime_has_2_ranges(one_idx);
    uint64_t baseaddr, length;
    int bits;

    baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
    length = tlbi_aa64_range_get_length(env, value);
    bits = tlbbits_for_regime(env, one_idx, baseaddr);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(...);
    } else {
        tlb_flush_range_by_mmuidx(...);
    }
}
static void tlbi_aa64_rvae1_write(...)
{
    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(...)
{
    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(...)
{
    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(...)
{
    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(...)
{
    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(...)
{
    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
r~