According to the Arm Architecture Reference Manual, section "Memory region
tagging types", tag-store instructions that target a canonically tagged
region cause a stage 1 permission fault when MTX is enabled.
Signed-off-by: Gabriel Brookman <brookmangabriel@gmail.com>
---
target/arm/tcg/mte_helper.c | 69 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 69 insertions(+)
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 07797aecf9..ddf4ffc51b 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -227,6 +227,20 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
#endif
}
+static void canonical_tag_write_fail(CPUARMState *env,
+ uint64_t dirty_ptr, uintptr_t ra)
+{
+ uint64_t syn;
+
+ env->exception.vaddress = dirty_ptr;
+
+ syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, 1, 0);
+ syn |= BIT_ULL(42); /* TnD is bit 42 */
+
+ raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
+ g_assert_not_reached();
+}
+
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
uint64_t ptr, MMUAccessType ptr_access,
int ptr_size, MMUAccessType tag_access,
@@ -372,7 +386,11 @@ static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
/* Store if page supports tags. */
if (mem) {
store1(ptr, mem, allocation_tag_from_addr(xt));
+ } else if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ canonical_tag_write_fail(env, ptr, ra);
+ return;
}
+
}
void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
@@ -389,9 +407,19 @@ void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
int mmu_idx = arm_env_mmu_index(env);
uintptr_t ra = GETPC();
+ uint8_t *mem;
check_tag_aligned(env, ptr, ra);
probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
+
+ /* If we are storing to a canonically tagged memory region, fault. */
+ if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
+ TAG_GRANULE, MMU_DATA_STORE, true, ra);
+ if (!mem) {
+ canonical_tag_write_fail(env, ptr, ra);
+ }
+ }
}
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
@@ -415,6 +443,11 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
MMU_DATA_STORE, TAG_GRANULE,
MMU_DATA_STORE, ra);
+ if (!(mem1 && mem2) && canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ canonical_tag_write_fail(env, ptr, ra);
+ return;
+ }
+
/* Store if page(s) support tags. */
if (mem1) {
store1(TAG_GRANULE, mem1, tag);
@@ -426,9 +459,14 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
/* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
2 * TAG_GRANULE, MMU_DATA_STORE, ra);
+
if (mem1) {
tag |= tag << 4;
qatomic_set(mem1, tag);
+ } else if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ /* Writing tags to canonically tagged memory region: faults */
+ canonical_tag_write_fail(env, ptr, ra);
+ return;
}
}
}
@@ -448,6 +486,7 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
int mmu_idx = arm_env_mmu_index(env);
uintptr_t ra = GETPC();
int in_page = -(ptr | TARGET_PAGE_MASK);
+ uint8_t *mem1, *mem2;
check_tag_aligned(env, ptr, ra);
@@ -457,6 +496,29 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
}
+
+ /* If we are storing to a canonically tagged memory region, fault. */
+ if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ if (likely(in_page >= 2 * TAG_GRANULE)) {
+ mem1 = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
+ 2 * TAG_GRANULE, MMU_DATA_STORE,
+ true, ra);
+ if (!mem1) {
+ canonical_tag_write_fail(env, ptr, ra);
+ }
+ } else {
+ mem1 = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
+ TAG_GRANULE, MMU_DATA_STORE,
+ true, ra);
+ mem2 = allocation_tag_mem_probe(env, mmu_idx,
+ ptr + TAG_GRANULE,
+ MMU_DATA_STORE, TAG_GRANULE,
+ MMU_DATA_STORE, true, ra);
+ if (!mem1 || !mem2) {
+ canonical_tag_write_fail(env, ptr, ra);
+ }
+ }
+ }
}
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
@@ -569,6 +631,10 @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
* and if the OS has enabled access to the tags.
*/
if (!tag_mem) {
+ /* Storing tags to canonically tagged region: fault. */
+ if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ canonical_tag_write_fail(env, ptr, ra);
+ }
return;
}
@@ -619,9 +685,12 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
MMU_DATA_STORE, ra);
+
if (mem) {
int tag_pair = (val & 0xf) * 0x11;
memset(mem, tag_pair, tag_bytes);
+ } else if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
+ canonical_tag_write_fail(env, ptr, ra);
}
}
--
2.52.0
On 3/10/26 08:59, Gabriel Brookman wrote:
> According to ARM ARM, section "Memory region tagging types", tag-store
> instructions targeting canonically tagged regions cause a stage 1
> permission fault with MTX enabled.
>
> Signed-off-by: Gabriel Brookman <brookmangabriel@gmail.com>
> ---
> target/arm/tcg/mte_helper.c | 69 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 69 insertions(+)
>
> diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
> index 07797aecf9..ddf4ffc51b 100644
> --- a/target/arm/tcg/mte_helper.c
> +++ b/target/arm/tcg/mte_helper.c
> @@ -227,6 +227,20 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
> #endif
> }
>
> +static void canonical_tag_write_fail(CPUARMState *env,
> + uint64_t dirty_ptr, uintptr_t ra)
> +{
> + uint64_t syn;
> +
> + env->exception.vaddress = dirty_ptr;
> +
> + syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, 1, 0);
> + syn |= BIT_ULL(42); /* TnD is bit 42 */
> +
> + raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
> + g_assert_not_reached();
> +}
> +
> static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
> uint64_t ptr, MMUAccessType ptr_access,
> int ptr_size, MMUAccessType tag_access,
> @@ -372,7 +386,11 @@ static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
> /* Store if page supports tags. */
> if (mem) {
> store1(ptr, mem, allocation_tag_from_addr(xt));
> + } else if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
Pass the MTX enable state in from the translator rather than querying
canonical_tagging_enabled() in the helper — apply this to all instances.
> @@ -415,6 +443,11 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
> MMU_DATA_STORE, TAG_GRANULE,
> MMU_DATA_STORE, ra);
>
> + if (!(mem1 && mem2) && canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
> + canonical_tag_write_fail(env, ptr, ra);
> + return;
> + }
> +
This isn't correct: the store to mem1 is independent of mem2 and might
succeed, so a fault raised for mem2 must report the incremented address
(ptr + TAG_GRANULE), not ptr.
> @@ -448,6 +486,7 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
> int mmu_idx = arm_env_mmu_index(env);
> uintptr_t ra = GETPC();
> int in_page = -(ptr | TARGET_PAGE_MASK);
> + uint8_t *mem1, *mem2;
>
> check_tag_aligned(env, ptr, ra);
>
> @@ -457,6 +496,29 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
> probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
> probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
> }
> +
> + /* If we are storing to a canonically tagged memory region, fault. */
> + if (canonical_tagging_enabled(env, 1 & (ptr >> 55))) {
> + if (likely(in_page >= 2 * TAG_GRANULE)) {
> + mem1 = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
> + 2 * TAG_GRANULE, MMU_DATA_STORE,
> + true, ra);
> + if (!mem1) {
> + canonical_tag_write_fail(env, ptr, ra);
> + }
> + } else {
> + mem1 = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
> + TAG_GRANULE, MMU_DATA_STORE,
> + true, ra);
> + mem2 = allocation_tag_mem_probe(env, mmu_idx,
> + ptr + TAG_GRANULE,
> + MMU_DATA_STORE, TAG_GRANULE,
> + MMU_DATA_STORE, true, ra);
> + if (!mem1 || !mem2) {
> + canonical_tag_write_fail(env, ptr, ra);
> + }
> + }
> + }
> }
No changes required for st2g_stub, because that's only used when tagaccess is false. See
AArch64.S1CheckPermissions.
r~
© 2016 - 2026 Red Hat, Inc.