1 | First arm pullreq for 6.1 cycle. The big stuff here is RTH's alignment series. | 1 | I don't have anything else queued up at the moment, so this is just |
2 | Richard's SME patches. | ||
2 | 3 | ||
3 | thanks | ||
4 | -- PMM | 4 | -- PMM |
5 | 5 | ||
6 | The following changes since commit ccdf06c1db192152ac70a1dd974c624f566cb7d4: | 6 | The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3: |
7 | 7 | ||
8 | Open 6.1 development tree (2021-04-30 11:15:40 +0100) | 8 | Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530) |
9 | 9 | ||
10 | are available in the Git repository at: | 10 | are available in the Git repository at: |
11 | 11 | ||
12 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210430 | 12 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711 |
13 | 13 | ||
14 | for you to fetch changes up to a6091108aa44e9017af4ca13c43f55a629e3744c: | 14 | for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8: |
15 | 15 | ||
16 | hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows (2021-04-30 11:16:52 +0100) | 16 | linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100) |
17 | 17 | ||
18 | ---------------------------------------------------------------- | 18 | ---------------------------------------------------------------- |
19 | target-arm queue: | 19 | target-arm: |
20 | * hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows | 20 | * Implement SME emulation, for both system and linux-user |
21 | * hw: add compat machines for 6.1 | ||
22 | * Fault misaligned accesses where the architecture requires it | ||
23 | * Fix some corner cases of MTE faults (notably with misaligned accesses) | ||
24 | * Make Thumb store insns UNDEF for Rn==1111 | ||
25 | * hw/arm/smmuv3: Support 16K translation granule | ||
26 | 21 | ||
27 | ---------------------------------------------------------------- | 22 | ---------------------------------------------------------------- |
28 | Cornelia Huck (1): | 23 | Richard Henderson (45): |
29 | hw: add compat machines for 6.1 | 24 | target/arm: Handle SME in aarch64_cpu_dump_state |
25 | target/arm: Add infrastructure for disas_sme | ||
26 | target/arm: Trap non-streaming usage when Streaming SVE is active | ||
27 | target/arm: Mark ADR as non-streaming | ||
28 | target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming | ||
29 | target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming | ||
30 | target/arm: Mark PMULL, FMMLA as non-streaming | ||
31 | target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming | ||
32 | target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming | ||
33 | target/arm: Mark string/histo/crypto as non-streaming | ||
34 | target/arm: Mark gather/scatter load/store as non-streaming | ||
35 | target/arm: Mark gather prefetch as non-streaming | ||
36 | target/arm: Mark LDFF1 and LDNF1 as non-streaming | ||
37 | target/arm: Mark LD1RO as non-streaming | ||
38 | target/arm: Add SME enablement checks | ||
39 | target/arm: Handle SME in sve_access_check | ||
40 | target/arm: Implement SME RDSVL, ADDSVL, ADDSPL | ||
41 | target/arm: Implement SME ZERO | ||
42 | target/arm: Implement SME MOVA | ||
43 | target/arm: Implement SME LD1, ST1 | ||
44 | target/arm: Export unpredicated ld/st from translate-sve.c | ||
45 | target/arm: Implement SME LDR, STR | ||
46 | target/arm: Implement SME ADDHA, ADDVA | ||
47 | target/arm: Implement FMOPA, FMOPS (non-widening) | ||
48 | target/arm: Implement BFMOPA, BFMOPS | ||
49 | target/arm: Implement FMOPA, FMOPS (widening) | ||
50 | target/arm: Implement SME integer outer product | ||
51 | target/arm: Implement PSEL | ||
52 | target/arm: Implement REVD | ||
53 | target/arm: Implement SCLAMP, UCLAMP | ||
54 | target/arm: Reset streaming sve state on exception boundaries | ||
55 | target/arm: Enable SME for -cpu max | ||
56 | linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS | ||
57 | linux-user/aarch64: Reset PSTATE.SM on syscalls | ||
58 | linux-user/aarch64: Add SM bit to SVE signal context | ||
59 | linux-user/aarch64: Tidy target_restore_sigframe error return | ||
60 | linux-user/aarch64: Do not allow duplicate or short sve records | ||
61 | linux-user/aarch64: Verify extra record lock succeeded | ||
62 | linux-user/aarch64: Move sve record checks into restore | ||
63 | linux-user/aarch64: Implement SME signal handling | ||
64 | linux-user: Rename sve prctls | ||
65 | linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL | ||
66 | target/arm: Only set ZEN in reset if SVE present | ||
67 | target/arm: Enable SME for user-only | ||
68 | linux-user/aarch64: Add SME related hwcap entries | ||
30 | 69 | ||
31 | Kunkun Jiang (1): | 70 | docs/system/arm/emulation.rst | 4 + |
32 | hw/arm/smmuv3: Support 16K translation granule | 71 | linux-user/aarch64/target_cpu.h | 5 +- |
33 | 72 | linux-user/aarch64/target_prctl.h | 62 +- | |
34 | Peter Maydell (2): | 73 | target/arm/cpu.h | 7 + |
35 | target/arm: Make Thumb store insns UNDEF for Rn==1111 | 74 | target/arm/helper-sme.h | 126 ++++ |
36 | hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows | 75 | target/arm/helper-sve.h | 4 + |
37 | 76 | target/arm/helper.h | 18 + | |
38 | Richard Henderson (39): | 77 | target/arm/translate-a64.h | 45 ++ |
39 | target/arm: Fix mte_checkN | 78 | target/arm/translate.h | 16 + |
40 | target/arm: Split out mte_probe_int | 79 | target/arm/sme-fa64.decode | 60 ++ |
41 | target/arm: Fix unaligned checks for mte_check1, mte_probe1 | 80 | target/arm/sme.decode | 88 +++ |
42 | test/tcg/aarch64: Add mte-5 | 81 | target/arm/sve.decode | 41 +- |
43 | target/arm: Replace MTEDESC ESIZE+TSIZE with SIZEM1 | 82 | linux-user/aarch64/cpu_loop.c | 9 + |
44 | target/arm: Merge mte_check1, mte_checkN | 83 | linux-user/aarch64/signal.c | 243 ++++++-- |
45 | target/arm: Rename mte_probe1 to mte_probe | 84 | linux-user/elfload.c | 20 + |
46 | target/arm: Simplify sve mte checking | 85 | linux-user/syscall.c | 28 +- |
47 | target/arm: Remove log2_esize parameter to gen_mte_checkN | 86 | target/arm/cpu.c | 35 +- |
48 | target/arm: Fix decode of align in VLDST_single | 87 | target/arm/cpu64.c | 11 + |
49 | target/arm: Rename TBFLAG_A32, SCTLR_B | 88 | target/arm/helper.c | 56 +- |
50 | target/arm: Rename TBFLAG_ANY, PSTATE_SS | 89 | target/arm/sme_helper.c | 1140 +++++++++++++++++++++++++++++++++++++ |
51 | target/arm: Add wrapper macros for accessing tbflags | 90 | target/arm/sve_helper.c | 28 + |
52 | target/arm: Introduce CPUARMTBFlags | 91 | target/arm/translate-a64.c | 103 +++- |
53 | target/arm: Move mode specific TB flags to tb->cs_base | 92 | target/arm/translate-sme.c | 373 ++++++++++++ |
54 | target/arm: Move TBFLAG_AM32 bits to the top | 93 | target/arm/translate-sve.c | 393 ++++++++++--- |
55 | target/arm: Move TBFLAG_ANY bits to the bottom | 94 | target/arm/translate-vfp.c | 12 + |
56 | target/arm: Add ALIGN_MEM to TBFLAG_ANY | 95 | target/arm/translate.c | 2 + |
57 | target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness | 96 | target/arm/vec_helper.c | 24 + |
58 | target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64 | 97 | target/arm/meson.build | 3 + |
59 | target/arm: Fix SCTLR_B test for TCGv_i64 load/store | 98 | 28 files changed, 2821 insertions(+), 135 deletions(-) |
60 | target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness | 99 | create mode 100644 target/arm/sme-fa64.decode |
61 | target/arm: Enforce word alignment for LDRD/STRD | 100 | create mode 100644 target/arm/sme.decode |
62 | target/arm: Enforce alignment for LDA/LDAH/STL/STLH | 101 | create mode 100644 target/arm/translate-sme.c |
63 | target/arm: Enforce alignment for LDM/STM | ||
64 | target/arm: Enforce alignment for RFE | ||
65 | target/arm: Enforce alignment for SRS | ||
66 | target/arm: Enforce alignment for VLDM/VSTM | ||
67 | target/arm: Enforce alignment for VLDR/VSTR | ||
68 | target/arm: Enforce alignment for VLDn (all lanes) | ||
69 | target/arm: Enforce alignment for VLDn/VSTn (multiple) | ||
70 | target/arm: Enforce alignment for VLDn/VSTn (single) | ||
71 | target/arm: Use finalize_memop for aa64 gpr load/store | ||
72 | target/arm: Use finalize_memop for aa64 fpr load/store | ||
73 | target/arm: Enforce alignment for aa64 load-acq/store-rel | ||
74 | target/arm: Use MemOp for size + endian in aa64 vector ld/st | ||
75 | target/arm: Enforce alignment for aa64 vector LDn/STn (multiple) | ||
76 | target/arm: Enforce alignment for aa64 vector LDn/STn (single) | ||
77 | target/arm: Enforce alignment for sve LD1R | ||
78 | |||
79 | include/hw/boards.h | 3 + | ||
80 | include/hw/i386/pc.h | 3 + | ||
81 | include/hw/pci-host/gpex.h | 4 + | ||
82 | target/arm/cpu.h | 105 ++++++++++----- | ||
83 | target/arm/helper-a64.h | 3 +- | ||
84 | target/arm/internals.h | 11 +- | ||
85 | target/arm/translate-a64.h | 2 +- | ||
86 | target/arm/translate.h | 38 ++++++ | ||
87 | target/arm/neon-ls.decode | 4 +- | ||
88 | hw/arm/smmuv3.c | 6 +- | ||
89 | hw/arm/virt.c | 7 +- | ||
90 | hw/core/machine.c | 5 + | ||
91 | hw/i386/pc.c | 3 + | ||
92 | hw/i386/pc_piix.c | 14 +- | ||
93 | hw/i386/pc_q35.c | 13 +- | ||
94 | hw/pci-host/gpex.c | 56 +++++++- | ||
95 | hw/ppc/spapr.c | 17 ++- | ||
96 | hw/s390x/s390-virtio-ccw.c | 14 +- | ||
97 | target/arm/helper-a64.c | 2 +- | ||
98 | target/arm/helper.c | 162 ++++++++++++---------- | ||
99 | target/arm/mte_helper.c | 185 ++++++++++--------------- | ||
100 | target/arm/sve_helper.c | 100 +++++--------- | ||
101 | target/arm/translate-a64.c | 236 ++++++++++++++++---------------- | ||
102 | target/arm/translate-sve.c | 11 +- | ||
103 | target/arm/translate.c | 274 ++++++++++++++++++++++---------------- | ||
104 | tests/tcg/aarch64/mte-5.c | 44 ++++++ | ||
105 | target/arm/translate-neon.c.inc | 117 ++++++++++++---- | ||
106 | target/arm/translate-vfp.c.inc | 20 +-- | ||
107 | tests/tcg/aarch64/Makefile.target | 2 +- | ||
108 | 29 files changed, 878 insertions(+), 583 deletions(-) | ||
109 | create mode 100644 tests/tcg/aarch64/mte-5.c | ||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
2 | |||
3 | Dump SVCR, plus use the correct access check for Streaming Mode. | ||
2 | 4 | ||
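
As a concrete illustration of the new dump line below: the two FIELD_EX64() calls just pull the single-bit SM and ZA fields out of SVCR. A stand-alone sketch, assuming the architected SVCR layout (SM at bit 0, ZA at bit 1; the field definitions themselves are not part of this hunk):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout, i.e. FIELD(SVCR, SM, 0, 1) and FIELD(SVCR, ZA, 1, 1). */
#define SVCR_SM  (1u << 0)
#define SVCR_ZA  (1u << 1)

int main(void)
{
    uint64_t svcr = SVCR_SM | SVCR_ZA;   /* Streaming SVE mode on, ZA enabled */

    /* Mirrors the qemu_fprintf() format string used in the patch. */
    printf(" SVCR=%08" PRIx64 " %c%c\n", svcr,
           (svcr & SVCR_ZA) ? 'Z' : '-',
           (svcr & SVCR_SM) ? 'S' : '-');
    return 0;
}
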
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-31-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-2-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 9 | --- |
8 | target/arm/translate-a64.c | 9 +++++---- | 10 | target/arm/cpu.c | 17 ++++++++++++++++- |
9 | 1 file changed, 5 insertions(+), 4 deletions(-) | 11 | 1 file changed, 16 insertions(+), 1 deletion(-) |
10 | 12 | ||
11 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
12 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-a64.c | 15 | --- a/target/arm/cpu.c |
14 | +++ b/target/arm/translate-a64.c | 16 | +++ b/target/arm/cpu.c |
15 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 17 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
16 | int index = is_q << 3 | S << 2 | size; | 18 | int i; |
17 | int xs, total; | 19 | int el = arm_current_el(env); |
18 | TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; | 20 | const char *ns_status; |
19 | + MemOp mop; | 21 | + bool sve; |
20 | 22 | ||
21 | if (extract32(insn, 31, 1)) { | 23 | qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); |
22 | unallocated_encoding(s); | 24 | for (i = 0; i < 32; i++) { |
23 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 25 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
24 | 26 | el, | |
25 | clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31, | 27 | psr & PSTATE_SP ? 'h' : 't'); |
26 | total); | 28 | |
27 | + mop = finalize_memop(s, scale); | 29 | + if (cpu_isar_feature(aa64_sme, cpu)) { |
28 | 30 | + qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c", | |
29 | tcg_ebytes = tcg_const_i64(1 << scale); | 31 | + env->svcr, |
30 | for (xs = 0; xs < selem; xs++) { | 32 | + (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'), |
31 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 33 | + (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-')); |
32 | /* Load and replicate to all elements */ | 34 | + } |
33 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | 35 | if (cpu_isar_feature(aa64_bti, cpu)) { |
34 | 36 | qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); | |
35 | - tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, | 37 | } |
36 | - get_mem_index(s), s->be_data + scale); | 38 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
37 | + tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop); | 39 | qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n", |
38 | tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt), | 40 | vfp_get_fpcr(env), vfp_get_fpsr(env)); |
39 | (is_q + 1) * 8, vec_full_reg_size(s), | 41 | |
40 | tcg_tmp); | 42 | - if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { |
41 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 43 | + if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) { |
42 | } else { | 44 | + sve = sme_exception_el(env, el) == 0; |
43 | /* Load/store one element per register */ | 45 | + } else if (cpu_isar_feature(aa64_sve, cpu)) { |
44 | if (is_load) { | 46 | + sve = sve_exception_el(env, el) == 0; |
45 | - do_vec_ld(s, rt, index, clean_addr, scale | s->be_data); | 47 | + } else { |
46 | + do_vec_ld(s, rt, index, clean_addr, mop); | 48 | + sve = false; |
47 | } else { | 49 | + } |
48 | - do_vec_st(s, rt, index, clean_addr, scale | s->be_data); | 50 | + |
49 | + do_vec_st(s, rt, index, clean_addr, mop); | 51 | + if (sve) { |
50 | } | 52 | int j, zcr_len = sve_vqm1_for_el(env, el); |
51 | } | 53 | |
52 | tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); | 54 | for (i = 0; i <= FFR_PRED_NUM; i++) { |
53 | -- | 55 | -- |
54 | 2.20.1 | 56 | 2.25.1 |
55 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
2 | |||
3 | This includes the build rules for the decoder, and the | ||
4 | new file for translation, but excludes any instructions. | ||
2 | 5 | ||
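
For readers who have not worked with decodetree before: each pattern line that later patches add to sme.decode makes the generated disas_sme() call a trans_<NAME>() function which translate-sme.c must provide. Roughly what that pairing looks like for SME ZERO, as a later patch in this series implements it (encoding line and helper names written from memory, so treat the details as illustrative rather than authoritative):

/* Pattern added to sme.decode by a later patch, shown here as a comment:
 *   ZERO    11000000 00 001 00000000000 imm:8
 * The generated decoder extracts imm and calls the matching trans function
 * in translate-sme.c:
 */
static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;                /* not decoded; the caller raises UNDEF */
    }
    if (sme_za_enabled_check(s)) {   /* emits the SME trap if ZA is disabled */
        gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
                            tcg_constant_i32(streaming_vec_reg_size(s)));
    }
    return true;
}
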
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-30-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-3-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-a64.c | 15 +++++++++++---- | 11 | target/arm/translate-a64.h | 1 + |
9 | 1 file changed, 11 insertions(+), 4 deletions(-) | 12 | target/arm/sme.decode | 20 ++++++++++++++++++++ |
13 | target/arm/translate-a64.c | 7 ++++++- | ||
14 | target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++ | ||
15 | target/arm/meson.build | 2 ++ | ||
16 | 5 files changed, 64 insertions(+), 1 deletion(-) | ||
17 | create mode 100644 target/arm/sme.decode | ||
18 | create mode 100644 target/arm/translate-sme.c | ||
10 | 19 | ||
20 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/target/arm/translate-a64.h | ||
23 | +++ b/target/arm/translate-a64.h | ||
24 | @@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s) | ||
25 | } | ||
26 | |||
27 | bool disas_sve(DisasContext *, uint32_t); | ||
28 | +bool disas_sme(DisasContext *, uint32_t); | ||
29 | |||
30 | void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, | ||
31 | uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); | ||
32 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
33 | new file mode 100644 | ||
34 | index XXXXXXX..XXXXXXX | ||
35 | --- /dev/null | ||
36 | +++ b/target/arm/sme.decode | ||
37 | @@ -XXX,XX +XXX,XX @@ | ||
38 | +# AArch64 SME instruction descriptions | ||
39 | +# | ||
40 | +# Copyright (c) 2022 Linaro, Ltd | ||
41 | +# | ||
42 | +# This library is free software; you can redistribute it and/or | ||
43 | +# modify it under the terms of the GNU Lesser General Public | ||
44 | +# License as published by the Free Software Foundation; either | ||
45 | +# version 2.1 of the License, or (at your option) any later version. | ||
46 | +# | ||
47 | +# This library is distributed in the hope that it will be useful, | ||
48 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
49 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
50 | +# Lesser General Public License for more details. | ||
51 | +# | ||
52 | +# You should have received a copy of the GNU Lesser General Public | ||
53 | +# License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
54 | + | ||
55 | +# | ||
56 | +# This file is processed by scripts/decodetree.py | ||
57 | +# | ||
11 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 58 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
12 | index XXXXXXX..XXXXXXX 100644 | 59 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-a64.c | 60 | --- a/target/arm/translate-a64.c |
14 | +++ b/target/arm/translate-a64.c | 61 | +++ b/target/arm/translate-a64.c |
15 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 62 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) |
16 | bool is_postidx = extract32(insn, 23, 1); | ||
17 | bool is_q = extract32(insn, 30, 1); | ||
18 | TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; | ||
19 | - MemOp endian = s->be_data; | ||
20 | + MemOp endian, align, mop; | ||
21 | |||
22 | int total; /* total bytes */ | ||
23 | int elements; /* elements per vector */ | ||
24 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
25 | } | 63 | } |
26 | 64 | ||
27 | /* For our purposes, bytes are always little-endian. */ | 65 | switch (extract32(insn, 25, 4)) { |
28 | + endian = s->be_data; | 66 | - case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ |
29 | if (size == 0) { | 67 | + case 0x0: |
30 | endian = MO_LE; | 68 | + if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { |
31 | } | 69 | + unallocated_encoding(s); |
32 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 70 | + } |
33 | * Consecutive little-endian elements from a single register | 71 | + break; |
34 | * can be promoted to a larger little-endian operation. | 72 | + case 0x1: case 0x3: /* UNALLOCATED */ |
35 | */ | 73 | unallocated_encoding(s); |
36 | + align = MO_ALIGN; | 74 | break; |
37 | if (selem == 1 && endian == MO_LE) { | 75 | case 0x2: |
38 | + align = pow2_align(size); | 76 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c |
39 | size = 3; | 77 | new file mode 100644 |
40 | } | 78 | index XXXXXXX..XXXXXXX |
41 | - elements = (is_q ? 16 : 8) >> size; | 79 | --- /dev/null |
42 | + if (!s->align_mem) { | 80 | +++ b/target/arm/translate-sme.c |
43 | + align = 0; | 81 | @@ -XXX,XX +XXX,XX @@ |
44 | + } | 82 | +/* |
45 | + mop = endian | size | align; | 83 | + * AArch64 SME translation |
46 | 84 | + * | |
47 | + elements = (is_q ? 16 : 8) >> size; | 85 | + * Copyright (c) 2022 Linaro, Ltd |
48 | tcg_ebytes = tcg_const_i64(1 << size); | 86 | + * |
49 | for (r = 0; r < rpt; r++) { | 87 | + * This library is free software; you can redistribute it and/or |
50 | int e; | 88 | + * modify it under the terms of the GNU Lesser General Public |
51 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 89 | + * License as published by the Free Software Foundation; either |
52 | for (xs = 0; xs < selem; xs++) { | 90 | + * version 2.1 of the License, or (at your option) any later version. |
53 | int tt = (rt + r + xs) % 32; | 91 | + * |
54 | if (is_store) { | 92 | + * This library is distributed in the hope that it will be useful, |
55 | - do_vec_st(s, tt, e, clean_addr, size | endian); | 93 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
56 | + do_vec_st(s, tt, e, clean_addr, mop); | 94 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
57 | } else { | 95 | + * Lesser General Public License for more details. |
58 | - do_vec_ld(s, tt, e, clean_addr, size | endian); | 96 | + * |
59 | + do_vec_ld(s, tt, e, clean_addr, mop); | 97 | + * You should have received a copy of the GNU Lesser General Public |
60 | } | 98 | + * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
61 | tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); | 99 | + */ |
62 | } | 100 | + |
101 | +#include "qemu/osdep.h" | ||
102 | +#include "cpu.h" | ||
103 | +#include "tcg/tcg-op.h" | ||
104 | +#include "tcg/tcg-op-gvec.h" | ||
105 | +#include "tcg/tcg-gvec-desc.h" | ||
106 | +#include "translate.h" | ||
107 | +#include "exec/helper-gen.h" | ||
108 | +#include "translate-a64.h" | ||
109 | +#include "fpu/softfloat.h" | ||
110 | + | ||
111 | + | ||
112 | +/* | ||
113 | + * Include the generated decoder. | ||
114 | + */ | ||
115 | + | ||
116 | +#include "decode-sme.c.inc" | ||
117 | diff --git a/target/arm/meson.build b/target/arm/meson.build | ||
118 | index XXXXXXX..XXXXXXX 100644 | ||
119 | --- a/target/arm/meson.build | ||
120 | +++ b/target/arm/meson.build | ||
121 | @@ -XXX,XX +XXX,XX @@ | ||
122 | gen = [ | ||
123 | decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), | ||
124 | + decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), | ||
125 | decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), | ||
126 | decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), | ||
127 | decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), | ||
128 | @@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files( | ||
129 | 'sme_helper.c', | ||
130 | 'translate-a64.c', | ||
131 | 'translate-sve.c', | ||
132 | + 'translate-sme.c', | ||
133 | )) | ||
134 | |||
135 | arm_softmmu_ss = ss.source_set() | ||
63 | -- | 136 | -- |
64 | 2.20.1 | 137 | 2.25.1 |
65 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
2 | 2 | ||
3 | Use this to signal when memory access alignment is required. | 3 | This new behaviour is in the ARM pseudocode function |
4 | This value comes from the CCR register for M-profile, and | 4 | AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32 |
5 | from the SCTLR register for A-profile. | 5 | via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which |
6 | the trap would be delivered is in AArch64 mode. | ||
7 | |||
8 | Given that ARMv9 drops support for AArch32 outside EL0, the trap EL | ||
9 | detection ought to be trivially true, but the pseudocode still contains | ||
10 | a number of conditions, and QEMU has not yet committed to dropping A32 | ||
11 | support for EL[12] when v9 features are present. | ||
12 | |||
13 | Since the computation of SME_TRAP_NONSTREAMING is necessarily different | ||
14 | for the two modes, we might as well preserve bits within TBFLAG_ANY and | ||
15 | allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead. | ||
16 | |||
17 | Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table | ||
18 | of instructions illegal in streaming mode. | ||
6 | 19 | ||
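
The OK/FAIL lines in the new sme-fa64.decode below are ordinary decodetree patterns: the fixed bits of each line become a mask/value pair and every '-' bit is don't-care, so the generated disas_sme_fa64() is essentially a chain of masked compares that end in trans_OK() or trans_FAIL(). A stand-alone sketch of how a single FAIL line matches, with the mask and value worked out by hand and arbitrary example words, purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * "FAIL 0-00 110- ---- ---- ---- ---- ---- ----" (Advanced SIMD structure
 * load/store): the fixed bits are 31 and 29..25, giving mask 0xbe000000
 * and value 0x0c000000; all '-' bits are ignored.
 */
static bool fa64_fail_asimd_ldst_struct(uint32_t insn)
{
    return (insn & 0xbe000000u) == 0x0c000000u;
}

int main(void)
{
    uint32_t hits   = 0x0c40731fu;   /* arbitrary word with the fixed bits set */
    uint32_t misses = 0x1e20403fu;   /* scalar FP space, left "OK" by the table */

    printf("%d %d\n", fa64_fail_asimd_ldst_struct(hits),
           fa64_fail_asimd_ldst_struct(misses));   /* prints "1 0" */
    return 0;
}
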
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 20 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 21 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210419202257.161730-11-richard.henderson@linaro.org | 22 | Message-id: 20220708151540.18136-4-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 23 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 24 | --- |
12 | target/arm/cpu.h | 2 ++ | 25 | target/arm/cpu.h | 7 +++ |
13 | target/arm/translate.h | 2 ++ | 26 | target/arm/translate.h | 4 ++ |
14 | target/arm/helper.c | 19 +++++++++++++++++-- | 27 | target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++ |
15 | target/arm/translate-a64.c | 1 + | 28 | target/arm/helper.c | 41 +++++++++++++++++ |
16 | target/arm/translate.c | 7 +++---- | 29 | target/arm/translate-a64.c | 40 ++++++++++++++++- |
17 | 5 files changed, 25 insertions(+), 6 deletions(-) | 30 | target/arm/translate-vfp.c | 12 +++++ |
31 | target/arm/translate.c | 2 + | ||
32 | target/arm/meson.build | 1 + | ||
33 | 8 files changed, 195 insertions(+), 2 deletions(-) | ||
34 | create mode 100644 target/arm/sme-fa64.decode | ||
18 | 35 | ||
19 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 36 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
20 | index XXXXXXX..XXXXXXX 100644 | 37 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/cpu.h | 38 | --- a/target/arm/cpu.h |
22 | +++ b/target/arm/cpu.h | 39 | +++ b/target/arm/cpu.h |
23 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, MMUIDX, 4, 4) | 40 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) |
24 | FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2) | 41 | * the same thing as the current security state of the processor! |
25 | /* For A-profile only, target EL for debug exceptions. */ | 42 | */ |
26 | FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2) | 43 | FIELD(TBFLAG_A32, NS, 10, 1) |
27 | +/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */ | 44 | +/* |
28 | +FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1) | 45 | + * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. |
46 | + * This requires an SME trap from AArch32 mode when using NEON. | ||
47 | + */ | ||
48 | +FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1) | ||
29 | 49 | ||
30 | /* | 50 | /* |
31 | * Bit usage when in AArch32 state, both A- and M-profile. | 51 | * Bit usage when in AArch32 state, for M-profile only. |
52 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2) | ||
53 | FIELD(TBFLAG_A64, PSTATE_SM, 22, 1) | ||
54 | FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) | ||
55 | FIELD(TBFLAG_A64, SVL, 24, 4) | ||
56 | +/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ | ||
57 | +FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) | ||
58 | |||
59 | /* | ||
60 | * Helpers for using the above. | ||
32 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 61 | diff --git a/target/arm/translate.h b/target/arm/translate.h |
33 | index XXXXXXX..XXXXXXX 100644 | 62 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/target/arm/translate.h | 63 | --- a/target/arm/translate.h |
35 | +++ b/target/arm/translate.h | 64 | +++ b/target/arm/translate.h |
36 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | 65 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { |
37 | bool bt; | 66 | bool pstate_sm; |
38 | /* True if any CP15 access is trapped by HSTR_EL2 */ | 67 | /* True if PSTATE.ZA is set. */ |
39 | bool hstr_active; | 68 | bool pstate_za; |
40 | + /* True if memory operations require alignment */ | 69 | + /* True if non-streaming insns should raise an SME Streaming exception. */ |
41 | + bool align_mem; | 70 | + bool sme_trap_nonstreaming; |
71 | + /* True if the current instruction is non-streaming. */ | ||
72 | + bool is_nonstreaming; | ||
73 | /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */ | ||
74 | bool mve_no_pred; | ||
42 | /* | 75 | /* |
43 | * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. | 76 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
44 | * < 0, set by the current instruction. | 77 | new file mode 100644 |
78 | index XXXXXXX..XXXXXXX | ||
79 | --- /dev/null | ||
80 | +++ b/target/arm/sme-fa64.decode | ||
81 | @@ -XXX,XX +XXX,XX @@ | ||
82 | +# AArch64 SME allowed instruction decoding | ||
83 | +# | ||
84 | +# Copyright (c) 2022 Linaro, Ltd | ||
85 | +# | ||
86 | +# This library is free software; you can redistribute it and/or | ||
87 | +# modify it under the terms of the GNU Lesser General Public | ||
88 | +# License as published by the Free Software Foundation; either | ||
89 | +# version 2.1 of the License, or (at your option) any later version. | ||
90 | +# | ||
91 | +# This library is distributed in the hope that it will be useful, | ||
92 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
93 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
94 | +# Lesser General Public License for more details. | ||
95 | +# | ||
96 | +# You should have received a copy of the GNU Lesser General Public | ||
97 | +# License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
98 | + | ||
99 | +# | ||
100 | +# This file is processed by scripts/decodetree.py | ||
101 | +# | ||
102 | + | ||
103 | +# These patterns are taken from Appendix E1.1 of DDI0616 A.a, | ||
104 | +# Arm Architecture Reference Manual Supplement, | ||
105 | +# The Scalable Matrix Extension (SME), for Armv9-A | ||
106 | + | ||
107 | +{ | ||
108 | + [ | ||
109 | + OK 0-00 1110 0000 0001 0010 11-- ---- ---- # SMOV W|Xd,Vn.B[0] | ||
110 | + OK 0-00 1110 0000 0010 0010 11-- ---- ---- # SMOV W|Xd,Vn.H[0] | ||
111 | + OK 0100 1110 0000 0100 0010 11-- ---- ---- # SMOV Xd,Vn.S[0] | ||
112 | + OK 0000 1110 0000 0001 0011 11-- ---- ---- # UMOV Wd,Vn.B[0] | ||
113 | + OK 0000 1110 0000 0010 0011 11-- ---- ---- # UMOV Wd,Vn.H[0] | ||
114 | + OK 0000 1110 0000 0100 0011 11-- ---- ---- # UMOV Wd,Vn.S[0] | ||
115 | + OK 0100 1110 0000 1000 0011 11-- ---- ---- # UMOV Xd,Vn.D[0] | ||
116 | + ] | ||
117 | + FAIL 0--0 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD vector operations | ||
118 | +} | ||
119 | + | ||
120 | +{ | ||
121 | + [ | ||
122 | + OK 0101 1110 --1- ---- 11-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar) | ||
123 | + OK 0101 1110 -10- ---- 00-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar, FP16) | ||
124 | + OK 01-1 1110 1-10 0001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar) | ||
125 | + OK 01-1 1110 1111 1001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar, FP16) | ||
126 | + ] | ||
127 | + FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations | ||
128 | +} | ||
129 | + | ||
130 | +FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store | ||
131 | +FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions | ||
132 | +FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
133 | + | ||
134 | +# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions | ||
135 | +# We don't actually need to include these, as the default is OK. | ||
136 | +# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations | ||
137 | +# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers | ||
138 | +# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal) | ||
139 | +# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) | ||
140 | +# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
141 | +# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
142 | + | ||
143 | +FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR | ||
144 | +FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA | ||
145 | +FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT | ||
146 | +FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS | ||
147 | +FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR | ||
148 | +FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP | ||
149 | +FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) | ||
150 | +FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA | ||
151 | +FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL | ||
152 | +FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD | ||
153 | +FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA | ||
154 | +FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA | ||
155 | +FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions | ||
156 | +FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) | ||
157 | +FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) | ||
158 | +FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) | ||
159 | +FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm) | ||
160 | +FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector) | ||
161 | +FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector) | ||
162 | +FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector) | ||
163 | +FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) | ||
164 | +FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) | ||
165 | +FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) | ||
166 | +FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) | ||
167 | +FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch | ||
168 | +FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar) | ||
169 | +FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar) | ||
170 | +FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector) | ||
171 | +FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc) | ||
45 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 172 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
46 | index XXXXXXX..XXXXXXX 100644 | 173 | index XXXXXXX..XXXXXXX 100644 |
47 | --- a/target/arm/helper.c | 174 | --- a/target/arm/helper.c |
48 | +++ b/target/arm/helper.c | 175 | +++ b/target/arm/helper.c |
49 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el, | 176 | @@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el) |
50 | ARMMMUIdx mmu_idx) | 177 | return 0; |
51 | { | 178 | } |
52 | CPUARMTBFlags flags = {}; | 179 | |
53 | + uint32_t ccr = env->v7m.ccr[env->v7m.secure]; | 180 | +/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */ |
54 | + | 181 | +static bool sme_fa64(CPUARMState *env, int el) |
55 | + /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */ | 182 | +{ |
56 | + if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) { | 183 | + if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) { |
57 | + DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); | 184 | + return false; |
58 | + } | 185 | + } |
59 | 186 | + | |
60 | if (arm_v7m_is_handler_mode(env)) { | 187 | + if (el <= 1 && !el_is_in_host(env, el)) { |
61 | DP_TBFLAG_M32(flags, HANDLER, 1); | 188 | + if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) { |
62 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el, | 189 | + return false; |
63 | */ | 190 | + } |
64 | if (arm_feature(env, ARM_FEATURE_V8) && | 191 | + } |
65 | !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && | 192 | + if (el <= 2 && arm_is_el2_enabled(env)) { |
66 | - (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { | 193 | + if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) { |
67 | + (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) { | 194 | + return false; |
68 | DP_TBFLAG_M32(flags, STACKCHECK, 1); | 195 | + } |
69 | } | 196 | + } |
70 | 197 | + if (arm_feature(env, ARM_FEATURE_EL3)) { | |
198 | + if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) { | ||
199 | + return false; | ||
200 | + } | ||
201 | + } | ||
202 | + | ||
203 | + return true; | ||
204 | +} | ||
205 | + | ||
206 | /* | ||
207 | * Given that SVE is enabled, return the vector length for EL. | ||
208 | */ | ||
71 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, | 209 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, |
72 | ARMMMUIdx mmu_idx) | 210 | DP_TBFLAG_ANY(flags, PSTATE__IL, 1); |
73 | { | 211 | } |
74 | CPUARMTBFlags flags = rebuild_hflags_aprofile(env); | 212 | |
75 | + int el = arm_current_el(env); | 213 | + /* |
76 | + | 214 | + * The SME exception we are testing for is raised via |
77 | + if (arm_sctlr(env, el) & SCTLR_A) { | 215 | + * AArch64.CheckFPAdvSIMDEnabled(), as called from |
78 | + DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); | 216 | + * AArch32.CheckAdvSIMDOrFPEnabled(). |
79 | + } | 217 | + */ |
80 | 218 | + if (el == 0 | |
81 | if (arm_el_is_aa64(env, 1)) { | 219 | + && FIELD_EX64(env->svcr, SVCR, SM) |
82 | DP_TBFLAG_A32(flags, VFPEN, 1); | 220 | + && (!arm_is_el2_enabled(env) |
83 | } | 221 | + || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE))) |
84 | 222 | + && arm_el_is_aa64(env, 1) | |
85 | - if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && | 223 | + && !sme_fa64(env, el)) { |
86 | + if (el < 2 && env->cp15.hstr_el2 && | 224 | + DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1); |
87 | (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { | 225 | + } |
88 | DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1); | 226 | + |
89 | } | 227 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); |
228 | } | ||
229 | |||
90 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | 230 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, |
91 | 231 | } | |
92 | sctlr = regime_sctlr(env, stage1); | 232 | if (FIELD_EX64(env->svcr, SVCR, SM)) { |
93 | 233 | DP_TBFLAG_A64(flags, PSTATE_SM, 1); | |
94 | + if (sctlr & SCTLR_A) { | 234 | + DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); |
95 | + DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); | 235 | } |
96 | + } | 236 | DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA)); |
97 | + | ||
98 | if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { | ||
99 | DP_TBFLAG_ANY(flags, BE_DATA, 1); | ||
100 | } | 237 | } |
101 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 238 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
102 | index XXXXXXX..XXXXXXX 100644 | 239 | index XXXXXXX..XXXXXXX 100644 |
103 | --- a/target/arm/translate-a64.c | 240 | --- a/target/arm/translate-a64.c |
104 | +++ b/target/arm/translate-a64.c | 241 | +++ b/target/arm/translate-a64.c |
242 | @@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element, | ||
243 | * unallocated-encoding checks (otherwise the syndrome information | ||
244 | * for the resulting exception will be incorrect). | ||
245 | */ | ||
246 | -static bool fp_access_check(DisasContext *s) | ||
247 | +static bool fp_access_check_only(DisasContext *s) | ||
248 | { | ||
249 | if (s->fp_excp_el) { | ||
250 | assert(!s->fp_access_checked); | ||
251 | @@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s) | ||
252 | return true; | ||
253 | } | ||
254 | |||
255 | +static bool fp_access_check(DisasContext *s) | ||
256 | +{ | ||
257 | + if (!fp_access_check_only(s)) { | ||
258 | + return false; | ||
259 | + } | ||
260 | + if (s->sme_trap_nonstreaming && s->is_nonstreaming) { | ||
261 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
262 | + syn_smetrap(SME_ET_Streaming, false)); | ||
263 | + return false; | ||
264 | + } | ||
265 | + return true; | ||
266 | +} | ||
267 | + | ||
268 | /* Check that SVE access is enabled. If it is, return true. | ||
269 | * If not, emit code to generate an appropriate exception and return false. | ||
270 | */ | ||
271 | @@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, | ||
272 | default: | ||
273 | g_assert_not_reached(); | ||
274 | } | ||
275 | - if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { | ||
276 | + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { | ||
277 | return; | ||
278 | } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { | ||
279 | return; | ||
280 | @@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) | ||
281 | } | ||
282 | } | ||
283 | |||
284 | +/* | ||
285 | + * Include the generated SME FA64 decoder. | ||
286 | + */ | ||
287 | + | ||
288 | +#include "decode-sme-fa64.c.inc" | ||
289 | + | ||
290 | +static bool trans_OK(DisasContext *s, arg_OK *a) | ||
291 | +{ | ||
292 | + return true; | ||
293 | +} | ||
294 | + | ||
295 | +static bool trans_FAIL(DisasContext *s, arg_OK *a) | ||
296 | +{ | ||
297 | + s->is_nonstreaming = true; | ||
298 | + return true; | ||
299 | +} | ||
300 | + | ||
301 | /** | ||
302 | * is_guarded_page: | ||
303 | * @env: The cpu environment | ||
105 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | 304 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, |
106 | dc->user = (dc->current_el == 0); | 305 | dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); |
107 | #endif | 306 | dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); |
108 | dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); | 307 | dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); |
109 | + dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); | 308 | + dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); |
110 | dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); | 309 | dc->vec_len = 0; |
111 | dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16; | 310 | dc->vec_stride = 0; |
112 | dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); | 311 | dc->cp_regs = arm_cpu->cp_regs; |
312 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) | ||
313 | } | ||
314 | } | ||
315 | |||
316 | + s->is_nonstreaming = false; | ||
317 | + if (s->sme_trap_nonstreaming) { | ||
318 | + disas_sme_fa64(s, insn); | ||
319 | + } | ||
320 | + | ||
321 | switch (extract32(insn, 25, 4)) { | ||
322 | case 0x0: | ||
323 | if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { | ||
324 | diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c | ||
325 | index XXXXXXX..XXXXXXX 100644 | ||
326 | --- a/target/arm/translate-vfp.c | ||
327 | +++ b/target/arm/translate-vfp.c | ||
328 | @@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled) | ||
329 | return false; | ||
330 | } | ||
331 | |||
332 | + /* | ||
333 | + * Note that rebuild_hflags_a32 has already accounted for being in EL0 | ||
334 | + * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not | ||
335 | + * appear to be any insns which touch VFP which are allowed. | ||
336 | + */ | ||
337 | + if (s->sme_trap_nonstreaming) { | ||
338 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
339 | + syn_smetrap(SME_ET_Streaming, | ||
340 | + s->base.pc_next - s->pc_curr == 2)); | ||
341 | + return false; | ||
342 | + } | ||
343 | + | ||
344 | if (!s->vfp_enabled && !ignore_vfp_enabled) { | ||
345 | assert(!arm_dc_feature(s, ARM_FEATURE_M)); | ||
346 | unallocated_encoding(s); | ||
113 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 347 | diff --git a/target/arm/translate.c b/target/arm/translate.c |
114 | index XXXXXXX..XXXXXXX 100644 | 348 | index XXXXXXX..XXXXXXX 100644 |
115 | --- a/target/arm/translate.c | 349 | --- a/target/arm/translate.c |
116 | +++ b/target/arm/translate.c | 350 | +++ b/target/arm/translate.c |
117 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
118 | { | ||
119 | TCGv addr; | ||
120 | |||
121 | - if (arm_dc_feature(s, ARM_FEATURE_M) && | ||
122 | - !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { | ||
123 | + if (s->align_mem) { | ||
124 | opc |= MO_ALIGN; | ||
125 | } | ||
126 | |||
127 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
128 | { | ||
129 | TCGv addr; | ||
130 | |||
131 | - if (arm_dc_feature(s, ARM_FEATURE_M) && | ||
132 | - !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { | ||
133 | + if (s->align_mem) { | ||
134 | opc |= MO_ALIGN; | ||
135 | } | ||
136 | |||
137 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | 351 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) |
138 | dc->user = (dc->current_el == 0); | 352 | dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); |
139 | #endif | 353 | dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); |
140 | dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); | 354 | } |
141 | + dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); | 355 | + dc->sme_trap_nonstreaming = |
142 | 356 | + EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); | |
143 | if (arm_feature(env, ARM_FEATURE_M)) { | 357 | } |
144 | dc->vfp_enabled = 1; | 358 | dc->cp_regs = cpu->cp_regs; |
359 | dc->features = env->features; | ||
360 | diff --git a/target/arm/meson.build b/target/arm/meson.build | ||
361 | index XXXXXXX..XXXXXXX 100644 | ||
362 | --- a/target/arm/meson.build | ||
363 | +++ b/target/arm/meson.build | ||
364 | @@ -XXX,XX +XXX,XX @@ | ||
365 | gen = [ | ||
366 | decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), | ||
367 | decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), | ||
368 | + decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'), | ||
369 | decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), | ||
370 | decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), | ||
371 | decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), | ||
145 | -- | 372 | -- |
146 | 2.20.1 | 373 | 2.25.1 |
147 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
2 | 2 | ||
3 | Create a finalize_memop function that computes alignment and | 3 | Mark ADR as a non-streaming instruction, which should trap |
4 | endianness and returns the final MemOp for the operation. | 4 | if full a64 support is not enabled in streaming mode. |
5 | 5 | ||
6 | Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special | 6 | Removing entries from sme-fa64.decode is an easy way to see |
7 | handling of endianness or alignment. Adjust gen_aa32_{ld,st}_i32 | 7 | what remains to be done. |
8 | so that s->be_data is not added by the callers. | ||
9 | 8 | ||
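
To make the mechanics concrete: the TRANS_FEAT_NONSTREAMING macro added to translate.h below differs from TRANS_FEAT only in setting s->is_nonstreaming before the feature test, and that flag is what fp_access_check() from the previous patch (reached via sve_access_check() for SVE insns) turns into the Streaming SVE trap. The first converted line in translate-sve.c expands to roughly this (hand expansion for illustration, not code that appears in the patch):

/* TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
 * expands to: */
static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
{
    /* Flag the insn as non-streaming; consumed when the access check runs. */
    s->is_nonstreaming = true;
    return dc_isar_feature(aa64_sve, s) && do_adr(s, a, gen_helper_sve_adr_p32);
}

Keeping it as a macro is what lets the rest of the "Mark ... as non-streaming" patches in this series stay one-line conversions per pattern.
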
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | Message-id: 20210419202257.161730-12-richard.henderson@linaro.org | 11 | Message-id: 20220708151540.18136-5-richard.henderson@linaro.org |
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | --- | 13 | --- |
15 | target/arm/translate.h | 24 ++++++++ | 14 | target/arm/translate.h | 7 +++++++ |
16 | target/arm/translate.c | 100 +++++++++++++++++--------------- | 15 | target/arm/sme-fa64.decode | 1 - |
17 | target/arm/translate-neon.c.inc | 9 +-- | 16 | target/arm/translate-sve.c | 8 ++++---- |
18 | 3 files changed, 79 insertions(+), 54 deletions(-) | 17 | 3 files changed, 11 insertions(+), 5 deletions(-) |
19 | 18 | ||
20 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 19 | diff --git a/target/arm/translate.h b/target/arm/translate.h |
21 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/target/arm/translate.h | 21 | --- a/target/arm/translate.h |
23 | +++ b/target/arm/translate.h | 22 | +++ b/target/arm/translate.h |
24 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) | 23 | @@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); |
25 | return statusptr; | 24 | static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ |
26 | } | 25 | { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); } |
27 | 26 | ||
28 | +/** | 27 | +#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \ |
29 | + * finalize_memop: | 28 | + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ |
30 | + * @s: DisasContext | 29 | + { \ |
31 | + * @opc: size+sign+align of the memory operation | 30 | + s->is_nonstreaming = true; \ |
32 | + * | 31 | + return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \ |
33 | + * Build the complete MemOp for a memory operation, including alignment | ||
34 | + * and endianness. | ||
35 | + * | ||
36 | + * If (op & MO_AMASK) then the operation already contains the required | ||
37 | + * alignment, e.g. for AccType_ATOMIC. Otherwise, this an optionally | ||
38 | + * unaligned operation, e.g. for AccType_NORMAL. | ||
39 | + * | ||
40 | + * In the latter case, there are configuration bits that require alignment, | ||
41 | + * and this is applied here. Note that there is no way to indicate that | ||
42 | + * no alignment should ever be enforced; this must be handled manually. | ||
43 | + */ | ||
44 | +static inline MemOp finalize_memop(DisasContext *s, MemOp opc) | ||
45 | +{ | ||
46 | + if (s->align_mem && !(opc & MO_AMASK)) { | ||
47 | + opc |= MO_ALIGN; | ||
48 | + } | 32 | + } |
49 | + return opc | s->be_data; | ||
50 | +} | ||
51 | + | 33 | + |
52 | #endif /* TARGET_ARM_TRANSLATE_H */ | 34 | #endif /* TARGET_ARM_TRANSLATE_H */ |
53 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 35 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
54 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
55 | --- a/target/arm/translate.c | 37 | --- a/target/arm/sme-fa64.decode |
56 | +++ b/target/arm/translate.c | 38 | +++ b/target/arm/sme-fa64.decode |
57 | @@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) | 39 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
58 | #define IS_USER_ONLY 0 | 40 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
59 | #endif | 41 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
60 | 42 | ||
61 | -/* Abstractions of "generate code to do a guest load/store for | 43 | -FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR |
62 | +/* | 44 | FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
63 | + * Abstractions of "generate code to do a guest load/store for | 45 | FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT |
64 | * AArch32", where a vaddr is always 32 bits (and is zero | 46 | FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS |
65 | * extended if we're a 64 bit core) and data is also | 47 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
66 | * 32 bits unless specifically doing a 64 bit access. | 48 | index XXXXXXX..XXXXXXX 100644 |
67 | @@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) | 49 | --- a/target/arm/translate-sve.c |
68 | * that the address argument is TCGv_i32 rather than TCGv. | 50 | +++ b/target/arm/translate-sve.c |
69 | */ | 51 | @@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) |
70 | 52 | return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); | |
71 | -static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) | ||
72 | +static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) | ||
73 | { | ||
74 | TCGv addr = tcg_temp_new(); | ||
75 | tcg_gen_extu_i32_tl(addr, a32); | ||
76 | @@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) | ||
77 | return addr; | ||
78 | } | 53 | } |
79 | 54 | ||
80 | +/* | 55 | -TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) |
81 | + * Internal routines are used for NEON cases where the endianness | 56 | -TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) |
82 | + * and/or alignment has already been taken into account and manipulated. | 57 | -TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) |
83 | + */ | 58 | -TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) |
84 | +static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, | 59 | +TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) |
85 | + TCGv_i32 a32, int index, MemOp opc) | 60 | +TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) |
86 | +{ | 61 | +TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) |
87 | + TCGv addr = gen_aa32_addr(s, a32, opc); | 62 | +TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) |
88 | + tcg_gen_qemu_ld_i32(val, addr, index, opc); | 63 | |
89 | + tcg_temp_free(addr); | 64 | /* |
90 | +} | 65 | *** SVE Integer Misc - Unpredicated Group |
91 | + | ||
92 | +static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, | ||
93 | + TCGv_i32 a32, int index, MemOp opc) | ||
94 | +{ | ||
95 | + TCGv addr = gen_aa32_addr(s, a32, opc); | ||
96 | + tcg_gen_qemu_st_i32(val, addr, index, opc); | ||
97 | + tcg_temp_free(addr); | ||
98 | +} | ||
99 | + | ||
100 | static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
101 | int index, MemOp opc) | ||
102 | { | ||
103 | - TCGv addr; | ||
104 | - | ||
105 | - if (s->align_mem) { | ||
106 | - opc |= MO_ALIGN; | ||
107 | - } | ||
108 | - | ||
109 | - addr = gen_aa32_addr(s, a32, opc); | ||
110 | - tcg_gen_qemu_ld_i32(val, addr, index, opc); | ||
111 | - tcg_temp_free(addr); | ||
112 | + gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc)); | ||
113 | } | ||
114 | |||
115 | static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
116 | int index, MemOp opc) | ||
117 | { | ||
118 | - TCGv addr; | ||
119 | + gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc)); | ||
120 | +} | ||
121 | |||
122 | - if (s->align_mem) { | ||
123 | - opc |= MO_ALIGN; | ||
124 | +#define DO_GEN_LD(SUFF, OPC) \ | ||
125 | + static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ | ||
126 | + TCGv_i32 a32, int index) \ | ||
127 | + { \ | ||
128 | + gen_aa32_ld_i32(s, val, a32, index, OPC); \ | ||
129 | } | ||
130 | |||
131 | - addr = gen_aa32_addr(s, a32, opc); | ||
132 | - tcg_gen_qemu_st_i32(val, addr, index, opc); | ||
133 | - tcg_temp_free(addr); | ||
134 | -} | ||
135 | - | ||
136 | -#define DO_GEN_LD(SUFF, OPC) \ | ||
137 | -static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ | ||
138 | - TCGv_i32 a32, int index) \ | ||
139 | -{ \ | ||
140 | - gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \ | ||
141 | -} | ||
142 | - | ||
143 | -#define DO_GEN_ST(SUFF, OPC) \ | ||
144 | -static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ | ||
145 | - TCGv_i32 a32, int index) \ | ||
146 | -{ \ | ||
147 | - gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \ | ||
148 | -} | ||
149 | +#define DO_GEN_ST(SUFF, OPC) \ | ||
150 | + static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ | ||
151 | + TCGv_i32 a32, int index) \ | ||
152 | + { \ | ||
153 | + gen_aa32_st_i32(s, val, a32, index, OPC); \ | ||
154 | + } | ||
155 | |||
156 | static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) | ||
157 | { | ||
158 | @@ -XXX,XX +XXX,XX @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a, | ||
159 | addr = op_addr_rr_pre(s, a); | ||
160 | |||
161 | tmp = tcg_temp_new_i32(); | ||
162 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); | ||
163 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop); | ||
164 | disas_set_da_iss(s, mop, issinfo); | ||
165 | |||
166 | /* | ||
167 | @@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, | ||
168 | addr = op_addr_rr_pre(s, a); | ||
169 | |||
170 | tmp = load_reg(s, a->rt); | ||
171 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); | ||
172 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); | ||
173 | disas_set_da_iss(s, mop, issinfo); | ||
174 | tcg_temp_free_i32(tmp); | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) | ||
177 | addr = op_addr_rr_pre(s, a); | ||
178 | |||
179 | tmp = tcg_temp_new_i32(); | ||
180 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
181 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
182 | store_reg(s, a->rt, tmp); | ||
183 | |||
184 | tcg_gen_addi_i32(addr, addr, 4); | ||
185 | |||
186 | tmp = tcg_temp_new_i32(); | ||
187 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
188 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
189 | store_reg(s, a->rt + 1, tmp); | ||
190 | |||
191 | /* LDRD w/ base writeback is undefined if the registers overlap. */ | ||
192 | @@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) | ||
193 | addr = op_addr_rr_pre(s, a); | ||
194 | |||
195 | tmp = load_reg(s, a->rt); | ||
196 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
197 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
198 | tcg_temp_free_i32(tmp); | ||
199 | |||
200 | tcg_gen_addi_i32(addr, addr, 4); | ||
201 | |||
202 | tmp = load_reg(s, a->rt + 1); | ||
203 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
204 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
205 | tcg_temp_free_i32(tmp); | ||
206 | |||
207 | op_addr_rr_post(s, a, addr, -4); | ||
208 | @@ -XXX,XX +XXX,XX @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a, | ||
209 | addr = op_addr_ri_pre(s, a); | ||
210 | |||
211 | tmp = tcg_temp_new_i32(); | ||
212 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); | ||
213 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop); | ||
214 | disas_set_da_iss(s, mop, issinfo); | ||
215 | |||
216 | /* | ||
217 | @@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, | ||
218 | addr = op_addr_ri_pre(s, a); | ||
219 | |||
220 | tmp = load_reg(s, a->rt); | ||
221 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); | ||
222 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); | ||
223 | disas_set_da_iss(s, mop, issinfo); | ||
224 | tcg_temp_free_i32(tmp); | ||
225 | |||
226 | @@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) | ||
227 | addr = op_addr_ri_pre(s, a); | ||
228 | |||
229 | tmp = tcg_temp_new_i32(); | ||
230 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
231 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
232 | store_reg(s, a->rt, tmp); | ||
233 | |||
234 | tcg_gen_addi_i32(addr, addr, 4); | ||
235 | |||
236 | tmp = tcg_temp_new_i32(); | ||
237 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
238 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
239 | store_reg(s, rt2, tmp); | ||
240 | |||
241 | /* LDRD w/ base writeback is undefined if the registers overlap. */ | ||
242 | @@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) | ||
243 | addr = op_addr_ri_pre(s, a); | ||
244 | |||
245 | tmp = load_reg(s, a->rt); | ||
246 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
247 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
248 | tcg_temp_free_i32(tmp); | ||
249 | |||
250 | tcg_gen_addi_i32(addr, addr, 4); | ||
251 | |||
252 | tmp = load_reg(s, rt2); | ||
253 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); | ||
254 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
255 | tcg_temp_free_i32(tmp); | ||
256 | |||
257 | op_addr_ri_post(s, a, addr, -4); | ||
258 | @@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) | ||
259 | addr = load_reg(s, a->rn); | ||
260 | tmp = load_reg(s, a->rt); | ||
261 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); | ||
262 | - gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data); | ||
263 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop); | ||
264 | disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); | ||
265 | |||
266 | tcg_temp_free_i32(tmp); | ||
267 | @@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) | ||
268 | |||
269 | addr = load_reg(s, a->rn); | ||
270 | tmp = tcg_temp_new_i32(); | ||
271 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data); | ||
272 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop); | ||
273 | disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); | ||
274 | tcg_temp_free_i32(addr); | ||
275 | |||
276 | @@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) | ||
277 | addr = load_reg(s, a->rn); | ||
278 | tcg_gen_add_i32(addr, addr, tmp); | ||
279 | |||
280 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), | ||
281 | - half ? MO_UW | s->be_data : MO_UB); | ||
282 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB); | ||
283 | tcg_temp_free_i32(addr); | ||
284 | |||
285 | tcg_gen_add_i32(tmp, tmp, tmp); | ||
286 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | ||
287 | index XXXXXXX..XXXXXXX 100644 | ||
288 | --- a/target/arm/translate-neon.c.inc | ||
289 | +++ b/target/arm/translate-neon.c.inc | ||
290 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) | ||
291 | addr = tcg_temp_new_i32(); | ||
292 | load_reg_var(s, addr, a->rn); | ||
293 | for (reg = 0; reg < nregs; reg++) { | ||
294 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), | ||
295 | - s->be_data | size); | ||
296 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size); | ||
297 | if ((vd & 1) && vec_size == 16) { | ||
298 | /* | ||
299 | * We cannot write 16 bytes at once because the | ||
300 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) | ||
301 | */ | ||
302 | for (reg = 0; reg < nregs; reg++) { | ||
303 | if (a->l) { | ||
304 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), | ||
305 | - s->be_data | a->size); | ||
306 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size); | ||
307 | neon_store_element(vd, a->reg_idx, a->size, tmp); | ||
308 | } else { /* Store */ | ||
309 | neon_load_element(tmp, vd, a->reg_idx, a->size); | ||
310 | - gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), | ||
311 | - s->be_data | a->size); | ||
312 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size); | ||
313 | } | ||
314 | vd += a->stride; | ||
315 | tcg_gen_addi_i32(addr, addr, 1 << a->size); | ||
316 | -- | 66 | -- |
317 | 2.20.1 | 67 | 2.25.1 |
318 | |||
319 | diff view generated by jsdifflib |
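For reference, the gen_aa32_ld_i32()/gen_aa32_st_i32() rewrite above pushes the per-access endianness and alignment handling into finalize_memop(), whose definition is not part of this hunk. A minimal sketch consistent with the call-site changes (the exact upstream helper lives elsewhere in the series and may differ in detail) is:

    /*
     * Sketch only: finalize_memop() as implied by the call sites above.
     * It folds the CPU's strict-alignment setting and the data endianness
     * into the MemOp, so callers no longer OR in s->be_data themselves.
     */
    static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
    {
        if (s->align_mem && !(opc & MO_AMASK)) {
            opc |= MO_ALIGN;        /* SCTLR.A-style strict alignment */
        }
        return opc | s->be_data;    /* big- vs little-endian data access */
    }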
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-28-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-6-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-a64.c | 23 ++++++++++++++--------- | 11 | target/arm/sme-fa64.decode | 2 -- |
9 | 1 file changed, 14 insertions(+), 9 deletions(-) | 12 | target/arm/translate-sve.c | 9 ++++++--- |
13 | 2 files changed, 6 insertions(+), 5 deletions(-) | ||
10 | 14 | ||
11 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-a64.c | 17 | --- a/target/arm/sme-fa64.decode |
14 | +++ b/target/arm/translate-a64.c | 18 | +++ b/target/arm/sme-fa64.decode |
15 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
16 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); | 20 | |
17 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | 21 | FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
18 | true, rn != 31, size); | 22 | FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT |
19 | - do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, | 23 | -FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS |
20 | + /* TODO: ARMv8.4-LSE SCTLR.nAA */ | 24 | -FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR |
21 | + do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt, | 25 | FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP |
22 | disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | 26 | FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) |
23 | return; | 27 | FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA |
24 | 28 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | |
25 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | 29 | index XXXXXXX..XXXXXXX 100644 |
26 | } | 30 | --- a/target/arm/translate-sve.c |
27 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | 31 | +++ b/target/arm/translate-sve.c |
28 | false, rn != 31, size); | 32 | @@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) |
29 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt, | 33 | TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) |
30 | - disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | 34 | |
31 | + /* TODO: ARMv8.4-LSE SCTLR.nAA */ | 35 | /* Note pat == 31 is #all, to set all elements. */ |
32 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true, | 36 | -TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false) |
33 | + rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | 37 | +TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, |
34 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | 38 | + do_predset, 0, FFR_PRED_NUM, 31, false) |
35 | return; | 39 | |
36 | 40 | /* Note pat == 32 is #unimp, to set no elements. */ | |
37 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | 41 | TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) |
38 | int size = extract32(insn, 30, 2); | 42 | @@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) |
39 | TCGv_i64 clean_addr, dirty_addr; | 43 | .rd = a->rd, .pg = a->pg, .s = a->s, |
40 | bool is_store = false; | 44 | .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, |
41 | - bool is_signed = false; | 45 | }; |
42 | bool extend = false; | ||
43 | bool iss_sf; | ||
44 | + MemOp mop; | ||
45 | |||
46 | if (!dc_isar_feature(aa64_rcpc_8_4, s)) { | ||
47 | unallocated_encoding(s); | ||
48 | return; | ||
49 | } | ||
50 | |||
51 | + /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
52 | + mop = size | MO_ALIGN; | ||
53 | + | 46 | + |
54 | switch (opc) { | 47 | + s->is_nonstreaming = true; |
55 | case 0: /* STLURB */ | 48 | return trans_AND_pppp(s, &alt_a); |
56 | is_store = true; | 49 | } |
57 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | 50 | |
58 | unallocated_encoding(s); | 51 | -TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) |
59 | return; | 52 | -TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) |
60 | } | 53 | +TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) |
61 | - is_signed = true; | 54 | +TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) |
62 | + mop |= MO_SIGN; | 55 | |
63 | break; | 56 | static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, |
64 | case 3: /* LDAPURS* 32-bit variant */ | 57 | void (*gen_fn)(TCGv_i32, TCGv_ptr, |
65 | if (size > 1) { | ||
66 | unallocated_encoding(s); | ||
67 | return; | ||
68 | } | ||
69 | - is_signed = true; | ||
70 | + mop |= MO_SIGN; | ||
71 | extend = true; /* zero-extend 32->64 after signed load */ | ||
72 | break; | ||
73 | default: | ||
74 | g_assert_not_reached(); | ||
75 | } | ||
76 | |||
77 | - iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); | ||
78 | + iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc); | ||
79 | |||
80 | if (rn == 31) { | ||
81 | gen_check_sp_alignment(s); | ||
82 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | ||
83 | if (is_store) { | ||
84 | /* Store-Release semantics */ | ||
85 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); | ||
86 | - do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true); | ||
87 | + do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true); | ||
88 | } else { | ||
89 | /* | ||
90 | * Load-AcquirePC semantics; we implement as the slightly more | ||
91 | * restrictive Load-Acquire. | ||
92 | */ | ||
93 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN, | ||
94 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop, | ||
95 | extend, true, rt, iss_sf, true); | ||
96 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
97 | } | ||
98 | -- | 58 | -- |
99 | 2.20.1 | 59 | 2.25.1 |
100 | |||
101 | diff view generated by jsdifflib |
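The SME-side hunks above (SETFFR, RDFFR and WRFFR here, the ADR conversions earlier) rely on a TRANS_FEAT_NONSTREAMING wrapper, plus an open-coded s->is_nonstreaming assignment in hand-written trans functions. The wrapper is introduced earlier in the series and is not shown in this diff; a plausible sketch of the pattern, assuming the usual TRANS_FEAT shape, is:

    /*
     * Sketch, not necessarily the verbatim upstream macro: like TRANS_FEAT,
     * but additionally records that the instruction has no Streaming SVE
     * form, so sve_access_check() can raise the SME trap when PSTATE.SM
     * is set and full a64 (FEAT_SME_FA64) is not enabled.
     */
    #define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)             \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a)       \
        {                                                              \
            s->is_nonstreaming = true;                                 \
            return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__);   \
        }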
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Now that other bits have been moved out of tb->flags, | 3 | Mark these as non-streaming instructions, which should trap |
4 | there's no point in filling from the top. | 4 | if full a64 support is not enabled in streaming mode. |
5 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210419202257.161730-10-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-7-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/cpu.h | 14 +++++++------- | 11 | target/arm/sme-fa64.decode | 3 --- |
12 | 1 file changed, 7 insertions(+), 7 deletions(-) | 12 | target/arm/translate-sve.c | 22 ++++++++++++---------- |
13 | 2 files changed, 12 insertions(+), 13 deletions(-) | ||
13 | 14 | ||
14 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU; | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | * | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
20 | * Unless otherwise noted, these bits are cached in env->hflags. | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
21 | */ | 22 | |
22 | -FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1) | 23 | -FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
23 | -FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1) | 24 | -FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT |
24 | -FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */ | 25 | -FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP |
25 | -FIELD(TBFLAG_ANY, BE_DATA, 28, 1) | 26 | FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) |
26 | -FIELD(TBFLAG_ANY, MMUIDX, 24, 4) | 27 | FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA |
27 | +FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1) | 28 | FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL |
28 | +FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1) | 29 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
29 | +FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1) /* Not cached. */ | 30 | index XXXXXXX..XXXXXXX 100644 |
30 | +FIELD(TBFLAG_ANY, BE_DATA, 3, 1) | 31 | --- a/target/arm/translate-sve.c |
31 | +FIELD(TBFLAG_ANY, MMUIDX, 4, 4) | 32 | +++ b/target/arm/translate-sve.c |
32 | /* Target EL if we take a floating-point-disabled exception */ | 33 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = { |
33 | -FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2) | 34 | NULL, gen_helper_sve_fexpa_h, |
34 | +FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2) | 35 | gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, |
35 | /* For A-profile only, target EL for debug exceptions. */ | 36 | }; |
36 | -FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2) | 37 | -TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz, |
37 | +FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2) | 38 | - fexpa_fns[a->esz], a->rd, a->rn, 0) |
39 | +TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, | ||
40 | + fexpa_fns[a->esz], a->rd, a->rn, 0) | ||
41 | |||
42 | static gen_helper_gvec_3 * const ftssel_fns[4] = { | ||
43 | NULL, gen_helper_sve_ftssel_h, | ||
44 | gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, | ||
45 | }; | ||
46 | -TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0) | ||
47 | +TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, | ||
48 | + ftssel_fns[a->esz], a, 0) | ||
38 | 49 | ||
39 | /* | 50 | /* |
40 | * Bit usage when in AArch32 state, both A- and M-profile. | 51 | *** SVE Predicate Logical Operations Group |
52 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, | ||
53 | static gen_helper_gvec_3 * const compact_fns[4] = { | ||
54 | NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d | ||
55 | }; | ||
56 | -TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0) | ||
57 | +TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, | ||
58 | + compact_fns[a->esz], a, 0) | ||
59 | |||
60 | /* Call the helper that computes the ARM LastActiveElement pseudocode | ||
61 | * function, scaled by the element size. This includes the not found | ||
62 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = { | ||
63 | gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, | ||
64 | gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, | ||
65 | }; | ||
66 | -TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
67 | - bext_fns[a->esz], a, 0) | ||
68 | +TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
69 | + bext_fns[a->esz], a, 0) | ||
70 | |||
71 | static gen_helper_gvec_3 * const bdep_fns[4] = { | ||
72 | gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, | ||
73 | gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, | ||
74 | }; | ||
75 | -TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
76 | - bdep_fns[a->esz], a, 0) | ||
77 | +TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
78 | + bdep_fns[a->esz], a, 0) | ||
79 | |||
80 | static gen_helper_gvec_3 * const bgrp_fns[4] = { | ||
81 | gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, | ||
82 | gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, | ||
83 | }; | ||
84 | -TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
85 | - bgrp_fns[a->esz], a, 0) | ||
86 | +TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
87 | + bgrp_fns[a->esz], a, 0) | ||
88 | |||
89 | static gen_helper_gvec_3 * const cadd_fns[4] = { | ||
90 | gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, | ||
41 | -- | 91 | -- |
42 | 2.20.1 | 92 | 2.25.1 |
43 | |||
44 | diff view generated by jsdifflib |
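The left-hand hunk above only renumbers the TBFLAG_ANY fields; because every reader and writer goes through the FIELD()-generated accessors, no call site changes. As a small illustration (assuming QEMU's registerfields.h, values hypothetical):

    /* With FIELD(TBFLAG_ANY, MMUIDX, 4, 4), the accessors become plain
     * deposit/extract operations on bits [7:4] rather than [27:24]: */
    static int tbflag_mmuidx_roundtrip(void)
    {
        uint32_t x = 0;
        x = FIELD_DP32(x, TBFLAG_ANY, MMUIDX, 3);     /* deposit value 3 */
        return FIELD_EX32(x, TBFLAG_ANY, MMUIDX);     /* reads back 3    */
    }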
New patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
1 | 2 | ||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20220708151540.18136-8-richard.henderson@linaro.org | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/sme-fa64.decode | 2 -- | ||
12 | target/arm/translate-sve.c | 24 +++++++++++++++--------- | ||
13 | 2 files changed, 15 insertions(+), 11 deletions(-) | ||
14 | |||
15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/sme-fa64.decode | ||
18 | +++ b/target/arm/sme-fa64.decode | ||
19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
22 | |||
23 | -FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) | ||
24 | -FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA | ||
25 | FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL | ||
26 | FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD | ||
27 | FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA | ||
28 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/target/arm/translate-sve.c | ||
31 | +++ b/target/arm/translate-sve.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) | ||
33 | gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, | ||
34 | NULL, gen_helper_sve2_pmull_d, | ||
35 | }; | ||
36 | - if (a->esz == 0 | ||
37 | - ? !dc_isar_feature(aa64_sve2_pmull128, s) | ||
38 | - : !dc_isar_feature(aa64_sve, s)) { | ||
39 | + | ||
40 | + if (a->esz == 0) { | ||
41 | + if (!dc_isar_feature(aa64_sve2_pmull128, s)) { | ||
42 | + return false; | ||
43 | + } | ||
44 | + s->is_nonstreaming = true; | ||
45 | + } else if (!dc_isar_feature(aa64_sve, s)) { | ||
46 | return false; | ||
47 | } | ||
48 | return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); | ||
49 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) | ||
50 | * SVE Integer Multiply-Add (unpredicated) | ||
51 | */ | ||
52 | |||
53 | -TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s, | ||
54 | - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) | ||
55 | -TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d, | ||
56 | - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) | ||
57 | +TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, | ||
58 | + gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, | ||
59 | + 0, FPST_FPCR) | ||
60 | +TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, | ||
61 | + gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, | ||
62 | + 0, FPST_FPCR) | ||
63 | |||
64 | static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { | ||
65 | NULL, gen_helper_sve2_sqdmlal_zzzw_h, | ||
66 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, | ||
67 | TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz, | ||
68 | gen_helper_gvec_bfdot_idx, a) | ||
69 | |||
70 | -TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, | ||
71 | - gen_helper_gvec_bfmmla, a, 0) | ||
72 | +TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, | ||
73 | + gen_helper_gvec_bfmmla, a, 0) | ||
74 | |||
75 | static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) | ||
76 | { | ||
77 | -- | ||
78 | 2.25.1 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Now that we have all of the proper macros defined, expanding | 3 | Mark these as non-streaming instructions, which should trap |
4 | the CPUARMTBFlags structure and populating the two TB fields | 4 | if full a64 support is not enabled in streaming mode. |
5 | is relatively simple. | ||
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210419202257.161730-7-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-9-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/cpu.h | 49 ++++++++++++++++++++++++------------------ | 11 | target/arm/sme-fa64.decode | 3 --- |
13 | target/arm/translate.h | 2 +- | 12 | target/arm/translate-sve.c | 15 +++++++++++---- |
14 | target/arm/helper.c | 10 +++++---- | 13 | 2 files changed, 11 insertions(+), 7 deletions(-) |
15 | 3 files changed, 35 insertions(+), 26 deletions(-) | ||
16 | 14 | ||
17 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/sme-fa64.decode |
20 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/sme-fa64.decode |
21 | @@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey { | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
22 | /* See the commentary above the TBFLAG field definitions. */ | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
23 | typedef struct CPUARMTBFlags { | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
24 | uint32_t flags; | 22 | |
25 | + target_ulong flags2; | 23 | -FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL |
26 | } CPUARMTBFlags; | 24 | -FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD |
27 | 25 | -FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA | |
28 | typedef struct CPUARMState { | 26 | FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA |
29 | @@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU; | 27 | FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions |
30 | #include "exec/cpu-all.h" | 28 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
29 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/arm/translate-sve.c | ||
32 | +++ b/target/arm/translate-sve.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { | ||
34 | NULL, gen_helper_sve_ftmad_h, | ||
35 | gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, | ||
36 | }; | ||
37 | -TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz, | ||
38 | - ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, | ||
39 | - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) | ||
40 | +TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, | ||
41 | + ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, | ||
42 | + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) | ||
31 | 43 | ||
32 | /* | 44 | /* |
33 | - * Bit usage in the TB flags field: bit 31 indicates whether we are | 45 | *** SVE Floating Point Accumulating Reduction Group |
34 | - * in 32 or 64 bit mode. The meaning of the other bits depends on that. | 46 | @@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) |
35 | - * We put flags which are shared between 32 and 64 bit mode at the top | 47 | if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { |
36 | - * of the word, and flags which apply to only one mode at the bottom. | 48 | return false; |
37 | + * We have more than 32-bits worth of state per TB, so we split the data | 49 | } |
38 | + * between tb->flags and tb->cs_base, which is otherwise unused for ARM. | 50 | + s->is_nonstreaming = true; |
39 | + * We collect these two parts in CPUARMTBFlags where they are named | 51 | if (!sve_access_check(s)) { |
40 | + * flags and flags2 respectively. | 52 | return true; |
41 | * | 53 | } |
42 | - * 31 20 18 14 9 0 | 54 | @@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) |
43 | - * +--------------+-----+-----+----------+--------------+ | 55 | DO_FP3(FADD_zzz, fadd) |
44 | - * | | | TBFLAG_A32 | | | 56 | DO_FP3(FSUB_zzz, fsub) |
45 | - * | | +-----+----------+ TBFLAG_AM32 | | 57 | DO_FP3(FMUL_zzz, fmul) |
46 | - * | TBFLAG_ANY | |TBFLAG_M32| | | 58 | -DO_FP3(FTSMUL, ftsmul) |
47 | - * | +-----------+----------+--------------| | 59 | DO_FP3(FRECPS, recps) |
48 | - * | | TBFLAG_A64 | | 60 | DO_FP3(FRSQRTS, rsqrts) |
49 | - * +--------------+-------------------------------------+ | 61 | |
50 | - * 31 20 0 | 62 | #undef DO_FP3 |
51 | + * The flags that are shared between all execution modes, TBFLAG_ANY, | 63 | |
52 | + * are stored in flags. The flags that are specific to a given mode | 64 | +static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { |
53 | + * are stores in flags2. Since cs_base is sized on the configured | 65 | + NULL, gen_helper_gvec_ftsmul_h, |
54 | + * address size, flags2 always has 64-bits for A64, and a minimum of | 66 | + gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d |
55 | + * 32-bits for A32 and M32. | 67 | +}; |
56 | + * | 68 | +TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, |
57 | + * The bits for 32-bit A-profile and M-profile partially overlap: | 69 | + ftsmul_fns[a->esz], a, 0) |
58 | + * | 70 | + |
59 | + * 18 9 0 | 71 | /* |
60 | + * +----------------+--------------+ | 72 | *** SVE Floating Point Arithmetic - Predicated Group |
61 | + * | TBFLAG_A32 | | | ||
62 | + * +-----+----------+ TBFLAG_AM32 | | ||
63 | + * | |TBFLAG_M32| | | ||
64 | + * +-----+----------+--------------+ | ||
65 | + * 14 9 0 | ||
66 | * | ||
67 | * Unless otherwise noted, these bits are cached in env->hflags. | ||
68 | */ | 73 | */ |
69 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) | ||
70 | #define DP_TBFLAG_ANY(DST, WHICH, VAL) \ | ||
71 | (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL)) | ||
72 | #define DP_TBFLAG_A64(DST, WHICH, VAL) \ | ||
73 | - (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL)) | ||
74 | + (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL)) | ||
75 | #define DP_TBFLAG_A32(DST, WHICH, VAL) \ | ||
76 | - (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL)) | ||
77 | + (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL)) | ||
78 | #define DP_TBFLAG_M32(DST, WHICH, VAL) \ | ||
79 | - (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL)) | ||
80 | + (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL)) | ||
81 | #define DP_TBFLAG_AM32(DST, WHICH, VAL) \ | ||
82 | - (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL)) | ||
83 | + (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL)) | ||
84 | |||
85 | #define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH) | ||
86 | -#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH) | ||
87 | -#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH) | ||
88 | -#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH) | ||
89 | -#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH) | ||
90 | +#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH) | ||
91 | +#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH) | ||
92 | +#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH) | ||
93 | +#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH) | ||
94 | |||
95 | /** | ||
96 | * cpu_mmu_index: | ||
97 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
98 | index XXXXXXX..XXXXXXX 100644 | ||
99 | --- a/target/arm/translate.h | ||
100 | +++ b/target/arm/translate.h | ||
101 | @@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); | ||
102 | */ | ||
103 | static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb) | ||
104 | { | ||
105 | - return (CPUARMTBFlags){ tb->flags }; | ||
106 | + return (CPUARMTBFlags){ tb->flags, tb->cs_base }; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
111 | index XXXXXXX..XXXXXXX 100644 | ||
112 | --- a/target/arm/helper.c | ||
113 | +++ b/target/arm/helper.c | ||
114 | @@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env) | ||
115 | CPUARMTBFlags c = env->hflags; | ||
116 | CPUARMTBFlags r = rebuild_hflags_internal(env); | ||
117 | |||
118 | - if (unlikely(c.flags != r.flags)) { | ||
119 | - fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", | ||
120 | - c.flags, r.flags); | ||
121 | + if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) { | ||
122 | + fprintf(stderr, "TCG hflags mismatch " | ||
123 | + "(current:(0x%08x,0x" TARGET_FMT_lx ")" | ||
124 | + " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n", | ||
125 | + c.flags, c.flags2, r.flags, r.flags2); | ||
126 | abort(); | ||
127 | } | ||
128 | #endif | ||
129 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
130 | { | ||
131 | CPUARMTBFlags flags; | ||
132 | |||
133 | - *cs_base = 0; | ||
134 | assert_hflags_rebuild_correctly(env); | ||
135 | flags = env->hflags; | ||
136 | |||
137 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
138 | } | ||
139 | |||
140 | *pflags = flags.flags; | ||
141 | + *cs_base = flags.flags2; | ||
142 | } | ||
143 | |||
144 | #ifdef TARGET_AARCH64 | ||
145 | -- | 74 | -- |
146 | 2.20.1 | 75 | 2.25.1 |
147 | |||
148 | diff view generated by jsdifflib |
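With the hunk above, the mode-specific bits move into the new flags2 word, which travels in tb->cs_base while the shared bits stay in tb->flags. A usage sketch on the consumer side, using names from this and the neighbouring TBFLAG patches (function name hypothetical):

    /* Sketch: recovering both words in a translator init path. */
    static void example_read_tbflags(const TranslationBlock *tb)
    {
        CPUARMTBFlags tbflags = arm_tbflags_from_tb(tb);   /* {flags, cs_base} */
        bool aa64 = EX_TBFLAG_ANY(tbflags, AARCH64_STATE); /* shared word      */
        int fp_el = EX_TBFLAG_ANY(tbflags, FPEXC_EL);
        /* Mode-specific fields, e.g. EX_TBFLAG_A64(tbflags, MTE0_ACTIVE),
         * now come from flags2 without the caller caring where it lives. */
        (void)aa64; (void)fp_el;
    }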
New patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
1 | 2 | ||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20220708151540.18136-10-richard.henderson@linaro.org | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/sme-fa64.decode | 1 - | ||
12 | target/arm/translate-sve.c | 12 ++++++------ | ||
13 | 2 files changed, 6 insertions(+), 7 deletions(-) | ||
14 | |||
15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/sme-fa64.decode | ||
18 | +++ b/target/arm/sme-fa64.decode | ||
19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
22 | |||
23 | -FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA | ||
24 | FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions | ||
25 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) | ||
26 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) | ||
27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/translate-sve.c | ||
30 | +++ b/target/arm/translate-sve.c | ||
31 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) | ||
32 | TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) | ||
33 | TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) | ||
34 | |||
35 | -TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
36 | - gen_helper_gvec_smmla_b, a, 0) | ||
37 | -TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
38 | - gen_helper_gvec_usmmla_b, a, 0) | ||
39 | -TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
40 | - gen_helper_gvec_ummla_b, a, 0) | ||
41 | +TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
42 | + gen_helper_gvec_smmla_b, a, 0) | ||
43 | +TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
44 | + gen_helper_gvec_usmmla_b, a, 0) | ||
45 | +TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
46 | + gen_helper_gvec_ummla_b, a, 0) | ||
47 | |||
48 | TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, | ||
49 | gen_helper_gvec_bfdot, a, 0) | ||
50 | -- | ||
51 | 2.25.1 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-32-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-11-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-sve.c | 2 +- | 11 | target/arm/sme-fa64.decode | 1 - |
9 | 1 file changed, 1 insertion(+), 1 deletion(-) | 12 | target/arm/translate-sve.c | 35 ++++++++++++++++++----------------- |
13 | 2 files changed, 18 insertions(+), 18 deletions(-) | ||
10 | 14 | ||
15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/sme-fa64.decode | ||
18 | +++ b/target/arm/sme-fa64.decode | ||
19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
22 | |||
23 | -FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions | ||
24 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) | ||
25 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) | ||
26 | FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) | ||
11 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
12 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
14 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) | 31 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) |
16 | clean_addr = gen_mte_check1(s, temp, false, true, msz); | 32 | static gen_helper_gvec_flags_4 * const match_fns[4] = { |
17 | 33 | gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL | |
18 | tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), | 34 | }; |
19 | - s->be_data | dtype_mop[a->dtype]); | 35 | -TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) |
20 | + finalize_memop(s, dtype_mop[a->dtype])); | 36 | +TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) |
21 | 37 | ||
22 | /* Broadcast to *all* elements. */ | 38 | static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { |
23 | tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), | 39 | gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL |
40 | }; | ||
41 | -TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) | ||
42 | +TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) | ||
43 | |||
44 | static gen_helper_gvec_4 * const histcnt_fns[4] = { | ||
45 | NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d | ||
46 | }; | ||
47 | -TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, | ||
48 | - histcnt_fns[a->esz], a, 0) | ||
49 | +TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, | ||
50 | + histcnt_fns[a->esz], a, 0) | ||
51 | |||
52 | -TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, | ||
53 | - a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) | ||
54 | +TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, | ||
55 | + a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) | ||
56 | |||
57 | DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) | ||
58 | DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) | ||
59 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, | ||
60 | TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, | ||
61 | a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0) | ||
62 | |||
63 | -TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, | ||
64 | - gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) | ||
65 | +TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, | ||
66 | + gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) | ||
67 | |||
68 | -TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, | ||
69 | - gen_helper_crypto_aese, a, false) | ||
70 | -TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, | ||
71 | - gen_helper_crypto_aese, a, true) | ||
72 | +TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, | ||
73 | + gen_helper_crypto_aese, a, false) | ||
74 | +TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, | ||
75 | + gen_helper_crypto_aese, a, true) | ||
76 | |||
77 | -TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
78 | - gen_helper_crypto_sm4e, a, 0) | ||
79 | -TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
80 | - gen_helper_crypto_sm4ekey, a, 0) | ||
81 | +TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
82 | + gen_helper_crypto_sm4e, a, 0) | ||
83 | +TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
84 | + gen_helper_crypto_sm4ekey, a, 0) | ||
85 | |||
86 | -TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a) | ||
87 | +TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, | ||
88 | + gen_gvec_rax1, a) | ||
89 | |||
90 | TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, | ||
91 | gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR) | ||
24 | -- | 92 | -- |
25 | 2.20.1 | 93 | 2.25.1 |
26 | |||
27 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-21-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-12-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-vfp.c.inc | 8 ++++---- | 11 | target/arm/sme-fa64.decode | 9 --------- |
9 | 1 file changed, 4 insertions(+), 4 deletions(-) | 12 | target/arm/translate-sve.c | 6 ++++++ |
13 | 2 files changed, 6 insertions(+), 9 deletions(-) | ||
10 | 14 | ||
11 | diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-vfp.c.inc | 17 | --- a/target/arm/sme-fa64.decode |
14 | +++ b/target/arm/translate-vfp.c.inc | 18 | +++ b/target/arm/sme-fa64.decode |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
16 | for (i = 0; i < n; i++) { | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
17 | if (a->l) { | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
18 | /* load */ | 22 | |
19 | - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); | 23 | -FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
20 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 24 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) |
21 | vfp_store_reg32(tmp, a->vd + i); | 25 | FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) |
22 | } else { | 26 | -FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm) |
23 | /* store */ | 27 | -FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector) |
24 | vfp_load_reg32(tmp, a->vd + i); | 28 | -FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector) |
25 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | 29 | -FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector) |
26 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 30 | FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) |
27 | } | 31 | FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) |
28 | tcg_gen_addi_i32(addr, addr, offset); | 32 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) |
33 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) | ||
34 | FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch | ||
35 | -FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar) | ||
36 | -FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar) | ||
37 | -FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector) | ||
38 | -FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc) | ||
39 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/translate-sve.c | ||
42 | +++ b/target/arm/translate-sve.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) | ||
44 | if (!dc_isar_feature(aa64_sve, s)) { | ||
45 | return false; | ||
29 | } | 46 | } |
30 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) | 47 | + s->is_nonstreaming = true; |
31 | for (i = 0; i < n; i++) { | 48 | if (!sve_access_check(s)) { |
32 | if (a->l) { | 49 | return true; |
33 | /* load */ | 50 | } |
34 | - gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); | 51 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) |
35 | + gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); | 52 | if (!dc_isar_feature(aa64_sve, s)) { |
36 | vfp_store_reg64(tmp, a->vd + i); | 53 | return false; |
37 | } else { | 54 | } |
38 | /* store */ | 55 | + s->is_nonstreaming = true; |
39 | vfp_load_reg64(tmp, a->vd + i); | 56 | if (!sve_access_check(s)) { |
40 | - gen_aa32_st64(s, tmp, addr, get_mem_index(s)); | 57 | return true; |
41 | + gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); | 58 | } |
42 | } | 59 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) |
43 | tcg_gen_addi_i32(addr, addr, offset); | 60 | if (!dc_isar_feature(aa64_sve2, s)) { |
61 | return false; | ||
62 | } | ||
63 | + s->is_nonstreaming = true; | ||
64 | if (!sve_access_check(s)) { | ||
65 | return true; | ||
66 | } | ||
67 | @@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) | ||
68 | if (!dc_isar_feature(aa64_sve, s)) { | ||
69 | return false; | ||
70 | } | ||
71 | + s->is_nonstreaming = true; | ||
72 | if (!sve_access_check(s)) { | ||
73 | return true; | ||
74 | } | ||
75 | @@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) | ||
76 | if (!dc_isar_feature(aa64_sve, s)) { | ||
77 | return false; | ||
78 | } | ||
79 | + s->is_nonstreaming = true; | ||
80 | if (!sve_access_check(s)) { | ||
81 | return true; | ||
82 | } | ||
83 | @@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) | ||
84 | if (!dc_isar_feature(aa64_sve2, s)) { | ||
85 | return false; | ||
86 | } | ||
87 | + s->is_nonstreaming = true; | ||
88 | if (!sve_access_check(s)) { | ||
89 | return true; | ||
44 | } | 90 | } |
45 | -- | 91 | -- |
46 | 2.20.1 | 92 | 2.25.1 |
47 | |||
48 | diff view generated by jsdifflib |
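In the VLDM/VSTM hunk above, the 64-bit element loop asks for MO_Q | MO_ALIGN_4: each access is 8 bytes wide, but only word (4-byte) alignment is enforced, which is what the choice of MO_ALIGN_4 over MO_ALIGN expresses for these FP multi-register transfers. A minimal usage sketch (names as in the hunk, standalone use hypothetical):

    /* 8-byte access that must only be 4-byte aligned: */
    MemOp op = MO_Q | MO_ALIGN_4;
    gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), op);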
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | In preparation for splitting tb->flags across multiple | 3 | Mark these as non-streaming instructions, which should trap if full |
4 | fields, introduce a structure to hold the value(s). | 4 | a64 support is not enabled in streaming mode. In this case, introduce |
5 | So far this only migrates the one uint32_t and fixes | 5 | PRF_ns (prefetch non-streaming) to handle the checks. |
6 | all of the places that require adjustment to match. | ||
7 | 6 | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Message-id: 20210419202257.161730-6-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-13-richard.henderson@linaro.org |
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 11 | --- |
13 | target/arm/cpu.h | 26 ++++++++++++--------- | 12 | target/arm/sme-fa64.decode | 3 --- |
14 | target/arm/translate.h | 11 +++++++++ | 13 | target/arm/sve.decode | 10 +++++----- |
15 | target/arm/helper.c | 48 +++++++++++++++++++++----------------- | 14 | target/arm/translate-sve.c | 11 +++++++++++ |
16 | target/arm/translate-a64.c | 2 +- | 15 | 3 files changed, 16 insertions(+), 8 deletions(-) |
17 | target/arm/translate.c | 7 +++--- | ||
18 | 5 files changed, 57 insertions(+), 37 deletions(-) | ||
19 | 16 | ||
20 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 17 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
21 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/target/arm/cpu.h | 19 | --- a/target/arm/sme-fa64.decode |
23 | +++ b/target/arm/cpu.h | 20 | +++ b/target/arm/sme-fa64.decode |
24 | @@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey { | 21 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
25 | } ARMPACKey; | 22 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
26 | #endif | 23 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
27 | 24 | ||
28 | +/* See the commentary above the TBFLAG field definitions. */ | 25 | -FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) |
29 | +typedef struct CPUARMTBFlags { | 26 | -FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) |
30 | + uint32_t flags; | 27 | FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) |
31 | +} CPUARMTBFlags; | 28 | FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) |
32 | 29 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) | |
33 | typedef struct CPUARMState { | 30 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) |
34 | /* Regs for current mode. */ | 31 | -FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch |
35 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | 32 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
36 | uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ | ||
37 | |||
38 | /* Cached TBFLAGS state. See below for which bits are included. */ | ||
39 | - uint32_t hflags; | ||
40 | + CPUARMTBFlags hflags; | ||
41 | |||
42 | /* Frequently accessed CPSR bits are stored separately for efficiency. | ||
43 | This contains all the other bits. Use cpsr_{read,write} to access | ||
44 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) | ||
45 | * Helpers for using the above. | ||
46 | */ | ||
47 | #define DP_TBFLAG_ANY(DST, WHICH, VAL) \ | ||
48 | - (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL)) | ||
49 | + (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL)) | ||
50 | #define DP_TBFLAG_A64(DST, WHICH, VAL) \ | ||
51 | - (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL)) | ||
52 | + (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL)) | ||
53 | #define DP_TBFLAG_A32(DST, WHICH, VAL) \ | ||
54 | - (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL)) | ||
55 | + (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL)) | ||
56 | #define DP_TBFLAG_M32(DST, WHICH, VAL) \ | ||
57 | - (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL)) | ||
58 | + (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL)) | ||
59 | #define DP_TBFLAG_AM32(DST, WHICH, VAL) \ | ||
60 | - (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL)) | ||
61 | + (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL)) | ||
62 | |||
63 | -#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH) | ||
64 | -#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH) | ||
65 | -#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH) | ||
66 | -#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH) | ||
67 | -#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH) | ||
68 | +#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH) | ||
69 | +#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH) | ||
70 | +#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH) | ||
71 | +#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH) | ||
72 | +#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH) | ||
73 | |||
74 | /** | ||
75 | * cpu_mmu_index: | ||
76 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
77 | index XXXXXXX..XXXXXXX 100644 | 33 | index XXXXXXX..XXXXXXX 100644 |
78 | --- a/target/arm/translate.h | 34 | --- a/target/arm/sve.decode |
79 | +++ b/target/arm/translate.h | 35 | +++ b/target/arm/sve.decode |
80 | @@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); | 36 | @@ -XXX,XX +XXX,XX @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \ |
81 | typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); | 37 | @rpri_load_msz nreg=0 |
82 | typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); | 38 | |
83 | 39 | # SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets) | |
84 | +/** | 40 | -PRF 1000010 00 -1 ----- 0-- --- ----- 0 ---- |
85 | + * arm_tbflags_from_tb: | 41 | +PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ---- |
86 | + * @tb: the TranslationBlock | 42 | |
87 | + * | 43 | # SVE 32-bit gather prefetch (vector plus immediate) |
88 | + * Extract the flag values from @tb. | 44 | -PRF 1000010 -- 00 ----- 111 --- ----- 0 ---- |
89 | + */ | 45 | +PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ---- |
90 | +static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb) | 46 | |
47 | # SVE contiguous prefetch (scalar plus immediate) | ||
48 | PRF 1000010 11 1- ----- 0-- --- ----- 0 ---- | ||
49 | @@ -XXX,XX +XXX,XX @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ | ||
50 | @rpri_g_load esz=3 | ||
51 | |||
52 | # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets) | ||
53 | -PRF 1100010 00 11 ----- 1-- --- ----- 0 ---- | ||
54 | +PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ---- | ||
55 | |||
56 | # SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets) | ||
57 | -PRF 1100010 00 -1 ----- 0-- --- ----- 0 ---- | ||
58 | +PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ---- | ||
59 | |||
60 | # SVE 64-bit gather prefetch (vector plus immediate) | ||
61 | -PRF 1100010 -- 00 ----- 111 --- ----- 0 ---- | ||
62 | +PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ---- | ||
63 | |||
64 | ### SVE Memory Store Group | ||
65 | |||
66 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/target/arm/translate-sve.c | ||
69 | +++ b/target/arm/translate-sve.c | ||
70 | @@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) | ||
71 | return true; | ||
72 | } | ||
73 | |||
74 | +static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) | ||
91 | +{ | 75 | +{ |
92 | + return (CPUARMTBFlags){ tb->flags }; | 76 | + if (!dc_isar_feature(aa64_sve, s)) { |
77 | + return false; | ||
78 | + } | ||
79 | + /* Prefetch is a nop within QEMU. */ | ||
80 | + s->is_nonstreaming = true; | ||
81 | + (void)sve_access_check(s); | ||
82 | + return true; | ||
93 | +} | 83 | +} |
94 | + | 84 | + |
95 | /* | 85 | /* |
96 | * Enum for argument to fpstatus_ptr(). | 86 | * Move Prefix |
97 | */ | 87 | * |
98 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/helper.c | ||
101 | +++ b/target/arm/helper.c | ||
102 | @@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) | ||
103 | } | ||
104 | #endif | ||
105 | |||
106 | -static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, | ||
107 | - ARMMMUIdx mmu_idx, uint32_t flags) | ||
108 | +static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el, | ||
109 | + ARMMMUIdx mmu_idx, | ||
110 | + CPUARMTBFlags flags) | ||
111 | { | ||
112 | DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el); | ||
113 | DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx)); | ||
114 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, | ||
115 | return flags; | ||
116 | } | ||
117 | |||
118 | -static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, | ||
119 | - ARMMMUIdx mmu_idx, uint32_t flags) | ||
120 | +static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el, | ||
121 | + ARMMMUIdx mmu_idx, | ||
122 | + CPUARMTBFlags flags) | ||
123 | { | ||
124 | bool sctlr_b = arm_sctlr_b(env); | ||
125 | |||
126 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, | ||
127 | return rebuild_hflags_common(env, fp_el, mmu_idx, flags); | ||
128 | } | ||
129 | |||
130 | -static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, | ||
131 | - ARMMMUIdx mmu_idx) | ||
132 | +static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el, | ||
133 | + ARMMMUIdx mmu_idx) | ||
134 | { | ||
135 | - uint32_t flags = 0; | ||
136 | + CPUARMTBFlags flags = {}; | ||
137 | |||
138 | if (arm_v7m_is_handler_mode(env)) { | ||
139 | DP_TBFLAG_M32(flags, HANDLER, 1); | ||
140 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, | ||
141 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); | ||
142 | } | ||
143 | |||
144 | -static uint32_t rebuild_hflags_aprofile(CPUARMState *env) | ||
145 | +static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env) | ||
146 | { | ||
147 | - int flags = 0; | ||
148 | + CPUARMTBFlags flags = {}; | ||
149 | |||
150 | DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env)); | ||
151 | return flags; | ||
152 | } | ||
153 | |||
154 | -static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, | ||
155 | - ARMMMUIdx mmu_idx) | ||
156 | +static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, | ||
157 | + ARMMMUIdx mmu_idx) | ||
158 | { | ||
159 | - uint32_t flags = rebuild_hflags_aprofile(env); | ||
160 | + CPUARMTBFlags flags = rebuild_hflags_aprofile(env); | ||
161 | |||
162 | if (arm_el_is_aa64(env, 1)) { | ||
163 | DP_TBFLAG_A32(flags, VFPEN, 1); | ||
164 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, | ||
165 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); | ||
166 | } | ||
167 | |||
168 | -static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
169 | - ARMMMUIdx mmu_idx) | ||
170 | +static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
171 | + ARMMMUIdx mmu_idx) | ||
172 | { | ||
173 | - uint32_t flags = rebuild_hflags_aprofile(env); | ||
174 | + CPUARMTBFlags flags = rebuild_hflags_aprofile(env); | ||
175 | ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); | ||
176 | uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; | ||
177 | uint64_t sctlr; | ||
178 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
179 | return rebuild_hflags_common(env, fp_el, mmu_idx, flags); | ||
180 | } | ||
181 | |||
182 | -static uint32_t rebuild_hflags_internal(CPUARMState *env) | ||
183 | +static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env) | ||
184 | { | ||
185 | int el = arm_current_el(env); | ||
186 | int fp_el = fp_exception_el(env, el); | ||
187 | @@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env) | ||
188 | int el = arm_current_el(env); | ||
189 | int fp_el = fp_exception_el(env, el); | ||
190 | ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); | ||
191 | + | ||
192 | env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); | ||
193 | } | ||
194 | |||
195 | @@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) | ||
196 | static inline void assert_hflags_rebuild_correctly(CPUARMState *env) | ||
197 | { | ||
198 | #ifdef CONFIG_DEBUG_TCG | ||
199 | - uint32_t env_flags_current = env->hflags; | ||
200 | - uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); | ||
201 | + CPUARMTBFlags c = env->hflags; | ||
202 | + CPUARMTBFlags r = rebuild_hflags_internal(env); | ||
203 | |||
204 | - if (unlikely(env_flags_current != env_flags_rebuilt)) { | ||
205 | + if (unlikely(c.flags != r.flags)) { | ||
206 | fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", | ||
207 | - env_flags_current, env_flags_rebuilt); | ||
208 | + c.flags, r.flags); | ||
209 | abort(); | ||
210 | } | ||
211 | #endif | ||
212 | @@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env) | ||
213 | void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
214 | target_ulong *cs_base, uint32_t *pflags) | ||
215 | { | ||
216 | - uint32_t flags = env->hflags; | ||
217 | + CPUARMTBFlags flags; | ||
218 | |||
219 | *cs_base = 0; | ||
220 | assert_hflags_rebuild_correctly(env); | ||
221 | + flags = env->hflags; | ||
222 | |||
223 | if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { | ||
224 | *pc = env->pc; | ||
225 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
226 | DP_TBFLAG_ANY(flags, PSTATE__SS, 1); | ||
227 | } | ||
228 | |||
229 | - *pflags = flags; | ||
230 | + *pflags = flags.flags; | ||
231 | } | ||
232 | |||
233 | #ifdef TARGET_AARCH64 | ||
234 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
235 | index XXXXXXX..XXXXXXX 100644 | ||
236 | --- a/target/arm/translate-a64.c | ||
237 | +++ b/target/arm/translate-a64.c | ||
238 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
239 | DisasContext *dc = container_of(dcbase, DisasContext, base); | ||
240 | CPUARMState *env = cpu->env_ptr; | ||
241 | ARMCPU *arm_cpu = env_archcpu(env); | ||
242 | - uint32_t tb_flags = dc->base.tb->flags; | ||
243 | + CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); | ||
244 | int bound, core_mmu_idx; | ||
245 | |||
246 | dc->isar = &arm_cpu->isar; | ||
247 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
248 | index XXXXXXX..XXXXXXX 100644 | ||
249 | --- a/target/arm/translate.c | ||
250 | +++ b/target/arm/translate.c | ||
251 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
252 | DisasContext *dc = container_of(dcbase, DisasContext, base); | ||
253 | CPUARMState *env = cs->env_ptr; | ||
254 | ARMCPU *cpu = env_archcpu(env); | ||
255 | - uint32_t tb_flags = dc->base.tb->flags; | ||
256 | + CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); | ||
257 | uint32_t condexec, core_mmu_idx; | ||
258 | |||
259 | dc->isar = &cpu->isar; | ||
260 | @@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) | ||
261 | { | ||
262 | DisasContext dc = { }; | ||
263 | const TranslatorOps *ops = &arm_translator_ops; | ||
264 | + CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb); | ||
265 | |||
266 | - if (EX_TBFLAG_AM32(tb->flags, THUMB)) { | ||
267 | + if (EX_TBFLAG_AM32(tb_flags, THUMB)) { | ||
268 | ops = &thumb_translator_ops; | ||
269 | } | ||
270 | #ifdef TARGET_AARCH64 | ||
271 | - if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) { | ||
272 | + if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) { | ||
273 | ops = &aarch64_translator_ops; | ||
274 | } | ||
275 | #endif | ||
276 | -- | 88 | -- |
277 | 2.20.1 | 89 | 2.25.1 |
278 | |||
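As an aside on the left-hand (alignment/hflags) series: the point of wrapping the hflags word in the one-member CPUARMTBFlags struct is type safety — a bare uint32_t can silently be mixed up with any other integer, while a struct cannot, so every producer and consumer is forced through the DP_TBFLAG_*/EX_TBFLAG_* accessors. A stand-alone sketch of the same idiom, with simplified stand-ins for FIELD_DP32/FIELD_EX32 (these are not the QEMU definitions):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct Flags { uint32_t flags; } Flags;   /* cf. CPUARMTBFlags */

    /* Simplified deposit/extract helpers standing in for FIELD_DP32/FIELD_EX32. */
    #define DP_FLAG(DST, SHIFT, LEN, VAL) \
        ((DST).flags = ((DST).flags & ~((((uint32_t)1 << (LEN)) - 1) << (SHIFT))) \
                       | ((uint32_t)(VAL) << (SHIFT)))
    #define EX_FLAG(IN, SHIFT, LEN) \
        (((IN).flags >> (SHIFT)) & (((uint32_t)1 << (LEN)) - 1))

    int main(void)
    {
        Flags f = { 0 };              /* mirrors 'CPUARMTBFlags flags = {};' */
        DP_FLAG(f, 4, 2, 3);          /* deposit a 2-bit field at bit 4 */
        printf("%u\n", (unsigned)EX_FLAG(f, 4, 2));   /* prints 3 */
        return 0;
    }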
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full A64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-22-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-14-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-vfp.c.inc | 12 ++++++------ | 11 | target/arm/sme-fa64.decode | 2 -- |
9 | 1 file changed, 6 insertions(+), 6 deletions(-) | 12 | target/arm/translate-sve.c | 2 ++ |
13 | 2 files changed, 2 insertions(+), 2 deletions(-) | ||
10 | 14 | ||
11 | diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-vfp.c.inc | 17 | --- a/target/arm/sme-fa64.decode |
14 | +++ b/target/arm/translate-vfp.c.inc | 18 | +++ b/target/arm/sme-fa64.decode |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
16 | addr = add_reg_for_lit(s, a->rn, offset); | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
17 | tmp = tcg_temp_new_i32(); | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
18 | if (a->l) { | 22 | |
19 | - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); | 23 | -FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) |
20 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN); | 24 | -FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) |
21 | vfp_store_reg32(tmp, a->vd); | 25 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) |
22 | } else { | 26 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) |
23 | vfp_load_reg32(tmp, a->vd); | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
24 | - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); | 28 | index XXXXXXX..XXXXXXX 100644 |
25 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN); | 29 | --- a/target/arm/translate-sve.c |
30 | +++ b/target/arm/translate-sve.c | ||
31 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) | ||
32 | if (!dc_isar_feature(aa64_sve, s)) { | ||
33 | return false; | ||
26 | } | 34 | } |
27 | tcg_temp_free_i32(tmp); | 35 | + s->is_nonstreaming = true; |
28 | tcg_temp_free_i32(addr); | 36 | if (sve_access_check(s)) { |
29 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) | 37 | TCGv_i64 addr = new_tmp_a64(s); |
30 | addr = add_reg_for_lit(s, a->rn, offset); | 38 | tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); |
31 | tmp = tcg_temp_new_i32(); | 39 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) |
32 | if (a->l) { | 40 | if (!dc_isar_feature(aa64_sve, s)) { |
33 | - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); | 41 | return false; |
34 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | ||
35 | vfp_store_reg32(tmp, a->vd); | ||
36 | } else { | ||
37 | vfp_load_reg32(tmp, a->vd); | ||
38 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | ||
39 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | ||
40 | } | 42 | } |
41 | tcg_temp_free_i32(tmp); | 43 | + s->is_nonstreaming = true; |
42 | tcg_temp_free_i32(addr); | 44 | if (sve_access_check(s)) { |
43 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) | 45 | int vsz = vec_full_reg_size(s); |
44 | addr = add_reg_for_lit(s, a->rn, offset); | 46 | int elements = vsz >> dtype_esz[a->dtype]; |
45 | tmp = tcg_temp_new_i64(); | ||
46 | if (a->l) { | ||
47 | - gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); | ||
48 | + gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); | ||
49 | vfp_store_reg64(tmp, a->vd); | ||
50 | } else { | ||
51 | vfp_load_reg64(tmp, a->vd); | ||
52 | - gen_aa32_st64(s, tmp, addr, get_mem_index(s)); | ||
53 | + gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4); | ||
54 | } | ||
55 | tcg_temp_free_i64(tmp); | ||
56 | tcg_temp_free_i32(addr); | ||
57 | -- | 47 | -- |
58 | 2.20.1 | 48 | 2.25.1 |
59 | |||
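For readers following the SME-side series: each of these "mark as non-streaming" conversions is the same two-line change in the corresponding trans_* function, as the hunks above show. The general shape, written out once with a placeholder instruction name (trans_FOO is illustrative, not a real function):

    static bool trans_FOO(DisasContext *s, arg_FOO *a)
    {
        if (!dc_isar_feature(aa64_sve, s)) {
            return false;
        }
        /*
         * Flag the insn as non-streaming before the access check, so that
         * sve_access_check() can raise the SME trap when Streaming SVE is
         * active and FEAT_SME_FA64 is not implemented.
         */
        s->is_nonstreaming = true;
        if (!sve_access_check(s)) {
            return true;
        }
        /* ... normal code generation for the instruction ... */
        return true;
    }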
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full A64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-20-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-15-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate.c | 4 ++-- | 11 | target/arm/sme-fa64.decode | 3 --- |
9 | 1 file changed, 2 insertions(+), 2 deletions(-) | 12 | target/arm/translate-sve.c | 2 ++ |
13 | 2 files changed, 2 insertions(+), 3 deletions(-) | ||
10 | 14 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 17 | --- a/target/arm/sme-fa64.decode |
14 | +++ b/target/arm/translate.c | 18 | +++ b/target/arm/sme-fa64.decode |
15 | @@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s, | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
20 | # --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) | ||
21 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
22 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
23 | - | ||
24 | -FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) | ||
25 | -FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) | ||
26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/target/arm/translate-sve.c | ||
29 | +++ b/target/arm/translate-sve.c | ||
30 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) | ||
31 | if (a->rm == 31) { | ||
32 | return false; | ||
16 | } | 33 | } |
17 | tcg_gen_addi_i32(addr, addr, offset); | 34 | + s->is_nonstreaming = true; |
18 | tmp = load_reg(s, 14); | 35 | if (sve_access_check(s)) { |
19 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | 36 | TCGv_i64 addr = new_tmp_a64(s); |
20 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 37 | tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); |
21 | tcg_temp_free_i32(tmp); | 38 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) |
22 | tmp = load_cpu_field(spsr); | 39 | if (!dc_isar_feature(aa64_sve_f64mm, s)) { |
23 | tcg_gen_addi_i32(addr, addr, 4); | 40 | return false; |
24 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | 41 | } |
25 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 42 | + s->is_nonstreaming = true; |
26 | tcg_temp_free_i32(tmp); | 43 | if (sve_access_check(s)) { |
27 | if (writeback) { | 44 | TCGv_i64 addr = new_tmp_a64(s); |
28 | switch (amode) { | 45 | tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); |
29 | -- | 46 | -- |
30 | 2.20.1 | 47 | 2.25.1 |
31 | |||
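On the alignment-series side, the recurring mechanical change is from the size-suffixed wrappers to the explicit-MemOp helpers, so each call site states both the access size and its alignment requirement. Schematically, using the gen_srs() lines from the hunk above:

    /* before: size implied by the helper name, no alignment constraint */
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));

    /* after: size (MO_UL) and alignment (MO_ALIGN) carried in the MemOp
     * argument, so a misaligned address takes an alignment fault where
     * the architecture requires one */
    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);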
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | In the case of gpr load, merge the size and is_signed arguments; | 3 | These functions will be used to verify that the cpu |
4 | otherwise, simply convert size to memop. | 4 | is in the correct state for a given instruction. |
5 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210419202257.161730-26-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-16-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/translate-a64.c | 78 ++++++++++++++++---------------------- | 11 | target/arm/translate-a64.h | 21 +++++++++++++++++++++ |
12 | 1 file changed, 33 insertions(+), 45 deletions(-) | 12 | target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++ |
13 | 2 files changed, 55 insertions(+) | ||
13 | 14 | ||
15 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/translate-a64.h | ||
18 | +++ b/target/arm/translate-a64.h | ||
19 | @@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v); | ||
20 | bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, | ||
21 | unsigned int imms, unsigned int immr); | ||
22 | bool sve_access_check(DisasContext *s); | ||
23 | +bool sme_enabled_check(DisasContext *s); | ||
24 | +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); | ||
25 | + | ||
26 | +/* This function corresponds to CheckStreamingSVEEnabled. */ | ||
27 | +static inline bool sme_sm_enabled_check(DisasContext *s) | ||
28 | +{ | ||
29 | + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK); | ||
30 | +} | ||
31 | + | ||
32 | +/* This function corresponds to CheckSMEAndZAEnabled. */ | ||
33 | +static inline bool sme_za_enabled_check(DisasContext *s) | ||
34 | +{ | ||
35 | + return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK); | ||
36 | +} | ||
37 | + | ||
38 | +/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */ | ||
39 | +static inline bool sme_smza_enabled_check(DisasContext *s) | ||
40 | +{ | ||
41 | + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); | ||
42 | +} | ||
43 | + | ||
44 | TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); | ||
45 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
46 | bool tag_checked, int log2_size); | ||
14 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 47 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
15 | index XXXXXXX..XXXXXXX 100644 | 48 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/translate-a64.c | 49 | --- a/target/arm/translate-a64.c |
17 | +++ b/target/arm/translate-a64.c | 50 | +++ b/target/arm/translate-a64.c |
18 | @@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) | 51 | @@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s) |
19 | * Store from GPR register to memory. | 52 | return true; |
20 | */ | ||
21 | static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, | ||
22 | - TCGv_i64 tcg_addr, int size, int memidx, | ||
23 | + TCGv_i64 tcg_addr, MemOp memop, int memidx, | ||
24 | bool iss_valid, | ||
25 | unsigned int iss_srt, | ||
26 | bool iss_sf, bool iss_ar) | ||
27 | { | ||
28 | - g_assert(size <= 3); | ||
29 | - tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size); | ||
30 | + memop = finalize_memop(s, memop); | ||
31 | + tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop); | ||
32 | |||
33 | if (iss_valid) { | ||
34 | uint32_t syn; | ||
35 | |||
36 | syn = syn_data_abort_with_iss(0, | ||
37 | - size, | ||
38 | + (memop & MO_SIZE), | ||
39 | false, | ||
40 | iss_srt, | ||
41 | iss_sf, | ||
42 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, | ||
43 | } | 53 | } |
44 | 54 | ||
45 | static void do_gpr_st(DisasContext *s, TCGv_i64 source, | 55 | +/* This function corresponds to CheckSMEEnabled. */ |
46 | - TCGv_i64 tcg_addr, int size, | 56 | +bool sme_enabled_check(DisasContext *s) |
47 | + TCGv_i64 tcg_addr, MemOp memop, | 57 | +{ |
48 | bool iss_valid, | 58 | + /* |
49 | unsigned int iss_srt, | 59 | + * Note that unlike sve_excp_el, we have not constrained sme_excp_el |
50 | bool iss_sf, bool iss_ar) | 60 | + * to be zero when fp_excp_el has priority. This is because we need |
51 | { | 61 | + * sme_excp_el by itself for cpregs access checks. |
52 | - do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s), | 62 | + */ |
53 | + do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s), | 63 | + if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { |
54 | iss_valid, iss_srt, iss_sf, iss_ar); | 64 | + s->fp_access_checked = true; |
55 | } | 65 | + return sme_access_check(s); |
56 | 66 | + } | |
67 | + return fp_access_check_only(s); | ||
68 | +} | ||
69 | + | ||
70 | +/* Common subroutine for CheckSMEAnd*Enabled. */ | ||
71 | +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req) | ||
72 | +{ | ||
73 | + if (!sme_enabled_check(s)) { | ||
74 | + return false; | ||
75 | + } | ||
76 | + if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) { | ||
77 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
78 | + syn_smetrap(SME_ET_NotStreaming, false)); | ||
79 | + return false; | ||
80 | + } | ||
81 | + if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) { | ||
82 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
83 | + syn_smetrap(SME_ET_InactiveZA, false)); | ||
84 | + return false; | ||
85 | + } | ||
86 | + return true; | ||
87 | +} | ||
88 | + | ||
57 | /* | 89 | /* |
58 | * Load from memory to GPR register | 90 | * This utility function is for doing register extension with an |
59 | */ | 91 | * optional shift. You will likely want to pass a temporary for the |
60 | -static void do_gpr_ld_memidx(DisasContext *s, | ||
61 | - TCGv_i64 dest, TCGv_i64 tcg_addr, | ||
62 | - int size, bool is_signed, | ||
63 | - bool extend, int memidx, | ||
64 | +static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, | ||
65 | + MemOp memop, bool extend, int memidx, | ||
66 | bool iss_valid, unsigned int iss_srt, | ||
67 | bool iss_sf, bool iss_ar) | ||
68 | { | ||
69 | - MemOp memop = s->be_data + size; | ||
70 | - | ||
71 | - g_assert(size <= 3); | ||
72 | - | ||
73 | - if (is_signed) { | ||
74 | - memop += MO_SIGN; | ||
75 | - } | ||
76 | - | ||
77 | + memop = finalize_memop(s, memop); | ||
78 | tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop); | ||
79 | |||
80 | - if (extend && is_signed) { | ||
81 | - g_assert(size < 3); | ||
82 | + if (extend && (memop & MO_SIGN)) { | ||
83 | + g_assert((memop & MO_SIZE) <= MO_32); | ||
84 | tcg_gen_ext32u_i64(dest, dest); | ||
85 | } | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s, | ||
88 | uint32_t syn; | ||
89 | |||
90 | syn = syn_data_abort_with_iss(0, | ||
91 | - size, | ||
92 | - is_signed, | ||
93 | + (memop & MO_SIZE), | ||
94 | + (memop & MO_SIGN) != 0, | ||
95 | iss_srt, | ||
96 | iss_sf, | ||
97 | iss_ar, | ||
98 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s, | ||
99 | } | ||
100 | } | ||
101 | |||
102 | -static void do_gpr_ld(DisasContext *s, | ||
103 | - TCGv_i64 dest, TCGv_i64 tcg_addr, | ||
104 | - int size, bool is_signed, bool extend, | ||
105 | +static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, | ||
106 | + MemOp memop, bool extend, | ||
107 | bool iss_valid, unsigned int iss_srt, | ||
108 | bool iss_sf, bool iss_ar) | ||
109 | { | ||
110 | - do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend, | ||
111 | - get_mem_index(s), | ||
112 | + do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s), | ||
113 | iss_valid, iss_srt, iss_sf, iss_ar); | ||
114 | } | ||
115 | |||
116 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
117 | } | ||
118 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | ||
119 | false, rn != 31, size); | ||
120 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt, | ||
121 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt, | ||
122 | disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | ||
123 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
124 | return; | ||
125 | @@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) | ||
126 | /* Only unsigned 32bit loads target 32bit registers. */ | ||
127 | bool iss_sf = opc != 0; | ||
128 | |||
129 | - do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false, | ||
130 | - true, rt, iss_sf, false); | ||
131 | + do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, | ||
132 | + false, true, rt, iss_sf, false); | ||
133 | } | ||
134 | tcg_temp_free_i64(clean_addr); | ||
135 | } | ||
136 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
137 | /* Do not modify tcg_rt before recognizing any exception | ||
138 | * from the second load. | ||
139 | */ | ||
140 | - do_gpr_ld(s, tmp, clean_addr, size, is_signed, false, | ||
141 | - false, 0, false, false); | ||
142 | + do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN, | ||
143 | + false, false, 0, false, false); | ||
144 | tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size); | ||
145 | - do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false, | ||
146 | - false, 0, false, false); | ||
147 | + do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN, | ||
148 | + false, false, 0, false, false); | ||
149 | |||
150 | tcg_gen_mov_i64(tcg_rt, tmp); | ||
151 | tcg_temp_free_i64(tmp); | ||
152 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, | ||
153 | do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx, | ||
154 | iss_valid, rt, iss_sf, false); | ||
155 | } else { | ||
156 | - do_gpr_ld_memidx(s, tcg_rt, clean_addr, size, | ||
157 | - is_signed, is_extended, memidx, | ||
158 | + do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, | ||
159 | + is_extended, memidx, | ||
160 | iss_valid, rt, iss_sf, false); | ||
161 | } | ||
162 | } | ||
163 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, | ||
164 | do_gpr_st(s, tcg_rt, clean_addr, size, | ||
165 | true, rt, iss_sf, false); | ||
166 | } else { | ||
167 | - do_gpr_ld(s, tcg_rt, clean_addr, size, | ||
168 | - is_signed, is_extended, | ||
169 | - true, rt, iss_sf, false); | ||
170 | + do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, | ||
171 | + is_extended, true, rt, iss_sf, false); | ||
172 | } | ||
173 | } | ||
174 | } | ||
175 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, | ||
176 | do_gpr_st(s, tcg_rt, clean_addr, size, | ||
177 | true, rt, iss_sf, false); | ||
178 | } else { | ||
179 | - do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended, | ||
180 | - true, rt, iss_sf, false); | ||
181 | + do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, | ||
182 | + is_extended, true, rt, iss_sf, false); | ||
183 | } | ||
184 | } | ||
185 | } | ||
186 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, | ||
187 | * full load-acquire (we only need "load-acquire processor consistent"), | ||
188 | * but we choose to implement them as full LDAQ. | ||
189 | */ | ||
190 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, | ||
191 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, | ||
192 | true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); | ||
193 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
194 | return; | ||
195 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn, | ||
196 | is_wback || rn != 31, size); | ||
197 | |||
198 | tcg_rt = cpu_reg(s, rt); | ||
199 | - do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false, | ||
200 | + do_gpr_ld(s, tcg_rt, clean_addr, size, | ||
201 | /* extend */ false, /* iss_valid */ !is_wback, | ||
202 | /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false); | ||
203 | |||
204 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | ||
205 | * Load-AcquirePC semantics; we implement as the slightly more | ||
206 | * restrictive Load-Acquire. | ||
207 | */ | ||
208 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend, | ||
209 | - true, rt, iss_sf, true); | ||
210 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN, | ||
211 | + extend, true, rt, iss_sf, true); | ||
212 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
213 | } | ||
214 | } | ||
215 | -- | 92 | -- |
216 | 2.20.1 | 93 | 2.25.1 |
217 | |||
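A usage note on the three new inline wrappers added on the SME side (sme_sm_enabled_check, sme_za_enabled_check, sme_smza_enabled_check): later patches gate each SME instruction on one of them before emitting any code. A sketch of the intended call pattern, with a placeholder instruction name (trans_EXAMPLE is illustrative, not a real function):

    static bool trans_EXAMPLE(DisasContext *s, arg_EXAMPLE *a)
    {
        if (!dc_isar_feature(aa64_sme, s)) {
            return false;
        }
        /*
         * Require both PSTATE.SM and PSTATE.ZA; on failure
         * sme_enabled_check_with_svcr() raises the appropriate SME trap
         * (NotStreaming or InactiveZA) and we simply stop translating.
         */
        if (!sme_smza_enabled_check(s)) {
            return true;
        }
        /* ... code generation touching ZA storage ... */
        return true;
    }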
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For 128-bit load/store, use 16-byte alignment. This | 3 | The pseudocode for CheckSVEEnabled gains a check for Streaming |
4 | requires that we perform the two operations in the | 4 | SVE mode, and for SME present but SVE absent. |
5 | correct order so that we generate the alignment fault | ||
6 | before modifying memory. | ||
7 | 5 | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Message-id: 20210419202257.161730-27-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-17-richard.henderson@linaro.org |
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 10 | --- |
13 | target/arm/translate-a64.c | 42 +++++++++++++++++++++++--------------- | 11 | target/arm/translate-a64.c | 22 ++++++++++++++++------ |
14 | 1 file changed, 26 insertions(+), 16 deletions(-) | 12 | 1 file changed, 16 insertions(+), 6 deletions(-) |
15 | 13 | ||
16 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 14 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
17 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/translate-a64.c | 16 | --- a/target/arm/translate-a64.c |
19 | +++ b/target/arm/translate-a64.c | 17 | +++ b/target/arm/translate-a64.c |
20 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, | 18 | @@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s) |
21 | static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) | 19 | return true; |
20 | } | ||
21 | |||
22 | -/* Check that SVE access is enabled. If it is, return true. | ||
23 | +/* | ||
24 | + * Check that SVE access is enabled. If it is, return true. | ||
25 | * If not, emit code to generate an appropriate exception and return false. | ||
26 | + * This function corresponds to CheckSVEEnabled(). | ||
27 | */ | ||
28 | bool sve_access_check(DisasContext *s) | ||
22 | { | 29 | { |
23 | /* This writes the bottom N bits of a 128 bit wide vector to memory */ | 30 | - if (s->sve_excp_el) { |
24 | - TCGv_i64 tmp = tcg_temp_new_i64(); | 31 | - assert(!s->sve_access_checked); |
25 | - tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64)); | 32 | - s->sve_access_checked = true; |
26 | + TCGv_i64 tmplo = tcg_temp_new_i64(); | 33 | - |
27 | + MemOp mop; | 34 | + if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { |
35 | + assert(dc_isar_feature(aa64_sme, s)); | ||
36 | + if (!sme_sm_enabled_check(s)) { | ||
37 | + goto fail_exit; | ||
38 | + } | ||
39 | + } else if (s->sve_excp_el) { | ||
40 | gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF, | ||
41 | syn_sve_access_trap(), s->sve_excp_el); | ||
42 | - return false; | ||
43 | + goto fail_exit; | ||
44 | } | ||
45 | s->sve_access_checked = true; | ||
46 | return fp_access_check(s); | ||
28 | + | 47 | + |
29 | + tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64)); | 48 | + fail_exit: |
30 | + | 49 | + /* Assert that we only raise one exception per instruction. */ |
31 | if (size < 4) { | 50 | + assert(!s->sve_access_checked); |
32 | - tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), | 51 | + s->sve_access_checked = true; |
33 | - s->be_data + size); | 52 | + return false; |
34 | + mop = finalize_memop(s, size); | ||
35 | + tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
36 | } else { | ||
37 | bool be = s->be_data == MO_BE; | ||
38 | TCGv_i64 tcg_hiaddr = tcg_temp_new_i64(); | ||
39 | + TCGv_i64 tmphi = tcg_temp_new_i64(); | ||
40 | |||
41 | + tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx)); | ||
42 | + | ||
43 | + mop = s->be_data | MO_Q; | ||
44 | + tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), | ||
45 | + mop | (s->align_mem ? MO_ALIGN_16 : 0)); | ||
46 | tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); | ||
47 | - tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), | ||
48 | - s->be_data | MO_Q); | ||
49 | - tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx)); | ||
50 | - tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s), | ||
51 | - s->be_data | MO_Q); | ||
52 | + tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr, | ||
53 | + get_mem_index(s), mop); | ||
54 | + | ||
55 | tcg_temp_free_i64(tcg_hiaddr); | ||
56 | + tcg_temp_free_i64(tmphi); | ||
57 | } | ||
58 | |||
59 | - tcg_temp_free_i64(tmp); | ||
60 | + tcg_temp_free_i64(tmplo); | ||
61 | } | 53 | } |
62 | 54 | ||
63 | /* | 55 | /* |
64 | @@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) | ||
65 | /* This always zero-extends and writes to a full 128 bit wide vector */ | ||
66 | TCGv_i64 tmplo = tcg_temp_new_i64(); | ||
67 | TCGv_i64 tmphi = NULL; | ||
68 | + MemOp mop; | ||
69 | |||
70 | if (size < 4) { | ||
71 | - MemOp memop = s->be_data + size; | ||
72 | - tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop); | ||
73 | + mop = finalize_memop(s, size); | ||
74 | + tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
75 | } else { | ||
76 | bool be = s->be_data == MO_BE; | ||
77 | TCGv_i64 tcg_hiaddr; | ||
78 | @@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) | ||
79 | tmphi = tcg_temp_new_i64(); | ||
80 | tcg_hiaddr = tcg_temp_new_i64(); | ||
81 | |||
82 | + mop = s->be_data | MO_Q; | ||
83 | + tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), | ||
84 | + mop | (s->align_mem ? MO_ALIGN_16 : 0)); | ||
85 | tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); | ||
86 | - tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), | ||
87 | - s->be_data | MO_Q); | ||
88 | - tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s), | ||
89 | - s->be_data | MO_Q); | ||
90 | + tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr, | ||
91 | + get_mem_index(s), mop); | ||
92 | tcg_temp_free_i64(tcg_hiaddr); | ||
93 | } | ||
94 | |||
95 | -- | 56 | -- |
96 | 2.20.1 | 57 | 2.25.1 |
97 | |||
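Because the -/+ hunks make the new control flow of sve_access_check() a little hard to follow, here is the resulting logic condensed into one place; this is an illustrative sketch with the bookkeeping and exception-raising details elided, not a literal copy of the new function:

    bool sve_access_check(DisasContext *s)    /* condensed sketch */
    {
        if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
            /* Streaming SVE mode, or SME implemented without SVE:
             * defer to the SME enablement checks. */
            assert(dc_isar_feature(aa64_sme, s));
            if (!sme_sm_enabled_check(s)) {
                return false;                 /* trap already raised */
            }
        } else if (s->sve_excp_el) {
            return false;                     /* SVE access trap raised */
        }
        s->sve_access_checked = true;
        return fp_access_check(s);
    }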
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Adjust the interface to match what has been done to the | 3 | These SME instructions are nominally within the SVE decode space, |
4 | TCGv_i32 load/store functions. | 4 | so we add them to sve.decode and translate-sve.c. |
5 | |||
6 | This is less obvious, because at present the only user of | ||
7 | these functions, trans_VLDST_multiple, also wants to manipulate | ||
8 | the endianness to speed up loading multiple bytes. Thus we | ||
9 | retain an "internal" interface which is identical to the | ||
10 | current gen_aa32_{ld,st}_i64 interface. | ||
11 | |||
12 | The "new" interface will gain users as we remove the legacy | ||
13 | interfaces, gen_aa32_ld64 and gen_aa32_st64. | ||
14 | 5 | ||
15 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
16 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
17 | Message-id: 20210419202257.161730-15-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-18-richard.henderson@linaro.org |
18 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
19 | --- | 10 | --- |
20 | target/arm/translate.c | 78 +++++++++++++++++++-------------- | 11 | target/arm/translate-a64.h | 12 ++++++++++++ |
21 | target/arm/translate-neon.c.inc | 6 ++- | 12 | target/arm/sve.decode | 5 ++++- |
22 | 2 files changed, 49 insertions(+), 35 deletions(-) | 13 | target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++ |
14 | 3 files changed, 54 insertions(+), 1 deletion(-) | ||
23 | 15 | ||
24 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 16 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
25 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/target/arm/translate.c | 18 | --- a/target/arm/translate-a64.h |
27 | +++ b/target/arm/translate.c | 19 | +++ b/target/arm/translate-a64.h |
28 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, | 20 | @@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s) |
29 | tcg_temp_free(addr); | 21 | return s->vl; |
30 | } | 22 | } |
31 | 23 | ||
32 | +static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, | 24 | +/* Return the byte size of the vector register, SVL / 8. */ |
33 | + TCGv_i32 a32, int index, MemOp opc) | 25 | +static inline int streaming_vec_reg_size(DisasContext *s) |
34 | +{ | 26 | +{ |
35 | + TCGv addr = gen_aa32_addr(s, a32, opc); | 27 | + return s->svl; |
36 | + | ||
37 | + tcg_gen_qemu_ld_i64(val, addr, index, opc); | ||
38 | + | ||
39 | + /* Not needed for user-mode BE32, where we use MO_BE instead. */ | ||
40 | + if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | ||
41 | + tcg_gen_rotri_i64(val, val, 32); | ||
42 | + } | ||
43 | + tcg_temp_free(addr); | ||
44 | +} | 28 | +} |
45 | + | 29 | + |
46 | +static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, | 30 | /* |
47 | + TCGv_i32 a32, int index, MemOp opc) | 31 | * Return the offset info CPUARMState of the predicate vector register Pn. |
32 | * Note for this purpose, FFR is P16. | ||
33 | @@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s) | ||
34 | return s->vl >> 3; | ||
35 | } | ||
36 | |||
37 | +/* Return the byte size of the predicate register, SVL / 64. */ | ||
38 | +static inline int streaming_pred_reg_size(DisasContext *s) | ||
48 | +{ | 39 | +{ |
49 | + TCGv addr = gen_aa32_addr(s, a32, opc); | 40 | + return s->svl >> 3; |
50 | + | ||
51 | + /* Not needed for user-mode BE32, where we use MO_BE instead. */ | ||
52 | + if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | ||
53 | + TCGv_i64 tmp = tcg_temp_new_i64(); | ||
54 | + tcg_gen_rotri_i64(tmp, val, 32); | ||
55 | + tcg_gen_qemu_st_i64(tmp, addr, index, opc); | ||
56 | + tcg_temp_free_i64(tmp); | ||
57 | + } else { | ||
58 | + tcg_gen_qemu_st_i64(val, addr, index, opc); | ||
59 | + } | ||
60 | + tcg_temp_free(addr); | ||
61 | +} | 41 | +} |
62 | + | 42 | + |
63 | static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | 43 | /* |
64 | int index, MemOp opc) | 44 | * Round up the size of a register to a size allowed by |
65 | { | 45 | * the tcg vector infrastructure. Any operation which uses this |
66 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | 46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
67 | gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc)); | 47 | index XXXXXXX..XXXXXXX 100644 |
48 | --- a/target/arm/sve.decode | ||
49 | +++ b/target/arm/sve.decode | ||
50 | @@ -XXX,XX +XXX,XX @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5 | ||
51 | # SVE index generation (register start, register increment) | ||
52 | INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm | ||
53 | |||
54 | -### SVE Stack Allocation Group | ||
55 | +### SVE / Streaming SVE Stack Allocation Group | ||
56 | |||
57 | # SVE stack frame adjustment | ||
58 | ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6 | ||
59 | +ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6 | ||
60 | ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6 | ||
61 | +ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6 | ||
62 | |||
63 | # SVE stack frame size | ||
64 | RDVL 00000100 101 11111 01010 imm:s6 rd:5 | ||
65 | +RDSVL 00000100 101 11111 01011 imm:s6 rd:5 | ||
66 | |||
67 | ### SVE Bitwise Shift - Unpredicated Group | ||
68 | |||
69 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/target/arm/translate-sve.c | ||
72 | +++ b/target/arm/translate-sve.c | ||
73 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) | ||
74 | return true; | ||
68 | } | 75 | } |
69 | 76 | ||
70 | +static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | 77 | +static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) |
71 | + int index, MemOp opc) | ||
72 | +{ | 78 | +{ |
73 | + gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc)); | 79 | + if (!dc_isar_feature(aa64_sme, s)) { |
80 | + return false; | ||
81 | + } | ||
82 | + if (sme_enabled_check(s)) { | ||
83 | + TCGv_i64 rd = cpu_reg_sp(s, a->rd); | ||
84 | + TCGv_i64 rn = cpu_reg_sp(s, a->rn); | ||
85 | + tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); | ||
86 | + } | ||
87 | + return true; | ||
74 | +} | 88 | +} |
75 | + | 89 | + |
76 | +static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | 90 | static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) |
77 | + int index, MemOp opc) | 91 | { |
92 | if (!dc_isar_feature(aa64_sve, s)) { | ||
93 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) | ||
94 | return true; | ||
95 | } | ||
96 | |||
97 | +static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) | ||
78 | +{ | 98 | +{ |
79 | + gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc)); | 99 | + if (!dc_isar_feature(aa64_sme, s)) { |
100 | + return false; | ||
101 | + } | ||
102 | + if (sme_enabled_check(s)) { | ||
103 | + TCGv_i64 rd = cpu_reg_sp(s, a->rd); | ||
104 | + TCGv_i64 rn = cpu_reg_sp(s, a->rn); | ||
105 | + tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); | ||
106 | + } | ||
107 | + return true; | ||
80 | +} | 108 | +} |
81 | + | 109 | + |
82 | #define DO_GEN_LD(SUFF, OPC) \ | 110 | static bool trans_RDVL(DisasContext *s, arg_RDVL *a) |
83 | static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ | ||
84 | TCGv_i32 a32, int index) \ | ||
85 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
86 | gen_aa32_st_i32(s, val, a32, index, OPC); \ | ||
87 | } | ||
88 | |||
89 | -static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | ||
90 | - int index, MemOp opc) | ||
91 | -{ | ||
92 | - TCGv addr = gen_aa32_addr(s, a32, opc); | ||
93 | - tcg_gen_qemu_ld_i64(val, addr, index, opc); | ||
94 | - | ||
95 | - /* Not needed for user-mode BE32, where we use MO_BE instead. */ | ||
96 | - if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | ||
97 | - tcg_gen_rotri_i64(val, val, 32); | ||
98 | - } | ||
99 | - | ||
100 | - tcg_temp_free(addr); | ||
101 | -} | ||
102 | - | ||
103 | static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, | ||
104 | TCGv_i32 a32, int index) | ||
105 | { | 111 | { |
106 | - gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data); | 112 | if (!dc_isar_feature(aa64_sve, s)) { |
107 | -} | 113 | @@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a) |
108 | - | 114 | return true; |
109 | -static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | ||
110 | - int index, MemOp opc) | ||
111 | -{ | ||
112 | - TCGv addr = gen_aa32_addr(s, a32, opc); | ||
113 | - | ||
114 | - /* Not needed for user-mode BE32, where we use MO_BE instead. */ | ||
115 | - if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | ||
116 | - TCGv_i64 tmp = tcg_temp_new_i64(); | ||
117 | - tcg_gen_rotri_i64(tmp, val, 32); | ||
118 | - tcg_gen_qemu_st_i64(tmp, addr, index, opc); | ||
119 | - tcg_temp_free_i64(tmp); | ||
120 | - } else { | ||
121 | - tcg_gen_qemu_st_i64(val, addr, index, opc); | ||
122 | - } | ||
123 | - tcg_temp_free(addr); | ||
124 | + gen_aa32_ld_i64(s, val, a32, index, MO_Q); | ||
125 | } | 115 | } |
126 | 116 | ||
127 | static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, | 117 | +static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) |
128 | TCGv_i32 a32, int index) | 118 | +{ |
129 | { | 119 | + if (!dc_isar_feature(aa64_sme, s)) { |
130 | - gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data); | 120 | + return false; |
131 | + gen_aa32_st_i64(s, val, a32, index, MO_Q); | 121 | + } |
132 | } | 122 | + if (sme_enabled_check(s)) { |
133 | 123 | + TCGv_i64 reg = cpu_reg(s, a->rd); | |
134 | DO_GEN_LD(8u, MO_UB) | 124 | + tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); |
135 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | 125 | + } |
136 | index XXXXXXX..XXXXXXX 100644 | 126 | + return true; |
137 | --- a/target/arm/translate-neon.c.inc | 127 | +} |
138 | +++ b/target/arm/translate-neon.c.inc | 128 | + |
139 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) | 129 | /* |
140 | int tt = a->vd + reg + spacing * xs; | 130 | *** SVE Compute Vector Address Group |
141 | 131 | */ | |
142 | if (a->l) { | ||
143 | - gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size); | ||
144 | + gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, | ||
145 | + endian | size); | ||
146 | neon_store_element64(tt, n, size, tmp64); | ||
147 | } else { | ||
148 | neon_load_element64(tmp64, tt, n, size); | ||
149 | - gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size); | ||
150 | + gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, | ||
151 | + endian | size); | ||
152 | } | ||
153 | tcg_gen_add_i32(addr, addr, tmp); | ||
154 | } | ||
155 | -- | 132 | -- |
156 | 2.20.1 | 133 | 2.25.1 |
157 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-25-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-19-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/translate-neon.c.inc | 48 ++++++++++++++++++++++++++++----- | 8 | target/arm/helper-sme.h | 2 ++ |
9 | 1 file changed, 42 insertions(+), 6 deletions(-) | 9 | target/arm/sme.decode | 4 ++++ |
10 | target/arm/sme_helper.c | 25 +++++++++++++++++++++++++ | ||
11 | target/arm/translate-sme.c | 13 +++++++++++++ | ||
12 | 4 files changed, 44 insertions(+) | ||
10 | 13 | ||
11 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
12 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-neon.c.inc | 16 | --- a/target/arm/helper-sme.h |
14 | +++ b/target/arm/translate-neon.c.inc | 17 | +++ b/target/arm/helper-sme.h |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) | 18 | @@ -XXX,XX +XXX,XX @@ |
16 | int nregs = a->n + 1; | 19 | |
17 | int vd = a->vd; | 20 | DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) |
18 | TCGv_i32 addr, tmp; | 21 | DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) |
19 | + MemOp mop; | 22 | + |
20 | 23 | +DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) | |
21 | if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { | 24 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
22 | return false; | 25 | index XXXXXXX..XXXXXXX 100644 |
23 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) | 26 | --- a/target/arm/sme.decode |
24 | return true; | 27 | +++ b/target/arm/sme.decode |
28 | @@ -XXX,XX +XXX,XX @@ | ||
29 | # | ||
30 | # This file is processed by scripts/decodetree.py | ||
31 | # | ||
32 | + | ||
33 | +### SME Misc | ||
34 | + | ||
35 | +ZERO 11000000 00 001 00000000000 imm:8 | ||
36 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sme_helper.c | ||
39 | +++ b/target/arm/sme_helper.c | ||
40 | @@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i) | ||
41 | memset(env->zarray, 0, sizeof(env->zarray)); | ||
25 | } | 42 | } |
26 | 43 | } | |
27 | + /* Pick up SCTLR settings */ | ||
28 | + mop = finalize_memop(s, a->size); | ||
29 | + | 44 | + |
30 | + if (a->align) { | 45 | +void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) |
31 | + MemOp align_op; | 46 | +{ |
47 | + uint32_t i; | ||
32 | + | 48 | + |
33 | + switch (nregs) { | 49 | + /* |
34 | + case 1: | 50 | + * Special case clearing the entire ZA space. |
35 | + /* For VLD1, use natural alignment. */ | 51 | + * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any |
36 | + align_op = MO_ALIGN; | 52 | + * parts of the ZA storage outside of SVL. |
37 | + break; | 53 | + */ |
38 | + case 2: | 54 | + if (imm == 0xff) { |
39 | + /* For VLD2, use double alignment. */ | 55 | + memset(env->zarray, 0, sizeof(env->zarray)); |
40 | + align_op = pow2_align(a->size + 1); | 56 | + return; |
41 | + break; | ||
42 | + case 4: | ||
43 | + if (a->size == MO_32) { | ||
44 | + /* | ||
45 | + * For VLD4.32, align = 1 is double alignment, align = 2 is | ||
46 | + * quad alignment; align = 3 is rejected above. | ||
47 | + */ | ||
48 | + align_op = pow2_align(a->size + a->align); | ||
49 | + } else { | ||
50 | + /* For VLD4.8 and VLD.16, we want quad alignment. */ | ||
51 | + align_op = pow2_align(a->size + 2); | ||
52 | + } | ||
53 | + break; | ||
54 | + default: | ||
55 | + /* For VLD3, the alignment field is zero and rejected above. */ | ||
56 | + g_assert_not_reached(); | ||
57 | + } | ||
58 | + | ||
59 | + mop = (mop & ~MO_AMASK) | align_op; | ||
60 | + } | 57 | + } |
61 | + | 58 | + |
62 | tmp = tcg_temp_new_i32(); | 59 | + /* |
63 | addr = tcg_temp_new_i32(); | 60 | + * Recall that ZAnH.D[m] is spread across ZA[n+8*m], |
64 | load_reg_var(s, addr, a->rn); | 61 | + * so each row is discontiguous within ZA[]. |
65 | - /* | 62 | + */ |
66 | - * TODO: if we implemented alignment exceptions, we should check | 63 | + for (i = 0; i < svl; i++) { |
67 | - * addr against the alignment encoded in a->align here. | 64 | + if (imm & (1 << (i % 8))) { |
68 | - */ | 65 | + memset(&env->zarray[i], 0, svl); |
66 | + } | ||
67 | + } | ||
68 | +} | ||
69 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/target/arm/translate-sme.c | ||
72 | +++ b/target/arm/translate-sme.c | ||
73 | @@ -XXX,XX +XXX,XX @@ | ||
74 | */ | ||
75 | |||
76 | #include "decode-sme.c.inc" | ||
69 | + | 77 | + |
70 | for (reg = 0; reg < nregs; reg++) { | ||
71 | if (a->l) { | ||
72 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size); | ||
73 | + gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop); | ||
74 | neon_store_element(vd, a->reg_idx, a->size, tmp); | ||
75 | } else { /* Store */ | ||
76 | neon_load_element(tmp, vd, a->reg_idx, a->size); | ||
77 | - gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size); | ||
78 | + gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop); | ||
79 | } | ||
80 | vd += a->stride; | ||
81 | tcg_gen_addi_i32(addr, addr, 1 << a->size); | ||
82 | + | 78 | + |
83 | + /* Subsequent memory operations inherit alignment */ | 79 | +static bool trans_ZERO(DisasContext *s, arg_ZERO *a) |
84 | + mop &= ~MO_AMASK; | 80 | +{ |
85 | } | 81 | + if (!dc_isar_feature(aa64_sme, s)) { |
86 | tcg_temp_free_i32(addr); | 82 | + return false; |
87 | tcg_temp_free_i32(tmp); | 83 | + } |
84 | + if (sme_za_enabled_check(s)) { | ||
85 | + gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm), | ||
86 | + tcg_constant_i32(streaming_vec_reg_size(s))); | ||
87 | + } | ||
88 | + return true; | ||
89 | +} | ||
88 | -- | 90 | -- |
89 | 2.20.1 | 91 | 2.25.1 |
90 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Now that mte_check1 and mte_checkN have been merged, we can | 3 | We can reuse the SVE functions for implementing moves to/from |
4 | merge sve_cont_ldst_mte_check1 and sve_cont_ldst_mte_checkN. | 4 | horizontal tile slices, but we need new ones for moves to/from |
5 | vertical tile slices. | ||
5 | 6 | ||
6 | Which means that we can eliminate the function pointer into | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | sve_ldN_r and sve_stN_r, calling sve_cont_ldst_mte_check directly. | ||
8 | |||
9 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | Message-id: 20210416183106.1516563-9-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-20-richard.henderson@linaro.org |
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 11 | --- |
14 | target/arm/sve_helper.c | 84 +++++++++++++---------------------------- | 12 | target/arm/helper-sme.h | 12 +++ |
15 | 1 file changed, 26 insertions(+), 58 deletions(-) | 13 | target/arm/helper-sve.h | 2 + |
14 | target/arm/translate-a64.h | 8 ++ | ||
15 | target/arm/translate.h | 5 ++ | ||
16 | target/arm/sme.decode | 15 ++++ | ||
17 | target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++- | ||
18 | target/arm/sve_helper.c | 12 +++ | ||
19 | target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++ | ||
20 | 8 files changed, 331 insertions(+), 1 deletion(-) | ||
16 | 21 | ||
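
To illustrate the horizontal/vertical distinction in the commit message: a horizontal slice of a tile is a contiguous run of bytes within one ZA row, so the existing SVE sel helpers can copy it directly, while successive elements of a vertical slice sit one row of the tile apart, i.e. esize rows of the backing store apart. A minimal sketch of the relative byte offsets (values are assumptions, not QEMU code; ROW_BYTES stands in for sizeof(ARMVectorReg), and the vertical offset matches tile_vslice_offset() defined later in this patch):

#include <stdio.h>

#define ROW_BYTES 256   /* stands in for sizeof(ARMVectorReg) */

int main(void)
{
    unsigned esize = 4;   /* .S elements, assumed */

    for (unsigned i = 0; i < 4; i++) {
        unsigned horiz = i * esize;               /* contiguous within one row */
        unsigned vert  = i * esize * ROW_BYTES;   /* tile_vslice_offset(i * esize) */
        printf("elem %u: horizontal +%4u, vertical +%4u\n", i, horiz, vert);
    }
    return 0;
}
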
22 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/target/arm/helper-sme.h | ||
25 | +++ b/target/arm/helper-sme.h | ||
26 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) | ||
27 | DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) | ||
28 | |||
29 | DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) | ||
30 | + | ||
31 | +/* Move to/from vertical array slices, i.e. columns, so 'c'. */ | ||
32 | +DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
42 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/target/arm/helper-sve.h | ||
45 | +++ b/target/arm/helper-sve.h | ||
46 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, | ||
47 | void, ptr, ptr, ptr, ptr, i32) | ||
48 | DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, | ||
49 | void, ptr, ptr, ptr, ptr, i32) | ||
50 | +DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG, | ||
51 | + void, ptr, ptr, ptr, ptr, i32) | ||
52 | |||
53 | DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG, | ||
54 | void, ptr, ptr, ptr, ptr, i32) | ||
55 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/target/arm/translate-a64.h | ||
58 | +++ b/target/arm/translate-a64.h | ||
59 | @@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s) | ||
60 | return size_for_gvec(pred_full_reg_size(s)); | ||
61 | } | ||
62 | |||
63 | +/* Return a newly allocated pointer to the predicate register. */ | ||
64 | +static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) | ||
65 | +{ | ||
66 | + TCGv_ptr ret = tcg_temp_new_ptr(); | ||
67 | + tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno)); | ||
68 | + return ret; | ||
69 | +} | ||
70 | + | ||
71 | bool disas_sve(DisasContext *, uint32_t); | ||
72 | bool disas_sme(DisasContext *, uint32_t); | ||
73 | |||
74 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/target/arm/translate.h | ||
77 | +++ b/target/arm/translate.h | ||
78 | @@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x) | ||
79 | return x + 2; | ||
80 | } | ||
81 | |||
82 | +static inline int plus_12(DisasContext *s, int x) | ||
83 | +{ | ||
84 | + return x + 12; | ||
85 | +} | ||
86 | + | ||
87 | static inline int times_2(DisasContext *s, int x) | ||
88 | { | ||
89 | return x * 2; | ||
90 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/target/arm/sme.decode | ||
93 | +++ b/target/arm/sme.decode | ||
94 | @@ -XXX,XX +XXX,XX @@ | ||
95 | ### SME Misc | ||
96 | |||
97 | ZERO 11000000 00 001 00000000000 imm:8 | ||
98 | + | ||
99 | +### SME Move into/from Array | ||
100 | + | ||
101 | +%mova_rs 13:2 !function=plus_12 | ||
102 | +&mova esz rs pg zr za_imm v:bool to_vec:bool | ||
103 | + | ||
104 | +MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \ | ||
105 | + &mova to_vec=0 rs=%mova_rs | ||
106 | +MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \ | ||
107 | + &mova to_vec=0 rs=%mova_rs esz=4 | ||
108 | + | ||
109 | +MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ | ||
110 | + &mova to_vec=1 rs=%mova_rs | ||
111 | +MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ | ||
112 | + &mova to_vec=1 rs=%mova_rs esz=4 | ||
113 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
114 | index XXXXXXX..XXXXXXX 100644 | ||
115 | --- a/target/arm/sme_helper.c | ||
116 | +++ b/target/arm/sme_helper.c | ||
117 | @@ -XXX,XX +XXX,XX @@ | ||
118 | |||
119 | #include "qemu/osdep.h" | ||
120 | #include "cpu.h" | ||
121 | -#include "internals.h" | ||
122 | +#include "tcg/tcg-gvec-desc.h" | ||
123 | #include "exec/helper-proto.h" | ||
124 | +#include "qemu/int128.h" | ||
125 | +#include "vec_internal.h" | ||
126 | |||
127 | /* ResetSVEState */ | ||
128 | void arm_reset_sve_state(CPUARMState *env) | ||
129 | @@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) | ||
130 | } | ||
131 | } | ||
132 | } | ||
133 | + | ||
134 | + | ||
135 | +/* | ||
136 | + * When considering the ZA storage as an array of elements of | ||
137 | + * type T, the index within that array of the Nth element of | ||
138 | + * a vertical slice of a tile can be calculated like this, | ||
139 | + * regardless of the size of type T. This is because the tiles | ||
140 | + * are interleaved, so if type T is size N bytes then row 1 of | ||
141 | + * the tile is N rows away from row 0. The division by N to | ||
142 | + * convert a byte offset into an array index and the multiplication | ||
143 | + * by N to convert from vslice-index-within-the-tile to | ||
144 | + * the index within the ZA storage cancel out. | ||
145 | + */ | ||
146 | +#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg)) | ||
147 | + | ||
148 | +/* | ||
149 | + * When doing byte arithmetic on the ZA storage, the element | ||
150 | + * byteoff bytes away in a tile vertical slice is always this | ||
151 | + * many bytes away in the ZA storage, regardless of the | ||
152 | + * size of the tile element, assuming that byteoff is a multiple | ||
153 | + * of the element size. Again this is because of the interleaving | ||
154 | + * of the tiles. For instance if we have 1 byte per element then | ||
155 | + * each row of the ZA storage has one byte of the vslice data, | ||
156 | + * and (counting from 0) byte 8 goes in row 8 of the storage | ||
157 | + * at offset (8 * row-size-in-bytes). | ||
158 | + * If we have 8 bytes per element then each row of the ZA storage | ||
159 | + * has 8 bytes of the data, but there are 8 interleaved tiles and | ||
160 | + * so byte 8 of the data goes into row 1 of the tile, | ||
161 | + * which is again row 8 of the storage, so the offset is still | ||
162 | + * (8 * row-size-in-bytes). Similarly for other element sizes. | ||
163 | + */ | ||
164 | +#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg)) | ||
165 | + | ||
166 | + | ||
167 | +/* | ||
168 | + * Move Zreg vector to ZArray column. | ||
169 | + */ | ||
170 | +#define DO_MOVA_C(NAME, TYPE, H) \ | ||
171 | +void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \ | ||
172 | +{ \ | ||
173 | + int i, oprsz = simd_oprsz(desc); \ | ||
174 | + for (i = 0; i < oprsz; ) { \ | ||
175 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
176 | + do { \ | ||
177 | + if (pg & 1) { \ | ||
178 | + *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \ | ||
179 | + } \ | ||
180 | + i += sizeof(TYPE); \ | ||
181 | + pg >>= sizeof(TYPE); \ | ||
182 | + } while (i & 15); \ | ||
183 | + } \ | ||
184 | +} | ||
185 | + | ||
186 | +DO_MOVA_C(sme_mova_cz_b, uint8_t, H1) | ||
187 | +DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2) | ||
188 | +DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4) | ||
189 | + | ||
190 | +void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc) | ||
191 | +{ | ||
192 | + int i, oprsz = simd_oprsz(desc) / 8; | ||
193 | + uint8_t *pg = vg; | ||
194 | + uint64_t *n = vn; | ||
195 | + uint64_t *a = za; | ||
196 | + | ||
197 | + for (i = 0; i < oprsz; i++) { | ||
198 | + if (pg[H1(i)] & 1) { | ||
199 | + a[tile_vslice_index(i)] = n[i]; | ||
200 | + } | ||
201 | + } | ||
202 | +} | ||
203 | + | ||
204 | +void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc) | ||
205 | +{ | ||
206 | + int i, oprsz = simd_oprsz(desc) / 16; | ||
207 | + uint16_t *pg = vg; | ||
208 | + Int128 *n = vn; | ||
209 | + Int128 *a = za; | ||
210 | + | ||
211 | + /* | ||
212 | + * Int128 is used here simply to copy 16 bytes, and to simplify | ||
213 | + * the address arithmetic. | ||
214 | + */ | ||
215 | + for (i = 0; i < oprsz; i++) { | ||
216 | + if (pg[H2(i)] & 1) { | ||
217 | + a[tile_vslice_index(i)] = n[i]; | ||
218 | + } | ||
219 | + } | ||
220 | +} | ||
221 | + | ||
222 | +#undef DO_MOVA_C | ||
223 | + | ||
224 | +/* | ||
225 | + * Move ZArray column to Zreg vector. | ||
226 | + */ | ||
227 | +#define DO_MOVA_Z(NAME, TYPE, H) \ | ||
228 | +void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \ | ||
229 | +{ \ | ||
230 | + int i, oprsz = simd_oprsz(desc); \ | ||
231 | + for (i = 0; i < oprsz; ) { \ | ||
232 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
233 | + do { \ | ||
234 | + if (pg & 1) { \ | ||
235 | + *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \ | ||
236 | + } \ | ||
237 | + i += sizeof(TYPE); \ | ||
238 | + pg >>= sizeof(TYPE); \ | ||
239 | + } while (i & 15); \ | ||
240 | + } \ | ||
241 | +} | ||
242 | + | ||
243 | +DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1) | ||
244 | +DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2) | ||
245 | +DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4) | ||
246 | + | ||
247 | +void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc) | ||
248 | +{ | ||
249 | + int i, oprsz = simd_oprsz(desc) / 8; | ||
250 | + uint8_t *pg = vg; | ||
251 | + uint64_t *d = vd; | ||
252 | + uint64_t *a = za; | ||
253 | + | ||
254 | + for (i = 0; i < oprsz; i++) { | ||
255 | + if (pg[H1(i)] & 1) { | ||
256 | + d[i] = a[tile_vslice_index(i)]; | ||
257 | + } | ||
258 | + } | ||
259 | +} | ||
260 | + | ||
261 | +void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) | ||
262 | +{ | ||
263 | + int i, oprsz = simd_oprsz(desc) / 16; | ||
264 | + uint16_t *pg = vg; | ||
265 | + Int128 *d = vd; | ||
266 | + Int128 *a = za; | ||
267 | + | ||
268 | + /* | ||
269 | + * Int128 is used here simply to copy 16 bytes, and to simplify | ||
270 | + * the address arithmetic. | ||
271 | + */ | ||
272 | + for (i = 0; i < oprsz; i++, za += sizeof(ARMVectorReg)) { | ||
273 | + if (pg[H2(i)] & 1) { | ||
274 | + d[i] = a[tile_vslice_index(i)]; | ||
275 | + } | ||
276 | + } | ||
277 | +} | ||
278 | + | ||
279 | +#undef DO_MOVA_Z | ||
17 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 280 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c |
18 | index XXXXXXX..XXXXXXX 100644 | 281 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/sve_helper.c | 282 | --- a/target/arm/sve_helper.c |
20 | +++ b/target/arm/sve_helper.c | 283 | +++ b/target/arm/sve_helper.c |
21 | @@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, | 284 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, |
22 | #endif | ||
23 | } | ||
24 | |||
25 | -typedef uint64_t mte_check_fn(CPUARMState *, uint32_t, uint64_t, uintptr_t); | ||
26 | - | ||
27 | -static inline QEMU_ALWAYS_INLINE | ||
28 | -void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env, | ||
29 | - uint64_t *vg, target_ulong addr, int esize, | ||
30 | - int msize, uint32_t mtedesc, uintptr_t ra, | ||
31 | - mte_check_fn *check) | ||
32 | +static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, | ||
33 | + uint64_t *vg, target_ulong addr, int esize, | ||
34 | + int msize, uint32_t mtedesc, uintptr_t ra) | ||
35 | { | ||
36 | intptr_t mem_off, reg_off, reg_last; | ||
37 | |||
38 | @@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env, | ||
39 | uint64_t pg = vg[reg_off >> 6]; | ||
40 | do { | ||
41 | if ((pg >> (reg_off & 63)) & 1) { | ||
42 | - check(env, mtedesc, addr, ra); | ||
43 | + mte_check(env, mtedesc, addr, ra); | ||
44 | } | ||
45 | reg_off += esize; | ||
46 | mem_off += msize; | ||
47 | @@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env, | ||
48 | uint64_t pg = vg[reg_off >> 6]; | ||
49 | do { | ||
50 | if ((pg >> (reg_off & 63)) & 1) { | ||
51 | - check(env, mtedesc, addr, ra); | ||
52 | + mte_check(env, mtedesc, addr, ra); | ||
53 | } | ||
54 | reg_off += esize; | ||
55 | mem_off += msize; | ||
56 | @@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env, | ||
57 | } | 285 | } |
58 | } | 286 | } |
59 | 287 | ||
60 | -typedef void sve_cont_ldst_mte_check_fn(SVEContLdSt *info, CPUARMState *env, | 288 | +void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm, |
61 | - uint64_t *vg, target_ulong addr, | 289 | + void *vg, uint32_t desc) |
62 | - int esize, int msize, uint32_t mtedesc, | 290 | +{ |
63 | - uintptr_t ra); | 291 | + intptr_t i, opr_sz = simd_oprsz(desc) / 16; |
64 | - | 292 | + Int128 *d = vd, *n = vn, *m = vm; |
65 | -static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env, | 293 | + uint16_t *pg = vg; |
66 | - uint64_t *vg, target_ulong addr, | 294 | + |
67 | - int esize, int msize, uint32_t mtedesc, | 295 | + for (i = 0; i < opr_sz; i += 1) { |
68 | - uintptr_t ra) | 296 | + d[i] = (pg[H2(i)] & 1 ? n : m)[i]; |
69 | -{ | 297 | + } |
70 | - sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize, | 298 | +} |
71 | - mtedesc, ra, mte_check); | 299 | + |
72 | -} | 300 | /* Two operand comparison controlled by a predicate. |
73 | - | 301 | * ??? It is very tempting to want to be able to expand this inline |
74 | -static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env, | 302 | * with x86 instructions, e.g. |
75 | - uint64_t *vg, target_ulong addr, | 303 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c |
76 | - int esize, int msize, uint32_t mtedesc, | 304 | index XXXXXXX..XXXXXXX 100644 |
77 | - uintptr_t ra) | 305 | --- a/target/arm/translate-sme.c |
78 | -{ | 306 | +++ b/target/arm/translate-sme.c |
79 | - sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize, | 307 | @@ -XXX,XX +XXX,XX @@ |
80 | - mtedesc, ra, mte_check); | 308 | #include "decode-sme.c.inc" |
81 | -} | 309 | |
82 | - | 310 | |
83 | - | 311 | +/* |
84 | /* | 312 | + * Resolve tile.size[index] to a host pointer, where tile and index |
85 | * Common helper for all contiguous 1,2,3,4-register predicated stores. | 313 | + * are always decoded together, dependent on the element size. |
86 | */ | 314 | + */ |
87 | @@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr, | 315 | +static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, |
88 | uint32_t desc, const uintptr_t retaddr, | 316 | + int tile_index, bool vertical) |
89 | const int esz, const int msz, const int N, uint32_t mtedesc, | 317 | +{ |
90 | sve_ldst1_host_fn *host_fn, | 318 | + int tile = tile_index >> (4 - esz); |
91 | - sve_ldst1_tlb_fn *tlb_fn, | 319 | + int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz); |
92 | - sve_cont_ldst_mte_check_fn *mte_check_fn) | 320 | + int pos, len, offset; |
93 | + sve_ldst1_tlb_fn *tlb_fn) | 321 | + TCGv_i32 tmp; |
322 | + TCGv_ptr addr; | ||
323 | + | ||
324 | + /* Compute the final index, which is Rs+imm. */ | ||
325 | + tmp = tcg_temp_new_i32(); | ||
326 | + tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs)); | ||
327 | + tcg_gen_addi_i32(tmp, tmp, index); | ||
328 | + | ||
329 | + /* Prepare a power-of-two modulo via extraction of @len bits. */ | ||
330 | + len = ctz32(streaming_vec_reg_size(s)) - esz; | ||
331 | + | ||
332 | + if (vertical) { | ||
333 | + /* | ||
334 | + * Compute the byte offset of the index within the tile: | ||
335 | + * (index % (svl / size)) * size | ||
336 | + * = (index % (svl >> esz)) << esz | ||
337 | + * Perform the power-of-two modulo via extraction of the low @len bits. | ||
338 | + * Perform the multiply by shifting left by @pos bits. | ||
339 | + * Perform these operations simultaneously via deposit into zero. | ||
340 | + */ | ||
341 | + pos = esz; | ||
342 | + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); | ||
343 | + | ||
344 | + /* | ||
345 | + * For big-endian, adjust the indexed column byte offset within | ||
346 | + * the uint64_t host words that make up env->zarray[]. | ||
347 | + */ | ||
348 | + if (HOST_BIG_ENDIAN && esz < MO_64) { | ||
349 | + tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz)); | ||
350 | + } | ||
351 | + } else { | ||
352 | + /* | ||
353 | + * Compute the byte offset of the index within the tile: | ||
354 | + * (index % (svl / size)) * (size * sizeof(row)) | ||
355 | + * = (index % (svl >> esz)) << (esz + log2(sizeof(row))) | ||
356 | + */ | ||
357 | + pos = esz + ctz32(sizeof(ARMVectorReg)); | ||
358 | + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); | ||
359 | + | ||
360 | + /* Row slices are always aligned and need no endian adjustment. */ | ||
361 | + } | ||
362 | + | ||
363 | + /* The tile byte offset within env->zarray is the row. */ | ||
364 | + offset = tile * sizeof(ARMVectorReg); | ||
365 | + | ||
366 | + /* Include the byte offset of zarray to make this relative to env. */ | ||
367 | + offset += offsetof(CPUARMState, zarray); | ||
368 | + tcg_gen_addi_i32(tmp, tmp, offset); | ||
369 | + | ||
370 | + /* Add the byte offset to env to produce the final pointer. */ | ||
371 | + addr = tcg_temp_new_ptr(); | ||
372 | + tcg_gen_ext_i32_ptr(addr, tmp); | ||
373 | + tcg_temp_free_i32(tmp); | ||
374 | + tcg_gen_add_ptr(addr, addr, cpu_env); | ||
375 | + | ||
376 | + return addr; | ||
377 | +} | ||
378 | + | ||
379 | static bool trans_ZERO(DisasContext *s, arg_ZERO *a) | ||
94 | { | 380 | { |
95 | const unsigned rd = simd_data(desc); | 381 | if (!dc_isar_feature(aa64_sme, s)) { |
96 | const intptr_t reg_max = simd_oprsz(desc); | 382 | @@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a) |
97 | @@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr, | ||
98 | * Handle mte checks for all active elements. | ||
99 | * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
100 | */ | ||
101 | - if (mte_check_fn && mtedesc) { | ||
102 | - mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz, | ||
103 | - mtedesc, retaddr); | ||
104 | + if (mtedesc) { | ||
105 | + sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz, | ||
106 | + mtedesc, retaddr); | ||
107 | } | 383 | } |
108 | 384 | return true; | |
109 | flags = info.page[0].flags | info.page[1].flags; | ||
110 | @@ -XXX,XX +XXX,XX @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, | ||
111 | mtedesc = 0; | ||
112 | } | ||
113 | |||
114 | - sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn, | ||
115 | - N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN); | ||
116 | + sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn); | ||
117 | } | 385 | } |
118 | 386 | + | |
119 | #define DO_LD1_1(NAME, ESZ) \ | 387 | +static bool trans_MOVA(DisasContext *s, arg_MOVA *a) |
120 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \ | 388 | +{ |
121 | target_ulong addr, uint32_t desc) \ | 389 | + static gen_helper_gvec_4 * const h_fns[5] = { |
122 | { \ | 390 | + gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, |
123 | sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \ | 391 | + gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d, |
124 | - sve_##NAME##_host, sve_##NAME##_tlb, NULL); \ | 392 | + gen_helper_sve_sel_zpzz_q |
125 | + sve_##NAME##_host, sve_##NAME##_tlb); \ | 393 | + }; |
126 | } \ | 394 | + static gen_helper_gvec_3 * const cz_fns[5] = { |
127 | void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \ | 395 | + gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h, |
128 | target_ulong addr, uint32_t desc) \ | 396 | + gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d, |
129 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \ | 397 | + gen_helper_sme_mova_cz_q, |
130 | target_ulong addr, uint32_t desc) \ | 398 | + }; |
131 | { \ | 399 | + static gen_helper_gvec_3 * const zc_fns[5] = { |
132 | sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ | 400 | + gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h, |
133 | - sve_##NAME##_le_host, sve_##NAME##_le_tlb, NULL); \ | 401 | + gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d, |
134 | + sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ | 402 | + gen_helper_sme_mova_zc_q, |
135 | } \ | 403 | + }; |
136 | void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \ | 404 | + |
137 | target_ulong addr, uint32_t desc) \ | 405 | + TCGv_ptr t_za, t_zr, t_pg; |
138 | { \ | 406 | + TCGv_i32 t_desc; |
139 | sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ | 407 | + int svl; |
140 | - sve_##NAME##_be_host, sve_##NAME##_be_tlb, NULL); \ | 408 | + |
141 | + sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ | 409 | + if (!dc_isar_feature(aa64_sme, s)) { |
142 | } \ | 410 | + return false; |
143 | void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ | 411 | + } |
144 | - target_ulong addr, uint32_t desc) \ | 412 | + if (!sme_smza_enabled_check(s)) { |
145 | + target_ulong addr, uint32_t desc) \ | 413 | + return true; |
146 | { \ | 414 | + } |
147 | sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ | 415 | + |
148 | sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ | 416 | + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); |
149 | } \ | 417 | + t_zr = vec_full_reg_ptr(s, a->zr); |
150 | void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \ | 418 | + t_pg = pred_full_reg_ptr(s, a->pg); |
151 | - target_ulong addr, uint32_t desc) \ | 419 | + |
152 | + target_ulong addr, uint32_t desc) \ | 420 | + svl = streaming_vec_reg_size(s); |
153 | { \ | 421 | + t_desc = tcg_constant_i32(simd_desc(svl, svl, 0)); |
154 | sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ | 422 | + |
155 | sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ | 423 | + if (a->v) { |
156 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \ | 424 | + /* Vertical slice -- use sme mova helpers. */ |
157 | target_ulong addr, uint32_t desc) \ | 425 | + if (a->to_vec) { |
158 | { \ | 426 | + zc_fns[a->esz](t_zr, t_za, t_pg, t_desc); |
159 | sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \ | 427 | + } else { |
160 | - sve_ld1bb_host, sve_ld1bb_tlb, NULL); \ | 428 | + cz_fns[a->esz](t_za, t_zr, t_pg, t_desc); |
161 | + sve_ld1bb_host, sve_ld1bb_tlb); \ | 429 | + } |
162 | } \ | 430 | + } else { |
163 | void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \ | 431 | + /* Horizontal slice -- reuse sve sel helpers. */ |
164 | target_ulong addr, uint32_t desc) \ | 432 | + if (a->to_vec) { |
165 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \ | 433 | + h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc); |
166 | target_ulong addr, uint32_t desc) \ | 434 | + } else { |
167 | { \ | 435 | + h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); |
168 | sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \ | 436 | + } |
169 | - sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb, NULL); \ | 437 | + } |
170 | + sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \ | 438 | + |
171 | } \ | 439 | + tcg_temp_free_ptr(t_za); |
172 | void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \ | 440 | + tcg_temp_free_ptr(t_zr); |
173 | target_ulong addr, uint32_t desc) \ | 441 | + tcg_temp_free_ptr(t_pg); |
174 | { \ | 442 | + |
175 | sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \ | 443 | + return true; |
176 | - sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb, NULL); \ | 444 | +} |
177 | + sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \ | ||
178 | } \ | ||
179 | void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \ | ||
180 | target_ulong addr, uint32_t desc) \ | ||
181 | @@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, | ||
182 | uint32_t desc, const uintptr_t retaddr, | ||
183 | const int esz, const int msz, const int N, uint32_t mtedesc, | ||
184 | sve_ldst1_host_fn *host_fn, | ||
185 | - sve_ldst1_tlb_fn *tlb_fn, | ||
186 | - sve_cont_ldst_mte_check_fn *mte_check_fn) | ||
187 | + sve_ldst1_tlb_fn *tlb_fn) | ||
188 | { | ||
189 | const unsigned rd = simd_data(desc); | ||
190 | const intptr_t reg_max = simd_oprsz(desc); | ||
191 | @@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, | ||
192 | * Handle mte checks for all active elements. | ||
193 | * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
194 | */ | ||
195 | - if (mte_check_fn && mtedesc) { | ||
196 | - mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz, | ||
197 | - mtedesc, retaddr); | ||
198 | + if (mtedesc) { | ||
199 | + sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz, | ||
200 | + mtedesc, retaddr); | ||
201 | } | ||
202 | |||
203 | flags = info.page[0].flags | info.page[1].flags; | ||
204 | @@ -XXX,XX +XXX,XX @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, | ||
205 | mtedesc = 0; | ||
206 | } | ||
207 | |||
208 | - sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn, | ||
209 | - N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN); | ||
210 | + sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn); | ||
211 | } | ||
212 | |||
213 | #define DO_STN_1(N, NAME, ESZ) \ | ||
214 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \ | ||
215 | target_ulong addr, uint32_t desc) \ | ||
216 | { \ | ||
217 | sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \ | ||
218 | - sve_st1##NAME##_host, sve_st1##NAME##_tlb, NULL); \ | ||
219 | + sve_st1##NAME##_host, sve_st1##NAME##_tlb); \ | ||
220 | } \ | ||
221 | void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \ | ||
222 | target_ulong addr, uint32_t desc) \ | ||
223 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \ | ||
224 | target_ulong addr, uint32_t desc) \ | ||
225 | { \ | ||
226 | sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ | ||
227 | - sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb, NULL); \ | ||
228 | + sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \ | ||
229 | } \ | ||
230 | void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \ | ||
231 | target_ulong addr, uint32_t desc) \ | ||
232 | { \ | ||
233 | sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ | ||
234 | - sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb, NULL); \ | ||
235 | + sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \ | ||
236 | } \ | ||
237 | void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ | ||
238 | target_ulong addr, uint32_t desc) \ | ||
239 | -- | 445 | -- |
240 | 2.20.1 | 446 | 2.25.1 |
241 | |||
242 | diff view generated by jsdifflib |
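
A worked example of the index arithmetic in get_tile_rowcol() above, written with plain shifts and masks instead of tcg_gen_deposit_z_i32 (assumptions: SVL of 256 bits; ROW_BYTES stands in for sizeof(ARMVectorReg); __builtin_ctz stands in for ctz32; the big-endian column adjustment is omitted):

#include <stdio.h>

#define ROW_BYTES 256   /* stands in for sizeof(ARMVectorReg) */

static unsigned rowcol_offset(unsigned esz, unsigned tile, unsigned idx,
                              unsigned svl, int vertical)
{
    unsigned len = __builtin_ctz(svl) - esz;            /* modulo svl >> esz */
    unsigned pos = vertical ? esz : esz + __builtin_ctz(ROW_BYTES);
    unsigned off = (idx & ((1u << len) - 1)) << pos;    /* deposit into zero */

    /* Plus offsetof(CPUARMState, zarray) in the real code. */
    return tile * ROW_BYTES + off;
}

int main(void)
{
    /* ZA1.S, slice index 3, SVL = 32 bytes: 8 elements per slice. */
    printf("horizontal: +%u\n", rowcol_offset(2, 1, 3, 32, 0));  /* 256 + 3 * 1024 */
    printf("vertical:   +%u\n", rowcol_offset(2, 1, 3, 32, 1));  /* 256 + 3 * 4 */
    return 0;
}
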
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | The mte_check1 and mte_checkN functions are now identical. | 3 | We cannot reuse the SVE functions for LD[1-4] and ST[1-4], |
4 | Drop mte_check1 and rename mte_checkN to mte_check. | 4 | because those functions accept only a Zreg register number. |
5 | For SME, we want to pass a pointer into ZA storage. | ||
5 | 6 | ||
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210416183106.1516563-7-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-21-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 11 | --- |
11 | target/arm/helper-a64.h | 3 +-- | 12 | target/arm/helper-sme.h | 82 +++++ |
12 | target/arm/internals.h | 5 +---- | 13 | target/arm/sme.decode | 9 + |
13 | target/arm/mte_helper.c | 26 +++----------------------- | 14 | target/arm/sme_helper.c | 595 +++++++++++++++++++++++++++++++++++++ |
14 | target/arm/sve_helper.c | 14 +++++++------- | 15 | target/arm/translate-sme.c | 70 +++++ |
15 | target/arm/translate-a64.c | 4 ++-- | 16 | 4 files changed, 756 insertions(+) |
16 | 5 files changed, 14 insertions(+), 38 deletions(-) | ||
17 | 17 | ||
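
The practical consequence of passing a ZA pointer rather than a Zreg number shows up in the per-element primitives: the horizontal form stores elements contiguously, exactly as an SVE Zreg load would, while the vertical form scatters one element per ZA row. A minimal sketch of the byte case (illustrative names, not QEMU code; ROW_BYTES stands in for sizeof(ARMVectorReg); the real vertical primitive is generated by the DO_LD macro later in this patch):

#include <stdint.h>
#include <stddef.h>

#define ROW_BYTES 256   /* stands in for sizeof(ARMVectorReg) */
#define tile_vslice_offset(byteoff) ((size_t)(byteoff) * ROW_BYTES)

void ld1b_h_host(uint8_t *za, size_t off, const uint8_t *host)
{
    za[off] = *host;                       /* contiguous, as for a Zreg */
}

void ld1b_v_host(uint8_t *za, size_t off, const uint8_t *host)
{
    za[tile_vslice_offset(off)] = *host;   /* one element per ZA row */
}
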
18 | diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h | 18 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
19 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/arm/helper-a64.h | 20 | --- a/target/arm/helper-sme.h |
21 | +++ b/target/arm/helper-a64.h | 21 | +++ b/target/arm/helper-sme.h |
22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64) | 22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
23 | DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64) | 23 | DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
24 | DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64) | 24 | DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
25 | 25 | DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | |
26 | -DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64) | 26 | + |
27 | -DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64) | 27 | +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
28 | +DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64) | 28 | +DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
29 | DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64) | 29 | +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
30 | DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64) | 30 | +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
31 | DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32) | 31 | + |
32 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 32 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
33 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
34 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
36 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
38 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
39 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
40 | + | ||
41 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
42 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
43 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
44 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
45 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
47 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
48 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
49 | + | ||
50 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
51 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
52 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
54 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
56 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
57 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
58 | + | ||
59 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
61 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
63 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
65 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
66 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
67 | + | ||
68 | +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
69 | +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
70 | +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
71 | +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
72 | + | ||
73 | +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
74 | +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
75 | +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
76 | +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
77 | +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
78 | +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
79 | +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
80 | +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
81 | + | ||
82 | +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
83 | +DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
84 | +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
85 | +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
86 | +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
87 | +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
88 | +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
89 | +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
90 | + | ||
91 | +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
92 | +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
93 | +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
94 | +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
95 | +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
96 | +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
97 | +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
98 | +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
99 | + | ||
100 | +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
101 | +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
102 | +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
103 | +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
104 | +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
105 | +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
106 | +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
107 | +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
108 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
33 | index XXXXXXX..XXXXXXX 100644 | 109 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/target/arm/internals.h | 110 | --- a/target/arm/sme.decode |
35 | +++ b/target/arm/internals.h | 111 | +++ b/target/arm/sme.decode |
36 | @@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, WRITE, 8, 1) | 112 | @@ -XXX,XX +XXX,XX @@ MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ |
37 | FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */ | 113 | &mova to_vec=1 rs=%mova_rs |
38 | 114 | MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ | |
39 | bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr); | 115 | &mova to_vec=1 rs=%mova_rs esz=4 |
40 | -uint64_t mte_check1(CPUARMState *env, uint32_t desc, | 116 | + |
41 | - uint64_t ptr, uintptr_t ra); | 117 | +### SME Memory |
42 | -uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 118 | + |
43 | - uint64_t ptr, uintptr_t ra); | 119 | +&ldst esz rs pg rn rm za_imm v:bool st:bool |
44 | +uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); | 120 | + |
45 | 121 | +LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ | |
46 | static inline int allocation_tag_from_addr(uint64_t ptr) | 122 | + &ldst rs=%mova_rs |
47 | { | 123 | +LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
48 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | 124 | + &ldst esz=4 rs=%mova_rs |
125 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | 126 | index XXXXXXX..XXXXXXX 100644 |
50 | --- a/target/arm/mte_helper.c | 127 | --- a/target/arm/sme_helper.c |
51 | +++ b/target/arm/mte_helper.c | 128 | +++ b/target/arm/sme_helper.c |
52 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | 129 | @@ -XXX,XX +XXX,XX @@ |
53 | return 0; | 130 | |
131 | #include "qemu/osdep.h" | ||
132 | #include "cpu.h" | ||
133 | +#include "internals.h" | ||
134 | #include "tcg/tcg-gvec-desc.h" | ||
135 | #include "exec/helper-proto.h" | ||
136 | +#include "exec/cpu_ldst.h" | ||
137 | +#include "exec/exec-all.h" | ||
138 | #include "qemu/int128.h" | ||
139 | #include "vec_internal.h" | ||
140 | +#include "sve_ldst_internal.h" | ||
141 | |||
142 | /* ResetSVEState */ | ||
143 | void arm_reset_sve_state(CPUARMState *env) | ||
144 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) | ||
54 | } | 145 | } |
55 | 146 | ||
56 | -uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 147 | #undef DO_MOVA_Z |
57 | - uint64_t ptr, uintptr_t ra) | 148 | + |
58 | +uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra) | 149 | +/* |
59 | { | 150 | + * Clear elements in a tile slice comprising len bytes. |
60 | uint64_t fault; | 151 | + */ |
61 | int ret = mte_probe_int(env, desc, ptr, ra, &fault); | 152 | + |
62 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 153 | +typedef void ClearFn(void *ptr, size_t off, size_t len); |
63 | return useronly_clean_ptr(ptr); | 154 | + |
155 | +static void clear_horizontal(void *ptr, size_t off, size_t len) | ||
156 | +{ | ||
157 | + memset(ptr + off, 0, len); | ||
158 | +} | ||
159 | + | ||
160 | +static void clear_vertical_b(void *vptr, size_t off, size_t len) | ||
161 | +{ | ||
162 | + for (size_t i = 0; i < len; ++i) { | ||
163 | + *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
164 | + } | ||
165 | +} | ||
166 | + | ||
167 | +static void clear_vertical_h(void *vptr, size_t off, size_t len) | ||
168 | +{ | ||
169 | + for (size_t i = 0; i < len; i += 2) { | ||
170 | + *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
171 | + } | ||
172 | +} | ||
173 | + | ||
174 | +static void clear_vertical_s(void *vptr, size_t off, size_t len) | ||
175 | +{ | ||
176 | + for (size_t i = 0; i < len; i += 4) { | ||
177 | + *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
178 | + } | ||
179 | +} | ||
180 | + | ||
181 | +static void clear_vertical_d(void *vptr, size_t off, size_t len) | ||
182 | +{ | ||
183 | + for (size_t i = 0; i < len; i += 8) { | ||
184 | + *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
185 | + } | ||
186 | +} | ||
187 | + | ||
188 | +static void clear_vertical_q(void *vptr, size_t off, size_t len) | ||
189 | +{ | ||
190 | + for (size_t i = 0; i < len; i += 16) { | ||
191 | + memset(vptr + tile_vslice_offset(i + off), 0, 16); | ||
192 | + } | ||
193 | +} | ||
194 | + | ||
195 | +/* | ||
196 | + * Copy elements from an array into a tile slice comprising len bytes. | ||
197 | + */ | ||
198 | + | ||
199 | +typedef void CopyFn(void *dst, const void *src, size_t len); | ||
200 | + | ||
201 | +static void copy_horizontal(void *dst, const void *src, size_t len) | ||
202 | +{ | ||
203 | + memcpy(dst, src, len); | ||
204 | +} | ||
205 | + | ||
206 | +static void copy_vertical_b(void *vdst, const void *vsrc, size_t len) | ||
207 | +{ | ||
208 | + const uint8_t *src = vsrc; | ||
209 | + uint8_t *dst = vdst; | ||
210 | + size_t i; | ||
211 | + | ||
212 | + for (i = 0; i < len; ++i) { | ||
213 | + dst[tile_vslice_index(i)] = src[i]; | ||
214 | + } | ||
215 | +} | ||
216 | + | ||
217 | +static void copy_vertical_h(void *vdst, const void *vsrc, size_t len) | ||
218 | +{ | ||
219 | + const uint16_t *src = vsrc; | ||
220 | + uint16_t *dst = vdst; | ||
221 | + size_t i; | ||
222 | + | ||
223 | + for (i = 0; i < len / 2; ++i) { | ||
224 | + dst[tile_vslice_index(i)] = src[i]; | ||
225 | + } | ||
226 | +} | ||
227 | + | ||
228 | +static void copy_vertical_s(void *vdst, const void *vsrc, size_t len) | ||
229 | +{ | ||
230 | + const uint32_t *src = vsrc; | ||
231 | + uint32_t *dst = vdst; | ||
232 | + size_t i; | ||
233 | + | ||
234 | + for (i = 0; i < len / 4; ++i) { | ||
235 | + dst[tile_vslice_index(i)] = src[i]; | ||
236 | + } | ||
237 | +} | ||
238 | + | ||
239 | +static void copy_vertical_d(void *vdst, const void *vsrc, size_t len) | ||
240 | +{ | ||
241 | + const uint64_t *src = vsrc; | ||
242 | + uint64_t *dst = vdst; | ||
243 | + size_t i; | ||
244 | + | ||
245 | + for (i = 0; i < len / 8; ++i) { | ||
246 | + dst[tile_vslice_index(i)] = src[i]; | ||
247 | + } | ||
248 | +} | ||
249 | + | ||
250 | +static void copy_vertical_q(void *vdst, const void *vsrc, size_t len) | ||
251 | +{ | ||
252 | + for (size_t i = 0; i < len; i += 16) { | ||
253 | + memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16); | ||
254 | + } | ||
255 | +} | ||
256 | + | ||
257 | +/* | ||
258 | + * Host and TLB primitives for vertical tile slice addressing. | ||
259 | + */ | ||
260 | + | ||
261 | +#define DO_LD(NAME, TYPE, HOST, TLB) \ | ||
262 | +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ | ||
263 | +{ \ | ||
264 | + TYPE val = HOST(host); \ | ||
265 | + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ | ||
266 | +} \ | ||
267 | +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ | ||
268 | + intptr_t off, target_ulong addr, uintptr_t ra) \ | ||
269 | +{ \ | ||
270 | + TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \ | ||
271 | + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ | ||
272 | +} | ||
273 | + | ||
274 | +#define DO_ST(NAME, TYPE, HOST, TLB) \ | ||
275 | +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ | ||
276 | +{ \ | ||
277 | + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ | ||
278 | + HOST(host, val); \ | ||
279 | +} \ | ||
280 | +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ | ||
281 | + intptr_t off, target_ulong addr, uintptr_t ra) \ | ||
282 | +{ \ | ||
283 | + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ | ||
284 | + TLB(env, useronly_clean_ptr(addr), val, ra); \ | ||
285 | +} | ||
286 | + | ||
287 | +/* | ||
288 | + * The ARMVectorReg elements are stored in host-endian 64-bit units. | ||
289 | + * For 128-bit quantities, the sequence defined by the Elem[] pseudocode | ||
290 | + * corresponds to storing the two 64-bit pieces in little-endian order. | ||
291 | + */ | ||
292 | +#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \ | ||
293 | +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ | ||
294 | +{ \ | ||
295 | + uint64_t val0 = HOST(host), val1 = HOST(host + 8); \ | ||
296 | + uint64_t *ptr = za + off; \ | ||
297 | + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ | ||
298 | +} \ | ||
299 | +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ | ||
300 | +{ \ | ||
301 | + HNAME##_host(za, tile_vslice_offset(off), host); \ | ||
302 | +} \ | ||
303 | +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
304 | + target_ulong addr, uintptr_t ra) \ | ||
305 | +{ \ | ||
306 | + uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \ | ||
307 | + uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \ | ||
308 | + uint64_t *ptr = za + off; \ | ||
309 | + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ | ||
310 | +} \ | ||
311 | +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
312 | + target_ulong addr, uintptr_t ra) \ | ||
313 | +{ \ | ||
314 | + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ | ||
315 | +} | ||
316 | + | ||
317 | +#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \ | ||
318 | +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ | ||
319 | +{ \ | ||
320 | + uint64_t *ptr = za + off; \ | ||
321 | + HOST(host, ptr[BE]); \ | ||
322 | + HOST(host + 1, ptr[!BE]); \ | ||
323 | +} \ | ||
324 | +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ | ||
325 | +{ \ | ||
326 | + HNAME##_host(za, tile_vslice_offset(off), host); \ | ||
327 | +} \ | ||
328 | +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
329 | + target_ulong addr, uintptr_t ra) \ | ||
330 | +{ \ | ||
331 | + uint64_t *ptr = za + off; \ | ||
332 | + TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \ | ||
333 | + TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \ | ||
334 | +} \ | ||
335 | +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
336 | + target_ulong addr, uintptr_t ra) \ | ||
337 | +{ \ | ||
338 | + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ | ||
339 | +} | ||
340 | + | ||
341 | +DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra) | ||
342 | +DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra) | ||
343 | +DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra) | ||
344 | +DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra) | ||
345 | +DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra) | ||
346 | +DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra) | ||
347 | +DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra) | ||
348 | + | ||
349 | +DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra) | ||
350 | +DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra) | ||
351 | + | ||
352 | +DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra) | ||
353 | +DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra) | ||
354 | +DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra) | ||
355 | +DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra) | ||
356 | +DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra) | ||
357 | +DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra) | ||
358 | +DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra) | ||
359 | + | ||
360 | +DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra) | ||
361 | +DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra) | ||
362 | + | ||
363 | +#undef DO_LD | ||
364 | +#undef DO_ST | ||
365 | +#undef DO_LDQ | ||
366 | +#undef DO_STQ | ||
367 | + | ||
368 | +/* | ||
369 | + * Common helper for all contiguous predicated loads. | ||
370 | + */ | ||
371 | + | ||
372 | +static inline QEMU_ALWAYS_INLINE | ||
373 | +void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, | ||
374 | + const target_ulong addr, uint32_t desc, const uintptr_t ra, | ||
375 | + const int esz, uint32_t mtedesc, bool vertical, | ||
376 | + sve_ldst1_host_fn *host_fn, | ||
377 | + sve_ldst1_tlb_fn *tlb_fn, | ||
378 | + ClearFn *clr_fn, | ||
379 | + CopyFn *cpy_fn) | ||
380 | +{ | ||
381 | + const intptr_t reg_max = simd_oprsz(desc); | ||
382 | + const intptr_t esize = 1 << esz; | ||
383 | + intptr_t reg_off, reg_last; | ||
384 | + SVEContLdSt info; | ||
385 | + void *host; | ||
386 | + int flags; | ||
387 | + | ||
388 | + /* Find the active elements. */ | ||
389 | + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { | ||
390 | + /* The entire predicate was false; no load occurs. */ | ||
391 | + clr_fn(za, 0, reg_max); | ||
392 | + return; | ||
393 | + } | ||
394 | + | ||
395 | + /* Probe the page(s). Exit with exception for any invalid page. */ | ||
396 | + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra); | ||
397 | + | ||
398 | + /* Handle watchpoints for all active elements. */ | ||
399 | + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, | ||
400 | + BP_MEM_READ, ra); | ||
401 | + | ||
402 | + /* | ||
403 | + * Handle mte checks for all active elements. | ||
404 | + * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
405 | + */ | ||
406 | + if (mtedesc) { | ||
407 | + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, | ||
408 | + mtedesc, ra); | ||
409 | + } | ||
410 | + | ||
411 | + flags = info.page[0].flags | info.page[1].flags; | ||
412 | + if (unlikely(flags != 0)) { | ||
413 | +#ifdef CONFIG_USER_ONLY | ||
414 | + g_assert_not_reached(); | ||
415 | +#else | ||
416 | + /* | ||
417 | + * At least one page includes MMIO. | ||
418 | + * Any bus operation can fail with cpu_transaction_failed, | ||
419 | + * which for ARM will raise SyncExternal. Perform the load | ||
420 | + * into scratch memory to preserve register state until the end. | ||
421 | + */ | ||
422 | + ARMVectorReg scratch = { }; | ||
423 | + | ||
424 | + reg_off = info.reg_off_first[0]; | ||
425 | + reg_last = info.reg_off_last[1]; | ||
426 | + if (reg_last < 0) { | ||
427 | + reg_last = info.reg_off_split; | ||
428 | + if (reg_last < 0) { | ||
429 | + reg_last = info.reg_off_last[0]; | ||
430 | + } | ||
431 | + } | ||
432 | + | ||
433 | + do { | ||
434 | + uint64_t pg = vg[reg_off >> 6]; | ||
435 | + do { | ||
436 | + if ((pg >> (reg_off & 63)) & 1) { | ||
437 | + tlb_fn(env, &scratch, reg_off, addr + reg_off, ra); | ||
438 | + } | ||
439 | + reg_off += esize; | ||
440 | + } while (reg_off & 63); | ||
441 | + } while (reg_off <= reg_last); | ||
442 | + | ||
443 | + cpy_fn(za, &scratch, reg_max); | ||
444 | + return; | ||
445 | +#endif | ||
446 | + } | ||
447 | + | ||
448 | + /* The entire operation is in RAM, on valid pages. */ | ||
449 | + | ||
450 | + reg_off = info.reg_off_first[0]; | ||
451 | + reg_last = info.reg_off_last[0]; | ||
452 | + host = info.page[0].host; | ||
453 | + | ||
454 | + if (!vertical) { | ||
455 | + memset(za, 0, reg_max); | ||
456 | + } else if (reg_off) { | ||
457 | + clr_fn(za, 0, reg_off); | ||
458 | + } | ||
459 | + | ||
460 | + while (reg_off <= reg_last) { | ||
461 | + uint64_t pg = vg[reg_off >> 6]; | ||
462 | + do { | ||
463 | + if ((pg >> (reg_off & 63)) & 1) { | ||
464 | + host_fn(za, reg_off, host + reg_off); | ||
465 | + } else if (vertical) { | ||
466 | + clr_fn(za, reg_off, esize); | ||
467 | + } | ||
468 | + reg_off += esize; | ||
469 | + } while (reg_off <= reg_last && (reg_off & 63)); | ||
470 | + } | ||
471 | + | ||
472 | + /* | ||
473 | + * Use the slow path to manage the cross-page misalignment. | ||
474 | + * But we know this is RAM and cannot trap. | ||
475 | + */ | ||
476 | + reg_off = info.reg_off_split; | ||
477 | + if (unlikely(reg_off >= 0)) { | ||
478 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
479 | + } | ||
480 | + | ||
481 | + reg_off = info.reg_off_first[1]; | ||
482 | + if (unlikely(reg_off >= 0)) { | ||
483 | + reg_last = info.reg_off_last[1]; | ||
484 | + host = info.page[1].host; | ||
485 | + | ||
486 | + do { | ||
487 | + uint64_t pg = vg[reg_off >> 6]; | ||
488 | + do { | ||
489 | + if ((pg >> (reg_off & 63)) & 1) { | ||
490 | + host_fn(za, reg_off, host + reg_off); | ||
491 | + } else if (vertical) { | ||
492 | + clr_fn(za, reg_off, esize); | ||
493 | + } | ||
494 | + reg_off += esize; | ||
495 | + } while (reg_off & 63); | ||
496 | + } while (reg_off <= reg_last); | ||
497 | + } | ||
498 | +} | ||
499 | + | ||
500 | +static inline QEMU_ALWAYS_INLINE | ||
501 | +void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, | ||
502 | + target_ulong addr, uint32_t desc, uintptr_t ra, | ||
503 | + const int esz, bool vertical, | ||
504 | + sve_ldst1_host_fn *host_fn, | ||
505 | + sve_ldst1_tlb_fn *tlb_fn, | ||
506 | + ClearFn *clr_fn, | ||
507 | + CopyFn *cpy_fn) | ||
508 | +{ | ||
509 | + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
510 | + int bit55 = extract64(addr, 55, 1); | ||
511 | + | ||
512 | + /* Remove mtedesc from the normal sve descriptor. */ | ||
513 | + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
514 | + | ||
515 | + /* Perform gross MTE suppression early. */ | ||
516 | + if (!tbi_check(desc, bit55) || | ||
517 | + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { | ||
518 | + mtedesc = 0; | ||
519 | + } | ||
520 | + | ||
521 | + sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical, | ||
522 | + host_fn, tlb_fn, clr_fn, cpy_fn); | ||
523 | +} | ||
524 | + | ||
525 | +#define DO_LD(L, END, ESZ) \ | ||
526 | +void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ | ||
527 | + target_ulong addr, uint32_t desc) \ | ||
528 | +{ \ | ||
529 | + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ | ||
530 | + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ | ||
531 | + clear_horizontal, copy_horizontal); \ | ||
532 | +} \ | ||
533 | +void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ | ||
534 | + target_ulong addr, uint32_t desc) \ | ||
535 | +{ \ | ||
536 | + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ | ||
537 | + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ | ||
538 | + clear_vertical_##L, copy_vertical_##L); \ | ||
539 | +} \ | ||
540 | +void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ | ||
541 | + target_ulong addr, uint32_t desc) \ | ||
542 | +{ \ | ||
543 | + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ | ||
544 | + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ | ||
545 | + clear_horizontal, copy_horizontal); \ | ||
546 | +} \ | ||
547 | +void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ | ||
548 | + target_ulong addr, uint32_t desc) \ | ||
549 | +{ \ | ||
550 | + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ | ||
551 | + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ | ||
552 | + clear_vertical_##L, copy_vertical_##L); \ | ||
553 | +} | ||
554 | + | ||
555 | +DO_LD(b, , MO_8) | ||
556 | +DO_LD(h, _be, MO_16) | ||
557 | +DO_LD(h, _le, MO_16) | ||
558 | +DO_LD(s, _be, MO_32) | ||
559 | +DO_LD(s, _le, MO_32) | ||
560 | +DO_LD(d, _be, MO_64) | ||
561 | +DO_LD(d, _le, MO_64) | ||
562 | +DO_LD(q, _be, MO_128) | ||
563 | +DO_LD(q, _le, MO_128) | ||
564 | + | ||
565 | +#undef DO_LD | ||
566 | + | ||
567 | +/* | ||
568 | + * Common helper for all contiguous predicated stores. | ||
569 | + */ | ||
570 | + | ||
571 | +static inline QEMU_ALWAYS_INLINE | ||
572 | +void sme_st1(CPUARMState *env, void *za, uint64_t *vg, | ||
573 | + const target_ulong addr, uint32_t desc, const uintptr_t ra, | ||
574 | + const int esz, uint32_t mtedesc, bool vertical, | ||
575 | + sve_ldst1_host_fn *host_fn, | ||
576 | + sve_ldst1_tlb_fn *tlb_fn) | ||
577 | +{ | ||
578 | + const intptr_t reg_max = simd_oprsz(desc); | ||
579 | + const intptr_t esize = 1 << esz; | ||
580 | + intptr_t reg_off, reg_last; | ||
581 | + SVEContLdSt info; | ||
582 | + void *host; | ||
583 | + int flags; | ||
584 | + | ||
585 | + /* Find the active elements. */ | ||
586 | + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { | ||
587 | + /* The entire predicate was false; no store occurs. */ | ||
588 | + return; | ||
589 | + } | ||
590 | + | ||
591 | + /* Probe the page(s). Exit with exception for any invalid page. */ | ||
592 | + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra); | ||
593 | + | ||
594 | + /* Handle watchpoints for all active elements. */ | ||
595 | + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, | ||
596 | + BP_MEM_WRITE, ra); | ||
597 | + | ||
598 | + /* | ||
599 | + * Handle mte checks for all active elements. | ||
600 | + * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
601 | + */ | ||
602 | + if (mtedesc) { | ||
603 | + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, | ||
604 | + mtedesc, ra); | ||
605 | + } | ||
606 | + | ||
607 | + flags = info.page[0].flags | info.page[1].flags; | ||
608 | + if (unlikely(flags != 0)) { | ||
609 | +#ifdef CONFIG_USER_ONLY | ||
610 | + g_assert_not_reached(); | ||
611 | +#else | ||
612 | + /* | ||
613 | + * At least one page includes MMIO. | ||
614 | + * Any bus operation can fail with cpu_transaction_failed, | ||
615 | + * which for ARM will raise SyncExternal. We cannot avoid | ||
616 | + * this fault and will leave with the store incomplete. | ||
617 | + */ | ||
618 | + reg_off = info.reg_off_first[0]; | ||
619 | + reg_last = info.reg_off_last[1]; | ||
620 | + if (reg_last < 0) { | ||
621 | + reg_last = info.reg_off_split; | ||
622 | + if (reg_last < 0) { | ||
623 | + reg_last = info.reg_off_last[0]; | ||
624 | + } | ||
625 | + } | ||
626 | + | ||
627 | + do { | ||
628 | + uint64_t pg = vg[reg_off >> 6]; | ||
629 | + do { | ||
630 | + if ((pg >> (reg_off & 63)) & 1) { | ||
631 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
632 | + } | ||
633 | + reg_off += esize; | ||
634 | + } while (reg_off & 63); | ||
635 | + } while (reg_off <= reg_last); | ||
636 | + return; | ||
637 | +#endif | ||
638 | + } | ||
639 | + | ||
640 | + reg_off = info.reg_off_first[0]; | ||
641 | + reg_last = info.reg_off_last[0]; | ||
642 | + host = info.page[0].host; | ||
643 | + | ||
644 | + while (reg_off <= reg_last) { | ||
645 | + uint64_t pg = vg[reg_off >> 6]; | ||
646 | + do { | ||
647 | + if ((pg >> (reg_off & 63)) & 1) { | ||
648 | + host_fn(za, reg_off, host + reg_off); | ||
649 | + } | ||
650 | + reg_off += 1 << esz; | ||
651 | + } while (reg_off <= reg_last && (reg_off & 63)); | ||
652 | + } | ||
653 | + | ||
654 | + /* | ||
655 | + * Use the slow path to manage the cross-page misalignment. | ||
656 | + * But we know this is RAM and cannot trap. | ||
657 | + */ | ||
658 | + reg_off = info.reg_off_split; | ||
659 | + if (unlikely(reg_off >= 0)) { | ||
660 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
661 | + } | ||
662 | + | ||
663 | + reg_off = info.reg_off_first[1]; | ||
664 | + if (unlikely(reg_off >= 0)) { | ||
665 | + reg_last = info.reg_off_last[1]; | ||
666 | + host = info.page[1].host; | ||
667 | + | ||
668 | + do { | ||
669 | + uint64_t pg = vg[reg_off >> 6]; | ||
670 | + do { | ||
671 | + if ((pg >> (reg_off & 63)) & 1) { | ||
672 | + host_fn(za, reg_off, host + reg_off); | ||
673 | + } | ||
674 | + reg_off += 1 << esz; | ||
675 | + } while (reg_off & 63); | ||
676 | + } while (reg_off <= reg_last); | ||
677 | + } | ||
678 | +} | ||
679 | + | ||
680 | +static inline QEMU_ALWAYS_INLINE | ||
681 | +void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, | ||
682 | + uint32_t desc, uintptr_t ra, int esz, bool vertical, | ||
683 | + sve_ldst1_host_fn *host_fn, | ||
684 | + sve_ldst1_tlb_fn *tlb_fn) | ||
685 | +{ | ||
686 | + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
687 | + int bit55 = extract64(addr, 55, 1); | ||
688 | + | ||
689 | + /* Remove mtedesc from the normal sve descriptor. */ | ||
690 | + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
691 | + | ||
692 | + /* Perform gross MTE suppression early. */ | ||
693 | + if (!tbi_check(desc, bit55) || | ||
694 | + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { | ||
695 | + mtedesc = 0; | ||
696 | + } | ||
697 | + | ||
698 | + sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc, | ||
699 | + vertical, host_fn, tlb_fn); | ||
700 | +} | ||
701 | + | ||
702 | +#define DO_ST(L, END, ESZ) \ | ||
703 | +void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ | ||
704 | + target_ulong addr, uint32_t desc) \ | ||
705 | +{ \ | ||
706 | + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ | ||
707 | + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ | ||
708 | +} \ | ||
709 | +void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ | ||
710 | + target_ulong addr, uint32_t desc) \ | ||
711 | +{ \ | ||
712 | + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ | ||
713 | + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ | ||
714 | +} \ | ||
715 | +void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ | ||
716 | + target_ulong addr, uint32_t desc) \ | ||
717 | +{ \ | ||
718 | + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ | ||
719 | + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ | ||
720 | +} \ | ||
721 | +void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ | ||
722 | + target_ulong addr, uint32_t desc) \ | ||
723 | +{ \ | ||
724 | + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ | ||
725 | + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ | ||
726 | +} | ||
727 | + | ||
728 | +DO_ST(b, , MO_8) | ||
729 | +DO_ST(h, _be, MO_16) | ||
730 | +DO_ST(h, _le, MO_16) | ||
731 | +DO_ST(s, _be, MO_32) | ||
732 | +DO_ST(s, _le, MO_32) | ||
733 | +DO_ST(d, _be, MO_64) | ||
734 | +DO_ST(d, _le, MO_64) | ||
735 | +DO_ST(q, _be, MO_128) | ||
736 | +DO_ST(q, _le, MO_128) | ||
737 | + | ||
738 | +#undef DO_ST | ||
739 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
740 | index XXXXXXX..XXXXXXX 100644 | ||
741 | --- a/target/arm/translate-sme.c | ||
742 | +++ b/target/arm/translate-sme.c | ||
743 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) | ||
744 | |||
745 | return true; | ||
64 | } | 746 | } |
65 | 747 | + | |
66 | -uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr) | 748 | +static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) |
67 | +uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr) | 749 | +{ |
68 | { | 750 | + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); |
69 | - return mte_checkN(env, desc, ptr, GETPC()); | 751 | + |
70 | -} | 752 | + /* |
71 | - | 753 | + * Indexed by [esz][be][v][mte][st], which is (except for load/store) |
72 | -uint64_t mte_check1(CPUARMState *env, uint32_t desc, | 754 | + * also the order in which the elements appear in the function names, |
73 | - uint64_t ptr, uintptr_t ra) | 755 | + * and so how we must concatenate the pieces. |
74 | -{ | 756 | + */ |
75 | - uint64_t fault; | 757 | + |
76 | - int ret = mte_probe_int(env, desc, ptr, ra, &fault); | 758 | +#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F } |
77 | - | 759 | +#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) } |
78 | - if (unlikely(ret == 0)) { | 760 | +#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) } |
79 | - mte_check_fail(env, desc, fault, ra); | 761 | +#define FN_END(L, B) { FN_HV(L), FN_HV(B) } |
80 | - } else if (ret < 0) { | 762 | + |
81 | - return ptr; | 763 | + static GenLdSt1 * const fns[5][2][2][2][2] = { |
82 | - } | 764 | + FN_END(b, b), |
83 | - return useronly_clean_ptr(ptr); | 765 | + FN_END(h_le, h_be), |
84 | -} | 766 | + FN_END(s_le, s_be), |
85 | - | 767 | + FN_END(d_le, d_be), |
86 | -uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr) | 768 | + FN_END(q_le, q_be), |
87 | -{ | 769 | + }; |
88 | - return mte_check1(env, desc, ptr, GETPC()); | 770 | + |
89 | + return mte_check(env, desc, ptr, GETPC()); | 771 | +#undef FN_LS |
90 | } | 772 | +#undef FN_MTE |
91 | 773 | +#undef FN_HV | |
92 | /* | 774 | +#undef FN_END |
93 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 775 | + |
94 | index XXXXXXX..XXXXXXX 100644 | 776 | + TCGv_ptr t_za, t_pg; |
95 | --- a/target/arm/sve_helper.c | 777 | + TCGv_i64 addr; |
96 | +++ b/target/arm/sve_helper.c | 778 | + int svl, desc = 0; |
97 | @@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env, | 779 | + bool be = s->be_data == MO_BE; |
98 | uintptr_t ra) | 780 | + bool mte = s->mte_active[0]; |
99 | { | 781 | + |
100 | sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize, | 782 | + if (!dc_isar_feature(aa64_sme, s)) { |
101 | - mtedesc, ra, mte_check1); | 783 | + return false; |
102 | + mtedesc, ra, mte_check); | 784 | + } |
103 | } | 785 | + if (!sme_smza_enabled_check(s)) { |
104 | 786 | + return true; | |
105 | static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env, | 787 | + } |
106 | @@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env, | 788 | + |
107 | uintptr_t ra) | 789 | + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); |
108 | { | 790 | + t_pg = pred_full_reg_ptr(s, a->pg); |
109 | sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize, | 791 | + addr = tcg_temp_new_i64(); |
110 | - mtedesc, ra, mte_checkN); | 792 | + |
111 | + mtedesc, ra, mte_check); | 793 | + tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz); |
112 | } | 794 | + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); |
113 | 795 | + | |
114 | 796 | + if (mte) { | |
115 | @@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, | 797 | + desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); |
116 | if (fault == FAULT_FIRST) { | 798 | + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); |
117 | /* Trapping mte check for the first-fault element. */ | 799 | + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); |
118 | if (mtedesc) { | 800 | + desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st); |
119 | - mte_check1(env, mtedesc, addr + mem_off, retaddr); | 801 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1); |
120 | + mte_check(env, mtedesc, addr + mem_off, retaddr); | 802 | + desc <<= SVE_MTEDESC_SHIFT; |
121 | } | 803 | + } else { |
122 | 804 | + addr = clean_data_tbi(s, addr); | |
123 | /* | 805 | + } |
124 | @@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, | 806 | + svl = streaming_vec_reg_size(s); |
125 | info.attrs, BP_MEM_READ, retaddr); | 807 | + desc = simd_desc(svl, svl, desc); |
126 | } | 808 | + |
127 | if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { | 809 | + fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, |
128 | - mte_check1(env, mtedesc, addr, retaddr); | 810 | + tcg_constant_i32(desc)); |
129 | + mte_check(env, mtedesc, addr, retaddr); | 811 | + |
130 | } | 812 | + tcg_temp_free_ptr(t_za); |
131 | host_fn(&scratch, reg_off, info.host); | 813 | + tcg_temp_free_ptr(t_pg); |
132 | } else { | 814 | + tcg_temp_free_i64(addr); |
133 | @@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, | 815 | + return true; |
134 | BP_MEM_READ, retaddr); | 816 | +} |
135 | } | ||
136 | if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { | ||
137 | - mte_check1(env, mtedesc, addr, retaddr); | ||
138 | + mte_check(env, mtedesc, addr, retaddr); | ||
139 | } | ||
140 | tlb_fn(env, &scratch, reg_off, addr, retaddr); | ||
141 | } | ||
142 | @@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, | ||
143 | */ | ||
144 | addr = base + (off_fn(vm, reg_off) << scale); | ||
145 | if (mtedesc) { | ||
146 | - mte_check1(env, mtedesc, addr, retaddr); | ||
147 | + mte_check(env, mtedesc, addr, retaddr); | ||
148 | } | ||
149 | tlb_fn(env, vd, reg_off, addr, retaddr); | ||
150 | |||
151 | @@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, | ||
152 | } | ||
153 | |||
154 | if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) { | ||
155 | - mte_check1(env, mtedesc, addr, retaddr); | ||
156 | + mte_check(env, mtedesc, addr, retaddr); | ||
157 | } | ||
158 | } | ||
159 | i += 1; | ||
160 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
161 | index XXXXXXX..XXXXXXX 100644 | ||
162 | --- a/target/arm/translate-a64.c | ||
163 | +++ b/target/arm/translate-a64.c | ||
164 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, | ||
165 | tcg_desc = tcg_const_i32(desc); | ||
166 | |||
167 | ret = new_tmp_a64(s); | ||
168 | - gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr); | ||
169 | + gen_helper_mte_check(ret, cpu_env, tcg_desc, addr); | ||
170 | tcg_temp_free_i32(tcg_desc); | ||
171 | |||
172 | return ret; | ||
173 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
174 | tcg_desc = tcg_const_i32(desc); | ||
175 | |||
176 | ret = new_tmp_a64(s); | ||
177 | - gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr); | ||
178 | + gen_helper_mte_check(ret, cpu_env, tcg_desc, addr); | ||
179 | tcg_temp_free_i32(tcg_desc); | ||
180 | |||
181 | return ret; | ||
182 | -- | 817 | -- |
183 | 2.20.1 | 818 | 2.25.1 |
184 | |||
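For reference, the DO_LD macro above stamps out four helpers per element size and endianness (horizontal/vertical, with and without MTE). Expanding DO_LD(h, _be, MO_16) by hand for the horizontal MTE case gives roughly the following sketch of the expansion, not the preprocessed source itself:

    void HELPER(sme_ld1h_be_h_mte)(CPUARMState *env, void *za, void *vg,
                                   target_ulong addr, uint32_t desc)
    {
        /* MO_16 elements, horizontal slice, MTE descriptor folded into desc */
        sme_ld1_mte(env, za, vg, addr, desc, GETPC(), MO_16, false,
                    sve_ld1hh_be_host, sve_ld1hh_be_tlb,
                    clear_horizontal, copy_horizontal);
    }

This is the entry that trans_LDST1 in the same patch reaches through its fns[esz][be][v][mte][st] table, in this case fns[1][1][0][1][0].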
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | The log2_esize parameter is not used except trivially. | 3 | Add a TCGv_ptr base argument, which will be cpu_env for SVE. |
4 | Drop the parameter and the deferral to gen_mte_check1. | 4 | We will reuse this for SME save and restore array insns. |
5 | 5 | ||
6 | This fixes a bug in that the parameters as documented | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | in the header file were the reverse from those in the | ||
8 | implementation. Which meant that translate-sve.c was | ||
9 | passing the parameters in the wrong order. | ||
10 | |||
11 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | Message-id: 20210416183106.1516563-10-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-22-richard.henderson@linaro.org |
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 10 | --- |
16 | target/arm/translate-a64.h | 2 +- | 11 | target/arm/translate-a64.h | 3 +++ |
17 | target/arm/translate-a64.c | 15 +++++++-------- | 12 | target/arm/translate-sve.c | 48 ++++++++++++++++++++++++++++---------- |
18 | target/arm/translate-sve.c | 4 ++-- | 13 | 2 files changed, 39 insertions(+), 12 deletions(-) |
19 | 3 files changed, 10 insertions(+), 11 deletions(-) | ||
20 | 14 | ||
21 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | 15 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
22 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/target/arm/translate-a64.h | 17 | --- a/target/arm/translate-a64.h |
24 | +++ b/target/arm/translate-a64.h | 18 | +++ b/target/arm/translate-a64.h |
25 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); | 19 | @@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, |
26 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, | 20 | uint32_t rm_ofs, int64_t shift, |
27 | bool tag_checked, int log2_size); | 21 | uint32_t opr_sz, uint32_t max_sz); |
28 | TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | 22 | |
29 | - bool tag_checked, int count, int log2_esize); | 23 | +void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); |
30 | + bool tag_checked, int size); | 24 | +void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); |
31 | 25 | + | |
32 | /* We should have at some point before trying to access an FP register | 26 | #endif /* TARGET_ARM_TRANSLATE_A64_H */ |
33 | * done the necessary access check, so assert that | ||
34 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/target/arm/translate-a64.c | ||
37 | +++ b/target/arm/translate-a64.c | ||
38 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
39 | * For MTE, check multiple logical sequential accesses. | ||
40 | */ | ||
41 | TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
42 | - bool tag_checked, int log2_esize, int total_size) | ||
43 | + bool tag_checked, int size) | ||
44 | { | ||
45 | - if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) { | ||
46 | + if (tag_checked && s->mte_active[0]) { | ||
47 | TCGv_i32 tcg_desc; | ||
48 | TCGv_i64 ret; | ||
49 | int desc = 0; | ||
50 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
51 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
52 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
53 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | ||
54 | - desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1); | ||
55 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1); | ||
56 | tcg_desc = tcg_const_i32(desc); | ||
57 | |||
58 | ret = new_tmp_a64(s); | ||
59 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
60 | |||
61 | return ret; | ||
62 | } | ||
63 | - return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize); | ||
64 | + return clean_data_tbi(s, addr); | ||
65 | } | ||
66 | |||
67 | typedef struct DisasCompare64 { | ||
68 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
69 | } | ||
70 | |||
71 | clean_addr = gen_mte_checkN(s, dirty_addr, !is_load, | ||
72 | - (wback || rn != 31) && !set_tag, | ||
73 | - size, 2 << size); | ||
74 | + (wback || rn != 31) && !set_tag, 2 << size); | ||
75 | |||
76 | if (is_vector) { | ||
77 | if (is_load) { | ||
78 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
79 | * promote consecutive little-endian elements below. | ||
80 | */ | ||
81 | clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31, | ||
82 | - size, total); | ||
83 | + total); | ||
84 | |||
85 | /* | ||
86 | * Consecutive little-endian elements from a single register | ||
87 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | ||
88 | tcg_rn = cpu_reg_sp(s, rn); | ||
89 | |||
90 | clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31, | ||
91 | - scale, total); | ||
92 | + total); | ||
93 | |||
94 | tcg_ebytes = tcg_const_i64(1 << scale); | ||
95 | for (xs = 0; xs < selem; xs++) { | ||
96 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
97 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
98 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
99 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
31 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, | ||
32 | * The load should begin at the address Rn + IMM. | ||
33 | */ | ||
34 | |||
35 | -static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
36 | +void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, | ||
37 | + int len, int rn, int imm) | ||
38 | { | ||
39 | int len_align = QEMU_ALIGN_DOWN(len, 8); | ||
40 | int len_remain = len % 8; | ||
100 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | 41 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) |
101 | 42 | t0 = tcg_temp_new_i64(); | |
102 | dirty_addr = tcg_temp_new_i64(); | 43 | for (i = 0; i < len_align; i += 8) { |
103 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); | 44 | tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); |
104 | - clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); | 45 | - tcg_gen_st_i64(t0, cpu_env, vofs + i); |
105 | + clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); | 46 | + tcg_gen_st_i64(t0, base, vofs + i); |
106 | tcg_temp_free_i64(dirty_addr); | 47 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); |
48 | } | ||
49 | tcg_temp_free_i64(t0); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
51 | clean_addr = new_tmp_a64_local(s); | ||
52 | tcg_gen_mov_i64(clean_addr, t0); | ||
53 | |||
54 | + if (base != cpu_env) { | ||
55 | + TCGv_ptr b = tcg_temp_local_new_ptr(); | ||
56 | + tcg_gen_mov_ptr(b, base); | ||
57 | + base = b; | ||
58 | + } | ||
59 | + | ||
60 | gen_set_label(loop); | ||
61 | |||
62 | t0 = tcg_temp_new_i64(); | ||
63 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
64 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
65 | |||
66 | tp = tcg_temp_new_ptr(); | ||
67 | - tcg_gen_add_ptr(tp, cpu_env, i); | ||
68 | + tcg_gen_add_ptr(tp, base, i); | ||
69 | tcg_gen_addi_ptr(i, i, 8); | ||
70 | tcg_gen_st_i64(t0, tp, vofs); | ||
71 | tcg_temp_free_ptr(tp); | ||
72 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
73 | |||
74 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); | ||
75 | tcg_temp_free_ptr(i); | ||
76 | + | ||
77 | + if (base != cpu_env) { | ||
78 | + tcg_temp_free_ptr(base); | ||
79 | + assert(len_remain == 0); | ||
80 | + } | ||
81 | } | ||
107 | 82 | ||
108 | /* | 83 | /* |
84 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
85 | default: | ||
86 | g_assert_not_reached(); | ||
87 | } | ||
88 | - tcg_gen_st_i64(t0, cpu_env, vofs + len_align); | ||
89 | + tcg_gen_st_i64(t0, base, vofs + len_align); | ||
90 | tcg_temp_free_i64(t0); | ||
91 | } | ||
92 | } | ||
93 | |||
94 | /* Similarly for stores. */ | ||
95 | -static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
96 | +void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
97 | + int len, int rn, int imm) | ||
98 | { | ||
99 | int len_align = QEMU_ALIGN_DOWN(len, 8); | ||
100 | int len_remain = len % 8; | ||
109 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | 101 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) |
110 | 102 | ||
111 | dirty_addr = tcg_temp_new_i64(); | 103 | t0 = tcg_temp_new_i64(); |
112 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); | 104 | for (i = 0; i < len_align; i += 8) { |
113 | - clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); | 105 | - tcg_gen_ld_i64(t0, cpu_env, vofs + i); |
114 | + clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); | 106 | + tcg_gen_ld_i64(t0, base, vofs + i); |
115 | tcg_temp_free_i64(dirty_addr); | 107 | tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); |
116 | 108 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); | |
117 | /* Note that unpredicated load/store of vector/predicate registers | 109 | } |
110 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
111 | clean_addr = new_tmp_a64_local(s); | ||
112 | tcg_gen_mov_i64(clean_addr, t0); | ||
113 | |||
114 | + if (base != cpu_env) { | ||
115 | + TCGv_ptr b = tcg_temp_local_new_ptr(); | ||
116 | + tcg_gen_mov_ptr(b, base); | ||
117 | + base = b; | ||
118 | + } | ||
119 | + | ||
120 | gen_set_label(loop); | ||
121 | |||
122 | t0 = tcg_temp_new_i64(); | ||
123 | tp = tcg_temp_new_ptr(); | ||
124 | - tcg_gen_add_ptr(tp, cpu_env, i); | ||
125 | + tcg_gen_add_ptr(tp, base, i); | ||
126 | tcg_gen_ld_i64(t0, tp, vofs); | ||
127 | tcg_gen_addi_ptr(i, i, 8); | ||
128 | tcg_temp_free_ptr(tp); | ||
129 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
130 | |||
131 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); | ||
132 | tcg_temp_free_ptr(i); | ||
133 | + | ||
134 | + if (base != cpu_env) { | ||
135 | + tcg_temp_free_ptr(base); | ||
136 | + assert(len_remain == 0); | ||
137 | + } | ||
138 | } | ||
139 | |||
140 | /* Predicate register stores can be any multiple of 2. */ | ||
141 | if (len_remain) { | ||
142 | t0 = tcg_temp_new_i64(); | ||
143 | - tcg_gen_ld_i64(t0, cpu_env, vofs + len_align); | ||
144 | + tcg_gen_ld_i64(t0, base, vofs + len_align); | ||
145 | |||
146 | switch (len_remain) { | ||
147 | case 2: | ||
148 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a) | ||
149 | if (sve_access_check(s)) { | ||
150 | int size = vec_full_reg_size(s); | ||
151 | int off = vec_full_reg_offset(s, a->rd); | ||
152 | - do_ldr(s, off, size, a->rn, a->imm * size); | ||
153 | + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); | ||
154 | } | ||
155 | return true; | ||
156 | } | ||
157 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a) | ||
158 | if (sve_access_check(s)) { | ||
159 | int size = pred_full_reg_size(s); | ||
160 | int off = pred_full_reg_offset(s, a->rd); | ||
161 | - do_ldr(s, off, size, a->rn, a->imm * size); | ||
162 | + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); | ||
163 | } | ||
164 | return true; | ||
165 | } | ||
166 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a) | ||
167 | if (sve_access_check(s)) { | ||
168 | int size = vec_full_reg_size(s); | ||
169 | int off = vec_full_reg_offset(s, a->rd); | ||
170 | - do_str(s, off, size, a->rn, a->imm * size); | ||
171 | + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); | ||
172 | } | ||
173 | return true; | ||
174 | } | ||
175 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) | ||
176 | if (sve_access_check(s)) { | ||
177 | int size = pred_full_reg_size(s); | ||
178 | int off = pred_full_reg_offset(s, a->rd); | ||
179 | - do_str(s, off, size, a->rn, a->imm * size); | ||
180 | + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); | ||
181 | } | ||
182 | return true; | ||
183 | } | ||
118 | -- | 184 | -- |
119 | 2.20.1 | 185 | 2.25.1 |
120 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | We can reuse the SVE functions for LDR and STR, passing in the | ||
4 | base of the ZA vector and a zero offset. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-24-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-23-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-neon.c.inc | 27 ++++++++++++++++++++++----- | 11 | target/arm/sme.decode | 7 +++++++ |
9 | 1 file changed, 22 insertions(+), 5 deletions(-) | 12 | target/arm/translate-sme.c | 24 ++++++++++++++++++++++++ |
13 | 2 files changed, 31 insertions(+) | ||
10 | 14 | ||
11 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | 15 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-neon.c.inc | 17 | --- a/target/arm/sme.decode |
14 | +++ b/target/arm/translate-neon.c.inc | 18 | +++ b/target/arm/sme.decode |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) | 19 | @@ -XXX,XX +XXX,XX @@ LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
16 | { | 20 | &ldst rs=%mova_rs |
17 | /* Neon load/store multiple structures */ | 21 | LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
18 | int nregs, interleave, spacing, reg, n; | 22 | &ldst esz=4 rs=%mova_rs |
19 | - MemOp endian = s->be_data; | ||
20 | + MemOp mop, align, endian; | ||
21 | int mmu_idx = get_mem_index(s); | ||
22 | int size = a->size; | ||
23 | TCGv_i64 tmp64; | ||
24 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) | ||
25 | } | ||
26 | |||
27 | /* For our purposes, bytes are always little-endian. */ | ||
28 | + endian = s->be_data; | ||
29 | if (size == 0) { | ||
30 | endian = MO_LE; | ||
31 | } | ||
32 | + | 23 | + |
33 | + /* Enforce alignment requested by the instruction */ | 24 | +&ldstr rv rn imm |
34 | + if (a->align) { | 25 | +@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \ |
35 | + align = pow2_align(a->align + 2); /* 4 ** a->align */ | 26 | + &ldstr rv=%mova_rs |
36 | + } else { | 27 | + |
37 | + align = s->align_mem ? MO_ALIGN : 0; | 28 | +LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr |
29 | +STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr | ||
30 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/target/arm/translate-sme.c | ||
33 | +++ b/target/arm/translate-sme.c | ||
34 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) | ||
35 | tcg_temp_free_i64(addr); | ||
36 | return true; | ||
37 | } | ||
38 | + | ||
39 | +typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int); | ||
40 | + | ||
41 | +static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) | ||
42 | +{ | ||
43 | + int svl = streaming_vec_reg_size(s); | ||
44 | + int imm = a->imm; | ||
45 | + TCGv_ptr base; | ||
46 | + | ||
47 | + if (!sme_za_enabled_check(s)) { | ||
48 | + return true; | ||
38 | + } | 49 | + } |
39 | + | 50 | + |
40 | /* | 51 | + /* ZA[n] equates to ZA0H.B[n]. */ |
41 | * Consecutive little-endian elements from a single register | 52 | + base = get_tile_rowcol(s, MO_8, a->rv, imm, false); |
42 | * can be promoted to a larger little-endian operation. | ||
43 | */ | ||
44 | if (interleave == 1 && endian == MO_LE) { | ||
45 | + /* Retain any natural alignment. */ | ||
46 | + if (align == MO_ALIGN) { | ||
47 | + align = pow2_align(size); | ||
48 | + } | ||
49 | size = 3; | ||
50 | } | ||
51 | + | 53 | + |
52 | tmp64 = tcg_temp_new_i64(); | 54 | + fn(s, base, 0, svl, a->rn, imm * svl); |
53 | addr = tcg_temp_new_i32(); | ||
54 | tmp = tcg_const_i32(1 << size); | ||
55 | load_reg_var(s, addr, a->rn); | ||
56 | + | 55 | + |
57 | + mop = endian | size | align; | 56 | + tcg_temp_free_ptr(base); |
58 | for (reg = 0; reg < nregs; reg++) { | 57 | + return true; |
59 | for (n = 0; n < 8 >> size; n++) { | 58 | +} |
60 | int xs; | ||
61 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) | ||
62 | int tt = a->vd + reg + spacing * xs; | ||
63 | |||
64 | if (a->l) { | ||
65 | - gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, | ||
66 | - endian | size); | ||
67 | + gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop); | ||
68 | neon_store_element64(tt, n, size, tmp64); | ||
69 | } else { | ||
70 | neon_load_element64(tmp64, tt, n, size); | ||
71 | - gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, | ||
72 | - endian | size); | ||
73 | + gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop); | ||
74 | } | ||
75 | tcg_gen_add_i32(addr, addr, tmp); | ||
76 | + | 59 | + |
77 | + /* Subsequent memory operations inherit alignment */ | 60 | +TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) |
78 | + mop &= ~MO_AMASK; | 61 | +TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) |
79 | } | ||
80 | } | ||
81 | } | ||
82 | -- | 62 | -- |
83 | 2.20.1 | 63 | 2.25.1 |
84 | |||
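Taken together, the two preceding patches let one routine serve both register files: the SVE LDR/STR paths keep passing cpu_env plus a fixed offset into the CPU state, while the new SME path passes a pointer to the selected ZA row and a zero offset. Abridged from the call sites in the diffs above (not new code), the two uses look like:

    /* SVE LDR (trans_LDR_zri): the Z register lives at a fixed env offset */
    gen_sve_ldr(s, cpu_env, vec_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);

    /* SME LDR (do_ldst_r): the ZA[n] row base is computed at run time, so offset 0 */
    base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
    gen_sve_ldr(s, base, 0, svl, a->rn, imm * svl);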
1 | From: Cornelia Huck <cohuck@redhat.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Add 6.1 machine types for arm/i440fx/q35/s390x/spapr. | ||
4 | |||
5 | Signed-off-by: Cornelia Huck <cohuck@redhat.com> | ||
6 | Acked-by: Greg Kurz <groug@kaod.org> | ||
7 | Message-id: 20210331111900.118274-1-cohuck@redhat.com | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20220708151540.18136-24-richard.henderson@linaro.org | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 7 | --- |
11 | include/hw/boards.h | 3 +++ | 8 | target/arm/helper-sme.h | 5 +++ |
12 | include/hw/i386/pc.h | 3 +++ | 9 | target/arm/sme.decode | 11 +++++ |
13 | hw/arm/virt.c | 7 ++++++- | 10 | target/arm/sme_helper.c | 90 ++++++++++++++++++++++++++++++++++++++ |
14 | hw/core/machine.c | 3 +++ | 11 | target/arm/translate-sme.c | 31 +++++++++++++ |
15 | hw/i386/pc.c | 3 +++ | 12 | 4 files changed, 137 insertions(+) |
16 | hw/i386/pc_piix.c | 14 +++++++++++++- | ||
17 | hw/i386/pc_q35.c | 13 ++++++++++++- | ||
18 | hw/ppc/spapr.c | 17 ++++++++++++++--- | ||
19 | hw/s390x/s390-virtio-ccw.c | 14 +++++++++++++- | ||
20 | 9 files changed, 70 insertions(+), 7 deletions(-) | ||
21 | 13 | ||
22 | diff --git a/include/hw/boards.h b/include/hw/boards.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
23 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/include/hw/boards.h | 16 | --- a/target/arm/helper-sme.h |
25 | +++ b/include/hw/boards.h | 17 | +++ b/target/arm/helper-sme.h |
26 | @@ -XXX,XX +XXX,XX @@ struct MachineState { | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i |
27 | } \ | 19 | DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
28 | type_init(machine_initfn##_register_types) | 20 | DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
29 | 21 | DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | |
30 | +extern GlobalProperty hw_compat_6_0[]; | ||
31 | +extern const size_t hw_compat_6_0_len; | ||
32 | + | 22 | + |
33 | extern GlobalProperty hw_compat_5_2[]; | 23 | +DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
34 | extern const size_t hw_compat_5_2_len; | 24 | +DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
35 | 25 | +DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | |
36 | diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h | 26 | +DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
38 | --- a/include/hw/i386/pc.h | 29 | --- a/target/arm/sme.decode |
39 | +++ b/include/hw/i386/pc.h | 30 | +++ b/target/arm/sme.decode |
40 | @@ -XXX,XX +XXX,XX @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, | 31 | @@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
41 | void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, | 32 | |
42 | const CPUArchIdList *apic_ids, GArray *entry); | 33 | LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr |
43 | 34 | STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr | |
44 | +extern GlobalProperty pc_compat_6_0[]; | ||
45 | +extern const size_t pc_compat_6_0_len; | ||
46 | + | 35 | + |
47 | extern GlobalProperty pc_compat_5_2[]; | 36 | +### SME Add Vector to Array |
48 | extern const size_t pc_compat_5_2_len; | 37 | + |
49 | 38 | +&adda zad zn pm pn | |
50 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c | 39 | +@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda |
40 | +@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda | ||
41 | + | ||
42 | +ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 | ||
43 | +ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 | ||
44 | +ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 | ||
45 | +ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 | ||
46 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
51 | index XXXXXXX..XXXXXXX 100644 | 47 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/hw/arm/virt.c | 48 | --- a/target/arm/sme_helper.c |
53 | +++ b/hw/arm/virt.c | 49 | +++ b/target/arm/sme_helper.c |
54 | @@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void) | 50 | @@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128) |
55 | } | 51 | DO_ST(q, _le, MO_128) |
56 | type_init(machvirt_machine_init); | 52 | |
57 | 53 | #undef DO_ST | |
58 | +static void virt_machine_6_1_options(MachineClass *mc) | 54 | + |
55 | +void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn, | ||
56 | + void *vpm, uint32_t desc) | ||
59 | +{ | 57 | +{ |
60 | +} | 58 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; |
61 | +DEFINE_VIRT_MACHINE_AS_LATEST(6, 1) | 59 | + uint64_t *pn = vpn, *pm = vpm; |
60 | + uint32_t *zda = vzda, *zn = vzn; | ||
62 | + | 61 | + |
63 | static void virt_machine_6_0_options(MachineClass *mc) | 62 | + for (row = 0; row < oprsz; ) { |
64 | { | 63 | + uint64_t pa = pn[row >> 4]; |
65 | } | 64 | + do { |
66 | -DEFINE_VIRT_MACHINE_AS_LATEST(6, 0) | 65 | + if (pa & 1) { |
67 | +DEFINE_VIRT_MACHINE(6, 0) | 66 | + for (col = 0; col < oprsz; ) { |
68 | 67 | + uint64_t pb = pm[col >> 4]; | |
69 | static void virt_machine_5_2_options(MachineClass *mc) | 68 | + do { |
70 | { | 69 | + if (pb & 1) { |
71 | diff --git a/hw/core/machine.c b/hw/core/machine.c | 70 | + zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)]; |
72 | index XXXXXXX..XXXXXXX 100644 | 71 | + } |
73 | --- a/hw/core/machine.c | 72 | + pb >>= 4; |
74 | +++ b/hw/core/machine.c | 73 | + } while (++col & 15); |
75 | @@ -XXX,XX +XXX,XX @@ | 74 | + } |
76 | #include "hw/virtio/virtio.h" | 75 | + } |
77 | #include "hw/virtio/virtio-pci.h" | 76 | + pa >>= 4; |
78 | 77 | + } while (++row & 15); | |
79 | +GlobalProperty hw_compat_6_0[] = {}; | 78 | + } |
80 | +const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0); | ||
81 | + | ||
82 | GlobalProperty hw_compat_5_2[] = { | ||
83 | { "ICH9-LPC", "smm-compat", "on"}, | ||
84 | { "PIIX4_PM", "smm-compat", "on"}, | ||
85 | diff --git a/hw/i386/pc.c b/hw/i386/pc.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/hw/i386/pc.c | ||
88 | +++ b/hw/i386/pc.c | ||
89 | @@ -XXX,XX +XXX,XX @@ | ||
90 | #include "trace.h" | ||
91 | #include CONFIG_DEVICES | ||
92 | |||
93 | +GlobalProperty pc_compat_6_0[] = {}; | ||
94 | +const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0); | ||
95 | + | ||
96 | GlobalProperty pc_compat_5_2[] = { | ||
97 | { "ICH9-LPC", "x-smi-cpu-hotunplug", "off" }, | ||
98 | }; | ||
99 | diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c | ||
100 | index XXXXXXX..XXXXXXX 100644 | ||
101 | --- a/hw/i386/pc_piix.c | ||
102 | +++ b/hw/i386/pc_piix.c | ||
103 | @@ -XXX,XX +XXX,XX @@ static void pc_i440fx_machine_options(MachineClass *m) | ||
104 | machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); | ||
105 | } | ||
106 | |||
107 | -static void pc_i440fx_6_0_machine_options(MachineClass *m) | ||
108 | +static void pc_i440fx_6_1_machine_options(MachineClass *m) | ||
109 | { | ||
110 | PCMachineClass *pcmc = PC_MACHINE_CLASS(m); | ||
111 | pc_i440fx_machine_options(m); | ||
112 | @@ -XXX,XX +XXX,XX @@ static void pc_i440fx_6_0_machine_options(MachineClass *m) | ||
113 | pcmc->default_cpu_version = 1; | ||
114 | } | ||
115 | |||
116 | +DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL, | ||
117 | + pc_i440fx_6_1_machine_options); | ||
118 | + | ||
119 | +static void pc_i440fx_6_0_machine_options(MachineClass *m) | ||
120 | +{ | ||
121 | + pc_i440fx_6_1_machine_options(m); | ||
122 | + m->alias = NULL; | ||
123 | + m->is_default = false; | ||
124 | + compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len); | ||
125 | + compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len); | ||
126 | +} | 79 | +} |
127 | + | 80 | + |
128 | DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL, | 81 | +void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn, |
129 | pc_i440fx_6_0_machine_options); | 82 | + void *vpm, uint32_t desc) |
130 | 83 | +{ | |
131 | diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c | 84 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
132 | index XXXXXXX..XXXXXXX 100644 | 85 | + uint8_t *pn = vpn, *pm = vpm; |
133 | --- a/hw/i386/pc_q35.c | 86 | + uint64_t *zda = vzda, *zn = vzn; |
134 | +++ b/hw/i386/pc_q35.c | ||
135 | @@ -XXX,XX +XXX,XX @@ static void pc_q35_machine_options(MachineClass *m) | ||
136 | m->max_cpus = 288; | ||
137 | } | ||
138 | |||
139 | -static void pc_q35_6_0_machine_options(MachineClass *m) | ||
140 | +static void pc_q35_6_1_machine_options(MachineClass *m) | ||
141 | { | ||
142 | PCMachineClass *pcmc = PC_MACHINE_CLASS(m); | ||
143 | pc_q35_machine_options(m); | ||
144 | @@ -XXX,XX +XXX,XX @@ static void pc_q35_6_0_machine_options(MachineClass *m) | ||
145 | pcmc->default_cpu_version = 1; | ||
146 | } | ||
147 | |||
148 | +DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL, | ||
149 | + pc_q35_6_1_machine_options); | ||
150 | + | 87 | + |
151 | +static void pc_q35_6_0_machine_options(MachineClass *m) | 88 | + for (row = 0; row < oprsz; ++row) { |
152 | +{ | 89 | + if (pn[H1(row)] & 1) { |
153 | + pc_q35_6_1_machine_options(m); | 90 | + for (col = 0; col < oprsz; ++col) { |
154 | + m->alias = NULL; | 91 | + if (pm[H1(col)] & 1) { |
155 | + compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len); | 92 | + zda[tile_vslice_index(row) + col] += zn[col]; |
156 | + compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len); | 93 | + } |
94 | + } | ||
95 | + } | ||
96 | + } | ||
157 | +} | 97 | +} |
158 | + | 98 | + |
159 | DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL, | 99 | +void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn, |
160 | pc_q35_6_0_machine_options); | 100 | + void *vpm, uint32_t desc) |
161 | 101 | +{ | |
162 | diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c | 102 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; |
163 | index XXXXXXX..XXXXXXX 100644 | 103 | + uint64_t *pn = vpn, *pm = vpm; |
164 | --- a/hw/ppc/spapr.c | 104 | + uint32_t *zda = vzda, *zn = vzn; |
165 | +++ b/hw/ppc/spapr.c | ||
166 | @@ -XXX,XX +XXX,XX @@ static void spapr_machine_latest_class_options(MachineClass *mc) | ||
167 | type_init(spapr_machine_register_##suffix) | ||
168 | |||
169 | /* | ||
170 | - * pseries-6.0 | ||
171 | + * pseries-6.1 | ||
172 | */ | ||
173 | -static void spapr_machine_6_0_class_options(MachineClass *mc) | ||
174 | +static void spapr_machine_6_1_class_options(MachineClass *mc) | ||
175 | { | ||
176 | /* Defaults for the latest behaviour inherited from the base class */ | ||
177 | } | ||
178 | |||
179 | -DEFINE_SPAPR_MACHINE(6_0, "6.0", true); | ||
180 | +DEFINE_SPAPR_MACHINE(6_1, "6.1", true); | ||
181 | + | 105 | + |
182 | +/* | 106 | + for (row = 0; row < oprsz; ) { |
183 | + * pseries-6.0 | 107 | + uint64_t pa = pn[row >> 4]; |
184 | + */ | 108 | + do { |
185 | +static void spapr_machine_6_0_class_options(MachineClass *mc) | 109 | + if (pa & 1) { |
186 | +{ | 110 | + uint32_t zn_row = zn[H4(row)]; |
187 | + spapr_machine_6_1_class_options(mc); | 111 | + for (col = 0; col < oprsz; ) { |
188 | + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); | 112 | + uint64_t pb = pm[col >> 4]; |
113 | + do { | ||
114 | + if (pb & 1) { | ||
115 | + zda[tile_vslice_index(row) + H4(col)] += zn_row; | ||
116 | + } | ||
117 | + pb >>= 4; | ||
118 | + } while (++col & 15); | ||
119 | + } | ||
120 | + } | ||
121 | + pa >>= 4; | ||
122 | + } while (++row & 15); | ||
123 | + } | ||
189 | +} | 124 | +} |
190 | + | 125 | + |
191 | +DEFINE_SPAPR_MACHINE(6_0, "6.0", false); | 126 | +void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, |
192 | 127 | + void *vpm, uint32_t desc) | |
193 | /* | 128 | +{ |
194 | * pseries-5.2 | 129 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
195 | diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c | 130 | + uint8_t *pn = vpn, *pm = vpm; |
131 | + uint64_t *zda = vzda, *zn = vzn; | ||
132 | + | ||
133 | + for (row = 0; row < oprsz; ++row) { | ||
134 | + if (pn[H1(row)] & 1) { | ||
135 | + uint64_t zn_row = zn[row]; | ||
136 | + for (col = 0; col < oprsz; ++col) { | ||
137 | + if (pm[H1(col)] & 1) { | ||
138 | + zda[tile_vslice_index(row) + col] += zn_row; | ||
139 | + } | ||
140 | + } | ||
141 | + } | ||
142 | + } | ||
143 | +} | ||
144 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
196 | index XXXXXXX..XXXXXXX 100644 | 145 | index XXXXXXX..XXXXXXX 100644 |
197 | --- a/hw/s390x/s390-virtio-ccw.c | 146 | --- a/target/arm/translate-sme.c |
198 | +++ b/hw/s390x/s390-virtio-ccw.c | 147 | +++ b/target/arm/translate-sme.c |
199 | @@ -XXX,XX +XXX,XX @@ bool css_migration_enabled(void) | 148 | @@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) |
200 | } \ | 149 | |
201 | type_init(ccw_machine_register_##suffix) | 150 | TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) |
202 | 151 | TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) | |
203 | +static void ccw_machine_6_1_instance_options(MachineState *machine) | 152 | + |
153 | +static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, | ||
154 | + gen_helper_gvec_4 *fn) | ||
204 | +{ | 155 | +{ |
156 | + int svl = streaming_vec_reg_size(s); | ||
157 | + uint32_t desc = simd_desc(svl, svl, 0); | ||
158 | + TCGv_ptr za, zn, pn, pm; | ||
159 | + | ||
160 | + if (!sme_smza_enabled_check(s)) { | ||
161 | + return true; | ||
162 | + } | ||
163 | + | ||
164 | + /* Sum XZR+zad to find ZAd. */ | ||
165 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); | ||
166 | + zn = vec_full_reg_ptr(s, a->zn); | ||
167 | + pn = pred_full_reg_ptr(s, a->pn); | ||
168 | + pm = pred_full_reg_ptr(s, a->pm); | ||
169 | + | ||
170 | + fn(za, zn, pn, pm, tcg_constant_i32(desc)); | ||
171 | + | ||
172 | + tcg_temp_free_ptr(za); | ||
173 | + tcg_temp_free_ptr(zn); | ||
174 | + tcg_temp_free_ptr(pn); | ||
175 | + tcg_temp_free_ptr(pm); | ||
176 | + return true; | ||
205 | +} | 177 | +} |
206 | + | 178 | + |
207 | +static void ccw_machine_6_1_class_options(MachineClass *mc) | 179 | +TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) |
208 | +{ | 180 | +TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) |
209 | +} | 181 | +TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) |
210 | +DEFINE_CCW_MACHINE(6_1, "6.1", true); | 182 | +TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) |
211 | + | ||
212 | static void ccw_machine_6_0_instance_options(MachineState *machine) | ||
213 | { | ||
214 | + ccw_machine_6_1_instance_options(machine); | ||
215 | } | ||
216 | |||
217 | static void ccw_machine_6_0_class_options(MachineClass *mc) | ||
218 | { | ||
219 | + ccw_machine_6_1_class_options(mc); | ||
220 | + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); | ||
221 | } | ||
222 | -DEFINE_CCW_MACHINE(6_0, "6.0", true); | ||
223 | +DEFINE_CCW_MACHINE(6_0, "6.0", false); | ||
224 | |||
225 | static void ccw_machine_5_2_instance_options(MachineState *machine) | ||
226 | { | ||
227 | -- | 183 | -- |
228 | 2.20.1 | 184 | 2.25.1 |
229 | |||
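Setting the predication aside, the ADDHA and ADDVA helpers in the patch above perform a broadcast add across the ZA tile: element (row, col) gains zn[col] for ADDHA and zn[row] for ADDVA. A self-contained toy model of the 32-bit case, written as plain illustrative C with a row-major tile and the pn/pm predicates omitted (so not the QEMU layout or code):

    #include <stdint.h>

    /* dim = SVL / 32; za is a dim x dim tile stored row-major. */
    static void addha_s(uint32_t *za, const uint32_t *zn, int dim)
    {
        for (int row = 0; row < dim; ++row) {
            for (int col = 0; col < dim; ++col) {
                za[row * dim + col] += zn[col];   /* ADDHA: source indexed by column */
            }
        }
    }

    static void addva_s(uint32_t *za, const uint32_t *zn, int dim)
    {
        for (int row = 0; row < dim; ++row) {
            for (int col = 0; col < dim; ++col) {
                za[row * dim + col] += zn[row];   /* ADDVA: source indexed by row */
            }
        }
    }

In the real helpers pn gates rows, pm gates columns, and the tile element is addressed through tile_vslice_index() rather than row-major arithmetic.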
1 | Currently the gpex PCI controller implements no special behaviour for | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | guest accesses to areas of the PIO and MMIO where it has not mapped | ||
3 | any PCI devices, which means that for Arm you end up with a CPU | ||
4 | exception due to a data abort. | ||
5 | 2 | ||
6 | Most host OSes expect "like an x86 PC" behaviour, where bad accesses | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | like this return -1 for reads and ignore writes. In the interests of | 4 | Message-id: 20220708151540.18136-25-richard.henderson@linaro.org |
8 | not being surprising, make host CPU accesses to these windows behave | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | as -1/discard where there's no mapped PCI device. | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | ||
8 | target/arm/helper-sme.h | 5 +++ | ||
9 | target/arm/sme.decode | 9 +++++ | ||
10 | target/arm/sme_helper.c | 69 ++++++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sme.c | 32 ++++++++++++++++++ | ||
12 | 4 files changed, 115 insertions(+) | ||
10 | 13 | ||
11 | The old behaviour generally didn't cause any problems, because | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
12 | almost always the guest OS will map the PCI devices and then only | ||
13 | access where it has mapped them. One corner case where you will see | ||
14 | this kind of access is if Linux attempts to probe legacy ISA | ||
15 | devices via a PIO window access. So far the only case where we've | ||
16 | seen this has been via the syzkaller fuzzer. | ||
17 | |||
18 | Reported-by: Dmitry Vyukov <dvyukov@google.com> | ||
19 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
20 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
21 | Acked-by: Michael S. Tsirkin <mst@redhat.com> | ||
22 | Message-id: 20210325163315.27724-1-peter.maydell@linaro.org | ||
23 | Fixes: https://bugs.launchpad.net/qemu/+bug/1918917 | ||
24 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
25 | --- | ||
26 | include/hw/pci-host/gpex.h | 4 +++ | ||
27 | hw/core/machine.c | 4 ++- | ||
28 | hw/pci-host/gpex.c | 56 ++++++++++++++++++++++++++++++++++++-- | ||
29 | 3 files changed, 60 insertions(+), 4 deletions(-) | ||
30 | |||
31 | diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h | ||
32 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/include/hw/pci-host/gpex.h | 16 | --- a/target/arm/helper-sme.h |
34 | +++ b/include/hw/pci-host/gpex.h | 17 | +++ b/target/arm/helper-sme.h |
35 | @@ -XXX,XX +XXX,XX @@ struct GPEXHost { | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
36 | 19 | DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | |
37 | MemoryRegion io_ioport; | 20 | DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
38 | MemoryRegion io_mmio; | 21 | DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
39 | + MemoryRegion io_ioport_window; | ||
40 | + MemoryRegion io_mmio_window; | ||
41 | qemu_irq irq[GPEX_NUM_IRQS]; | ||
42 | int irq_num[GPEX_NUM_IRQS]; | ||
43 | + | 22 | + |
44 | + bool allow_unmapped_accesses; | 23 | +DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, |
45 | }; | 24 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
46 | 25 | +DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, | |
47 | struct GPEXConfig { | 26 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
48 | diff --git a/hw/core/machine.c b/hw/core/machine.c | 27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
49 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
50 | --- a/hw/core/machine.c | 29 | --- a/target/arm/sme.decode |
51 | +++ b/hw/core/machine.c | 30 | +++ b/target/arm/sme.decode |
31 | @@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 | ||
32 | ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 | ||
33 | ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 | ||
34 | ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 | ||
35 | + | ||
36 | +### SME Outer Product | ||
37 | + | ||
38 | +&op zad zn zm pm pn sub:bool | ||
39 | +@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op | ||
40 | +@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op | ||
41 | + | ||
42 | +FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 | ||
43 | +FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
44 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/target/arm/sme_helper.c | ||
47 | +++ b/target/arm/sme_helper.c | ||
52 | @@ -XXX,XX +XXX,XX @@ | 48 | @@ -XXX,XX +XXX,XX @@ |
53 | #include "hw/virtio/virtio.h" | 49 | #include "exec/cpu_ldst.h" |
54 | #include "hw/virtio/virtio-pci.h" | 50 | #include "exec/exec-all.h" |
55 | 51 | #include "qemu/int128.h" | |
56 | -GlobalProperty hw_compat_6_0[] = {}; | 52 | +#include "fpu/softfloat.h" |
57 | +GlobalProperty hw_compat_6_0[] = { | 53 | #include "vec_internal.h" |
58 | + { "gpex-pcihost", "allow-unmapped-accesses", "false" }, | 54 | #include "sve_ldst_internal.h" |
59 | +}; | 55 | |
60 | const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0); | 56 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, |
61 | 57 | } | |
62 | GlobalProperty hw_compat_5_2[] = { | 58 | } |
63 | diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c | 59 | } |
64 | index XXXXXXX..XXXXXXX 100644 | 60 | + |
65 | --- a/hw/pci-host/gpex.c | 61 | +void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, |
66 | +++ b/hw/pci-host/gpex.c | 62 | + void *vpm, void *vst, uint32_t desc) |
67 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) | 63 | +{ |
68 | int i; | 64 | + intptr_t row, col, oprsz = simd_maxsz(desc); |
69 | 65 | + uint32_t neg = simd_data(desc) << 31; | |
70 | pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); | 66 | + uint16_t *pn = vpn, *pm = vpm; |
71 | + sysbus_init_mmio(sbd, &pex->mmio); | 67 | + float_status fpst; |
72 | + | 68 | + |
73 | + /* | 69 | + /* |
74 | + * Note that the MemoryRegions io_mmio and io_ioport that we pass | 70 | + * Make a copy of float_status because this operation does not |
75 | + * to pci_register_root_bus() are not the same as the | 71 | + * update the cumulative fp exception status. It also produces |
76 | + * MemoryRegions io_mmio_window and io_ioport_window that we | 72 | + * default nans. |
77 | + * expose as SysBus MRs. The difference is in the behaviour of | ||
78 | + * accesses to addresses where no PCI device has been mapped. | ||
79 | + * | ||
80 | + * io_mmio and io_ioport are the underlying PCI view of the PCI | ||
81 | + * address space, and when a PCI device does a bus master access | ||
82 | + * to a bad address this is reported back to it as a transaction | ||
83 | + * failure. | ||
84 | + * | ||
85 | + * io_mmio_window and io_ioport_window implement "unmapped | ||
86 | + * addresses read as -1 and ignore writes"; this is traditional | ||
87 | + * x86 PC behaviour, which is not mandated by the PCI spec proper | ||
88 | + * but expected by much PCI-using guest software, including Linux. | ||
89 | + * | ||
90 | + * In the interests of not being unnecessarily surprising, we | ||
91 | + * implement it in the gpex PCI host controller, by providing the | ||
92 | + * _window MRs, which are containers with io ops that implement | ||
93 | + * the 'background' behaviour and which hold the real PCI MRs as | ||
94 | + * subregions. | ||
95 | + */ | 73 | + */ |
96 | memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX); | 74 | + fpst = *(float_status *)vst; |
97 | memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024); | 75 | + set_default_nan_mode(true, &fpst); |
98 | |||
99 | - sysbus_init_mmio(sbd, &pex->mmio); | ||
100 | - sysbus_init_mmio(sbd, &s->io_mmio); | ||
101 | - sysbus_init_mmio(sbd, &s->io_ioport); | ||
102 | + if (s->allow_unmapped_accesses) { | ||
103 | + memory_region_init_io(&s->io_mmio_window, OBJECT(s), | ||
104 | + &unassigned_io_ops, OBJECT(s), | ||
105 | + "gpex_mmio_window", UINT64_MAX); | ||
106 | + memory_region_init_io(&s->io_ioport_window, OBJECT(s), | ||
107 | + &unassigned_io_ops, OBJECT(s), | ||
108 | + "gpex_ioport_window", 64 * 1024); | ||
109 | + | 76 | + |
110 | + memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio); | 77 | + for (row = 0; row < oprsz; ) { |
111 | + memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport); | 78 | + uint16_t pa = pn[H2(row >> 4)]; |
112 | + sysbus_init_mmio(sbd, &s->io_mmio_window); | 79 | + do { |
113 | + sysbus_init_mmio(sbd, &s->io_ioport_window); | 80 | + if (pa & 1) { |
114 | + } else { | 81 | + void *vza_row = vza + tile_vslice_offset(row); |
115 | + sysbus_init_mmio(sbd, &s->io_mmio); | 82 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg; |
116 | + sysbus_init_mmio(sbd, &s->io_ioport); | 83 | + |
84 | + for (col = 0; col < oprsz; ) { | ||
85 | + uint16_t pb = pm[H2(col >> 4)]; | ||
86 | + do { | ||
87 | + if (pb & 1) { | ||
88 | + uint32_t *a = vza_row + H1_4(col); | ||
89 | + uint32_t *m = vzm + H1_4(col); | ||
90 | + *a = float32_muladd(n, *m, *a, 0, vst); | ||
91 | + } | ||
92 | + col += 4; | ||
93 | + pb >>= 4; | ||
94 | + } while (col & 15); | ||
95 | + } | ||
96 | + } | ||
97 | + row += 4; | ||
98 | + pa >>= 4; | ||
99 | + } while (row & 15); | ||
100 | + } | ||
101 | +} | ||
102 | + | ||
103 | +void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, | ||
104 | + void *vpm, void *vst, uint32_t desc) | ||
105 | +{ | ||
106 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; | ||
107 | + uint64_t neg = (uint64_t)simd_data(desc) << 63; | ||
108 | + uint64_t *za = vza, *zn = vzn, *zm = vzm; | ||
109 | + uint8_t *pn = vpn, *pm = vpm; | ||
110 | + float_status fpst = *(float_status *)vst; | ||
111 | + | ||
112 | + set_default_nan_mode(true, &fpst); | ||
113 | + | ||
114 | + for (row = 0; row < oprsz; ++row) { | ||
115 | + if (pn[H1(row)] & 1) { | ||
116 | + uint64_t *za_row = &za[tile_vslice_index(row)]; | ||
117 | + uint64_t n = zn[row] ^ neg; | ||
118 | + | ||
119 | + for (col = 0; col < oprsz; ++col) { | ||
120 | + if (pm[H1(col)] & 1) { | ||
121 | + uint64_t *a = &za_row[col]; | ||
122 | + *a = float64_muladd(n, zm[col], *a, 0, &fpst); | ||
123 | + } | ||
124 | + } | ||
125 | + } | ||
126 | + } | ||
127 | +} | ||
128 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
129 | index XXXXXXX..XXXXXXX 100644 | ||
130 | --- a/target/arm/translate-sme.c | ||
131 | +++ b/target/arm/translate-sme.c | ||
132 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) | ||
133 | TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) | ||
134 | TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) | ||
135 | TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) | ||
136 | + | ||
137 | +static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, | ||
138 | + gen_helper_gvec_5_ptr *fn) | ||
139 | +{ | ||
140 | + int svl = streaming_vec_reg_size(s); | ||
141 | + uint32_t desc = simd_desc(svl, svl, a->sub); | ||
142 | + TCGv_ptr za, zn, zm, pn, pm, fpst; | ||
143 | + | ||
144 | + if (!sme_smza_enabled_check(s)) { | ||
145 | + return true; | ||
117 | + } | 146 | + } |
118 | + | 147 | + |
119 | for (i = 0; i < GPEX_NUM_IRQS; i++) { | 148 | + /* Sum XZR+zad to find ZAd. */ |
120 | sysbus_init_irq(sbd, &s->irq[i]); | 149 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); |
121 | s->irq_num[i] = -1; | 150 | + zn = vec_full_reg_ptr(s, a->zn); |
122 | @@ -XXX,XX +XXX,XX @@ static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, | 151 | + zm = vec_full_reg_ptr(s, a->zm); |
123 | return "0000:00"; | 152 | + pn = pred_full_reg_ptr(s, a->pn); |
124 | } | 153 | + pm = pred_full_reg_ptr(s, a->pm); |
125 | 154 | + fpst = fpstatus_ptr(FPST_FPCR); | |
126 | +static Property gpex_host_properties[] = { | ||
127 | + /* | ||
128 | + * Permit CPU accesses to unmapped areas of the PIO and MMIO windows | ||
129 | + * (discarding writes and returning -1 for reads) rather than aborting. | ||
130 | + */ | ||
131 | + DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost, | ||
132 | + allow_unmapped_accesses, true), | ||
133 | + DEFINE_PROP_END_OF_LIST(), | ||
134 | +}; | ||
135 | + | 155 | + |
136 | static void gpex_host_class_init(ObjectClass *klass, void *data) | 156 | + fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); |
137 | { | 157 | + |
138 | DeviceClass *dc = DEVICE_CLASS(klass); | 158 | + tcg_temp_free_ptr(za); |
139 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data) | 159 | + tcg_temp_free_ptr(zn); |
140 | dc->realize = gpex_host_realize; | 160 | + tcg_temp_free_ptr(pn); |
141 | set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); | 161 | + tcg_temp_free_ptr(pm); |
142 | dc->fw_name = "pci"; | 162 | + tcg_temp_free_ptr(fpst); |
143 | + device_class_set_props(dc, gpex_host_properties); | 163 | + return true; |
144 | } | 164 | +} |
145 | 165 | + | |
146 | static void gpex_host_initfn(Object *obj) | 166 | +TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
167 | +TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) | ||
147 | -- | 168 | -- |
148 | 2.20.1 | 169 | 2.25.1 |
149 | |||
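The comment added to gpex_host_realize() above is really describing a general memory-API pattern rather than anything gpex-specific: a container MemoryRegion gets its own io ops, which only run for offsets not covered by any subregion, and the real region is then mapped on top. A rough standalone sketch of that shape (not part of the patch, assuming the usual QEMU memory API declarations; the demo_* names are invented):

static void demo_init_window(Object *owner, MemoryRegion *window,
                             MemoryRegion *real, uint64_t size)
{
    /* Background behaviour for unmapped offsets: reads as -1, writes ignored. */
    memory_region_init_io(window, owner, &unassigned_io_ops, NULL,
                          "demo-window", size);
    /* The real region takes priority wherever it is actually mapped. */
    memory_region_add_subregion(window, 0, real);
}

Accesses that hit a subregion are dispatched there as usual; only the leftover offsets fall through to the container's own ops, which is how the _window MRs provide the "read as -1, ignore writes" behaviour without changing the PCI bus-master view of the address space.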
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Split out a helper function from mte_checkN to perform | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | all of the checking and address manipulation. So far, | 4 | Message-id: 20220708151540.18136-26-richard.henderson@linaro.org
5 | just use this in mte_checkN itself. | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sme.h | 2 ++ | ||
9 | target/arm/sme.decode | 2 ++ | ||
10 | target/arm/sme_helper.c | 56 ++++++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sme.c | 30 ++++++++++++++++++++ | ||
12 | 4 files changed, 90 insertions(+) | ||
6 | 13 | ||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Message-id: 20210416183106.1516563-3-richard.henderson@linaro.org | ||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | --- | ||
13 | target/arm/mte_helper.c | 52 +++++++++++++++++++++++++++++++---------- | ||
14 | 1 file changed, 40 insertions(+), 12 deletions(-) | ||
15 | |||
16 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/mte_helper.c | 16 | --- a/target/arm/helper-sme.h |
19 | +++ b/target/arm/mte_helper.c | 17 | +++ b/target/arm/helper-sme.h |
20 | @@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count) | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, |
21 | return n; | 19 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
22 | } | 20 | DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, |
23 | 21 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) | |
24 | -uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 22 | +DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, |
25 | - uint64_t ptr, uintptr_t ra) | 23 | + void, ptr, ptr, ptr, ptr, ptr, i32) |
26 | +/** | 24 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
27 | + * mte_probe_int() - helper for mte_probe and mte_check | 25 | index XXXXXXX..XXXXXXX 100644 |
28 | + * @env: CPU environment | 26 | --- a/target/arm/sme.decode |
29 | + * @desc: MTEDESC descriptor | 27 | +++ b/target/arm/sme.decode |
30 | + * @ptr: virtual address of the base of the access | 28 | @@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 |
31 | + * @fault: return virtual address of the first check failure | 29 | |
32 | + * | 30 | FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 |
33 | + * Internal routine for both mte_probe and mte_check. | 31 | FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 |
34 | + * Return zero on failure, filling in *fault. | 32 | + |
35 | + * Return negative on trivial success for tbi disabled. | 33 | +BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 |
36 | + * Return positive on success with tbi enabled. | 34 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c |
37 | + */ | 35 | index XXXXXXX..XXXXXXX 100644 |
38 | +static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | 36 | --- a/target/arm/sme_helper.c |
39 | + uintptr_t ra, uint32_t total, uint64_t *fault) | 37 | +++ b/target/arm/sme_helper.c |
40 | { | 38 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, |
41 | int mmu_idx, ptr_tag, bit55; | ||
42 | uint64_t ptr_last, prev_page, next_page; | ||
43 | uint64_t tag_first, tag_last; | ||
44 | uint64_t tag_byte_first, tag_byte_last; | ||
45 | - uint32_t total, tag_count, tag_size, n, c; | ||
46 | + uint32_t tag_count, tag_size, n, c; | ||
47 | uint8_t *mem1, *mem2; | ||
48 | MMUAccessType type; | ||
49 | |||
50 | bit55 = extract64(ptr, 55, 1); | ||
51 | + *fault = ptr; | ||
52 | |||
53 | /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */ | ||
54 | if (unlikely(!tbi_check(desc, bit55))) { | ||
55 | - return ptr; | ||
56 | + return -1; | ||
57 | } | ||
58 | |||
59 | ptr_tag = allocation_tag_from_addr(ptr); | ||
60 | |||
61 | if (tcma_check(desc, bit55, ptr_tag)) { | ||
62 | - goto done; | ||
63 | + return 1; | ||
64 | } | ||
65 | |||
66 | mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
67 | type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD; | ||
68 | - total = FIELD_EX32(desc, MTEDESC, TSIZE); | ||
69 | |||
70 | /* Find the addr of the end of the access */ | ||
71 | ptr_last = ptr + total - 1; | ||
72 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
73 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total, | ||
74 | MMU_DATA_LOAD, tag_size, ra); | ||
75 | if (!mem1) { | ||
76 | - goto done; | ||
77 | + return 1; | ||
78 | } | ||
79 | /* Perform all of the comparisons. */ | ||
80 | n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count); | ||
81 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
82 | } | ||
83 | if (n == c) { | ||
84 | if (!mem2) { | ||
85 | - goto done; | ||
86 | + return 1; | ||
87 | } | ||
88 | n += checkN(mem2, 0, ptr_tag, tag_count - c); | ||
89 | } | 39 | } |
90 | } | 40 | } |
91 | 41 | } | |
92 | + if (likely(n == tag_count)) { | 42 | + |
93 | + return 1; | 43 | +/* |
44 | + * Alter PAIR as needed for controlling predicates being false, | ||
45 | + * and for NEG on an enabled row element. | ||
46 | + */ | ||
47 | +static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) | ||
48 | +{ | ||
49 | + /* | ||
50 | + * The pseudocode uses a conditional negate after the conditional zero. | ||
51 | + * It is simpler here to unconditionally negate before conditional zero. | ||
52 | + */ | ||
53 | + pair ^= neg; | ||
54 | + if (!(pg & 1)) { | ||
55 | + pair &= 0xffff0000u; | ||
56 | + } | ||
57 | + if (!(pg & 4)) { | ||
58 | + pair &= 0x0000ffffu; | ||
59 | + } | ||
60 | + return pair; | ||
61 | +} | ||
62 | + | ||
63 | +void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, | ||
64 | + void *vpm, uint32_t desc) | ||
65 | +{ | ||
66 | + intptr_t row, col, oprsz = simd_maxsz(desc); | ||
67 | + uint32_t neg = simd_data(desc) * 0x80008000u; | ||
68 | + uint16_t *pn = vpn, *pm = vpm; | ||
69 | + | ||
70 | + for (row = 0; row < oprsz; ) { | ||
71 | + uint16_t prow = pn[H2(row >> 4)]; | ||
72 | + do { | ||
73 | + void *vza_row = vza + tile_vslice_offset(row); | ||
74 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); | ||
75 | + | ||
76 | + n = f16mop_adj_pair(n, prow, neg); | ||
77 | + | ||
78 | + for (col = 0; col < oprsz; ) { | ||
79 | + uint16_t pcol = pm[H2(col >> 4)]; | ||
80 | + do { | ||
81 | + if (prow & pcol & 0b0101) { | ||
82 | + uint32_t *a = vza_row + H1_4(col); | ||
83 | + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); | ||
84 | + | ||
85 | + m = f16mop_adj_pair(m, pcol, 0); | ||
86 | + *a = bfdotadd(*a, n, m); | ||
87 | + | ||
88 | + col += 4; | ||
89 | + pcol >>= 4; | ||
90 | + } | ||
91 | + } while (col & 15); | ||
92 | + } | ||
93 | + row += 4; | ||
94 | + prow >>= 4; | ||
95 | + } while (row & 15); | ||
96 | + } | ||
97 | +} | ||
98 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/translate-sme.c | ||
101 | +++ b/target/arm/translate-sme.c | ||
102 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) | ||
103 | TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) | ||
104 | TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) | ||
105 | |||
106 | +static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, | ||
107 | + gen_helper_gvec_5 *fn) | ||
108 | +{ | ||
109 | + int svl = streaming_vec_reg_size(s); | ||
110 | + uint32_t desc = simd_desc(svl, svl, a->sub); | ||
111 | + TCGv_ptr za, zn, zm, pn, pm; | ||
112 | + | ||
113 | + if (!sme_smza_enabled_check(s)) { | ||
114 | + return true; | ||
94 | + } | 115 | + } |
95 | + | 116 | + |
96 | /* | 117 | + /* Sum XZR+zad to find ZAd. */ |
97 | * If we failed, we know which granule. For the first granule, the | 118 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); |
98 | * failure address is @ptr, the first byte accessed. Otherwise the | 119 | + zn = vec_full_reg_ptr(s, a->zn); |
99 | * failure address is the first byte of the nth granule. | 120 | + zm = vec_full_reg_ptr(s, a->zm); |
100 | */ | 121 | + pn = pred_full_reg_ptr(s, a->pn); |
101 | - if (unlikely(n < tag_count)) { | 122 | + pm = pred_full_reg_ptr(s, a->pm); |
102 | - uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE); | 123 | + |
103 | - mte_check_fail(env, desc, fault, ra); | 124 | + fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); |
104 | + if (n > 0) { | 125 | + |
105 | + *fault = tag_first + n * TAG_GRANULE; | 126 | + tcg_temp_free_ptr(za); |
106 | } | 127 | + tcg_temp_free_ptr(zn); |
107 | + return 0; | 128 | + tcg_temp_free_ptr(pn); |
129 | + tcg_temp_free_ptr(pm); | ||
130 | + return true; | ||
108 | +} | 131 | +} |
109 | |||
110 | - done: | ||
111 | +uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
112 | + uint64_t ptr, uintptr_t ra) | ||
113 | +{ | ||
114 | + uint64_t fault; | ||
115 | + uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE); | ||
116 | + int ret = mte_probe_int(env, desc, ptr, ra, total, &fault); | ||
117 | + | 132 | + |
118 | + if (unlikely(ret == 0)) { | 133 | static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
119 | + mte_check_fail(env, desc, fault, ra); | 134 | gen_helper_gvec_5_ptr *fn) |
120 | + } else if (ret < 0) { | 135 | { |
121 | + return ptr; | 136 | @@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
122 | + } | 137 | |
123 | return useronly_clean_ptr(ptr); | 138 | TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
124 | } | 139 | TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) |
125 | 140 | + | |
141 | +/* TODO: FEAT_EBF16 */ | ||
142 | +TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) | ||
126 | -- | 143 | -- |
127 | 2.20.1 | 144 | 2.25.1 |
128 | |||
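One idiom worth calling out, since it appears in the sme_fmopa_s helper earlier in the series, in sme_bfmopa above, and again in the FMOPA_h helper that follows: the governing predicate is read one uint16_t at a time, and the byte offset into the vector advances by 4 while the predicate chunk is shifted down by 4 bits. A stripped-down sketch of just that loop structure (plain C, not QEMU code; the real helpers also apply the H2() host-endian index adjustment, which is omitted here):

#include <stdint.h>

static void walk_predicate(const uint16_t *p, unsigned oprsz /* bytes */)
{
    for (unsigned off = 0; off < oprsz; ) {
        uint16_t bits = p[off >> 4];    /* one predicate bit per data byte */
        do {
            if (bits & 1) {
                /* operate on the 32-bit column at byte offset 'off';
                 * for 32-bit elements only the low bit of each group
                 * of four predicate bits is significant */
            }
            off += 4;
            bits >>= 4;
        } while (off & 15);
    }
}

The bf16/f16 variants test two bits per group (the 0b0101 mask) because each 32-bit column there holds a pair of 16-bit elements.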
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Buglink: https://bugs.launchpad.net/bugs/1921948 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20210416183106.1516563-5-richard.henderson@linaro.org | 4 | Message-id: 20220708151540.18136-27-richard.henderson@linaro.org |
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | --- | 7 | --- |
9 | tests/tcg/aarch64/mte-5.c | 44 +++++++++++++++++++++++++++++++ | 8 | target/arm/helper-sme.h | 2 ++ |
10 | tests/tcg/aarch64/Makefile.target | 2 +- | 9 | target/arm/sme.decode | 1 + |
11 | 2 files changed, 45 insertions(+), 1 deletion(-) | 10 | target/arm/sme_helper.c | 74 ++++++++++++++++++++++++++++++++++++++ |
12 | create mode 100644 tests/tcg/aarch64/mte-5.c | 11 | target/arm/translate-sme.c | 1 + |
12 | 4 files changed, 78 insertions(+) | ||
13 | 13 | ||
14 | diff --git a/tests/tcg/aarch64/mte-5.c b/tests/tcg/aarch64/mte-5.c | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | new file mode 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | index XXXXXXX..XXXXXXX | 16 | --- a/target/arm/helper-sme.h |
17 | --- /dev/null | 17 | +++ b/target/arm/helper-sme.h |
18 | +++ b/tests/tcg/aarch64/mte-5.c | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
19 | @@ -XXX,XX +XXX,XX @@ | 19 | DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
20 | +/* | 20 | DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
21 | + * Memory tagging, faulting unaligned access. | 21 | |
22 | + * | 22 | +DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, |
23 | + * Copyright (c) 2021 Linaro Ltd | 23 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
24 | + * SPDX-License-Identifier: GPL-2.0-or-later | 24 | DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, |
25 | + */ | 25 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
26 | DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, | ||
27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/sme.decode | ||
30 | +++ b/target/arm/sme.decode | ||
31 | @@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 | ||
32 | FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
33 | |||
34 | BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 | ||
35 | +FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 | ||
36 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sme_helper.c | ||
39 | +++ b/target/arm/sme_helper.c | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) | ||
41 | return pair; | ||
42 | } | ||
43 | |||
44 | +static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, | ||
45 | + float_status *s_std, float_status *s_odd) | ||
46 | +{ | ||
47 | + float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std); | ||
48 | + float64 e1c = float16_to_float64(e1 >> 16, true, s_std); | ||
49 | + float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std); | ||
50 | + float64 e2c = float16_to_float64(e2 >> 16, true, s_std); | ||
51 | + float64 t64; | ||
52 | + float32 t32; | ||
26 | + | 53 | + |
27 | +#include "mte.h" | 54 | + /* |
55 | + * The ARM pseudocode function FPDot performs both multiplies | ||
56 | + * and the add with a single rounding operation. Emulate this | ||
57 | + * by performing the first multiply in round-to-odd, then doing | ||
58 | + * the second multiply as fused multiply-add, and rounding to | ||
59 | + * float32 all in one step. | ||
60 | + */ | ||
61 | + t64 = float64_mul(e1r, e2r, s_odd); | ||
62 | + t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); | ||
28 | + | 63 | + |
29 | +void pass(int sig, siginfo_t *info, void *uc) | 64 | + /* This conversion is exact, because we've already rounded. */ |
30 | +{ | 65 | + t32 = float64_to_float32(t64, s_std); |
31 | + assert(info->si_code == SEGV_MTESERR); | 66 | + |
32 | + exit(0); | 67 | + /* The final accumulation step is not fused. */ |
68 | + return float32_add(sum, t32, s_std); | ||
33 | +} | 69 | +} |
34 | + | 70 | + |
35 | +int main(int ac, char **av) | 71 | +void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, |
72 | + void *vpm, void *vst, uint32_t desc) | ||
36 | +{ | 73 | +{ |
37 | + struct sigaction sa; | 74 | + intptr_t row, col, oprsz = simd_maxsz(desc); |
38 | + void *p0, *p1, *p2; | 75 | + uint32_t neg = simd_data(desc) * 0x80008000u; |
39 | + long excl = 1; | 76 | + uint16_t *pn = vpn, *pm = vpm; |
77 | + float_status fpst_odd, fpst_std; | ||
40 | + | 78 | + |
41 | + enable_mte(PR_MTE_TCF_SYNC); | 79 | + /* |
42 | + p0 = alloc_mte_mem(sizeof(*p0)); | 80 | + * Make a copy of float_status because this operation does not |
81 | + * update the cumulative fp exception status. It also produces | ||
82 | + * default nans. Make a second copy with round-to-odd -- see above. | ||
83 | + */ | ||
84 | + fpst_std = *(float_status *)vst; | ||
85 | + set_default_nan_mode(true, &fpst_std); | ||
86 | + fpst_odd = fpst_std; | ||
87 | + set_float_rounding_mode(float_round_to_odd, &fpst_odd); | ||
43 | + | 88 | + |
44 | + /* Create two differently tagged pointers. */ | 89 | + for (row = 0; row < oprsz; ) { |
45 | + asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl)); | 90 | + uint16_t prow = pn[H2(row >> 4)]; |
46 | + asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1)); | 91 | + do { |
47 | + assert(excl != 1); | 92 | + void *vza_row = vza + tile_vslice_offset(row); |
48 | + asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl)); | 93 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); |
49 | + assert(p1 != p2); | ||
50 | + | 94 | + |
51 | + memset(&sa, 0, sizeof(sa)); | 95 | + n = f16mop_adj_pair(n, prow, neg); |
52 | + sa.sa_sigaction = pass; | ||
53 | + sa.sa_flags = SA_SIGINFO; | ||
54 | + sigaction(SIGSEGV, &sa, NULL); | ||
55 | + | 96 | + |
56 | + /* Store store two different tags in sequential granules. */ | 97 | + for (col = 0; col < oprsz; ) { |
57 | + asm("stg %0, [%0]" : : "r"(p1)); | 98 | + uint16_t pcol = pm[H2(col >> 4)]; |
58 | + asm("stg %0, [%0]" : : "r"(p2 + 16)); | 99 | + do { |
100 | + if (prow & pcol & 0b0101) { | ||
101 | + uint32_t *a = vza_row + H1_4(col); | ||
102 | + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); | ||
59 | + | 103 | + |
60 | + /* Perform an unaligned load crossing the granules. */ | 104 | + m = f16mop_adj_pair(m, pcol, 0); |
61 | + asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12)); | 105 | + *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd); |
62 | + abort(); | 106 | + |
107 | + col += 4; | ||
108 | + pcol >>= 4; | ||
109 | + } | ||
110 | + } while (col & 15); | ||
111 | + } | ||
112 | + row += 4; | ||
113 | + prow >>= 4; | ||
114 | + } while (row & 15); | ||
115 | + } | ||
63 | +} | 116 | +} |
64 | diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target | 117 | + |
118 | void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, | ||
119 | void *vpm, uint32_t desc) | ||
120 | { | ||
121 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | 122 | index XXXXXXX..XXXXXXX 100644 |
66 | --- a/tests/tcg/aarch64/Makefile.target | 123 | --- a/target/arm/translate-sme.c |
67 | +++ b/tests/tcg/aarch64/Makefile.target | 124 | +++ b/target/arm/translate-sme.c |
68 | @@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += bti-2 | 125 | @@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
69 | 126 | return true; | |
70 | # MTE Tests | 127 | } |
71 | ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),) | 128 | |
72 | -AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-6 | 129 | +TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h) |
73 | +AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 | 130 | TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
74 | mte-%: CFLAGS += -march=armv8.5-a+memtag | 131 | TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) |
75 | endif | ||
76 | 132 | ||
77 | -- | 133 | -- |
78 | 2.20.1 | 134 | 2.25.1 |
79 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We were incorrectly assuming that only the first byte of an MTE access | 3 | This is SMOPA, SUMOPA, USMOPA_s, UMOPA, for both Int8 and Int16. |
4 | is checked against the tags. But per the ARM, unaligned accesses are | ||
5 | pre-decomposed into single-byte accesses. So by the time we reach the | ||
6 | actual MTE check in the ARM pseudocode, all accesses are aligned. | ||
7 | 4 | ||
8 | We cannot tell a priori whether or not a given scalar access is aligned, | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | therefore we must at least check. Use mte_probe_int, which is already | ||
10 | set up for checking multiple granules. | ||
11 | |||
12 | Buglink: https://bugs.launchpad.net/bugs/1921948 | ||
13 | Tested-by: Alex Bennée <alex.bennee@linaro.org> | ||
14 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
16 | Message-id: 20210416183106.1516563-4-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-28-richard.henderson@linaro.org |
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
18 | --- | 9 | --- |
19 | target/arm/mte_helper.c | 109 +++++++++++++--------------------------- | 10 | target/arm/helper-sme.h | 16 ++++++++ |
20 | 1 file changed, 35 insertions(+), 74 deletions(-) | 11 | target/arm/sme.decode | 10 +++++ |
12 | target/arm/sme_helper.c | 82 ++++++++++++++++++++++++++++++++++++++ | ||
13 | target/arm/translate-sme.c | 10 +++++ | ||
14 | 4 files changed, 118 insertions(+) | ||
21 | 15 | ||
22 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | 16 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
23 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/target/arm/mte_helper.c | 18 | --- a/target/arm/helper-sme.h |
25 | +++ b/target/arm/mte_helper.c | 19 | +++ b/target/arm/helper-sme.h |
26 | @@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc, | 20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, |
21 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) | ||
22 | DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, | ||
23 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG, | ||
25 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG, | ||
27 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG, | ||
29 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG, | ||
37 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG, | ||
39 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
40 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/target/arm/sme.decode | ||
43 | +++ b/target/arm/sme.decode | ||
44 | @@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
45 | |||
46 | BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 | ||
47 | FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 | ||
48 | + | ||
49 | +SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32 | ||
50 | +SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32 | ||
51 | +USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32 | ||
52 | +UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32 | ||
53 | + | ||
54 | +SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64 | ||
55 | +SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64 | ||
56 | +USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64 | ||
57 | +UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64 | ||
58 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/target/arm/sme_helper.c | ||
61 | +++ b/target/arm/sme_helper.c | ||
62 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, | ||
63 | } while (row & 15); | ||
27 | } | 64 | } |
28 | } | 65 | } |
29 | 66 | + | |
30 | -/* | 67 | +typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool); |
31 | - * Perform an MTE checked access for a single logical or atomic access. | 68 | + |
32 | - */ | 69 | +static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, |
33 | -static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | 70 | + uint8_t *pn, uint8_t *pm, |
34 | - uintptr_t ra, int bit55) | 71 | + uint32_t desc, IMOPFn *fn) |
35 | -{ | ||
36 | - int mem_tag, mmu_idx, ptr_tag, size; | ||
37 | - MMUAccessType type; | ||
38 | - uint8_t *mem; | ||
39 | - | ||
40 | - ptr_tag = allocation_tag_from_addr(ptr); | ||
41 | - | ||
42 | - if (tcma_check(desc, bit55, ptr_tag)) { | ||
43 | - return true; | ||
44 | - } | ||
45 | - | ||
46 | - mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
47 | - type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD; | ||
48 | - size = FIELD_EX32(desc, MTEDESC, ESIZE); | ||
49 | - | ||
50 | - mem = allocation_tag_mem(env, mmu_idx, ptr, type, size, | ||
51 | - MMU_DATA_LOAD, 1, ra); | ||
52 | - if (!mem) { | ||
53 | - return true; | ||
54 | - } | ||
55 | - | ||
56 | - mem_tag = load_tag1(ptr, mem); | ||
57 | - return ptr_tag == mem_tag; | ||
58 | -} | ||
59 | - | ||
60 | -/* | ||
61 | - * No-fault version of mte_check1, to be used by SVE for MemSingleNF. | ||
62 | - * Returns false if the access is Checked and the check failed. This | ||
63 | - * is only intended to probe the tag -- the validity of the page must | ||
64 | - * be checked beforehand. | ||
65 | - */ | ||
66 | -bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
67 | -{ | ||
68 | - int bit55 = extract64(ptr, 55, 1); | ||
69 | - | ||
70 | - /* If TBI is disabled, the access is unchecked. */ | ||
71 | - if (unlikely(!tbi_check(desc, bit55))) { | ||
72 | - return true; | ||
73 | - } | ||
74 | - | ||
75 | - return mte_probe1_int(env, desc, ptr, 0, bit55); | ||
76 | -} | ||
77 | - | ||
78 | -uint64_t mte_check1(CPUARMState *env, uint32_t desc, | ||
79 | - uint64_t ptr, uintptr_t ra) | ||
80 | -{ | ||
81 | - int bit55 = extract64(ptr, 55, 1); | ||
82 | - | ||
83 | - /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */ | ||
84 | - if (unlikely(!tbi_check(desc, bit55))) { | ||
85 | - return ptr; | ||
86 | - } | ||
87 | - | ||
88 | - if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) { | ||
89 | - mte_check_fail(env, desc, ptr, ra); | ||
90 | - } | ||
91 | - | ||
92 | - return useronly_clean_ptr(ptr); | ||
93 | -} | ||
94 | - | ||
95 | -uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
96 | -{ | ||
97 | - return mte_check1(env, desc, ptr, GETPC()); | ||
98 | -} | ||
99 | - | ||
100 | -/* | ||
101 | - * Perform an MTE checked access for multiple logical accesses. | ||
102 | - */ | ||
103 | - | ||
104 | /** | ||
105 | * checkN: | ||
106 | * @tag: tag memory to test | ||
107 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
108 | return mte_checkN(env, desc, ptr, GETPC()); | ||
109 | } | ||
110 | |||
111 | +uint64_t mte_check1(CPUARMState *env, uint32_t desc, | ||
112 | + uint64_t ptr, uintptr_t ra) | ||
113 | +{ | 72 | +{ |
114 | + uint64_t fault; | 73 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
115 | + uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE); | 74 | + bool neg = simd_data(desc); |
116 | + int ret = mte_probe_int(env, desc, ptr, ra, total, &fault); | ||
117 | + | 75 | + |
118 | + if (unlikely(ret == 0)) { | 76 | + for (row = 0; row < oprsz; ++row) { |
119 | + mte_check_fail(env, desc, fault, ra); | 77 | + uint8_t pa = pn[H1(row)]; |
120 | + } else if (ret < 0) { | 78 | + uint64_t *za_row = &za[tile_vslice_index(row)]; |
121 | + return ptr; | 79 | + uint64_t n = zn[row]; |
80 | + | ||
81 | + for (col = 0; col < oprsz; ++col) { | ||
82 | + uint8_t pb = pm[H1(col)]; | ||
83 | + uint64_t *a = &za_row[col]; | ||
84 | + | ||
85 | + *a = fn(n, zm[col], *a, pa & pb, neg); | ||
86 | + } | ||
122 | + } | 87 | + } |
123 | + return useronly_clean_ptr(ptr); | ||
124 | +} | 88 | +} |
125 | + | 89 | + |
126 | +uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr) | 90 | +#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ |
127 | +{ | 91 | +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ |
128 | + return mte_check1(env, desc, ptr, GETPC()); | 92 | +{ \ |
93 | + uint32_t sum0 = 0, sum1 = 0; \ | ||
94 | + /* Apply P to N as a mask, making the inactive elements 0. */ \ | ||
95 | + n &= expand_pred_b(p); \ | ||
96 | + sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ | ||
97 | + sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ | ||
98 | + sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ | ||
99 | + sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ | ||
100 | + sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ | ||
101 | + sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \ | ||
102 | + sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ | ||
103 | + sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \ | ||
104 | + if (neg) { \ | ||
105 | + sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \ | ||
106 | + } else { \ | ||
107 | + sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \ | ||
108 | + } \ | ||
109 | + return ((uint64_t)sum1 << 32) | sum0; \ | ||
129 | +} | 110 | +} |
130 | + | 111 | + |
131 | +/* | 112 | +#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ |
132 | + * No-fault version of mte_check1, to be used by SVE for MemSingleNF. | 113 | +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ |
133 | + * Returns false if the access is Checked and the check failed. This | 114 | +{ \ |
134 | + * is only intended to probe the tag -- the validity of the page must | 115 | + uint64_t sum = 0; \ |
135 | + * be checked beforehand. | 116 | + /* Apply P to N as a mask, making the inactive elements 0. */ \ |
136 | + */ | 117 | + n &= expand_pred_h(p); \ |
137 | +bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr) | 118 | + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ |
138 | +{ | 119 | + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ |
139 | + uint64_t fault; | 120 | + sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ |
140 | + uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE); | 121 | + sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ |
141 | + int ret = mte_probe_int(env, desc, ptr, 0, total, &fault); | 122 | + return neg ? a - sum : a + sum; \ |
142 | + | ||
143 | + return ret != 0; | ||
144 | +} | 123 | +} |
145 | + | 124 | + |
146 | /* | 125 | +DEF_IMOP_32(smopa_s, int8_t, int8_t) |
147 | * Perform an MTE checked access for DC_ZVA. | 126 | +DEF_IMOP_32(umopa_s, uint8_t, uint8_t) |
148 | */ | 127 | +DEF_IMOP_32(sumopa_s, int8_t, uint8_t) |
128 | +DEF_IMOP_32(usmopa_s, uint8_t, int8_t) | ||
129 | + | ||
130 | +DEF_IMOP_64(smopa_d, int16_t, int16_t) | ||
131 | +DEF_IMOP_64(umopa_d, uint16_t, uint16_t) | ||
132 | +DEF_IMOP_64(sumopa_d, int16_t, uint16_t) | ||
133 | +DEF_IMOP_64(usmopa_d, uint16_t, int16_t) | ||
134 | + | ||
135 | +#define DEF_IMOPH(NAME) \ | ||
136 | + void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \ | ||
137 | + void *vpm, uint32_t desc) \ | ||
138 | + { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); } | ||
139 | + | ||
140 | +DEF_IMOPH(smopa_s) | ||
141 | +DEF_IMOPH(umopa_s) | ||
142 | +DEF_IMOPH(sumopa_s) | ||
143 | +DEF_IMOPH(usmopa_s) | ||
144 | +DEF_IMOPH(smopa_d) | ||
145 | +DEF_IMOPH(umopa_d) | ||
146 | +DEF_IMOPH(sumopa_d) | ||
147 | +DEF_IMOPH(usmopa_d) | ||
148 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
149 | index XXXXXXX..XXXXXXX 100644 | ||
150 | --- a/target/arm/translate-sme.c | ||
151 | +++ b/target/arm/translate-sme.c | ||
152 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f | ||
153 | |||
154 | /* TODO: FEAT_EBF16 */ | ||
155 | TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) | ||
156 | + | ||
157 | +TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s) | ||
158 | +TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s) | ||
159 | +TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s) | ||
160 | +TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s) | ||
161 | + | ||
162 | +TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d) | ||
163 | +TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d) | ||
164 | +TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d) | ||
165 | +TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d) | ||
149 | -- | 166 | -- |
150 | 2.20.1 | 167 | 2.25.1 |
151 | |||
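To make the granule-crossing case concrete: with the architectural 16-byte tag granule, an 8-byte access that starts 12 bytes into a granule touches two granules, so both allocation tags must match the pointer's logical tag. That is exactly the situation the mte-5.c test added earlier in this series sets up. A tiny self-contained illustration (plain C, not QEMU code; the base address is made up):

#include <stdint.h>
#include <stdio.h>

#define TAG_GRANULE 16   /* architectural MTE tag granule, in bytes */

int main(void)
{
    uint64_t base = 0x10000;     /* hypothetical granule-aligned buffer */
    uint64_t ptr = base + 12;    /* unaligned 8-byte load, as in mte-5.c */
    unsigned size = 8;

    uint64_t tag_first = ptr & ~(uint64_t)(TAG_GRANULE - 1);
    uint64_t tag_last = (ptr + size - 1) & ~(uint64_t)(TAG_GRANULE - 1);

    /* Prints 2: checking only the byte at ptr would miss a mismatched
     * tag in the second granule. */
    printf("granules touched: %u\n",
           (unsigned)((tag_last - tag_first) / TAG_GRANULE + 1));
    return 0;
}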
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | This is the only caller. Adjust some commentary to talk | 3 | This is an SVE instruction that operates using the SVE vector |
4 | about SCTLR_B instead of the vanishing function. | 4 | length but that is present only if SME is implemented.
5 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210419202257.161730-13-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-29-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/translate.c | 37 ++++++++++++++++--------------------- | 11 | target/arm/sve.decode | 20 +++++++++++++ |
12 | 1 file changed, 16 insertions(+), 21 deletions(-) | 12 | target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++ |
13 | 2 files changed, 77 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 15 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/translate.c | 17 | --- a/target/arm/sve.decode |
17 | +++ b/target/arm/translate.c | 18 | +++ b/target/arm/sve.decode |
18 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | 19 | @@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 |
19 | gen_aa32_st_i32(s, val, a32, index, OPC); \ | 20 | |
20 | } | 21 | ### SVE2 floating-point bfloat16 dot-product (indexed) |
21 | 22 | BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2 | |
22 | -static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) | ||
23 | -{ | ||
24 | - /* Not needed for user-mode BE32, where we use MO_BE instead. */ | ||
25 | - if (!IS_USER_ONLY && s->sctlr_b) { | ||
26 | - tcg_gen_rotri_i64(val, val, 32); | ||
27 | - } | ||
28 | -} | ||
29 | - | ||
30 | static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | ||
31 | int index, MemOp opc) | ||
32 | { | ||
33 | TCGv addr = gen_aa32_addr(s, a32, opc); | ||
34 | tcg_gen_qemu_ld_i64(val, addr, index, opc); | ||
35 | - gen_aa32_frob64(s, val); | ||
36 | + | 23 | + |
37 | + /* Not needed for user-mode BE32, where we use MO_BE instead. */ | 24 | +### SVE broadcast predicate element |
38 | + if (!IS_USER_ONLY && s->sctlr_b) { | 25 | + |
39 | + tcg_gen_rotri_i64(val, val, 32); | 26 | +&psel esz pd pn pm rv imm |
27 | +%psel_rv 16:2 !function=plus_12 | ||
28 | +%psel_imm_b 22:2 19:2 | ||
29 | +%psel_imm_h 22:2 20:1 | ||
30 | +%psel_imm_s 22:2 | ||
31 | +%psel_imm_d 23:1 | ||
32 | +@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \ | ||
33 | + &psel rv=%psel_rv | ||
34 | + | ||
35 | +PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \ | ||
36 | + @psel esz=0 imm=%psel_imm_b | ||
37 | +PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \ | ||
38 | + @psel esz=1 imm=%psel_imm_h | ||
39 | +PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \ | ||
40 | + @psel esz=2 imm=%psel_imm_s | ||
41 | +PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ | ||
42 | + @psel esz=3 imm=%psel_imm_d | ||
43 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/target/arm/translate-sve.c | ||
46 | +++ b/target/arm/translate-sve.c | ||
47 | @@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) | ||
48 | |||
49 | TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) | ||
50 | TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) | ||
51 | + | ||
52 | +static bool trans_PSEL(DisasContext *s, arg_psel *a) | ||
53 | +{ | ||
54 | + int vl = vec_full_reg_size(s); | ||
55 | + int pl = pred_gvec_reg_size(s); | ||
56 | + int elements = vl >> a->esz; | ||
57 | + TCGv_i64 tmp, didx, dbit; | ||
58 | + TCGv_ptr ptr; | ||
59 | + | ||
60 | + if (!dc_isar_feature(aa64_sme, s)) { | ||
61 | + return false; | ||
62 | + } | ||
63 | + if (!sve_access_check(s)) { | ||
64 | + return true; | ||
40 | + } | 65 | + } |
41 | + | 66 | + |
42 | tcg_temp_free(addr); | 67 | + tmp = tcg_temp_new_i64(); |
43 | } | 68 | + dbit = tcg_temp_new_i64(); |
44 | 69 | + didx = tcg_temp_new_i64(); | |
45 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | 70 | + ptr = tcg_temp_new_ptr(); |
46 | TCGv_i32 tmp2 = tcg_temp_new_i32(); | ||
47 | TCGv_i64 t64 = tcg_temp_new_i64(); | ||
48 | |||
49 | - /* For AArch32, architecturally the 32-bit word at the lowest | ||
50 | + /* | ||
51 | + * For AArch32, architecturally the 32-bit word at the lowest | ||
52 | * address is always Rt and the one at addr+4 is Rt2, even if | ||
53 | * the CPU is big-endian. That means we don't want to do a | ||
54 | - * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if | ||
55 | - * for an architecturally 64-bit access, but instead do a | ||
56 | - * 64-bit access using MO_BE if appropriate and then split | ||
57 | - * the two halves. | ||
58 | - * This only makes a difference for BE32 user-mode, where | ||
59 | - * frob64() must not flip the two halves of the 64-bit data | ||
60 | - * but this code must treat BE32 user-mode like BE32 system. | ||
61 | + * gen_aa32_ld_i64(), which checks SCTLR_B as if for an | ||
62 | + * architecturally 64-bit access, but instead do a 64-bit access | ||
63 | + * using MO_BE if appropriate and then split the two halves. | ||
64 | */ | ||
65 | TCGv taddr = gen_aa32_addr(s, addr, opc); | ||
66 | |||
67 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
68 | TCGv_i64 n64 = tcg_temp_new_i64(); | ||
69 | |||
70 | t2 = load_reg(s, rt2); | ||
71 | - /* For AArch32, architecturally the 32-bit word at the lowest | ||
72 | + | 71 | + |
73 | + /* | 72 | + /* Compute the predicate element. */ |
74 | + * For AArch32, architecturally the 32-bit word at the lowest | 73 | + tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); |
75 | * address is always Rt and the one at addr+4 is Rt2, even if | 74 | + if (is_power_of_2(elements)) { |
76 | * the CPU is big-endian. Since we're going to treat this as a | 75 | + tcg_gen_andi_i64(tmp, tmp, elements - 1); |
77 | * single 64-bit BE store, we need to put the two halves in the | 76 | + } else { |
78 | * opposite order for BE to LE, so that they end up in the right | 77 | + tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); |
79 | - * places. | 78 | + } |
80 | - * We don't want gen_aa32_frob64() because that does the wrong | 79 | + |
81 | - * thing for BE32 usermode. | 80 | + /* Extract the predicate byte and bit indices. */ |
82 | + * places. We don't want gen_aa32_st_i64, because that checks | 81 | + tcg_gen_shli_i64(tmp, tmp, a->esz); |
83 | + * SCTLR_B as if for an architectural 64-bit access. | 82 | + tcg_gen_andi_i64(dbit, tmp, 7); |
84 | */ | 83 | + tcg_gen_shri_i64(didx, tmp, 3); |
85 | if (s->be_data == MO_BE) { | 84 | + if (HOST_BIG_ENDIAN) { |
86 | tcg_gen_concat_i32_i64(n64, t2, t1); | 85 | + tcg_gen_xori_i64(didx, didx, 7); |
86 | + } | ||
87 | + | ||
88 | + /* Load the predicate word. */ | ||
89 | + tcg_gen_trunc_i64_ptr(ptr, didx); | ||
90 | + tcg_gen_add_ptr(ptr, ptr, cpu_env); | ||
91 | + tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm)); | ||
92 | + | ||
93 | + /* Extract the predicate bit and replicate to MO_64. */ | ||
94 | + tcg_gen_shr_i64(tmp, tmp, dbit); | ||
95 | + tcg_gen_andi_i64(tmp, tmp, 1); | ||
96 | + tcg_gen_neg_i64(tmp, tmp); | ||
97 | + | ||
98 | + /* Apply to either copy the source, or write zeros. */ | ||
99 | + tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), | ||
100 | + pred_full_reg_offset(s, a->pn), tmp, pl, pl); | ||
101 | + | ||
102 | + tcg_temp_free_i64(tmp); | ||
103 | + tcg_temp_free_i64(dbit); | ||
104 | + tcg_temp_free_i64(didx); | ||
105 | + tcg_temp_free_ptr(ptr); | ||
106 | + return true; | ||
107 | +} | ||
87 | -- | 108 | -- |
88 | 2.20.1 | 109 | 2.25.1 |
89 | |||
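For readers meeting PSEL for the first time: the translate code above selects one predicate element of Pm, indexed by (Wv + imm) modulo the number of elements, and uses that single bit to decide whether Pd becomes a copy of Pn or all-false. A plain-C reference model of that behaviour (a sketch only, ignoring the host-endian byte swizzle the real code applies on big-endian hosts; all names here are invented):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* pd, pn, pm: predicate registers of 'pl' bytes.  'esz' is log2 of the
 * element size in bytes, 'elements' the number of such elements in a
 * vector, 'wv' the selector register value, 'imm' the immediate. */
static void psel_ref(uint8_t *pd, const uint8_t *pn, const uint8_t *pm,
                     uint64_t wv, uint64_t imm,
                     unsigned elements, unsigned esz, unsigned pl)
{
    uint64_t idx = (wv + imm) % elements;   /* element of Pm to test */
    uint64_t bit = idx << esz;              /* one predicate bit per data byte */
    bool active = (pm[bit >> 3] >> (bit & 7)) & 1;

    if (active) {
        memcpy(pd, pn, pl);                 /* Pd = Pn */
    } else {
        memset(pd, 0, pl);                  /* Pd = all-false */
    }
}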
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For consistency with the mte_check1 + mte_checkN merge | 3 | This is an SVE instruction that operates using the SVE vector |
4 | to mte_check, rename the probe function as well. | 4 | length but that is present only if SME is implemented.
5 | 5 | ||
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210416183106.1516563-8-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-30-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/internals.h | 2 +- | 11 | target/arm/helper-sve.h | 2 ++ |
12 | target/arm/mte_helper.c | 6 +++--- | 12 | target/arm/sve.decode | 1 + |
13 | target/arm/sve_helper.c | 6 +++--- | 13 | target/arm/sve_helper.c | 16 ++++++++++++++++ |
14 | 3 files changed, 7 insertions(+), 7 deletions(-) | 14 | target/arm/translate-sve.c | 2 ++ |
15 | 4 files changed, 21 insertions(+) | ||
15 | 16 | ||
16 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h |
17 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/internals.h | 19 | --- a/target/arm/helper-sve.h |
19 | +++ b/target/arm/internals.h | 20 | +++ b/target/arm/helper-sve.h |
20 | @@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, TCMA, 6, 2) | 21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
21 | FIELD(MTEDESC, WRITE, 8, 1) | 22 | |
22 | FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */ | 23 | DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
23 | 24 | ||
24 | -bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr); | 25 | +DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
25 | +bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); | 26 | + |
26 | uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); | 27 | DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
27 | 28 | DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | |
28 | static inline int allocation_tag_from_addr(uint64_t ptr) | 29 | DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
29 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | 30 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
30 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/target/arm/mte_helper.c | 32 | --- a/target/arm/sve.decode |
32 | +++ b/target/arm/mte_helper.c | 33 | +++ b/target/arm/sve.decode |
33 | @@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | 34 | @@ -XXX,XX +XXX,XX @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn |
34 | * exception for inaccessible pages, and resolves the virtual address | 35 | REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn |
35 | * into the softmmu tlb. | 36 | REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn |
36 | * | 37 | RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn |
37 | - * When RA == 0, this is for mte_probe1. The page is expected to be | 38 | +REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0 |
38 | + * When RA == 0, this is for mte_probe. The page is expected to be | 39 | |
39 | * valid. Indicate to probe_access_flags no-fault, then assert that | 40 | # SVE vector splice (predicated, destructive) |
40 | * we received a valid page. | 41 | SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm |
41 | */ | ||
42 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | - * No-fault version of mte_check1, to be used by SVE for MemSingleNF. | ||
47 | + * No-fault version of mte_check, to be used by SVE for MemSingleNF. | ||
48 | * Returns false if the access is Checked and the check failed. This | ||
49 | * is only intended to probe the tag -- the validity of the page must | ||
50 | * be checked beforehand. | ||
51 | */ | ||
52 | -bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
53 | +bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
54 | { | ||
55 | uint64_t fault; | ||
56 | int ret = mte_probe_int(env, desc, ptr, 0, &fault); | ||
57 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 42 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c |
58 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
59 | --- a/target/arm/sve_helper.c | 44 | --- a/target/arm/sve_helper.c |
60 | +++ b/target/arm/sve_helper.c | 45 | +++ b/target/arm/sve_helper.c |
61 | @@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, | 46 | @@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) |
62 | /* Watchpoint hit, see below. */ | 47 | |
63 | goto do_fault; | 48 | DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) |
64 | } | 49 | |
65 | - if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) { | 50 | +void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc) |
66 | + if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) { | 51 | +{ |
67 | goto do_fault; | 52 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; |
68 | } | 53 | + uint64_t *d = vd, *n = vn; |
69 | /* | 54 | + uint8_t *pg = vg; |
70 | @@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, | 55 | + |
71 | & BP_MEM_READ)) { | 56 | + for (i = 0; i < opr_sz; i += 2) { |
72 | goto do_fault; | 57 | + if (pg[H1(i)] & 1) { |
73 | } | 58 | + uint64_t n0 = n[i + 0]; |
74 | - if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) { | 59 | + uint64_t n1 = n[i + 1]; |
75 | + if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) { | 60 | + d[i + 0] = n1; |
76 | goto do_fault; | 61 | + d[i + 1] = n0; |
77 | } | 62 | + } |
78 | host_fn(vd, reg_off, host + mem_off); | 63 | + } |
79 | @@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, | 64 | +} |
80 | } | 65 | + |
81 | if (mtedesc && | 66 | DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) |
82 | arm_tlb_mte_tagged(&info.attrs) && | 67 | DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) |
83 | - !mte_probe1(env, mtedesc, addr)) { | 68 | DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) |
84 | + !mte_probe(env, mtedesc, addr)) { | 69 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
85 | goto fault; | 70 | index XXXXXXX..XXXXXXX 100644 |
86 | } | 71 | --- a/target/arm/translate-sve.c |
72 | +++ b/target/arm/translate-sve.c | ||
73 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) | ||
74 | TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, | ||
75 | a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0) | ||
76 | |||
77 | +TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) | ||
78 | + | ||
79 | TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, | ||
80 | gen_helper_sve_splice, a, a->esz) | ||
87 | 81 | ||
88 | -- | 82 | -- |
89 | 2.20.1 | 83 | 2.25.1 |
90 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | After recent changes, mte_checkN does not use ESIZE, | 3 | This is an SVE instruction that operates using the SVE vector |
4 | and mte_check1 never used TSIZE. We can combine the | 4 | length but that is present only if SME is implemented.
5 | two into a single field: SIZEM1. | ||
6 | 5 | ||
7 | Choose to pass size - 1 because size == 0 is never used, | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | our immediate need in mte_probe_int is for the address | ||
9 | of the last byte (ptr + size - 1), and since almost all | ||
10 | operations are powers of 2, this makes the immediate | ||
11 | constant one bit smaller. | ||
12 | |||
13 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | Message-id: 20210416183106.1516563-6-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-31-richard.henderson@linaro.org |
16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
17 | --- | 10 | --- |
18 | target/arm/internals.h | 4 ++-- | 11 | target/arm/helper.h | 18 +++++++ |
19 | target/arm/mte_helper.c | 18 ++++++++---------- | 12 | target/arm/sve.decode | 5 ++ |
20 | target/arm/translate-a64.c | 5 ++--- | 13 | target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++ |
21 | target/arm/translate-sve.c | 5 ++--- | 14 | target/arm/vec_helper.c | 24 +++++++++ |
22 | 4 files changed, 14 insertions(+), 18 deletions(-) | 15 | 4 files changed, 149 insertions(+) |
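(For the right-hand patch below: the gvec host-vector expansion, the scalar TCG fallbacks and the out-of-line DO_CLAMP helpers all compute the same per-element result, the accumulator clamped into the range given by the two source operands. A minimal reference sketch with illustrative names, not the QEMU helpers themselves.)

    #include <assert.h>
    #include <stdint.h>

    /* One signed 32-bit SCLAMP lane: smax against the lower bound, then
     * smin against the upper bound, mirroring gen_sclamp_i32() below. */
    static int32_t sclamp32(int32_t n, int32_t m, int32_t a)
    {
        int32_t t = a > n ? a : n;   /* smax(a, n) */
        return t < m ? t : m;        /* smin(t, m) */
    }

    int main(void)
    {
        assert(sclamp32(-5, 5, 10) == 5);    /* above the range */
        assert(sclamp32(-5, 5, -9) == -5);   /* below the range */
        assert(sclamp32(-5, 5, 3) == 3);     /* already inside */
        return 0;
    }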
23 | 16 | ||
24 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 17 | diff --git a/target/arm/helper.h b/target/arm/helper.h |
25 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/target/arm/internals.h | 19 | --- a/target/arm/helper.h |
27 | +++ b/target/arm/internals.h | 20 | +++ b/target/arm/helper.h |
28 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, |
29 | #define TARGET_ARM_INTERNALS_H | 22 | DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, |
30 | 23 | void, ptr, ptr, ptr, ptr, ptr, i32) | |
31 | #include "hw/registerfields.h" | 24 | |
32 | +#include "tcg/tcg-gvec-desc.h" | 25 | +DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, |
33 | #include "syndrome.h" | 26 | + void, ptr, ptr, ptr, ptr, i32) |
34 | 27 | +DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, | |
35 | /* register banks for CPU modes */ | 28 | + void, ptr, ptr, ptr, ptr, i32) |
36 | @@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4) | 29 | +DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, |
37 | FIELD(MTEDESC, TBI, 4, 2) | 30 | + void, ptr, ptr, ptr, ptr, i32) |
38 | FIELD(MTEDESC, TCMA, 6, 2) | 31 | +DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, |
39 | FIELD(MTEDESC, WRITE, 8, 1) | 32 | + void, ptr, ptr, ptr, ptr, i32) |
40 | -FIELD(MTEDESC, ESIZE, 9, 5) | 33 | + |
41 | -FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */ | 34 | +DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, |
42 | +FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */ | 35 | + void, ptr, ptr, ptr, ptr, i32) |
43 | 36 | +DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, | |
44 | bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr); | 37 | + void, ptr, ptr, ptr, ptr, i32) |
45 | uint64_t mte_check1(CPUARMState *env, uint32_t desc, | 38 | +DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, |
46 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | 39 | + void, ptr, ptr, ptr, ptr, i32) |
47 | index XXXXXXX..XXXXXXX 100644 | 40 | +DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, |
48 | --- a/target/arm/mte_helper.c | 41 | + void, ptr, ptr, ptr, ptr, i32) |
49 | +++ b/target/arm/mte_helper.c | 42 | + |
50 | @@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count) | 43 | #ifdef TARGET_AARCH64 |
51 | * Return positive on success with tbi enabled. | 44 | #include "helper-a64.h" |
52 | */ | 45 | #include "helper-sve.h" |
53 | static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | 46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
54 | - uintptr_t ra, uint32_t total, uint64_t *fault) | 47 | index XXXXXXX..XXXXXXX 100644 |
55 | + uintptr_t ra, uint64_t *fault) | 48 | --- a/target/arm/sve.decode |
56 | { | 49 | +++ b/target/arm/sve.decode |
57 | int mmu_idx, ptr_tag, bit55; | 50 | @@ -XXX,XX +XXX,XX @@ PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \ |
58 | uint64_t ptr_last, prev_page, next_page; | 51 | @psel esz=2 imm=%psel_imm_s |
59 | uint64_t tag_first, tag_last; | 52 | PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ |
60 | uint64_t tag_byte_first, tag_byte_last; | 53 | @psel esz=3 imm=%psel_imm_d |
61 | - uint32_t tag_count, tag_size, n, c; | 54 | + |
62 | + uint32_t sizem1, tag_count, tag_size, n, c; | 55 | +### SVE clamp |
63 | uint8_t *mem1, *mem2; | 56 | + |
64 | MMUAccessType type; | 57 | +SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm |
65 | 58 | +UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm | |
66 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | ||
67 | |||
68 | mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
69 | type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD; | ||
70 | + sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1); | ||
71 | |||
72 | /* Find the addr of the end of the access */ | ||
73 | - ptr_last = ptr + total - 1; | ||
74 | + ptr_last = ptr + sizem1; | ||
75 | |||
76 | /* Round the bounds to the tag granule, and compute the number of tags. */ | ||
77 | tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); | ||
78 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | ||
79 | if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) { | ||
80 | /* Memory access stays on one page. */ | ||
81 | tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1; | ||
82 | - mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total, | ||
83 | + mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1, | ||
84 | MMU_DATA_LOAD, tag_size, ra); | ||
85 | if (!mem1) { | ||
86 | return 1; | ||
87 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
88 | uint64_t ptr, uintptr_t ra) | ||
89 | { | ||
90 | uint64_t fault; | ||
91 | - uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE); | ||
92 | - int ret = mte_probe_int(env, desc, ptr, ra, total, &fault); | ||
93 | + int ret = mte_probe_int(env, desc, ptr, ra, &fault); | ||
94 | |||
95 | if (unlikely(ret == 0)) { | ||
96 | mte_check_fail(env, desc, fault, ra); | ||
97 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_check1(CPUARMState *env, uint32_t desc, | ||
98 | uint64_t ptr, uintptr_t ra) | ||
99 | { | ||
100 | uint64_t fault; | ||
101 | - uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE); | ||
102 | - int ret = mte_probe_int(env, desc, ptr, ra, total, &fault); | ||
103 | + int ret = mte_probe_int(env, desc, ptr, ra, &fault); | ||
104 | |||
105 | if (unlikely(ret == 0)) { | ||
106 | mte_check_fail(env, desc, fault, ra); | ||
107 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
108 | bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
109 | { | ||
110 | uint64_t fault; | ||
111 | - uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE); | ||
112 | - int ret = mte_probe_int(env, desc, ptr, 0, total, &fault); | ||
113 | + int ret = mte_probe_int(env, desc, ptr, 0, &fault); | ||
114 | |||
115 | return ret != 0; | ||
116 | } | ||
117 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
118 | index XXXXXXX..XXXXXXX 100644 | ||
119 | --- a/target/arm/translate-a64.c | ||
120 | +++ b/target/arm/translate-a64.c | ||
121 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, | ||
122 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
123 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
124 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | ||
125 | - desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size); | ||
126 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1); | ||
127 | tcg_desc = tcg_const_i32(desc); | ||
128 | |||
129 | ret = new_tmp_a64(s); | ||
130 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
131 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
132 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
133 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | ||
134 | - desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize); | ||
135 | - desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size); | ||
136 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1); | ||
137 | tcg_desc = tcg_const_i32(desc); | ||
138 | |||
139 | ret = new_tmp_a64(s); | ||
140 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 59 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
141 | index XXXXXXX..XXXXXXX 100644 | 60 | index XXXXXXX..XXXXXXX 100644 |
142 | --- a/target/arm/translate-sve.c | 61 | --- a/target/arm/translate-sve.c |
143 | +++ b/target/arm/translate-sve.c | 62 | +++ b/target/arm/translate-sve.c |
144 | @@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, | 63 | @@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) |
145 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | 64 | tcg_temp_free_ptr(ptr); |
146 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | 65 | return true; |
147 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | 66 | } |
148 | - desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz); | 67 | + |
149 | - desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz); | 68 | +static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) |
150 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1); | 69 | +{ |
151 | desc <<= SVE_MTEDESC_SHIFT; | 70 | + tcg_gen_smax_i32(d, a, n); |
152 | } else { | 71 | + tcg_gen_smin_i32(d, d, m); |
153 | addr = clean_data_tbi(s, addr); | 72 | +} |
154 | @@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, | 73 | + |
155 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | 74 | +static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) |
156 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | 75 | +{ |
157 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | 76 | + tcg_gen_smax_i64(d, a, n); |
158 | - desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz); | 77 | + tcg_gen_smin_i64(d, d, m); |
159 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1); | 78 | +} |
160 | desc <<= SVE_MTEDESC_SHIFT; | 79 | + |
80 | +static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
81 | + TCGv_vec m, TCGv_vec a) | ||
82 | +{ | ||
83 | + tcg_gen_smax_vec(vece, d, a, n); | ||
84 | + tcg_gen_smin_vec(vece, d, d, m); | ||
85 | +} | ||
86 | + | ||
87 | +static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
88 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
89 | +{ | ||
90 | + static const TCGOpcode vecop[] = { | ||
91 | + INDEX_op_smin_vec, INDEX_op_smax_vec, 0 | ||
92 | + }; | ||
93 | + static const GVecGen4 ops[4] = { | ||
94 | + { .fniv = gen_sclamp_vec, | ||
95 | + .fno = gen_helper_gvec_sclamp_b, | ||
96 | + .opt_opc = vecop, | ||
97 | + .vece = MO_8 }, | ||
98 | + { .fniv = gen_sclamp_vec, | ||
99 | + .fno = gen_helper_gvec_sclamp_h, | ||
100 | + .opt_opc = vecop, | ||
101 | + .vece = MO_16 }, | ||
102 | + { .fni4 = gen_sclamp_i32, | ||
103 | + .fniv = gen_sclamp_vec, | ||
104 | + .fno = gen_helper_gvec_sclamp_s, | ||
105 | + .opt_opc = vecop, | ||
106 | + .vece = MO_32 }, | ||
107 | + { .fni8 = gen_sclamp_i64, | ||
108 | + .fniv = gen_sclamp_vec, | ||
109 | + .fno = gen_helper_gvec_sclamp_d, | ||
110 | + .opt_opc = vecop, | ||
111 | + .vece = MO_64, | ||
112 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } | ||
113 | + }; | ||
114 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); | ||
115 | +} | ||
116 | + | ||
117 | +TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) | ||
118 | + | ||
119 | +static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) | ||
120 | +{ | ||
121 | + tcg_gen_umax_i32(d, a, n); | ||
122 | + tcg_gen_umin_i32(d, d, m); | ||
123 | +} | ||
124 | + | ||
125 | +static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) | ||
126 | +{ | ||
127 | + tcg_gen_umax_i64(d, a, n); | ||
128 | + tcg_gen_umin_i64(d, d, m); | ||
129 | +} | ||
130 | + | ||
131 | +static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
132 | + TCGv_vec m, TCGv_vec a) | ||
133 | +{ | ||
134 | + tcg_gen_umax_vec(vece, d, a, n); | ||
135 | + tcg_gen_umin_vec(vece, d, d, m); | ||
136 | +} | ||
137 | + | ||
138 | +static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
139 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
140 | +{ | ||
141 | + static const TCGOpcode vecop[] = { | ||
142 | + INDEX_op_umin_vec, INDEX_op_umax_vec, 0 | ||
143 | + }; | ||
144 | + static const GVecGen4 ops[4] = { | ||
145 | + { .fniv = gen_uclamp_vec, | ||
146 | + .fno = gen_helper_gvec_uclamp_b, | ||
147 | + .opt_opc = vecop, | ||
148 | + .vece = MO_8 }, | ||
149 | + { .fniv = gen_uclamp_vec, | ||
150 | + .fno = gen_helper_gvec_uclamp_h, | ||
151 | + .opt_opc = vecop, | ||
152 | + .vece = MO_16 }, | ||
153 | + { .fni4 = gen_uclamp_i32, | ||
154 | + .fniv = gen_uclamp_vec, | ||
155 | + .fno = gen_helper_gvec_uclamp_s, | ||
156 | + .opt_opc = vecop, | ||
157 | + .vece = MO_32 }, | ||
158 | + { .fni8 = gen_uclamp_i64, | ||
159 | + .fniv = gen_uclamp_vec, | ||
160 | + .fno = gen_helper_gvec_uclamp_d, | ||
161 | + .opt_opc = vecop, | ||
162 | + .vece = MO_64, | ||
163 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } | ||
164 | + }; | ||
165 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); | ||
166 | +} | ||
167 | + | ||
168 | +TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) | ||
169 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
170 | index XXXXXXX..XXXXXXX 100644 | ||
171 | --- a/target/arm/vec_helper.c | ||
172 | +++ b/target/arm/vec_helper.c | ||
173 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, | ||
161 | } | 174 | } |
162 | desc = simd_desc(vsz, vsz, desc | scale); | 175 | clear_tail(d, opr_sz, simd_maxsz(desc)); |
176 | } | ||
177 | + | ||
178 | +#define DO_CLAMP(NAME, TYPE) \ | ||
179 | +void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \ | ||
180 | +{ \ | ||
181 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
182 | + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ | ||
183 | + TYPE aa = *(TYPE *)(a + i); \ | ||
184 | + TYPE nn = *(TYPE *)(n + i); \ | ||
185 | + TYPE mm = *(TYPE *)(m + i); \ | ||
186 | + TYPE dd = MIN(MAX(aa, nn), mm); \ | ||
187 | + *(TYPE *)(d + i) = dd; \ | ||
188 | + } \ | ||
189 | + clear_tail(d, opr_sz, simd_maxsz(desc)); \ | ||
190 | +} | ||
191 | + | ||
192 | +DO_CLAMP(gvec_sclamp_b, int8_t) | ||
193 | +DO_CLAMP(gvec_sclamp_h, int16_t) | ||
194 | +DO_CLAMP(gvec_sclamp_s, int32_t) | ||
195 | +DO_CLAMP(gvec_sclamp_d, int64_t) | ||
196 | + | ||
197 | +DO_CLAMP(gvec_uclamp_b, uint8_t) | ||
198 | +DO_CLAMP(gvec_uclamp_h, uint16_t) | ||
199 | +DO_CLAMP(gvec_uclamp_s, uint32_t) | ||
200 | +DO_CLAMP(gvec_uclamp_d, uint64_t) | ||
163 | -- | 201 | -- |
164 | 2.20.1 | 202 | 2.25.1 |
165 | |||
166 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We're about to rearrange the macro expansion surrounding tbflags, | 3 | We can handle both exception entry and exception return by |
4 | and this field name will be expanded using the bit definition of | 4 | hooking into aarch64_sve_change_el. |
5 | the same name, resulting in a token pasting error. | ||
6 | |||
7 | So PSTATE_SS -> PSTATE__SS in the uses, and document it. | ||
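(A standalone sketch of the hazard. The macro names here are invented stand-ins for the registerfields definitions, but the mechanism is the one that bites wrapper macros such as the DP_TBFLAG_*/EX_TBFLAG_* helpers added later in this series: a wrapper's argument is macro-expanded before substitution, so a field named identically to an existing constant breaks the paste.)

    #include <stdio.h>

    #define PSTATE_SS (1u << 21)           /* pre-existing PSTATE bit definition */
    #define R_ANY_PSTATE__SS_SHIFT 29      /* stand-in for a FIELD()-generated constant */

    /* The inner macro pastes its argument into an identifier, so the
     * argument is not pre-expanded at this level... */
    #define SHIFT_OF(FIELD) R_ANY_ ## FIELD ## _SHIFT

    /* ...but a wrapper macro does pre-expand its argument.  WRAP(PSTATE_SS)
     * would become SHIFT_OF((1u << 21)) and fail to paste; the renamed
     * PSTATE__SS has no clashing definition, so the paste succeeds. */
    #define WRAP(FIELD) SHIFT_OF(FIELD)

    int main(void)
    {
        printf("%d\n", WRAP(PSTATE__SS));   /* prints 29 */
        return 0;
    }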
8 | 5 | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | Message-id: 20210419202257.161730-4-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-32-richard.henderson@linaro.org |
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 10 | --- |
14 | target/arm/cpu.h | 2 +- | 11 | target/arm/helper.c | 15 +++++++++++++-- |
15 | target/arm/helper.c | 4 ++-- | 12 | 1 file changed, 13 insertions(+), 2 deletions(-) |
16 | target/arm/translate-a64.c | 2 +- | ||
17 | target/arm/translate.c | 2 +- | ||
18 | 4 files changed, 5 insertions(+), 5 deletions(-) | ||
19 | 13 | ||
20 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/target/arm/cpu.h | ||
23 | +++ b/target/arm/cpu.h | ||
24 | @@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU; | ||
25 | */ | ||
26 | FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1) | ||
27 | FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1) | ||
28 | -FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */ | ||
29 | +FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */ | ||
30 | FIELD(TBFLAG_ANY, BE_DATA, 28, 1) | ||
31 | FIELD(TBFLAG_ANY, MMUIDX, 24, 4) | ||
32 | /* Target EL if we take a floating-point-disabled exception */ | ||
33 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 14 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
34 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/target/arm/helper.c | 16 | --- a/target/arm/helper.c |
36 | +++ b/target/arm/helper.c | 17 | +++ b/target/arm/helper.c |
37 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | 18 | @@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, |
38 | * 0 x Inactive (the TB flag for SS is always 0) | 19 | return; |
39 | * 1 0 Active-pending | 20 | } |
40 | * 1 1 Active-not-pending | 21 | |
41 | - * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. | 22 | + old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; |
42 | + * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. | 23 | + new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; |
24 | + | ||
25 | + /* | ||
26 | + * Both AArch64.TakeException and AArch64.ExceptionReturn | ||
27 | + * invoke ResetSVEState when taking an exception from, or | ||
28 | + * returning to, AArch32 state when PSTATE.SM is enabled. | ||
29 | + */ | ||
30 | + if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) { | ||
31 | + arm_reset_sve_state(env); | ||
32 | + return; | ||
33 | + } | ||
34 | + | ||
35 | /* | ||
36 | * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped | ||
37 | * at ELx, or not available because the EL is in AArch32 state, then | ||
38 | @@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, | ||
39 | * we already have the correct register contents when encountering the | ||
40 | * vq0->vq0 transition between EL0->EL1. | ||
43 | */ | 41 | */ |
44 | if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && | 42 | - old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; |
45 | (env->pstate & PSTATE_SS)) { | 43 | old_len = (old_a64 && !sve_exception_el(env, old_el) |
46 | - flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); | 44 | ? sve_vqm1_for_el(env, old_el) : 0); |
47 | + flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1); | 45 | - new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; |
48 | } | 46 | new_len = (new_a64 && !sve_exception_el(env, new_el) |
49 | 47 | ? sve_vqm1_for_el(env, new_el) : 0); | |
50 | *pflags = flags; | 48 | |
51 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/target/arm/translate-a64.c | ||
54 | +++ b/target/arm/translate-a64.c | ||
55 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
56 | * end the TB | ||
57 | */ | ||
58 | dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); | ||
59 | - dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); | ||
60 | + dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS); | ||
61 | dc->is_ldex = false; | ||
62 | dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); | ||
63 | |||
64 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/target/arm/translate.c | ||
67 | +++ b/target/arm/translate.c | ||
68 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
69 | * end the TB | ||
70 | */ | ||
71 | dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); | ||
72 | - dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); | ||
73 | + dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS); | ||
74 | dc->is_ldex = false; | ||
75 | |||
76 | dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; | ||
77 | -- | 49 | -- |
78 | 2.20.1 | 50 | 2.25.1 |
79 | |||
80 | diff view generated by jsdifflib |
1 | From: Kunkun Jiang <jiangkunkun@huawei.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | The driver can query some bits in SMMUv3 IDR5 to learn which | 3 | Note that SME remains effectively disabled for user-only, |
4 | translation granules are supported. Arm recommends that SMMUv3 | 4 | because we do not yet set CPACR_EL1.SMEN. This needs to |
5 | implementations support at least 4K and 64K granules. But in | 5 | wait until the kernel ABI is implemented. |
6 | the vSMMUv3, there seems to be no reason not to support 16K | ||
7 | translation granule. In addition, if 16K is not supported, | ||
8 | vSVA will fail to be enabled in the future for a 16K guest | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | kernel. So it'd be better to support it. | 9 | Message-id: 20220708151540.18136-33-richard.henderson@linaro.org |
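(A quick reference for the decode_cd() check further down; a standalone sketch, not QEMU code. granule_sz is the log2 of the page size, so accepting 12, 14 and 16 corresponds to 4K, 16K and 64K granules.)

    #include <stdio.h>

    int main(void)
    {
        /* log2 page sizes decode_cd() accepts once 16K is advertised */
        int granule_sz[] = { 12, 14, 16 };

        for (int i = 0; i < 3; i++) {
            printf("granule_sz %2d -> %3ldK pages\n",
                   granule_sz[i], 1L << (granule_sz[i] - 10));
        }
        return 0;   /* prints 4K, 16K, 64K */
    }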
10 | 6 | ||
11 | Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
12 | Reviewed-by: Eric Auger <eric.auger@redhat.com> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | Tested-by: Eric Auger <eric.auger@redhat.com> | 9 | Message-id: 20220708151540.18136-33-richard.henderson@linaro.org |
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 11 | --- |
16 | hw/arm/smmuv3.c | 6 ++++-- | 12 | docs/system/arm/emulation.rst | 4 ++++ |
17 | 1 file changed, 4 insertions(+), 2 deletions(-) | 13 | target/arm/cpu64.c | 11 +++++++++++ |
14 | 2 files changed, 15 insertions(+) | ||
18 | 15 | ||
19 | diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c | 16 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst |
20 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/hw/arm/smmuv3.c | 18 | --- a/docs/system/arm/emulation.rst |
22 | +++ b/hw/arm/smmuv3.c | 19 | +++ b/docs/system/arm/emulation.rst |
23 | @@ -XXX,XX +XXX,XX @@ static void smmuv3_init_regs(SMMUv3State *s) | 20 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: |
24 | s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1); | 21 | - FEAT_SHA512 (Advanced SIMD SHA512 instructions) |
25 | s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1); | 22 | - FEAT_SM3 (Advanced SIMD SM3 instructions) |
26 | 23 | - FEAT_SM4 (Advanced SIMD SM4 instructions) | |
27 | - /* 4K and 64K granule support */ | 24 | +- FEAT_SME (Scalable Matrix Extension) |
28 | + /* 4K, 16K and 64K granule support */ | 25 | +- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode) |
29 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1); | 26 | +- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions) |
30 | + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1); | 27 | +- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions) |
31 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1); | 28 | - FEAT_SPECRES (Speculation restriction instructions) |
32 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */ | 29 | - FEAT_SSBS (Speculative Store Bypass Safe) |
33 | 30 | - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain) | |
34 | @@ -XXX,XX +XXX,XX @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event) | 31 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c |
35 | 32 | index XXXXXXX..XXXXXXX 100644 | |
36 | tg = CD_TG(cd, i); | 33 | --- a/target/arm/cpu64.c |
37 | tt->granule_sz = tg2granule(tg, i); | 34 | +++ b/target/arm/cpu64.c |
38 | - if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) { | 35 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) |
39 | + if ((tt->granule_sz != 12 && tt->granule_sz != 14 && | 36 | */ |
40 | + tt->granule_sz != 16) || CD_ENDI(cd)) { | 37 | t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ |
41 | goto bad_cd; | 38 | t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ |
42 | } | 39 | + t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ |
40 | t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ | ||
41 | cpu->isar.id_aa64pfr1 = t; | ||
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
44 | t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */ | ||
45 | cpu->isar.id_aa64dfr0 = t; | ||
46 | |||
47 | + t = cpu->isar.id_aa64smfr0; | ||
48 | + t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ | ||
49 | + t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ | ||
50 | + t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ | ||
51 | + t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ | ||
52 | + t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ | ||
53 | + t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ | ||
54 | + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ | ||
55 | + cpu->isar.id_aa64smfr0 = t; | ||
56 | + | ||
57 | /* Replicate the same data to the 32-bit id registers. */ | ||
58 | aa32_max_features(cpu); | ||
43 | 59 | ||
44 | -- | 60 | -- |
45 | 2.20.1 | 61 | 2.25.1 |
46 | |||
47 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-29-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-34-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/translate-a64.c | 20 ++++++++++---------- | 8 | linux-user/aarch64/target_cpu.h | 5 ++++- |
9 | 1 file changed, 10 insertions(+), 10 deletions(-) | 9 | 1 file changed, 4 insertions(+), 1 deletion(-) |
10 | 10 | ||
11 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 11 | diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-a64.c | 13 | --- a/linux-user/aarch64/target_cpu.h |
14 | +++ b/target/arm/translate-a64.c | 14 | +++ b/linux-user/aarch64/target_cpu.h |
15 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, | 15 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags) |
16 | 16 | ||
17 | /* Store from vector register to memory */ | 17 | static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) |
18 | static void do_vec_st(DisasContext *s, int srcidx, int element, | ||
19 | - TCGv_i64 tcg_addr, int size, MemOp endian) | ||
20 | + TCGv_i64 tcg_addr, MemOp mop) | ||
21 | { | 18 | { |
22 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | 19 | - /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is |
23 | 20 | + /* | |
24 | - read_vec_element(s, tcg_tmp, srcidx, element, size); | 21 | + * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is |
25 | - tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); | 22 | * different from AArch32 Linux, which uses TPIDRRO. |
26 | + read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE); | 23 | */ |
27 | + tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); | 24 | env->cp15.tpidr_el[0] = newtls; |
28 | 25 | + /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */ | |
29 | tcg_temp_free_i64(tcg_tmp); | 26 | + env->cp15.tpidr2_el0 = 0; |
30 | } | 27 | } |
31 | 28 | ||
32 | /* Load from memory to vector register */ | 29 | static inline abi_ulong get_sp_from_cpustate(CPUARMState *state) |
33 | static void do_vec_ld(DisasContext *s, int destidx, int element, | ||
34 | - TCGv_i64 tcg_addr, int size, MemOp endian) | ||
35 | + TCGv_i64 tcg_addr, MemOp mop) | ||
36 | { | ||
37 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | ||
38 | |||
39 | - tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); | ||
40 | - write_vec_element(s, tcg_tmp, destidx, element, size); | ||
41 | + tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); | ||
42 | + write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE); | ||
43 | |||
44 | tcg_temp_free_i64(tcg_tmp); | ||
45 | } | ||
46 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
47 | for (xs = 0; xs < selem; xs++) { | ||
48 | int tt = (rt + r + xs) % 32; | ||
49 | if (is_store) { | ||
50 | - do_vec_st(s, tt, e, clean_addr, size, endian); | ||
51 | + do_vec_st(s, tt, e, clean_addr, size | endian); | ||
52 | } else { | ||
53 | - do_vec_ld(s, tt, e, clean_addr, size, endian); | ||
54 | + do_vec_ld(s, tt, e, clean_addr, size | endian); | ||
55 | } | ||
56 | tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); | ||
57 | } | ||
58 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | ||
59 | } else { | ||
60 | /* Load/store one element per register */ | ||
61 | if (is_load) { | ||
62 | - do_vec_ld(s, rt, index, clean_addr, scale, s->be_data); | ||
63 | + do_vec_ld(s, rt, index, clean_addr, scale | s->be_data); | ||
64 | } else { | ||
65 | - do_vec_st(s, rt, index, clean_addr, scale, s->be_data); | ||
66 | + do_vec_st(s, rt, index, clean_addr, scale | s->be_data); | ||
67 | } | ||
68 | } | ||
69 | tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); | ||
70 | -- | 30 | -- |
71 | 2.20.1 | 31 | 2.25.1 |
72 | |||
73 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-19-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-35-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/translate.c | 4 ++-- | 8 | linux-user/aarch64/cpu_loop.c | 9 +++++++++ |
9 | 1 file changed, 2 insertions(+), 2 deletions(-) | 9 | 1 file changed, 9 insertions(+) |
10 | 10 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 11 | diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 13 | --- a/linux-user/aarch64/cpu_loop.c |
14 | +++ b/target/arm/translate.c | 14 | +++ b/linux-user/aarch64/cpu_loop.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_RFE(DisasContext *s, arg_RFE *a) | 15 | @@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env) |
16 | 16 | ||
17 | /* Load PC into tmp and CPSR into tmp2. */ | 17 | switch (trapnr) { |
18 | t1 = tcg_temp_new_i32(); | 18 | case EXCP_SWI: |
19 | - gen_aa32_ld32u(s, t1, addr, get_mem_index(s)); | 19 | + /* |
20 | + gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 20 | + * On syscall, PSTATE.ZA is preserved, along with the ZA matrix. |
21 | tcg_gen_addi_i32(addr, addr, 4); | 21 | + * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState. |
22 | t2 = tcg_temp_new_i32(); | 22 | + */ |
23 | - gen_aa32_ld32u(s, t2, addr, get_mem_index(s)); | 23 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
24 | + gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN); | 24 | + env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0); |
25 | 25 | + arm_rebuild_hflags(env); | |
26 | if (a->w) { | 26 | + arm_reset_sve_state(env); |
27 | /* Base writeback. */ | 27 | + } |
28 | ret = do_syscall(env, | ||
29 | env->xregs[8], | ||
30 | env->xregs[0], | ||
28 | -- | 31 | -- |
29 | 2.20.1 | 32 | 2.25.1 |
30 | |||
31 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Buglink: https://bugs.launchpad.net/qemu/+bug/1905356 | 3 | Make sure to zero the currently reserved fields. |
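(For the right-hand patch below: the new flags field takes over one of the reserved halfwords, so the record header keeps its size and layout. A layout sketch with invented struct names, not the target_sve_context definition itself.)

    #include <stdint.h>

    struct sve_header_old {
        uint32_t magic, size;
        uint16_t vl;
        uint16_t reserved[3];
    };

    struct sve_header_new {
        uint32_t magic, size;
        uint16_t vl, flags;
        uint16_t reserved[2];
    };

    int main(void)
    {
        _Static_assert(sizeof(struct sve_header_old) == sizeof(struct sve_header_new),
                       "adding flags must not change the header layout");
        return 0;
    }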
4 | |||
4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20210419202257.161730-16-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-36-richard.henderson@linaro.org |
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | --- | 9 | --- |
9 | target/arm/translate.c | 16 ++++++++-------- | 10 | linux-user/aarch64/signal.c | 9 ++++++++- |
10 | 1 file changed, 8 insertions(+), 8 deletions(-) | 11 | 1 file changed, 8 insertions(+), 1 deletion(-) |
11 | 12 | ||
12 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 13 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
13 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/translate.c | 15 | --- a/linux-user/aarch64/signal.c |
15 | +++ b/target/arm/translate.c | 16 | +++ b/linux-user/aarch64/signal.c |
16 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) | 17 | @@ -XXX,XX +XXX,XX @@ struct target_extra_context { |
17 | addr = op_addr_rr_pre(s, a); | 18 | struct target_sve_context { |
18 | 19 | struct target_aarch64_ctx head; | |
19 | tmp = tcg_temp_new_i32(); | 20 | uint16_t vl; |
20 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | 21 | - uint16_t reserved[3]; |
21 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | 22 | + uint16_t flags; |
22 | store_reg(s, a->rt, tmp); | 23 | + uint16_t reserved[2]; |
23 | 24 | /* The actual SVE data immediately follows. It is laid out | |
24 | tcg_gen_addi_i32(addr, addr, 4); | 25 | * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of |
25 | 26 | * the original struct pointer. | |
26 | tmp = tcg_temp_new_i32(); | 27 | @@ -XXX,XX +XXX,XX @@ struct target_sve_context { |
27 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | 28 | #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \ |
28 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | 29 | (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17)) |
29 | store_reg(s, a->rt + 1, tmp); | 30 | |
30 | 31 | +#define TARGET_SVE_SIG_FLAG_SM 1 | |
31 | /* LDRD w/ base writeback is undefined if the registers overlap. */ | 32 | + |
32 | @@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) | 33 | struct target_rt_sigframe { |
33 | addr = op_addr_rr_pre(s, a); | 34 | struct target_siginfo info; |
34 | 35 | struct target_ucontext uc; | |
35 | tmp = load_reg(s, a->rt); | 36 | @@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve, |
36 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | 37 | { |
37 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | 38 | int i, j; |
38 | tcg_temp_free_i32(tmp); | 39 | |
39 | 40 | + memset(sve, 0, sizeof(*sve)); | |
40 | tcg_gen_addi_i32(addr, addr, 4); | 41 | __put_user(TARGET_SVE_MAGIC, &sve->head.magic); |
41 | 42 | __put_user(size, &sve->head.size); | |
42 | tmp = load_reg(s, a->rt + 1); | 43 | __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl); |
43 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | 44 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
44 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | 45 | + __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags); |
45 | tcg_temp_free_i32(tmp); | 46 | + } |
46 | 47 | ||
47 | op_addr_rr_post(s, a, addr, -4); | 48 | /* Note that SVE regs are stored as a byte stream, with each byte element |
48 | @@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) | 49 | * at a subsequent address. This corresponds to a little-endian store |
49 | addr = op_addr_ri_pre(s, a); | ||
50 | |||
51 | tmp = tcg_temp_new_i32(); | ||
52 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
53 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | ||
54 | store_reg(s, a->rt, tmp); | ||
55 | |||
56 | tcg_gen_addi_i32(addr, addr, 4); | ||
57 | |||
58 | tmp = tcg_temp_new_i32(); | ||
59 | - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL); | ||
60 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | ||
61 | store_reg(s, rt2, tmp); | ||
62 | |||
63 | /* LDRD w/ base writeback is undefined if the registers overlap. */ | ||
64 | @@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) | ||
65 | addr = op_addr_ri_pre(s, a); | ||
66 | |||
67 | tmp = load_reg(s, a->rt); | ||
68 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
69 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | ||
70 | tcg_temp_free_i32(tmp); | ||
71 | |||
72 | tcg_gen_addi_i32(addr, addr, 4); | ||
73 | |||
74 | tmp = load_reg(s, rt2); | ||
75 | - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL); | ||
76 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | ||
77 | tcg_temp_free_i32(tmp); | ||
78 | |||
79 | op_addr_ri_post(s, a, addr, -4); | ||
80 | -- | 50 | -- |
81 | 2.20.1 | 51 | 2.25.1 |
82 | |||
83 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We're about to split tbflags into two parts. These macros | 3 | Fold the return value setting into the goto, so each |
4 | will ensure that the correct part is used with the correct | 4 | point of failure need not do both. |
5 | set of bits. | ||
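(The right-hand refactor is the usual C error-path idiom; a minimal sketch of the before/after shape with an invented function, not the signal-frame code itself.)

    #include <stdbool.h>

    /* Before: every failure site both sets a flag and jumps to a shared exit. */
    static int parse_old(bool bad_magic, bool bad_size)
    {
        bool err = false;

        if (bad_magic) {
            err = true;
            goto exit;
        }
        if (bad_size) {
            err = true;
            goto exit;
        }
     exit:
        return err;
    }

    /* After: success falls through to "return 0" and each failure site
     * only jumps, with the error value produced once at the label. */
    static int parse_new(bool bad_magic, bool bad_size)
    {
        if (bad_magic) {
            goto err;
        }
        if (bad_size) {
            goto err;
        }
        return 0;

     err:
        return 1;
    }

    int main(void)
    {
        return parse_old(true, false) == parse_new(true, false) ? 0 : 1;
    }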
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210419202257.161730-5-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-37-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/cpu.h | 22 +++++++++- | 11 | linux-user/aarch64/signal.c | 26 +++++++++++--------------- |
13 | target/arm/helper-a64.c | 2 +- | 12 | 1 file changed, 11 insertions(+), 15 deletions(-) |
14 | target/arm/helper.c | 85 +++++++++++++++++--------------------- | ||
15 | target/arm/translate-a64.c | 36 ++++++++-------- | ||
16 | target/arm/translate.c | 48 ++++++++++----------- | ||
17 | 5 files changed, 101 insertions(+), 92 deletions(-) | ||
18 | 13 | ||
19 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 14 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
20 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/cpu.h | 16 | --- a/linux-user/aarch64/signal.c |
22 | +++ b/target/arm/cpu.h | 17 | +++ b/linux-user/aarch64/signal.c |
23 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TCMA, 16, 2) | 18 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
24 | FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1) | 19 | struct target_sve_context *sve = NULL; |
25 | FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) | 20 | uint64_t extra_datap = 0; |
26 | 21 | bool used_extra = false; | |
27 | +/* | 22 | - bool err = false; |
28 | + * Helpers for using the above. | 23 | int vq = 0, sve_size = 0; |
29 | + */ | 24 | |
30 | +#define DP_TBFLAG_ANY(DST, WHICH, VAL) \ | 25 | target_restore_general_frame(env, sf); |
31 | + (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL)) | 26 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
32 | +#define DP_TBFLAG_A64(DST, WHICH, VAL) \ | 27 | switch (magic) { |
33 | + (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL)) | 28 | case 0: |
34 | +#define DP_TBFLAG_A32(DST, WHICH, VAL) \ | 29 | if (size != 0) { |
35 | + (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL)) | 30 | - err = true; |
36 | +#define DP_TBFLAG_M32(DST, WHICH, VAL) \ | 31 | - goto exit; |
37 | + (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL)) | 32 | + goto err; |
38 | +#define DP_TBFLAG_AM32(DST, WHICH, VAL) \ | 33 | } |
39 | + (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL)) | 34 | if (used_extra) { |
35 | ctx = NULL; | ||
36 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
37 | |||
38 | case TARGET_FPSIMD_MAGIC: | ||
39 | if (fpsimd || size != sizeof(struct target_fpsimd_context)) { | ||
40 | - err = true; | ||
41 | - goto exit; | ||
42 | + goto err; | ||
43 | } | ||
44 | fpsimd = (struct target_fpsimd_context *)ctx; | ||
45 | break; | ||
46 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
47 | break; | ||
48 | } | ||
49 | } | ||
50 | - err = true; | ||
51 | - goto exit; | ||
52 | + goto err; | ||
53 | |||
54 | case TARGET_EXTRA_MAGIC: | ||
55 | if (extra || size != sizeof(struct target_extra_context)) { | ||
56 | - err = true; | ||
57 | - goto exit; | ||
58 | + goto err; | ||
59 | } | ||
60 | __get_user(extra_datap, | ||
61 | &((struct target_extra_context *)ctx)->datap); | ||
62 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
63 | /* Unknown record -- we certainly didn't generate it. | ||
64 | * Did we in fact get out of sync? | ||
65 | */ | ||
66 | - err = true; | ||
67 | - goto exit; | ||
68 | + goto err; | ||
69 | } | ||
70 | ctx = (void *)ctx + size; | ||
71 | } | ||
72 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
73 | if (fpsimd) { | ||
74 | target_restore_fpsimd_record(env, fpsimd); | ||
75 | } else { | ||
76 | - err = true; | ||
77 | + goto err; | ||
78 | } | ||
79 | |||
80 | /* SVE data, if present, overwrites FPSIMD data. */ | ||
81 | if (sve) { | ||
82 | target_restore_sve_record(env, sve, vq); | ||
83 | } | ||
84 | - | ||
85 | - exit: | ||
86 | unlock_user(extra, extra_datap, 0); | ||
87 | - return err; | ||
88 | + return 0; | ||
40 | + | 89 | + |
41 | +#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH) | 90 | + err: |
42 | +#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH) | 91 | + unlock_user(extra, extra_datap, 0); |
43 | +#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH) | 92 | + return 1; |
44 | +#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH) | ||
45 | +#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH) | ||
46 | + | ||
47 | /** | ||
48 | * cpu_mmu_index: | ||
49 | * @env: The cpu environment | ||
50 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) | ||
51 | */ | ||
52 | static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) | ||
53 | { | ||
54 | - return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX); | ||
55 | + return EX_TBFLAG_ANY(env->hflags, MMUIDX); | ||
56 | } | 93 | } |
57 | 94 | ||
58 | static inline bool bswap_code(bool sctlr_b) | 95 | static abi_ulong get_sigframe(struct target_sigaction *ka, |
59 | diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/target/arm/helper-a64.c | ||
62 | +++ b/target/arm/helper-a64.c | ||
63 | @@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) | ||
64 | * the hflags rebuild, since we can pull the composite TBII field | ||
65 | * from there. | ||
66 | */ | ||
67 | - tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII); | ||
68 | + tbii = EX_TBFLAG_A64(env->hflags, TBII); | ||
69 | if ((tbii >> extract64(new_pc, 55, 1)) & 1) { | ||
70 | /* TBI is enabled. */ | ||
71 | int core_mmu_idx = cpu_mmu_index(env, false); | ||
72 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
73 | index XXXXXXX..XXXXXXX 100644 | ||
74 | --- a/target/arm/helper.c | ||
75 | +++ b/target/arm/helper.c | ||
76 | @@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) | ||
77 | static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, | ||
78 | ARMMMUIdx mmu_idx, uint32_t flags) | ||
79 | { | ||
80 | - flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el); | ||
81 | - flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, | ||
82 | - arm_to_core_mmu_idx(mmu_idx)); | ||
83 | + DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el); | ||
84 | + DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx)); | ||
85 | |||
86 | if (arm_singlestep_active(env)) { | ||
87 | - flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1); | ||
88 | + DP_TBFLAG_ANY(flags, SS_ACTIVE, 1); | ||
89 | } | ||
90 | return flags; | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, | ||
93 | bool sctlr_b = arm_sctlr_b(env); | ||
94 | |||
95 | if (sctlr_b) { | ||
96 | - flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1); | ||
97 | + DP_TBFLAG_A32(flags, SCTLR__B, 1); | ||
98 | } | ||
99 | if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { | ||
100 | - flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); | ||
101 | + DP_TBFLAG_ANY(flags, BE_DATA, 1); | ||
102 | } | ||
103 | - flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env)); | ||
104 | + DP_TBFLAG_A32(flags, NS, !access_secure_reg(env)); | ||
105 | |||
106 | return rebuild_hflags_common(env, fp_el, mmu_idx, flags); | ||
107 | } | ||
108 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, | ||
109 | uint32_t flags = 0; | ||
110 | |||
111 | if (arm_v7m_is_handler_mode(env)) { | ||
112 | - flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1); | ||
113 | + DP_TBFLAG_M32(flags, HANDLER, 1); | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, | ||
118 | if (arm_feature(env, ARM_FEATURE_V8) && | ||
119 | !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && | ||
120 | (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { | ||
121 | - flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1); | ||
122 | + DP_TBFLAG_M32(flags, STACKCHECK, 1); | ||
123 | } | ||
124 | |||
125 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); | ||
126 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_aprofile(CPUARMState *env) | ||
127 | { | ||
128 | int flags = 0; | ||
129 | |||
130 | - flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, | ||
131 | - arm_debug_target_el(env)); | ||
132 | + DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env)); | ||
133 | return flags; | ||
134 | } | ||
135 | |||
136 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, | ||
137 | uint32_t flags = rebuild_hflags_aprofile(env); | ||
138 | |||
139 | if (arm_el_is_aa64(env, 1)) { | ||
140 | - flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); | ||
141 | + DP_TBFLAG_A32(flags, VFPEN, 1); | ||
142 | } | ||
143 | |||
144 | if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && | ||
145 | (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { | ||
146 | - flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1); | ||
147 | + DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1); | ||
148 | } | ||
149 | |||
150 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); | ||
151 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
152 | uint64_t sctlr; | ||
153 | int tbii, tbid; | ||
154 | |||
155 | - flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); | ||
156 | + DP_TBFLAG_ANY(flags, AARCH64_STATE, 1); | ||
157 | |||
158 | /* Get control bits for tagged addresses. */ | ||
159 | tbid = aa64_va_parameter_tbi(tcr, mmu_idx); | ||
160 | tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); | ||
161 | |||
162 | - flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); | ||
163 | - flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); | ||
164 | + DP_TBFLAG_A64(flags, TBII, tbii); | ||
165 | + DP_TBFLAG_A64(flags, TBID, tbid); | ||
166 | |||
167 | if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
168 | int sve_el = sve_exception_el(env, el); | ||
169 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
170 | } else { | ||
171 | zcr_len = sve_zcr_len_for_el(env, el); | ||
172 | } | ||
173 | - flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el); | ||
174 | - flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len); | ||
175 | + DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el); | ||
176 | + DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len); | ||
177 | } | ||
178 | |||
179 | sctlr = regime_sctlr(env, stage1); | ||
180 | |||
181 | if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { | ||
182 | - flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); | ||
183 | + DP_TBFLAG_ANY(flags, BE_DATA, 1); | ||
184 | } | ||
185 | |||
186 | if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { | ||
187 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
188 | * The decision of which action to take is left to a helper. | ||
189 | */ | ||
190 | if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { | ||
191 | - flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1); | ||
192 | + DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1); | ||
193 | } | ||
194 | } | ||
195 | |||
196 | if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { | ||
197 | /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ | ||
198 | if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { | ||
199 | - flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1); | ||
200 | + DP_TBFLAG_A64(flags, BT, 1); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
205 | case ARMMMUIdx_SE10_1: | ||
206 | case ARMMMUIdx_SE10_1_PAN: | ||
207 | /* TODO: ARMv8.3-NV */ | ||
208 | - flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); | ||
209 | + DP_TBFLAG_A64(flags, UNPRIV, 1); | ||
210 | break; | ||
211 | case ARMMMUIdx_E20_2: | ||
212 | case ARMMMUIdx_E20_2_PAN: | ||
213 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
214 | * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR. | ||
215 | */ | ||
216 | if (env->cp15.hcr_el2 & HCR_TGE) { | ||
217 | - flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); | ||
218 | + DP_TBFLAG_A64(flags, UNPRIV, 1); | ||
219 | } | ||
220 | break; | ||
221 | default: | ||
222 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
223 | * 4) If no Allocation Tag Access, then all accesses are Unchecked. | ||
224 | */ | ||
225 | if (allocation_tag_access_enabled(env, el, sctlr)) { | ||
226 | - flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1); | ||
227 | + DP_TBFLAG_A64(flags, ATA, 1); | ||
228 | if (tbid | ||
229 | && !(env->pstate & PSTATE_TCO) | ||
230 | && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) { | ||
231 | - flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1); | ||
232 | + DP_TBFLAG_A64(flags, MTE_ACTIVE, 1); | ||
233 | } | ||
234 | } | ||
235 | /* And again for unprivileged accesses, if required. */ | ||
236 | - if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV) | ||
237 | + if (EX_TBFLAG_A64(flags, UNPRIV) | ||
238 | && tbid | ||
239 | && !(env->pstate & PSTATE_TCO) | ||
240 | && (sctlr & SCTLR_TCF0) | ||
241 | && allocation_tag_access_enabled(env, 0, sctlr)) { | ||
242 | - flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1); | ||
243 | + DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1); | ||
244 | } | ||
245 | /* Cache TCMA as well as TBI. */ | ||
246 | - flags = FIELD_DP32(flags, TBFLAG_A64, TCMA, | ||
247 | - aa64_va_parameter_tcma(tcr, mmu_idx)); | ||
248 | + DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); | ||
249 | } | ||
250 | |||
251 | return rebuild_hflags_common(env, fp_el, mmu_idx, flags); | ||
252 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
253 | *cs_base = 0; | ||
254 | assert_hflags_rebuild_correctly(env); | ||
255 | |||
256 | - if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { | ||
257 | + if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { | ||
258 | *pc = env->pc; | ||
259 | if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { | ||
260 | - flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype); | ||
261 | + DP_TBFLAG_A64(flags, BTYPE, env->btype); | ||
262 | } | ||
263 | } else { | ||
264 | *pc = env->regs[15]; | ||
265 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
266 | if (arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
267 | FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) | ||
268 | != env->v7m.secure) { | ||
269 | - flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1); | ||
270 | + DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); | ||
271 | } | ||
272 | |||
273 | if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && | ||
274 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
275 | * active FP context; we must create a new FP context before | ||
276 | * executing any FP insn. | ||
277 | */ | ||
278 | - flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1); | ||
279 | + DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); | ||
280 | } | ||
281 | |||
282 | bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; | ||
283 | if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { | ||
284 | - flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1); | ||
285 | + DP_TBFLAG_M32(flags, LSPACT, 1); | ||
286 | } | ||
287 | } else { | ||
288 | /* | ||
289 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
290 | * Note that VECLEN+VECSTRIDE are RES0 for M-profile. | ||
291 | */ | ||
292 | if (arm_feature(env, ARM_FEATURE_XSCALE)) { | ||
293 | - flags = FIELD_DP32(flags, TBFLAG_A32, | ||
294 | - XSCALE_CPAR, env->cp15.c15_cpar); | ||
295 | + DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); | ||
296 | } else { | ||
297 | - flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, | ||
298 | - env->vfp.vec_len); | ||
299 | - flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, | ||
300 | - env->vfp.vec_stride); | ||
301 | + DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); | ||
302 | + DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); | ||
303 | } | ||
304 | if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { | ||
305 | - flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); | ||
306 | + DP_TBFLAG_A32(flags, VFPEN, 1); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | - flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb); | ||
311 | - flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits); | ||
312 | + DP_TBFLAG_AM32(flags, THUMB, env->thumb); | ||
313 | + DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
318 | * 1 1 Active-not-pending | ||
319 | * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. | ||
320 | */ | ||
321 | - if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && | ||
322 | - (env->pstate & PSTATE_SS)) { | ||
323 | - flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1); | ||
324 | + if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { | ||
325 | + DP_TBFLAG_ANY(flags, PSTATE__SS, 1); | ||
326 | } | ||
327 | |||
328 | *pflags = flags; | ||
329 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
330 | index XXXXXXX..XXXXXXX 100644 | ||
331 | --- a/target/arm/translate-a64.c | ||
332 | +++ b/target/arm/translate-a64.c | ||
333 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
334 | !arm_el_is_aa64(env, 3); | ||
335 | dc->thumb = 0; | ||
336 | dc->sctlr_b = 0; | ||
337 | - dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; | ||
338 | + dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE; | ||
339 | dc->condexec_mask = 0; | ||
340 | dc->condexec_cond = 0; | ||
341 | - core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); | ||
342 | + core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX); | ||
343 | dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); | ||
344 | - dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII); | ||
345 | - dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID); | ||
346 | - dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA); | ||
347 | + dc->tbii = EX_TBFLAG_A64(tb_flags, TBII); | ||
348 | + dc->tbid = EX_TBFLAG_A64(tb_flags, TBID); | ||
349 | + dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA); | ||
350 | dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); | ||
351 | #if !defined(CONFIG_USER_ONLY) | ||
352 | dc->user = (dc->current_el == 0); | ||
353 | #endif | ||
354 | - dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); | ||
355 | - dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL); | ||
356 | - dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16; | ||
357 | - dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE); | ||
358 | - dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT); | ||
359 | - dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE); | ||
360 | - dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV); | ||
361 | - dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA); | ||
362 | - dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE); | ||
363 | - dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE); | ||
364 | + dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); | ||
365 | + dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); | ||
366 | + dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16; | ||
367 | + dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); | ||
368 | + dc->bt = EX_TBFLAG_A64(tb_flags, BT); | ||
369 | + dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); | ||
370 | + dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV); | ||
371 | + dc->ata = EX_TBFLAG_A64(tb_flags, ATA); | ||
372 | + dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE); | ||
373 | + dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); | ||
374 | dc->vec_len = 0; | ||
375 | dc->vec_stride = 0; | ||
376 | dc->cp_regs = arm_cpu->cp_regs; | ||
377 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
378 | * emit code to generate a software step exception | ||
379 | * end the TB | ||
380 | */ | ||
381 | - dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); | ||
382 | - dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS); | ||
383 | + dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); | ||
384 | + dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); | ||
385 | dc->is_ldex = false; | ||
386 | - dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); | ||
387 | + dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL); | ||
388 | |||
389 | /* Bound the number of insns to execute to those left on the page. */ | ||
390 | bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; | ||
391 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
392 | index XXXXXXX..XXXXXXX 100644 | ||
393 | --- a/target/arm/translate.c | ||
394 | +++ b/target/arm/translate.c | ||
395 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
396 | */ | ||
397 | dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && | ||
398 | !arm_el_is_aa64(env, 3); | ||
399 | - dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB); | ||
400 | - dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; | ||
401 | - condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC); | ||
402 | + dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB); | ||
403 | + dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE; | ||
404 | + condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC); | ||
405 | dc->condexec_mask = (condexec & 0xf) << 1; | ||
406 | dc->condexec_cond = condexec >> 4; | ||
407 | |||
408 | - core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); | ||
409 | + core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX); | ||
410 | dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); | ||
411 | dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); | ||
412 | #if !defined(CONFIG_USER_ONLY) | ||
413 | dc->user = (dc->current_el == 0); | ||
414 | #endif | ||
415 | - dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); | ||
416 | + dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); | ||
417 | |||
418 | if (arm_feature(env, ARM_FEATURE_M)) { | ||
419 | dc->vfp_enabled = 1; | ||
420 | dc->be_data = MO_TE; | ||
421 | - dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER); | ||
422 | + dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER); | ||
423 | dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
424 | regime_is_secure(env, dc->mmu_idx); | ||
425 | - dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK); | ||
426 | - dc->v8m_fpccr_s_wrong = | ||
427 | - FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG); | ||
428 | + dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK); | ||
429 | + dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG); | ||
430 | dc->v7m_new_fp_ctxt_needed = | ||
431 | - FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED); | ||
432 | - dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT); | ||
433 | + EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED); | ||
434 | + dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT); | ||
435 | } else { | ||
436 | - dc->be_data = | ||
437 | - FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; | ||
438 | - dc->debug_target_el = | ||
439 | - FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); | ||
440 | - dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B); | ||
441 | - dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE); | ||
442 | - dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS); | ||
443 | - dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN); | ||
444 | + dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL); | ||
445 | + dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B); | ||
446 | + dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE); | ||
447 | + dc->ns = EX_TBFLAG_A32(tb_flags, NS); | ||
448 | + dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN); | ||
449 | if (arm_feature(env, ARM_FEATURE_XSCALE)) { | ||
450 | - dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR); | ||
451 | + dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR); | ||
452 | } else { | ||
453 | - dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN); | ||
454 | - dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE); | ||
455 | + dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); | ||
456 | + dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); | ||
457 | } | ||
458 | } | ||
459 | dc->cp_regs = cpu->cp_regs; | ||
460 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
461 | * emit code to generate a software step exception | ||
462 | * end the TB | ||
463 | */ | ||
464 | - dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); | ||
465 | - dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS); | ||
466 | + dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); | ||
467 | + dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); | ||
468 | dc->is_ldex = false; | ||
469 | |||
470 | dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; | ||
471 | @@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) | ||
472 | DisasContext dc = { }; | ||
473 | const TranslatorOps *ops = &arm_translator_ops; | ||
474 | |||
475 | - if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) { | ||
476 | + if (EX_TBFLAG_AM32(tb->flags, THUMB)) { | ||
477 | ops = &thumb_translator_ops; | ||
478 | } | ||
479 | #ifdef TARGET_AARCH64 | ||
480 | - if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) { | ||
481 | + if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) { | ||
482 | ops = &aarch64_translator_ops; | ||
483 | } | ||
484 | #endif | ||
485 | -- | 96 | -- |
486 | 2.20.1 | 97 | 2.25.1 |
487 | |||
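The hunks above are a mechanical conversion from open-coded FIELD_DP32()/FIELD_EX32() calls to the DP_TBFLAG_*/EX_TBFLAG_* helpers used throughout the rework. A rough, self-contained sketch of the deposit/extract pattern involved (hand-rolled helpers and an invented THUMB field position, not QEMU's registerfields.h or the real TB-flag layout): the wrapper simply folds the destination assignment into the macro so each call site shrinks to one line.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative field position only; not the real TBFLAG_AM32 layout. */
#define THUMB_SHIFT  23
#define THUMB_LENGTH 1

static uint32_t deposit32(uint32_t val, int start, int len, uint32_t field)
{
    uint32_t mask = ((1u << len) - 1u) << start;
    return (val & ~mask) | ((field << start) & mask);
}

static uint32_t extract32(uint32_t val, int start, int len)
{
    return (val >> start) & ((1u << len) - 1u);
}

/* In the spirit of DP_TBFLAG_AM32()/EX_TBFLAG_AM32(): the deposit wrapper
 * assigns back to its destination, so callers stop repeating "flags = ...". */
#define DP_THUMB(DST, VAL) ((DST) = deposit32((DST), THUMB_SHIFT, THUMB_LENGTH, (VAL)))
#define EX_THUMB(IN)       extract32((IN), THUMB_SHIFT, THUMB_LENGTH)

int main(void)
{
    uint32_t flags = 0;
    DP_THUMB(flags, 1);
    printf("thumb=%u flags=0x%08x\n", EX_THUMB(flags), flags);
    return 0;
}
```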
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Just because we are operating on a TCGv_i64 temporary does not | 3 | In parse_user_sigframe, the kernel rejects duplicate sve records,
4 | mean that we're performing a 64-bit operation. Restrict | 4 | or records that are smaller than the header. We were silently |
5 | the frobbing to actual 64-bit operations. | 5 | allowing these cases to pass, dropping the record. |
6 | |||
7 | This bug is not currently visible because all current | ||
8 | users of these two functions always pass MO_64. | ||
9 | 6 | ||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | Message-id: 20210419202257.161730-14-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-38-richard.henderson@linaro.org |
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | --- | 11 | --- |
15 | target/arm/translate.c | 4 ++-- | 12 | linux-user/aarch64/signal.c | 5 ++++- |
16 | 1 file changed, 2 insertions(+), 2 deletions(-) | 13 | 1 file changed, 4 insertions(+), 1 deletion(-) |
17 | 14 | ||
18 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 15 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
19 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/arm/translate.c | 17 | --- a/linux-user/aarch64/signal.c |
21 | +++ b/target/arm/translate.c | 18 | +++ b/linux-user/aarch64/signal.c |
22 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | 19 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
23 | tcg_gen_qemu_ld_i64(val, addr, index, opc); | 20 | break; |
24 | 21 | ||
25 | /* Not needed for user-mode BE32, where we use MO_BE instead. */ | 22 | case TARGET_SVE_MAGIC: |
26 | - if (!IS_USER_ONLY && s->sctlr_b) { | 23 | + if (sve || size < sizeof(struct target_sve_context)) { |
27 | + if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | 24 | + goto err; |
28 | tcg_gen_rotri_i64(val, val, 32); | 25 | + } |
29 | } | 26 | if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
30 | 27 | vq = sve_vq(env); | |
31 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | 28 | sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); |
32 | TCGv addr = gen_aa32_addr(s, a32, opc); | 29 | - if (!sve && size == sve_size) { |
33 | 30 | + if (size == sve_size) { | |
34 | /* Not needed for user-mode BE32, where we use MO_BE instead. */ | 31 | sve = (struct target_sve_context *)ctx; |
35 | - if (!IS_USER_ONLY && s->sctlr_b) { | 32 | break; |
36 | + if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { | 33 | } |
37 | TCGv_i64 tmp = tcg_temp_new_i64(); | ||
38 | tcg_gen_rotri_i64(tmp, val, 32); | ||
39 | tcg_gen_qemu_st_i64(tmp, addr, index, opc); | ||
40 | -- | 34 | -- |
41 | 2.20.1 | 35 | 2.25.1 |
42 | |||
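For the signal-frame change in the right-hand column, the point is that a malformed record list should make the whole restore fail rather than have the bad record silently dropped. A rough sketch of that validation pattern as a generic record walker (made-up structure names and magic value, not QEMU's target_restore_sigframe()):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct record_header {
    uint32_t magic;
    uint32_t size;   /* total record size, header included */
};

#define SVE_MAGIC 0x53564501u   /* illustrative value only */

static bool parse_records(const uint8_t *buf, size_t len)
{
    const struct record_header *sve = NULL;
    size_t off = 0;

    while (off + sizeof(struct record_header) <= len) {
        const struct record_header *hdr = (const void *)(buf + off);

        if (hdr->magic == 0) {
            return true;                      /* terminator record */
        }
        if (hdr->size < sizeof(*hdr) || hdr->size > len - off) {
            return false;                     /* undersized or overflowing record */
        }
        if (hdr->magic == SVE_MAGIC) {
            if (sve) {
                return false;                 /* duplicate: reject, don't drop */
            }
            sve = hdr;
        }
        off += hdr->size;
    }
    return false;                             /* ran off the end with no terminator */
}

int main(void)
{
    struct record_header recs[2] = { { SVE_MAGIC, sizeof(struct record_header) },
                                     { 0, 0 } };
    printf("%d\n", parse_records((const uint8_t *)recs, sizeof(recs)));
    return 0;
}
```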
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-18-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-39-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/translate.c | 4 ++-- | 8 | linux-user/aarch64/signal.c | 3 +++ |
9 | 1 file changed, 2 insertions(+), 2 deletions(-) | 9 | 1 file changed, 3 insertions(+) |
10 | 10 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 11 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 13 | --- a/linux-user/aarch64/signal.c |
14 | +++ b/target/arm/translate.c | 14 | +++ b/linux-user/aarch64/signal.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) | 15 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
16 | } else { | 16 | __get_user(extra_size, |
17 | tmp = load_reg(s, i); | 17 | &((struct target_extra_context *)ctx)->size); |
18 | } | 18 | extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0); |
19 | - gen_aa32_st32(s, tmp, addr, mem_idx); | 19 | + if (!extra) { |
20 | + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | 20 | + return 1; |
21 | tcg_temp_free_i32(tmp); | 21 | + } |
22 | 22 | break; | |
23 | /* No need to add after the last transfer. */ | 23 | |
24 | @@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) | 24 | default: |
25 | } | ||
26 | |||
27 | tmp = tcg_temp_new_i32(); | ||
28 | - gen_aa32_ld32u(s, tmp, addr, mem_idx); | ||
29 | + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); | ||
30 | if (user) { | ||
31 | tmp2 = tcg_const_i32(i); | ||
32 | gen_helper_set_user_reg(cpu_env, tmp2, tmp); | ||
33 | -- | 25 | -- |
34 | 2.20.1 | 26 | 2.25.1 |
35 | |||
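Two small points above: on the left, the LDM/STM element accesses now carry MO_ALIGN because the architecture requires word-aligned addresses for these insns; on the right, a failed lock_user() now aborts the restore instead of being ignored. A trivial sketch of the alignment rule being enforced (a simplified model, not QEMU's MemOp machinery):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* LDM/STM transfer 32-bit words; a non word-aligned address must fault
 * rather than be silently rotated or split. */
static bool ldm_stm_element_aligned(uint32_t addr)
{
    return (addr & 3u) == 0;
}

int main(void)
{
    printf("0x1000 ok=%d, 0x1002 ok=%d\n",
           ldm_stm_element_aligned(0x1000), ldm_stm_element_aligned(0x1002));
    return 0;
}
```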
1 | The Arm ARM specifies that for Thumb encodings of the various plain | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | store insns, if the Rn field is 1111 then we must UNDEF. This is | ||
3 | different from the Arm encodings, where this case is either | ||
4 | UNPREDICTABLE or has well-defined behaviour. The exclusive stores, | ||
5 | store-release and STRD do not have this UNDEF case for any encoding. | ||
6 | 2 | ||
7 | Enforce the UNDEF for this case in the Thumb plain store insns. | 3 | Move the checks out of the parsing loop and into the |
4 | restore function. This more closely mirrors the code | ||
5 | structure in the kernel, and is slightly clearer. | ||
8 | 6 | ||
9 | Fixes: https://bugs.launchpad.net/qemu/+bug/1922887 | 7 | Reject rather than silently skip incorrect VL and SVE record sizes, |
8 | bringing our checks in to line with those the kernel does. | ||
9 | |||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 20220708151540.18136-40-richard.henderson@linaro.org | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 20210408162402.5822-1-peter.maydell@linaro.org | ||
13 | --- | 14 | --- |
14 | target/arm/translate.c | 16 ++++++++++++++++ | 15 | linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------ |
15 | 1 file changed, 16 insertions(+) | 16 | 1 file changed, 35 insertions(+), 16 deletions(-) |
16 | 17 | ||
17 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 18 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
18 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/translate.c | 20 | --- a/linux-user/aarch64/signal.c |
20 | +++ b/target/arm/translate.c | 21 | +++ b/linux-user/aarch64/signal.c |
21 | @@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, | 22 | @@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env, |
22 | ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; | 23 | } |
23 | TCGv_i32 addr, tmp; | 24 | } |
24 | 25 | ||
25 | + /* | 26 | -static void target_restore_sve_record(CPUARMState *env, |
26 | + * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it | 27 | - struct target_sve_context *sve, int vq) |
27 | + * is either UNPREDICTABLE or has defined behaviour | 28 | +static bool target_restore_sve_record(CPUARMState *env, |
28 | + */ | 29 | + struct target_sve_context *sve, |
29 | + if (s->thumb && a->rn == 15) { | 30 | + int size) |
31 | { | ||
32 | - int i, j; | ||
33 | + int i, j, vl, vq; | ||
34 | |||
35 | - /* Note that SVE regs are stored as a byte stream, with each byte element | ||
36 | + if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
30 | + return false; | 37 | + return false; |
31 | + } | 38 | + } |
32 | + | 39 | + |
33 | addr = op_addr_rr_pre(s, a); | 40 | + __get_user(vl, &sve->vl); |
34 | 41 | + vq = sve_vq(env); | |
35 | tmp = load_reg(s, a->rt); | 42 | + |
36 | @@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, | 43 | + /* Reject mismatched VL. */ |
37 | ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; | 44 | + if (vl != vq * TARGET_SVE_VQ_BYTES) { |
38 | TCGv_i32 addr, tmp; | ||
39 | |||
40 | + /* | ||
41 | + * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it | ||
42 | + * is either UNPREDICTABLE or has defined behaviour | ||
43 | + */ | ||
44 | + if (s->thumb && a->rn == 15) { | ||
45 | + return false; | 45 | + return false; |
46 | + } | 46 | + } |
47 | + | 47 | + |
48 | addr = op_addr_ri_pre(s, a); | 48 | + /* Accept empty record -- used to clear PSTATE.SM. */ |
49 | 49 | + if (size <= sizeof(*sve)) { | |
50 | tmp = load_reg(s, a->rt); | 50 | + return true; |
51 | + } | ||
52 | + | ||
53 | + /* Reject non-empty but incomplete record. */ | ||
54 | + if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) { | ||
55 | + return false; | ||
56 | + } | ||
57 | + | ||
58 | + /* | ||
59 | + * Note that SVE regs are stored as a byte stream, with each byte element | ||
60 | * at a subsequent address. This corresponds to a little-endian load | ||
61 | * of our 64-bit hunks. | ||
62 | */ | ||
63 | @@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env, | ||
64 | } | ||
65 | } | ||
66 | } | ||
67 | + return true; | ||
68 | } | ||
69 | |||
70 | static int target_restore_sigframe(CPUARMState *env, | ||
71 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
72 | struct target_sve_context *sve = NULL; | ||
73 | uint64_t extra_datap = 0; | ||
74 | bool used_extra = false; | ||
75 | - int vq = 0, sve_size = 0; | ||
76 | + int sve_size = 0; | ||
77 | |||
78 | target_restore_general_frame(env, sf); | ||
79 | |||
80 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
81 | if (sve || size < sizeof(struct target_sve_context)) { | ||
82 | goto err; | ||
83 | } | ||
84 | - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
85 | - vq = sve_vq(env); | ||
86 | - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); | ||
87 | - if (size == sve_size) { | ||
88 | - sve = (struct target_sve_context *)ctx; | ||
89 | - break; | ||
90 | - } | ||
91 | - } | ||
92 | - goto err; | ||
93 | + sve = (struct target_sve_context *)ctx; | ||
94 | + sve_size = size; | ||
95 | + break; | ||
96 | |||
97 | case TARGET_EXTRA_MAGIC: | ||
98 | if (extra || size != sizeof(struct target_extra_context)) { | ||
99 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
100 | } | ||
101 | |||
102 | /* SVE data, if present, overwrites FPSIMD data. */ | ||
103 | - if (sve) { | ||
104 | - target_restore_sve_record(env, sve, vq); | ||
105 | + if (sve && !target_restore_sve_record(env, sve, sve_size)) { | ||
106 | + goto err; | ||
107 | } | ||
108 | unlock_user(extra, extra_datap, 0); | ||
109 | return 0; | ||
51 | -- | 110 | -- |
52 | 2.20.1 | 111 | 2.25.1 |
53 | |||
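The restructured target_restore_sve_record() above encodes three distinct outcomes: a mismatched vector length is an error, a header-only record is legal (it just clears PSTATE.SM), and a record with a payload must be complete. A rough stand-alone sketch of that classification, with the expected full payload size passed in rather than recomputed (illustrative names, not the QEMU helpers):

```c
#include <stddef.h>
#include <stdio.h>

#define VQ_BYTES 16   /* one SVE vector-length quantum = 128 bits */

/* Returns -1 to reject, 0 for a legal header-only record, 1 for a full record. */
static int classify_sve_record(int record_vl, size_t record_size,
                               size_t header_size, size_t full_size, int vq)
{
    if (record_vl != vq * VQ_BYTES) {
        return -1;                 /* vector length does not match the target's */
    }
    if (record_size <= header_size) {
        return 0;                  /* empty record: nothing to restore */
    }
    if (record_size < full_size) {
        return -1;                 /* payload present but truncated */
    }
    return 1;
}

int main(void)
{
    /* vq = 2 (256-bit vectors): a header-only record passes as "empty" (prints 0). */
    printf("%d\n", classify_sve_record(32, 16, 16, 1024, 2));
    return 0;
}
```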
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Set the SM bit in the SVE record on signal delivery, create the ZA record. | ||
4 | Restore SM and ZA state according to the records present on return. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-23-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-41-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate.h | 1 + | 11 | linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++--- |
9 | target/arm/translate.c | 15 +++++++++++++ | 12 | 1 file changed, 154 insertions(+), 13 deletions(-) |
10 | target/arm/translate-neon.c.inc | 37 +++++++++++++++++++++++++-------- | ||
11 | 3 files changed, 44 insertions(+), 9 deletions(-) | ||
12 | 13 | ||
13 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 14 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
14 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/translate.h | 16 | --- a/linux-user/aarch64/signal.c |
16 | +++ b/target/arm/translate.h | 17 | +++ b/linux-user/aarch64/signal.c |
17 | @@ -XXX,XX +XXX,XX @@ void arm_test_cc(DisasCompare *cmp, int cc); | 18 | @@ -XXX,XX +XXX,XX @@ struct target_sve_context { |
18 | void arm_free_cc(DisasCompare *cmp); | 19 | |
19 | void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); | 20 | #define TARGET_SVE_SIG_FLAG_SM 1 |
20 | void arm_gen_test_cc(int cc, TCGLabel *label); | 21 | |
21 | +MemOp pow2_align(unsigned i); | 22 | +#define TARGET_ZA_MAGIC 0x54366345 |
22 | 23 | + | |
23 | /* Return state of Alternate Half-precision flag, caller frees result */ | 24 | +struct target_za_context { |
24 | static inline TCGv_i32 get_ahp_flag(void) | 25 | + struct target_aarch64_ctx head; |
25 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 26 | + uint16_t vl; |
26 | index XXXXXXX..XXXXXXX 100644 | 27 | + uint16_t reserved[3]; |
27 | --- a/target/arm/translate.c | 28 | + /* The actual ZA data immediately follows. */ |
28 | +++ b/target/arm/translate.c | 29 | +}; |
29 | @@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) | 30 | + |
30 | #define IS_USER_ONLY 0 | 31 | +#define TARGET_ZA_SIG_REGS_OFFSET \ |
31 | #endif | 32 | + QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES) |
32 | 33 | +#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \ | |
33 | +MemOp pow2_align(unsigned i) | 34 | + (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N)) |
35 | +#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ | ||
36 | + TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) | ||
37 | + | ||
38 | struct target_rt_sigframe { | ||
39 | struct target_siginfo info; | ||
40 | struct target_ucontext uc; | ||
41 | @@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end) | ||
42 | } | ||
43 | |||
44 | static void target_setup_sve_record(struct target_sve_context *sve, | ||
45 | - CPUARMState *env, int vq, int size) | ||
46 | + CPUARMState *env, int size) | ||
47 | { | ||
48 | - int i, j; | ||
49 | + int i, j, vq = sve_vq(env); | ||
50 | |||
51 | memset(sve, 0, sizeof(*sve)); | ||
52 | __put_user(TARGET_SVE_MAGIC, &sve->head.magic); | ||
53 | @@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve, | ||
54 | } | ||
55 | } | ||
56 | |||
57 | +static void target_setup_za_record(struct target_za_context *za, | ||
58 | + CPUARMState *env, int size) | ||
34 | +{ | 59 | +{ |
35 | + static const MemOp mop_align[] = { | 60 | + int vq = sme_vq(env); |
36 | + 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, | 61 | + int vl = vq * TARGET_SVE_VQ_BYTES; |
37 | + /* | 62 | + int i, j; |
38 | + * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such | 63 | + |
39 | + * that 256-bit alignment (MO_ALIGN_32) cannot be supported: | 64 | + memset(za, 0, sizeof(*za)); |
40 | + * see get_alignment_bits(). Enforce only 128-bit alignment for now. | 65 | + __put_user(TARGET_ZA_MAGIC, &za->head.magic); |
41 | + */ | 66 | + __put_user(size, &za->head.size); |
42 | + MO_ALIGN_16 | 67 | + __put_user(vl, &za->vl); |
43 | + }; | 68 | + |
44 | + g_assert(i < ARRAY_SIZE(mop_align)); | 69 | + if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) { |
45 | + return mop_align[i]; | 70 | + return; |
71 | + } | ||
72 | + assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq)); | ||
73 | + | ||
74 | + /* | ||
75 | + * Note that ZA vectors are stored as a byte stream, | ||
76 | + * with each byte element at a subsequent address. | ||
77 | + */ | ||
78 | + for (i = 0; i < vl; ++i) { | ||
79 | + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); | ||
80 | + for (j = 0; j < vq * 2; ++j) { | ||
81 | + __put_user_e(env->zarray[i].d[j], z + j, le); | ||
82 | + } | ||
83 | + } | ||
46 | +} | 84 | +} |
47 | + | 85 | + |
48 | /* | 86 | static void target_restore_general_frame(CPUARMState *env, |
49 | * Abstractions of "generate code to do a guest load/store for | 87 | struct target_rt_sigframe *sf) |
50 | * AArch32", where a vaddr is always 32 bits (and is zero | 88 | { |
51 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | 89 | @@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env, |
52 | index XXXXXXX..XXXXXXX 100644 | 90 | |
53 | --- a/target/arm/translate-neon.c.inc | 91 | static bool target_restore_sve_record(CPUARMState *env, |
54 | +++ b/target/arm/translate-neon.c.inc | 92 | struct target_sve_context *sve, |
55 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) | 93 | - int size) |
56 | int size = a->size; | 94 | + int size, int *svcr) |
57 | int nregs = a->n + 1; | 95 | { |
58 | TCGv_i32 addr, tmp; | 96 | - int i, j, vl, vq; |
59 | + MemOp mop, align; | 97 | + int i, j, vl, vq, flags; |
60 | 98 | + bool sm; | |
61 | if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { | 99 | |
100 | - if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
101 | + __get_user(vl, &sve->vl); | ||
102 | + __get_user(flags, &sve->flags); | ||
103 | + | ||
104 | + sm = flags & TARGET_SVE_SIG_FLAG_SM; | ||
105 | + | ||
106 | + /* The cpu must support Streaming or Non-streaming SVE. */ | ||
107 | + if (sm | ||
108 | + ? !cpu_isar_feature(aa64_sme, env_archcpu(env)) | ||
109 | + : !cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
62 | return false; | 110 | return false; |
63 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) | 111 | } |
112 | |||
113 | - __get_user(vl, &sve->vl); | ||
114 | - vq = sve_vq(env); | ||
115 | + /* | ||
116 | + * Note that we cannot use sve_vq() because that depends on the | ||
117 | + * current setting of PSTATE.SM, not the state to be restored. | ||
118 | + */ | ||
119 | + vq = sve_vqm1_for_el_sm(env, 0, sm) + 1; | ||
120 | |||
121 | /* Reject mismatched VL. */ | ||
122 | if (vl != vq * TARGET_SVE_VQ_BYTES) { | ||
123 | @@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env, | ||
64 | return false; | 124 | return false; |
65 | } | 125 | } |
66 | 126 | ||
67 | + align = 0; | 127 | + *svcr = FIELD_DP64(*svcr, SVCR, SM, sm); |
68 | if (size == 3) { | 128 | + |
69 | if (nregs != 4 || a->a == 0) { | 129 | /* |
70 | return false; | 130 | * Note that SVE regs are stored as a byte stream, with each byte element |
71 | } | 131 | * at a subsequent address. This corresponds to a little-endian load |
72 | /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */ | 132 | @@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env, |
73 | - size = 2; | 133 | return true; |
74 | - } | 134 | } |
75 | - if (nregs == 1 && a->a == 1 && size == 0) { | 135 | |
76 | - return false; | 136 | +static bool target_restore_za_record(CPUARMState *env, |
77 | - } | 137 | + struct target_za_context *za, |
78 | - if (nregs == 3 && a->a == 1) { | 138 | + int size, int *svcr) |
79 | - return false; | 139 | +{ |
80 | + size = MO_32; | 140 | + int i, j, vl, vq; |
81 | + align = MO_ALIGN_16; | 141 | + |
82 | + } else if (a->a) { | 142 | + if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) { |
83 | + switch (nregs) { | 143 | + return false; |
84 | + case 1: | 144 | + } |
85 | + if (size == 0) { | 145 | + |
86 | + return false; | 146 | + __get_user(vl, &za->vl); |
147 | + vq = sme_vq(env); | ||
148 | + | ||
149 | + /* Reject mismatched VL. */ | ||
150 | + if (vl != vq * TARGET_SVE_VQ_BYTES) { | ||
151 | + return false; | ||
152 | + } | ||
153 | + | ||
154 | + /* Accept empty record -- used to clear PSTATE.ZA. */ | ||
155 | + if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) { | ||
156 | + return true; | ||
157 | + } | ||
158 | + | ||
159 | + /* Reject non-empty but incomplete record. */ | ||
160 | + if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) { | ||
161 | + return false; | ||
162 | + } | ||
163 | + | ||
164 | + *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1); | ||
165 | + | ||
166 | + for (i = 0; i < vl; ++i) { | ||
167 | + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); | ||
168 | + for (j = 0; j < vq * 2; ++j) { | ||
169 | + __get_user_e(env->zarray[i].d[j], z + j, le); | ||
170 | + } | ||
171 | + } | ||
172 | + return true; | ||
173 | +} | ||
174 | + | ||
175 | static int target_restore_sigframe(CPUARMState *env, | ||
176 | struct target_rt_sigframe *sf) | ||
177 | { | ||
178 | struct target_aarch64_ctx *ctx, *extra = NULL; | ||
179 | struct target_fpsimd_context *fpsimd = NULL; | ||
180 | struct target_sve_context *sve = NULL; | ||
181 | + struct target_za_context *za = NULL; | ||
182 | uint64_t extra_datap = 0; | ||
183 | bool used_extra = false; | ||
184 | int sve_size = 0; | ||
185 | + int za_size = 0; | ||
186 | + int svcr = 0; | ||
187 | |||
188 | target_restore_general_frame(env, sf); | ||
189 | |||
190 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
191 | sve_size = size; | ||
192 | break; | ||
193 | |||
194 | + case TARGET_ZA_MAGIC: | ||
195 | + if (za || size < sizeof(struct target_za_context)) { | ||
196 | + goto err; | ||
87 | + } | 197 | + } |
88 | + align = MO_ALIGN; | 198 | + za = (struct target_za_context *)ctx; |
199 | + za_size = size; | ||
89 | + break; | 200 | + break; |
90 | + case 2: | 201 | + |
91 | + align = pow2_align(size + 1); | 202 | case TARGET_EXTRA_MAGIC: |
92 | + break; | 203 | if (extra || size != sizeof(struct target_extra_context)) { |
93 | + case 3: | 204 | goto err; |
94 | + return false; | 205 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
95 | + case 4: | 206 | } |
96 | + align = pow2_align(size + 2); | 207 | |
97 | + break; | 208 | /* SVE data, if present, overwrites FPSIMD data. */ |
98 | + default: | 209 | - if (sve && !target_restore_sve_record(env, sve, sve_size)) { |
99 | + g_assert_not_reached(); | 210 | + if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) { |
211 | goto err; | ||
212 | } | ||
213 | + if (za && !target_restore_za_record(env, za, za_size, &svcr)) { | ||
214 | + goto err; | ||
215 | + } | ||
216 | + if (env->svcr != svcr) { | ||
217 | + env->svcr = svcr; | ||
218 | + arm_rebuild_hflags(env); | ||
219 | + } | ||
220 | unlock_user(extra, extra_datap, 0); | ||
221 | return 0; | ||
222 | |||
223 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, | ||
224 | .total_size = offsetof(struct target_rt_sigframe, | ||
225 | uc.tuc_mcontext.__reserved), | ||
226 | }; | ||
227 | - int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0; | ||
228 | + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; | ||
229 | + int sve_size = 0, za_size = 0; | ||
230 | struct target_rt_sigframe *frame; | ||
231 | struct target_rt_frame_record *fr; | ||
232 | abi_ulong frame_addr, return_addr; | ||
233 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, | ||
234 | &layout); | ||
235 | |||
236 | /* SVE state needs saving only if it exists. */ | ||
237 | - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { | ||
238 | - vq = sve_vq(env); | ||
239 | - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); | ||
240 | + if (cpu_isar_feature(aa64_sve, env_archcpu(env)) || | ||
241 | + cpu_isar_feature(aa64_sme, env_archcpu(env))) { | ||
242 | + sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16); | ||
243 | sve_ofs = alloc_sigframe_space(sve_size, &layout); | ||
244 | } | ||
245 | + if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { | ||
246 | + /* ZA state needs saving only if it is enabled. */ | ||
247 | + if (FIELD_EX64(env->svcr, SVCR, ZA)) { | ||
248 | + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); | ||
249 | + } else { | ||
250 | + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0); | ||
100 | + } | 251 | + } |
101 | } | 252 | + za_ofs = alloc_sigframe_space(za_size, &layout); |
102 | 253 | + } | |
103 | if (!vfp_access_check(s)) { | 254 | |
104 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) | 255 | if (layout.extra_ofs) { |
105 | */ | 256 | /* Reserve space for the extra end marker. The standard end marker |
106 | stride = a->t ? 2 : 1; | 257 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
107 | vec_size = nregs == 1 ? stride * 8 : 8; | 258 | target_setup_end_record((void *)frame + layout.extra_end_ofs); |
108 | - | 259 | } |
109 | + mop = size | align; | 260 | if (sve_ofs) { |
110 | tmp = tcg_temp_new_i32(); | 261 | - target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size); |
111 | addr = tcg_temp_new_i32(); | 262 | + target_setup_sve_record((void *)frame + sve_ofs, env, sve_size); |
112 | load_reg_var(s, addr, a->rn); | 263 | + } |
113 | for (reg = 0; reg < nregs; reg++) { | 264 | + if (za_ofs) { |
114 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size); | 265 | + target_setup_za_record((void *)frame + za_ofs, env, za_size); |
115 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop); | 266 | } |
116 | if ((vd & 1) && vec_size == 16) { | 267 | |
117 | /* | 268 | /* Set up the stack frame for unwinding. */ |
118 | * We cannot write 16 bytes at once because the | 269 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
119 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) | 270 | env->btype = 2; |
120 | } | 271 | } |
121 | tcg_gen_addi_i32(addr, addr, 1 << size); | 272 | |
122 | vd += stride; | 273 | + /* |
123 | + | 274 | + * Invoke the signal handler with both SM and ZA disabled. |
124 | + /* Subsequent memory operations inherit alignment */ | 275 | + * When clearing SM, ResetSVEState, per SMSTOP. |
125 | + mop &= ~MO_AMASK; | 276 | + */ |
126 | } | 277 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
127 | tcg_temp_free_i32(tmp); | 278 | + arm_reset_sve_state(env); |
128 | tcg_temp_free_i32(addr); | 279 | + } |
280 | + if (env->svcr) { | ||
281 | + env->svcr = 0; | ||
282 | + arm_rebuild_hflags(env); | ||
283 | + } | ||
284 | + | ||
285 | if (info) { | ||
286 | tswap_siginfo(&frame->info, info); | ||
287 | env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); | ||
129 | -- | 288 | -- |
130 | 2.20.1 | 289 | 2.25.1 |
131 | |||
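A quick way to get a feel for the ZA record sizes being reserved above: the ZA storage is an SVL-by-SVL byte array, so the payload grows quadratically with the streaming vector length. The sketch below is back-of-envelope only; it ignores the record header and alignment padding.

```c
#include <stdio.h>

int main(void)
{
    for (int vq = 1; vq <= 16; vq *= 2) {
        int svl_bytes = vq * 16;                 /* streaming vector length in bytes */
        printf("vq=%2d  SVL=%4d bits  ZA payload=%6d bytes\n",
               vq, svl_bytes * 8, svl_bytes * svl_bytes);
    }
    return 0;
}
```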
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Now that these bits have been moved out of tb->flags, | 3 | Add "sve" to the sve prctl functions, to distinguish |
4 | where TBFLAG_ANY was filling from the top, move AM32 | 4 | them from the coming "sme" prctls with similar names. |
5 | to fill from the top, and A32 and M32 to fill from the | ||
6 | bottom. This means fewer changes when adding new bits. | ||
7 | 5 | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Message-id: 20210419202257.161730-9-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-42-richard.henderson@linaro.org |
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 10 | --- |
13 | target/arm/cpu.h | 42 +++++++++++++++++++++--------------------- | 11 | linux-user/aarch64/target_prctl.h | 8 ++++---- |
14 | 1 file changed, 21 insertions(+), 21 deletions(-) | 12 | linux-user/syscall.c | 12 ++++++------ |
13 | 2 files changed, 10 insertions(+), 10 deletions(-) | ||
15 | 14 | ||
16 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h |
17 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/cpu.h | 17 | --- a/linux-user/aarch64/target_prctl.h |
19 | +++ b/target/arm/cpu.h | 18 | +++ b/linux-user/aarch64/target_prctl.h |
20 | @@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU; | 19 | @@ -XXX,XX +XXX,XX @@ |
21 | * | 20 | #ifndef AARCH64_TARGET_PRCTL_H |
22 | * The bits for 32-bit A-profile and M-profile partially overlap: | 21 | #define AARCH64_TARGET_PRCTL_H |
23 | * | 22 | |
24 | - * 18 9 0 | 23 | -static abi_long do_prctl_get_vl(CPUArchState *env) |
25 | - * +----------------+--------------+ | 24 | +static abi_long do_prctl_sve_get_vl(CPUArchState *env) |
26 | - * | TBFLAG_A32 | | | 25 | { |
27 | - * +-----+----------+ TBFLAG_AM32 | | 26 | ARMCPU *cpu = env_archcpu(env); |
28 | - * | |TBFLAG_M32| | | 27 | if (cpu_isar_feature(aa64_sve, cpu)) { |
29 | - * +-----+----------+--------------+ | 28 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env) |
30 | - * 14 9 0 | 29 | } |
31 | + * 31 23 11 10 0 | 30 | return -TARGET_EINVAL; |
32 | + * +-------------+----------+----------------+ | 31 | } |
33 | + * | | | TBFLAG_A32 | | 32 | -#define do_prctl_get_vl do_prctl_get_vl |
34 | + * | TBFLAG_AM32 | +-----+----------+ | 33 | +#define do_prctl_sve_get_vl do_prctl_sve_get_vl |
35 | + * | | |TBFLAG_M32| | 34 | |
36 | + * +-------------+----------------+----------+ | 35 | -static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) |
37 | + * 31 23 5 4 0 | 36 | +static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
38 | * | 37 | { |
39 | * Unless otherwise noted, these bits are cached in env->hflags. | 38 | /* |
40 | */ | 39 | * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT. |
41 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2) | 40 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) |
42 | /* | 41 | } |
43 | * Bit usage when in AArch32 state, both A- and M-profile. | 42 | return -TARGET_EINVAL; |
44 | */ | 43 | } |
45 | -FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */ | 44 | -#define do_prctl_set_vl do_prctl_set_vl |
46 | -FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */ | 45 | +#define do_prctl_sve_set_vl do_prctl_sve_set_vl |
47 | +FIELD(TBFLAG_AM32, CONDEXEC, 24, 8) /* Not cached. */ | 46 | |
48 | +FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */ | 47 | static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) |
49 | 48 | { | |
50 | /* | 49 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c |
51 | * Bit usage when in AArch32 state, for A-profile only. | 50 | index XXXXXXX..XXXXXXX 100644 |
52 | */ | 51 | --- a/linux-user/syscall.c |
53 | -FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. */ | 52 | +++ b/linux-user/syscall.c |
54 | -FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */ | 53 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) |
55 | +FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */ | 54 | #ifndef do_prctl_set_fp_mode |
56 | +FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */ | 55 | #define do_prctl_set_fp_mode do_prctl_inval1 |
57 | /* | 56 | #endif |
58 | * We store the bottom two bits of the CPAR as TB flags and handle | 57 | -#ifndef do_prctl_get_vl |
59 | * checks on the other bits at runtime. This shares the same bits as | 58 | -#define do_prctl_get_vl do_prctl_inval0 |
60 | * VECSTRIDE, which is OK as no XScale CPU has VFP. | 59 | +#ifndef do_prctl_sve_get_vl |
61 | * Not cached, because VECLEN+VECSTRIDE are not cached. | 60 | +#define do_prctl_sve_get_vl do_prctl_inval0 |
62 | */ | 61 | #endif |
63 | -FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2) | 62 | -#ifndef do_prctl_set_vl |
64 | -FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */ | 63 | -#define do_prctl_set_vl do_prctl_inval1 |
65 | -FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */ | 64 | +#ifndef do_prctl_sve_set_vl |
66 | -FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1) | 65 | +#define do_prctl_sve_set_vl do_prctl_inval1 |
67 | +FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2) | 66 | #endif |
68 | +FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */ | 67 | #ifndef do_prctl_reset_keys |
69 | +FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */ | 68 | #define do_prctl_reset_keys do_prctl_inval1 |
70 | +FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) | 69 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, |
71 | /* | 70 | case PR_SET_FP_MODE: |
72 | * Indicates whether cp register reads and writes by guest code should access | 71 | return do_prctl_set_fp_mode(env, arg2); |
73 | * the secure or nonsecure bank of banked registers; note that this is not | 72 | case PR_SVE_GET_VL: |
74 | * the same thing as the current security state of the processor! | 73 | - return do_prctl_get_vl(env); |
75 | */ | 74 | + return do_prctl_sve_get_vl(env); |
76 | -FIELD(TBFLAG_A32, NS, 17, 1) | 75 | case PR_SVE_SET_VL: |
77 | +FIELD(TBFLAG_A32, NS, 10, 1) | 76 | - return do_prctl_set_vl(env, arg2); |
78 | 77 | + return do_prctl_sve_set_vl(env, arg2); | |
79 | /* | 78 | case PR_PAC_RESET_KEYS: |
80 | * Bit usage when in AArch32 state, for M-profile only. | 79 | if (arg3 || arg4 || arg5) { |
81 | */ | 80 | return -TARGET_EINVAL; |
82 | /* Handler (ie not Thread) mode */ | ||
83 | -FIELD(TBFLAG_M32, HANDLER, 9, 1) | ||
84 | +FIELD(TBFLAG_M32, HANDLER, 0, 1) | ||
85 | /* Whether we should generate stack-limit checks */ | ||
86 | -FIELD(TBFLAG_M32, STACKCHECK, 10, 1) | ||
87 | +FIELD(TBFLAG_M32, STACKCHECK, 1, 1) | ||
88 | /* Set if FPCCR.LSPACT is set */ | ||
89 | -FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */ | ||
90 | +FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */ | ||
91 | /* Set if we must create a new FP context */ | ||
92 | -FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */ | ||
93 | +FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */ | ||
94 | /* Set if FPCCR.S does not match current security state */ | ||
95 | -FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */ | ||
96 | +FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */ | ||
97 | |||
98 | /* | ||
99 | * Bit usage when in AArch64 state | ||
100 | -- | 81 | -- |
101 | 2.20.1 | 82 | 2.25.1 |
102 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We were incorrectly assuming that only the first byte of an MTE access | 3 | These prctls set the Streaming SVE vector length, which may
4 | is checked against the tags. But per the ARM, unaligned accesses are | 4 | be completely different from the Normal SVE vector length. |
5 | pre-decomposed into single-byte accesses. So by the time we reach the | ||
6 | actual MTE check in the ARM pseudocode, all accesses are aligned. | ||
7 | 5 | ||
8 | Therefore, the first failure is always either the first byte of the | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | access, or the first byte of the granule. | ||
10 | |||
11 | In addition, some of the arithmetic is off for last-first -> count. | ||
12 | This does not become directly visible until a later patch that passes | ||
13 | single bytes into this function, so ptr == ptr_last. | ||
14 | |||
15 | Buglink: https://bugs.launchpad.net/bugs/1921948 | ||
16 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
17 | Message-id: 20210416183106.1516563-2-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-43-richard.henderson@linaro.org |
18 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
19 | [PMM: tweaked a comment] | ||
20 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
21 | --- | 10 | --- |
22 | target/arm/mte_helper.c | 40 ++++++++++++++++++---------------------- | 11 | linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++ |
23 | 1 file changed, 18 insertions(+), 22 deletions(-) | 12 | linux-user/syscall.c | 16 +++++++++ |
13 | 2 files changed, 70 insertions(+) | ||
24 | 14 | ||
25 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | 15 | diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h |
26 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/target/arm/mte_helper.c | 17 | --- a/linux-user/aarch64/target_prctl.h |
28 | +++ b/target/arm/mte_helper.c | 18 | +++ b/linux-user/aarch64/target_prctl.h |
29 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 19 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env) |
30 | uint64_t ptr, uintptr_t ra) | ||
31 | { | 20 | { |
32 | int mmu_idx, ptr_tag, bit55; | 21 | ARMCPU *cpu = env_archcpu(env); |
33 | - uint64_t ptr_last, ptr_end, prev_page, next_page; | 22 | if (cpu_isar_feature(aa64_sve, cpu)) { |
34 | - uint64_t tag_first, tag_end; | 23 | + /* PSTATE.SM is always unset on syscall entry. */ |
35 | - uint64_t tag_byte_first, tag_byte_end; | 24 | return sve_vq(env) * 16; |
36 | - uint32_t esize, total, tag_count, tag_size, n, c; | 25 | } |
37 | + uint64_t ptr_last, prev_page, next_page; | 26 | return -TARGET_EINVAL; |
38 | + uint64_t tag_first, tag_last; | 27 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
39 | + uint64_t tag_byte_first, tag_byte_last; | 28 | && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { |
40 | + uint32_t total, tag_count, tag_size, n, c; | 29 | uint32_t vq, old_vq; |
41 | uint8_t *mem1, *mem2; | 30 | |
42 | MMUAccessType type; | 31 | + /* PSTATE.SM is always unset on syscall entry. */ |
43 | 32 | old_vq = sve_vq(env); | |
44 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
45 | |||
46 | mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
47 | type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD; | ||
48 | - esize = FIELD_EX32(desc, MTEDESC, ESIZE); | ||
49 | total = FIELD_EX32(desc, MTEDESC, TSIZE); | ||
50 | |||
51 | - /* Find the addr of the end of the access, and of the last element. */ | ||
52 | - ptr_end = ptr + total; | ||
53 | - ptr_last = ptr_end - esize; | ||
54 | + /* Find the addr of the end of the access */ | ||
55 | + ptr_last = ptr + total - 1; | ||
56 | |||
57 | /* Round the bounds to the tag granule, and compute the number of tags. */ | ||
58 | tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); | ||
59 | - tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE); | ||
60 | - tag_count = (tag_end - tag_first) / TAG_GRANULE; | ||
61 | + tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE); | ||
62 | + tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; | ||
63 | |||
64 | /* Round the bounds to twice the tag granule, and compute the bytes. */ | ||
65 | tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE); | ||
66 | - tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE); | ||
67 | + tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE); | ||
68 | |||
69 | /* Locate the page boundaries. */ | ||
70 | prev_page = ptr & TARGET_PAGE_MASK; | ||
71 | next_page = prev_page + TARGET_PAGE_SIZE; | ||
72 | |||
73 | - if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) { | ||
74 | + if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) { | ||
75 | /* Memory access stays on one page. */ | ||
76 | - tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE); | ||
77 | + tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1; | ||
78 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total, | ||
79 | MMU_DATA_LOAD, tag_size, ra); | ||
80 | if (!mem1) { | ||
81 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | ||
82 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr, | ||
83 | MMU_DATA_LOAD, tag_size, ra); | ||
84 | |||
85 | - tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE); | ||
86 | + tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1; | ||
87 | mem2 = allocation_tag_mem(env, mmu_idx, next_page, type, | ||
88 | - ptr_end - next_page, | ||
89 | + ptr_last - next_page + 1, | ||
90 | MMU_DATA_LOAD, tag_size, ra); | ||
91 | 33 | ||
92 | /* | 34 | /* |
93 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc, | 35 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
94 | } | 36 | } |
95 | 37 | #define do_prctl_sve_set_vl do_prctl_sve_set_vl | |
96 | /* | 38 | |
97 | - * If we failed, we know which granule. Compute the element that | 39 | +static abi_long do_prctl_sme_get_vl(CPUArchState *env) |
98 | - * is first in that granule, and signal failure on that element. | 40 | +{ |
99 | + * If we failed, we know which granule. For the first granule, the | 41 | + ARMCPU *cpu = env_archcpu(env); |
100 | + * failure address is @ptr, the first byte accessed. Otherwise the | 42 | + if (cpu_isar_feature(aa64_sme, cpu)) { |
101 | + * failure address is the first byte of the nth granule. | 43 | + return sme_vq(env) * 16; |
102 | */ | 44 | + } |
103 | if (unlikely(n < tag_count)) { | 45 | + return -TARGET_EINVAL; |
104 | - uint64_t fail_ofs; | 46 | +} |
105 | - | 47 | +#define do_prctl_sme_get_vl do_prctl_sme_get_vl |
106 | - fail_ofs = tag_first + n * TAG_GRANULE - ptr; | 48 | + |
107 | - fail_ofs = ROUND_UP(fail_ofs, esize); | 49 | +static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2) |
108 | - mte_check_fail(env, desc, ptr + fail_ofs, ra); | 50 | +{ |
109 | + uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE); | 51 | + /* |
110 | + mte_check_fail(env, desc, fault, ra); | 52 | + * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT. |
111 | } | 53 | + * Note the kernel definition of sve_vl_valid allows for VQ=512, |
112 | 54 | + * i.e. VL=8192, even though the architectural maximum is VQ=16. | |
113 | done: | 55 | + */ |
56 | + if (cpu_isar_feature(aa64_sme, env_archcpu(env)) | ||
57 | + && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { | ||
58 | + int vq, old_vq; | ||
59 | + | ||
60 | + old_vq = sme_vq(env); | ||
61 | + | ||
62 | + /* | ||
63 | + * Bound the value of vq, so that we know that it fits into | ||
64 | + * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared | ||
65 | + * on syscall entry, we are not modifying the current SVE | ||
66 | + * vector length. | ||
67 | + */ | ||
68 | + vq = MAX(arg2 / 16, 1); | ||
69 | + vq = MIN(vq, 16); | ||
70 | + env->vfp.smcr_el[1] = | ||
71 | + FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1); | ||
72 | + | ||
73 | + /* Delay rebuilding hflags until we know if ZA must change. */ | ||
74 | + vq = sve_vqm1_for_el_sm(env, 0, true) + 1; | ||
75 | + | ||
76 | + if (vq != old_vq) { | ||
77 | + /* | ||
78 | + * PSTATE.ZA state is cleared on any change to SVL. | ||
79 | + * We need not call arm_rebuild_hflags because PSTATE.SM was | ||
80 | + * cleared on syscall entry, so this hasn't changed VL. | ||
81 | + */ | ||
82 | + env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0); | ||
83 | + arm_rebuild_hflags(env); | ||
84 | + } | ||
85 | + return vq * 16; | ||
86 | + } | ||
87 | + return -TARGET_EINVAL; | ||
88 | +} | ||
89 | +#define do_prctl_sme_set_vl do_prctl_sme_set_vl | ||
90 | + | ||
91 | static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) | ||
92 | { | ||
93 | ARMCPU *cpu = env_archcpu(env); | ||
94 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c | ||
95 | index XXXXXXX..XXXXXXX 100644 | ||
96 | --- a/linux-user/syscall.c | ||
97 | +++ b/linux-user/syscall.c | ||
98 | @@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) | ||
99 | #ifndef PR_SET_SYSCALL_USER_DISPATCH | ||
100 | # define PR_SET_SYSCALL_USER_DISPATCH 59 | ||
101 | #endif | ||
102 | +#ifndef PR_SME_SET_VL | ||
103 | +# define PR_SME_SET_VL 63 | ||
104 | +# define PR_SME_GET_VL 64 | ||
105 | +# define PR_SME_VL_LEN_MASK 0xffff | ||
106 | +# define PR_SME_VL_INHERIT (1 << 17) | ||
107 | +#endif | ||
108 | |||
109 | #include "target_prctl.h" | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) | ||
112 | #ifndef do_prctl_set_unalign | ||
113 | #define do_prctl_set_unalign do_prctl_inval1 | ||
114 | #endif | ||
115 | +#ifndef do_prctl_sme_get_vl | ||
116 | +#define do_prctl_sme_get_vl do_prctl_inval0 | ||
117 | +#endif | ||
118 | +#ifndef do_prctl_sme_set_vl | ||
119 | +#define do_prctl_sme_set_vl do_prctl_inval1 | ||
120 | +#endif | ||
121 | |||
122 | static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, | ||
123 | abi_long arg3, abi_long arg4, abi_long arg5) | ||
124 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, | ||
125 | return do_prctl_sve_get_vl(env); | ||
126 | case PR_SVE_SET_VL: | ||
127 | return do_prctl_sve_set_vl(env, arg2); | ||
128 | + case PR_SME_GET_VL: | ||
129 | + return do_prctl_sme_get_vl(env); | ||
130 | + case PR_SME_SET_VL: | ||
131 | + return do_prctl_sme_set_vl(env, arg2); | ||
132 | case PR_PAC_RESET_KEYS: | ||
133 | if (arg3 || arg4 || arg5) { | ||
134 | return -TARGET_EINVAL; | ||
114 | -- | 135 | -- |
115 | 2.20.1 | 136 | 2.25.1 |
116 | |||
117 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We're about to rearrange the macro expansion surrounding tbflags, | 3 | There's no reason to set CPACR_EL1.ZEN if SVE disabled. |
4 | and this field name will be expanded using the bit definition of | ||
5 | the same name, resulting in a token pasting error. | ||
6 | |||
7 | So SCTLR_B -> SCTLR__B in the 3 uses, and document it. | ||
8 | 4 | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | Message-id: 20210419202257.161730-3-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-44-richard.henderson@linaro.org |
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 9 | --- |
14 | target/arm/cpu.h | 2 +- | 10 | target/arm/cpu.c | 7 +++---- |
15 | target/arm/helper.c | 2 +- | 11 | 1 file changed, 3 insertions(+), 4 deletions(-) |
16 | target/arm/translate.c | 2 +- | ||
17 | 3 files changed, 3 insertions(+), 3 deletions(-) | ||
18 | 12 | ||
19 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
20 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/cpu.h | 15 | --- a/target/arm/cpu.c |
22 | +++ b/target/arm/cpu.h | 16 | +++ b/target/arm/cpu.c |
23 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */ | 17 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev) |
24 | */ | 18 | /* and to the FP/Neon instructions */ |
25 | FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2) | 19 | env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
26 | FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */ | 20 | CPACR_EL1, FPEN, 3); |
27 | -FIELD(TBFLAG_A32, SCTLR_B, 15, 1) | 21 | - /* and to the SVE instructions */ |
28 | +FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */ | 22 | - env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
29 | FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1) | 23 | - CPACR_EL1, ZEN, 3); |
30 | /* | 24 | - /* with reasonable vector length */ |
31 | * Indicates whether cp register reads and writes by guest code should access | 25 | + /* and to the SVE instructions, with default vector length */ |
32 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 26 | if (cpu_isar_feature(aa64_sve, cpu)) { |
33 | index XXXXXXX..XXXXXXX 100644 | 27 | + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
34 | --- a/target/arm/helper.c | 28 | + CPACR_EL1, ZEN, 3); |
35 | +++ b/target/arm/helper.c | 29 | env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; |
36 | @@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, | 30 | } |
37 | bool sctlr_b = arm_sctlr_b(env); | 31 | /* |
38 | |||
39 | if (sctlr_b) { | ||
40 | - flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1); | ||
41 | + flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1); | ||
42 | } | ||
43 | if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { | ||
44 | flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); | ||
45 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/target/arm/translate.c | ||
48 | +++ b/target/arm/translate.c | ||
49 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
50 | FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; | ||
51 | dc->debug_target_el = | ||
52 | FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); | ||
53 | - dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B); | ||
54 | + dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B); | ||
55 | dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE); | ||
56 | dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS); | ||
57 | dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN); | ||
58 | -- | 32 | -- |
59 | 2.20.1 | 33 | 2.25.1 |
60 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | The encoding of size = 1 and size = 2 had the incorrect decode | 3 | Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu. |
4 | for align, overlapping the stride field. This error was hidden | ||
5 | by what should have been unnecessary masking in translate. | ||
6 | 4 | ||
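(For reference, a hand-written sketch of how the corrected size=1 row decodes; this helper is illustrative only and not part of the patch. Bit 5 feeds the stride via %imm1_5_p1, so the old 2-bit align field silently swallowed the stride bit.)

    #include <stdint.h>

    /* Fields of the size=1 (16-bit element) VLDST_single row:
     * "... 01 n:2 reg_idx:2 . align:1 rm:4" per neon-ls.decode. */
    static void decode_size1_fields(uint32_t insn)
    {
        unsigned reg_idx   = (insn >> 6) & 3;
        unsigned stride    = ((insn >> 5) & 1) + 1;  /* %imm1_5_p1 */
        unsigned align_new = (insn >> 4) & 1;        /* fixed: 1-bit align */
        unsigned align_old = (insn >> 4) & 3;        /* old decode: included bit 5 */

        (void)reg_idx; (void)stride; (void)align_new; (void)align_old;
    }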
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210419202257.161730-2-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-45-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 9 | --- |
12 | target/arm/neon-ls.decode | 4 ++-- | 10 | target/arm/cpu.c | 11 +++++++++++ |
13 | target/arm/translate-neon.c.inc | 4 ++-- | 11 | 1 file changed, 11 insertions(+) |
14 | 2 files changed, 4 insertions(+), 4 deletions(-) | ||
15 | 12 | ||
16 | diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/neon-ls.decode | 15 | --- a/target/arm/cpu.c |
19 | +++ b/target/arm/neon-ls.decode | 16 | +++ b/target/arm/cpu.c |
20 | @@ -XXX,XX +XXX,XX @@ VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \ | 17 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev) |
21 | 18 | CPACR_EL1, ZEN, 3); | |
22 | VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \ | 19 | env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; |
23 | vd=%vd_dp size=0 stride=1 | ||
24 | -VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \ | ||
25 | +VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 . align:1 rm:4 \ | ||
26 | vd=%vd_dp size=1 stride=%imm1_5_p1 | ||
27 | -VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \ | ||
28 | +VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 . align:2 rm:4 \ | ||
29 | vd=%vd_dp size=2 stride=%imm1_6_p1 | ||
30 | diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/target/arm/translate-neon.c.inc | ||
33 | +++ b/target/arm/translate-neon.c.inc | ||
34 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) | ||
35 | switch (nregs) { | ||
36 | case 1: | ||
37 | if (((a->align & (1 << a->size)) != 0) || | ||
38 | - (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) { | ||
39 | + (a->size == 2 && (a->align == 1 || a->align == 2))) { | ||
40 | return false; | ||
41 | } | 20 | } |
42 | break; | 21 | + /* and for SME instructions, with default vector length, and TPIDR2 */ |
43 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) | 22 | + if (cpu_isar_feature(aa64_sme, cpu)) { |
44 | } | 23 | + env->cp15.sctlr_el[1] |= SCTLR_EnTP2; |
45 | break; | 24 | + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
46 | case 4: | 25 | + CPACR_EL1, SMEN, 3); |
47 | - if ((a->size == 2) && ((a->align & 3) == 3)) { | 26 | + env->vfp.smcr_el[1] = cpu->sme_default_vq - 1; |
48 | + if (a->size == 2 && a->align == 3) { | 27 | + if (cpu_isar_feature(aa64_sme_fa64, cpu)) { |
49 | return false; | 28 | + env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1], |
50 | } | 29 | + SMCR, FA64, 1); |
51 | break; | 30 | + } |
31 | + } | ||
32 | /* | ||
33 | * Enable 48-bit address space (TODO: take reserved_va into account). | ||
34 | * Enable TBI0 but not TBI1. | ||
52 | -- | 35 | -- |
53 | 2.20.1 | 36 | 2.25.1 |
54 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210419202257.161730-17-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-46-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/translate.c | 4 ++-- | 8 | linux-user/elfload.c | 20 ++++++++++++++++++++ |
9 | 1 file changed, 2 insertions(+), 2 deletions(-) | 9 | 1 file changed, 20 insertions(+) |
10 | 10 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 11 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 13 | --- a/linux-user/elfload.c |
14 | +++ b/target/arm/translate.c | 14 | +++ b/linux-user/elfload.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) | 15 | @@ -XXX,XX +XXX,XX @@ enum { |
16 | addr = load_reg(s, a->rn); | 16 | ARM_HWCAP2_A64_RNG = 1 << 16, |
17 | tmp = load_reg(s, a->rt); | 17 | ARM_HWCAP2_A64_BTI = 1 << 17, |
18 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); | 18 | ARM_HWCAP2_A64_MTE = 1 << 18, |
19 | - gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop); | 19 | + ARM_HWCAP2_A64_ECV = 1 << 19, |
20 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); | 20 | + ARM_HWCAP2_A64_AFP = 1 << 20, |
21 | disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); | 21 | + ARM_HWCAP2_A64_RPRES = 1 << 21, |
22 | 22 | + ARM_HWCAP2_A64_MTE3 = 1 << 22, | |
23 | tcg_temp_free_i32(tmp); | 23 | + ARM_HWCAP2_A64_SME = 1 << 23, |
24 | @@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) | 24 | + ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, |
25 | 25 | + ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, | |
26 | addr = load_reg(s, a->rn); | 26 | + ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, |
27 | tmp = tcg_temp_new_i32(); | 27 | + ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, |
28 | - gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop); | 28 | + ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, |
29 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); | 29 | + ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, |
30 | disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); | 30 | + ARM_HWCAP2_A64_SME_FA64 = 1 << 30, |
31 | tcg_temp_free_i32(addr); | 31 | }; |
32 | 32 | ||
33 | #define ELF_HWCAP get_elf_hwcap() | ||
34 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void) | ||
35 | GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); | ||
36 | GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); | ||
37 | GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); | ||
38 | + GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | | ||
39 | + ARM_HWCAP2_A64_SME_F32F32 | | ||
40 | + ARM_HWCAP2_A64_SME_B16F32 | | ||
41 | + ARM_HWCAP2_A64_SME_F16F32 | | ||
42 | + ARM_HWCAP2_A64_SME_I8I32)); | ||
43 | + GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); | ||
44 | + GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); | ||
45 | + GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); | ||
46 | |||
47 | return hwcaps; | ||
48 | } | ||
33 | -- | 49 | -- |
34 | 2.20.1 | 50 | 2.25.1 |
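(Usage sketch only, not part of the patch: a guest program observes these bits via getauxval(3). The local fallback define assumes a libc header that predates SME; the value matches ARM_HWCAP2_A64_SME above.)

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_SME
    # define HWCAP2_SME (1 << 23)    /* same bit as ARM_HWCAP2_A64_SME */
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("SME %s reported via AT_HWCAP2\n",
               (hwcap2 & HWCAP2_SME) ? "is" : "is not");
        return 0;
    }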
35 | |||