[Previous cover letter (pull-target-arm-20210525), shown by the diff view for comparison:]

Big fat pullreq this time around, because it has all of RTH's
SVE2 emulation patchset in it.

-- PMM

The following changes since commit 0dab1d36f55c3ed649bb8e4c74b9269ef3a63049:

  Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging (2021-05-24 15:48:08 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210525

for you to fetch changes up to f8680aaa6e5bfc6022b75157c23db7d2ea98ab11:

  target/arm: Enable SVE2 and related extensions (2021-05-25 16:01:44 +0100)

----------------------------------------------------------------
target-arm queue:
 * Implement SVE2 emulation
 * Implement integer matrix multiply accumulate
 * Implement FEAT_TLBIOS
 * Implement FEAT_TLBRANGE
 * disas/libvixl: Protect C system header for C++ compiler
 * Use correct SP in M-profile exception return
 * AN524, AN547: Correct modelling of internal SRAMs
 * hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic
 * hw/arm/smmuv3: Another range invalidation fix

----------------------------------------------------------------
Eric Auger (1):
      hw/arm/smmuv3: Another range invalidation fix

Peter Maydell (8):
      hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic
      hw/arm/mps2-tz: Don't duplicate modelling of SRAM in AN524
      hw/arm/mps2-tz: Make SRAM_ADDR_WIDTH board-specific
      hw/arm/armsse.c: Correct modelling of SSE-300 internal SRAMs
      hw/arm/armsse: Convert armsse_realize() to use ERRP_GUARD
      hw/arm/mps2-tz: Allow board to specify a boot RAM size
      hw/arm: Model TCMs in the SSE-300, not the AN547
      target/arm: Use correct SP in M-profile exception return

Philippe Mathieu-Daudé (1):
      disas/libvixl: Protect C system header for C++ compiler

Rebecca Cran (3):
      target/arm: Add support for FEAT_TLBIRANGE
      target/arm: Add support for FEAT_TLBIOS
      target/arm: set ID_AA64ISAR0.TLB to 2 for max AARCH64 CPU type

Richard Henderson (84):
      accel/tcg: Replace g_new() + memcpy() by g_memdup()
      accel/tcg: Pass length argument to tlb_flush_range_locked()
      accel/tlb: Rename TLBFlushPageBitsByMMUIdxData -> TLBFlushRangeData
      accel/tcg: Remove {encode,decode}_pbm_to_runon
      accel/tcg: Add tlb_flush_range_by_mmuidx()
      accel/tcg: Add tlb_flush_range_by_mmuidx_all_cpus()
      accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced()
      accel/tcg: Rename tlb_flush_[page_bits -> range]_by_mmuidx_async_0
      accel/tlb: Rename tlb_flush_[page_bits -> range]_by_mmuidx_async_[2 -> 1]
      target/arm: Add ID_AA64ZFR0 fields and isar_feature_aa64_sve2
      target/arm: Implement SVE2 Integer Multiply - Unpredicated
      target/arm: Implement SVE2 integer pairwise add and accumulate long
      target/arm: Implement SVE2 integer unary operations (predicated)
      target/arm: Split out saturating/rounding shifts from neon
      target/arm: Implement SVE2 saturating/rounding bitwise shift left (predicated)
      target/arm: Implement SVE2 integer halving add/subtract (predicated)
      target/arm: Implement SVE2 integer pairwise arithmetic
      target/arm: Implement SVE2 saturating add/subtract (predicated)
      target/arm: Implement SVE2 integer add/subtract long
      target/arm: Implement SVE2 integer add/subtract interleaved long
      target/arm: Implement SVE2 integer add/subtract wide
      target/arm: Implement SVE2 integer multiply long
      target/arm: Implement SVE2 PMULLB, PMULLT
      target/arm: Implement SVE2 bitwise shift left long
      target/arm: Implement SVE2 bitwise exclusive-or interleaved
      target/arm: Implement SVE2 bitwise permute
      target/arm: Implement SVE2 complex integer add
      target/arm: Implement SVE2 integer absolute difference and accumulate long
      target/arm: Implement SVE2 integer add/subtract long with carry
      target/arm: Implement SVE2 bitwise shift right and accumulate
      target/arm: Implement SVE2 bitwise shift and insert
      target/arm: Implement SVE2 integer absolute difference and accumulate
      target/arm: Implement SVE2 saturating extract narrow
      target/arm: Implement SVE2 SHRN, RSHRN
      target/arm: Implement SVE2 SQSHRUN, SQRSHRUN
      target/arm: Implement SVE2 UQSHRN, UQRSHRN
      target/arm: Implement SVE2 SQSHRN, SQRSHRN
      target/arm: Implement SVE2 WHILEGT, WHILEGE, WHILEHI, WHILEHS
      target/arm: Implement SVE2 WHILERW, WHILEWR
      target/arm: Implement SVE2 bitwise ternary operations
      target/arm: Implement SVE2 saturating multiply-add long
      target/arm: Implement SVE2 saturating multiply-add high
      target/arm: Implement SVE2 integer multiply-add long
      target/arm: Implement SVE2 complex integer multiply-add
      target/arm: Implement SVE2 XAR
      target/arm: Use correct output type for gvec_sdot_*_b
      target/arm: Pass separate addend to {U, S}DOT helpers
      target/arm: Pass separate addend to FCMLA helpers
      target/arm: Split out formats for 2 vectors + 1 index
      target/arm: Split out formats for 3 vectors + 1 index
      target/arm: Implement SVE2 integer multiply (indexed)
      target/arm: Implement SVE2 integer multiply-add (indexed)
      target/arm: Implement SVE2 saturating multiply-add high (indexed)
      target/arm: Implement SVE2 saturating multiply-add (indexed)
      target/arm: Implement SVE2 saturating multiply (indexed)
      target/arm: Implement SVE2 signed saturating doubling multiply high
      target/arm: Implement SVE2 saturating multiply high (indexed)
      target/arm: Implement SVE2 multiply-add long (indexed)
      target/arm: Implement SVE2 integer multiply long (indexed)
      target/arm: Implement SVE2 complex integer multiply-add (indexed)
      target/arm: Implement SVE2 complex integer dot product
      target/arm: Macroize helper_gvec_{s,u}dot_{b,h}
      target/arm: Macroize helper_gvec_{s,u}dot_idx_{b,h}
      target/arm: Implement SVE mixed sign dot product (indexed)
      target/arm: Implement SVE mixed sign dot product
      target/arm: Implement SVE2 crypto unary operations
      target/arm: Implement SVE2 crypto destructive binary operations
      target/arm: Implement SVE2 crypto constructive binary operations
      target/arm: Implement SVE2 FCVTNT
      target/arm: Share table of sve load functions
      target/arm: Tidy do_ldrq
      target/arm: Implement SVE2 LD1RO
      target/arm: Implement 128-bit ZIP, UZP, TRN
      target/arm: Move endian adjustment macros to vec_internal.h
      target/arm: Implement aarch64 SUDOT, USDOT
      target/arm: Split out do_neon_ddda_fpst
      target/arm: Remove unused fpst from VDOT_scalar
      target/arm: Fix decode for VDOT (indexed)
      target/arm: Split out do_neon_ddda
      target/arm: Split decode of VSDOT and VUDOT
      target/arm: Implement aarch32 VSUDOT, VUSDOT
      target/arm: Implement integer matrix multiply accumulate
      linux-user/aarch64: Enable hwcap bits for sve2 and related extensions
      target/arm: Enable SVE2 and related extensions

Stephen Long (17):
      target/arm: Implement SVE2 floating-point pairwise
      target/arm: Implement SVE2 MATCH, NMATCH
      target/arm: Implement SVE2 ADDHNB, ADDHNT
      target/arm: Implement SVE2 RADDHNB, RADDHNT
      target/arm: Implement SVE2 SUBHNB, SUBHNT
      target/arm: Implement SVE2 RSUBHNB, RSUBHNT
      target/arm: Implement SVE2 HISTCNT, HISTSEG
      target/arm: Implement SVE2 scatter store insns
      target/arm: Implement SVE2 gather load insns
      target/arm: Implement SVE2 FMMLA
      target/arm: Implement SVE2 SPLICE, EXT
      target/arm: Implement SVE2 TBL, TBX
      target/arm: Implement SVE2 FCVTLT
      target/arm: Implement SVE2 FCVTXNT, FCVTX
      target/arm: Implement SVE2 FLOGB
      target/arm: Implement SVE2 bitwise shift immediate
      target/arm: Implement SVE2 fp multiply-add long

 disas/libvixl/vixl/code-buffer.h | 2 +-
 disas/libvixl/vixl/globals.h | 16 +-
 disas/libvixl/vixl/invalset.h | 2 +-
 disas/libvixl/vixl/platform.h | 2 +
 disas/libvixl/vixl/utils.h | 2 +-
 include/exec/exec-all.h | 44 +
 include/hw/arm/armsse.h | 2 +
 target/arm/cpu.h | 76 +
 target/arm/helper-sve.h | 722 ++++++++-
 target/arm/helper.h | 110 +-
 target/arm/translate-a64.h | 3 +
 target/arm/vec_internal.h | 167 ++
 target/arm/neon-shared.decode | 24 +-
 target/arm/sve.decode | 574 ++++++-
 accel/tcg/cputlb.c | 231 ++-
 hw/arm/armsse.c | 35 +-
 hw/arm/mps2-tz.c | 39 +-
 hw/arm/smmuv3.c | 50 +-
 hw/intc/arm_gicv3_cpuif.c | 48 +-
 linux-user/elfload.c | 10 +
 target/arm/cpu.c | 2 +
 target/arm/cpu64.c | 14 +
 target/arm/cpu_tcg.c | 1 +
 target/arm/helper.c | 327 +++-
 target/arm/kvm64.c | 21 +-
 target/arm/m_helper.c | 3 +-
 target/arm/neon_helper.c | 507 +----
 target/arm/sve_helper.c | 2110 +++++++++++++++++++++++--
 target/arm/translate-a64.c | 111 +-
 target/arm/translate-neon.c | 231 +--
 target/arm/translate-sve.c | 3200 +++++++++++++++++++++++++++++++++++---
 target/arm/vec_helper.c | 887 ++++++++---
 disas/libvixl/vixl/utils.cc | 2 +-
 33 files changed, 8275 insertions(+), 1300 deletions(-)

[Current cover letter (pull-target-arm-20220711):]

I don't have anything else queued up at the moment, so this is just
Richard's SME patches.

-- PMM

The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3:

  Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711

for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8:

  linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100)

----------------------------------------------------------------
target-arm:
 * Implement SME emulation, for both system and linux-user

----------------------------------------------------------------
Richard Henderson (45):
      target/arm: Handle SME in aarch64_cpu_dump_state
      target/arm: Add infrastructure for disas_sme
      target/arm: Trap non-streaming usage when Streaming SVE is active
      target/arm: Mark ADR as non-streaming
      target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming
      target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming
      target/arm: Mark PMULL, FMMLA as non-streaming
      target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming
      target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming
      target/arm: Mark string/histo/crypto as non-streaming
      target/arm: Mark gather/scatter load/store as non-streaming
      target/arm: Mark gather prefetch as non-streaming
      target/arm: Mark LDFF1 and LDNF1 as non-streaming
      target/arm: Mark LD1RO as non-streaming
      target/arm: Add SME enablement checks
      target/arm: Handle SME in sve_access_check
      target/arm: Implement SME RDSVL, ADDSVL, ADDSPL
      target/arm: Implement SME ZERO
      target/arm: Implement SME MOVA
      target/arm: Implement SME LD1, ST1
      target/arm: Export unpredicated ld/st from translate-sve.c
      target/arm: Implement SME LDR, STR
      target/arm: Implement SME ADDHA, ADDVA
      target/arm: Implement FMOPA, FMOPS (non-widening)
      target/arm: Implement BFMOPA, BFMOPS
      target/arm: Implement FMOPA, FMOPS (widening)
      target/arm: Implement SME integer outer product
      target/arm: Implement PSEL
      target/arm: Implement REVD
      target/arm: Implement SCLAMP, UCLAMP
      target/arm: Reset streaming sve state on exception boundaries
      target/arm: Enable SME for -cpu max
      linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
      linux-user/aarch64: Reset PSTATE.SM on syscalls
      linux-user/aarch64: Add SM bit to SVE signal context
      linux-user/aarch64: Tidy target_restore_sigframe error return
      linux-user/aarch64: Do not allow duplicate or short sve records
      linux-user/aarch64: Verify extra record lock succeeded
      linux-user/aarch64: Move sve record checks into restore
      linux-user/aarch64: Implement SME signal handling
      linux-user: Rename sve prctls
      linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
      target/arm: Only set ZEN in reset if SVE present
      target/arm: Enable SME for user-only
      linux-user/aarch64: Add SME related hwcap entries

 docs/system/arm/emulation.rst | 4 +
 linux-user/aarch64/target_cpu.h | 5 +-
 linux-user/aarch64/target_prctl.h | 62 +-
 target/arm/cpu.h | 7 +
 target/arm/helper-sme.h | 126 ++++
 target/arm/helper-sve.h | 4 +
 target/arm/helper.h | 18 +
 target/arm/translate-a64.h | 45 ++
 target/arm/translate.h | 16 +
 target/arm/sme-fa64.decode | 60 ++
 target/arm/sme.decode | 88 +++
 target/arm/sve.decode | 41 +-
 linux-user/aarch64/cpu_loop.c | 9 +
 linux-user/aarch64/signal.c | 243 ++++++--
 linux-user/elfload.c | 20 +
 linux-user/syscall.c | 28 +-
 target/arm/cpu.c | 35 +-
 target/arm/cpu64.c | 11 +
 target/arm/helper.c | 56 +-
 target/arm/sme_helper.c | 1140 +++++++++++++++++++++++++++++++++++++
 target/arm/sve_helper.c | 28 +
 target/arm/translate-a64.c | 103 +++-
 target/arm/translate-sme.c | 373 ++++++++++++
 target/arm/translate-sve.c | 393 ++++++++++---
 target/arm/translate-vfp.c | 12 +
 target/arm/translate.c | 2 +
 target/arm/vec_helper.c | 24 +
 target/arm/meson.build | 3 +
 28 files changed, 2821 insertions(+), 135 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c
Deleted patch

From: Eric Auger <eric.auger@redhat.com>

6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
failed to completely fix the misalignment issues with range
invalidation. For instance, invalidation patterns like "invalidate 32
4kB pages starting from 0xff395000" are still not correctly handled:
the previous fix only made sure the number of invalidated pages was a
power of 2, but did not handle a start address that is not aligned to
the range. This can be noticed when booting a Fedora 33 guest with a
protected virtio-blk-pci device.
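
As an aside, the splitting can be illustrated with a small standalone
program (not QEMU code: aligned_pow2_mask() below is a simplified
stand-in for QEMU's dma_aligned_pow2_mask(), and the range is the one
from the example above):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest mask m = 2^n - 1 such that start is (m+1)-aligned and
 * start + m <= end (end is inclusive). */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;
    uint64_t len = end - start + 1;

    /* Round len down to a power of two by clearing low set bits. */
    while (len & (len - 1)) {
        len &= len - 1;
    }
    return align_mask < len - 1 ? align_mask : len - 1;
}

int main(void)
{
    /* "invalidate 32 4kB pages starting from 0xff395000" */
    uint64_t addr = 0xff395000;
    uint64_t end = addr + (32ULL << 12) - 1;

    while (addr != end + 1) {
        uint64_t mask = aligned_pow2_mask(addr, end);

        /* Prints chunks of 1, 2, 8, 16, 4 and 1 pages: each one a
         * power of two in size and naturally aligned, as RIL wants. */
        printf("inval 0x%" PRIx64 "..0x%" PRIx64 " (%" PRIu64 " pages)\n",
               addr, addr + mask, (mask + 1) >> 12);
        addr += mask + 1;
    }
    return 0;
}

The key difference from the buggy code is that the mask is computed
from the address itself rather than from a page index that starts at
zero, so each emitted chunk is naturally aligned as well as a power of
two in size.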

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3.c | 50 +++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
 
 static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
 {
-    uint8_t scale = 0, num = 0, ttl = 0;
-    dma_addr_t addr = CMD_ADDR(cmd);
+    dma_addr_t end, addr = CMD_ADDR(cmd);
     uint8_t type = CMD_TYPE(cmd);
     uint16_t vmid = CMD_VMID(cmd);
+    uint8_t scale = CMD_SCALE(cmd);
+    uint8_t num = CMD_NUM(cmd);
+    uint8_t ttl = CMD_TTL(cmd);
     bool leaf = CMD_LEAF(cmd);
     uint8_t tg = CMD_TG(cmd);
-    uint64_t first_page = 0, last_page;
-    uint64_t num_pages = 1;
+    uint64_t num_pages;
+    uint8_t granule;
     int asid = -1;
 
-    if (tg) {
-        scale = CMD_SCALE(cmd);
-        num = CMD_NUM(cmd);
-        ttl = CMD_TTL(cmd);
-        num_pages = (num + 1) * BIT_ULL(scale);
-    }
-
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
 
+    if (!tg) {
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
+        return;
+    }
+
+    /* RIL in use */
+
+    num_pages = (num + 1) * BIT_ULL(scale);
+    granule = tg * 2 + 10;
+
     /* Split invalidations into ^2 range invalidations */
-    last_page = num_pages - 1;
-    while (num_pages) {
-        uint8_t granule = tg * 2 + 10;
-        uint64_t mask, count;
+    end = addr + (num_pages << granule) - 1;
 
-        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
-        count = mask + 1;
+    while (addr != end + 1) {
+        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
-        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
-        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
-
-        num_pages -= count;
-        first_page += count;
-        addr += count * BIT_ULL(granule);
+        num_pages = (mask + 1) >> granule;
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+        addr += mask + 1;
     }
 }
 
--
2.20.1
Deleted patch

In icc_eoir_write() we assume that we can identify the group of the
IRQ being completed based purely on which register is being written
to and the current CPU state, and that "CPU state matches group
indicated by register" is the only necessary access check.

This isn't correct: if the CPU is not in Secure state then EOIR1 will
only complete Group 1 NS IRQs, but if the CPU is in EL3 it can
complete both Group 1 S and Group 1 NS IRQs. (The pseudocode for
ICC_EOIR1_EL1 makes this clear.) We were also missing the logic to
prevent EOIR0 writes completing G0 IRQs when they should not.

Rearrange the logic to first identify the group of the current
highest priority interrupt and then look at whether we should
complete it or ignore the access based on which register was accessed
and the state of the CPU. The resulting behavioural change is:
 * EL3 can now complete G1NS interrupts
 * G0 interrupt completion is now ignored if the GIC
   and the CPU have the security extension enabled and
   the CPU is not secure
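
For illustration, the access checks implemented below can be restated
as a pure decision function; this sketch is not QEMU code, and its
names and flattened parameters are hypothetical:

#include <assert.h>
#include <stdbool.h>

typedef enum { G0, G1, G1NS } Group;

/* Should a write to EOIR0/EOIR1 complete an active interrupt of
 * group grp? ds is GICD_CTLR.DS, i.e. "security disabled". */
static bool eoir_completes(Group grp, bool is_eoir0, bool secure,
                           bool have_el3, bool el3_or_mon, bool ds)
{
    switch (grp) {
    case G0:
        /* EOIR0 only; refused for Non-secure writes when the GIC
         * and the CPU both implement the security extension. */
        return is_eoir0 && !(!ds && have_el3 && !secure);
    case G1:
        /* EOIR1 only, and only from Secure state. */
        return !is_eoir0 && secure;
    case G1NS:
        /* EOIR1 only; from Non-secure state or from EL3/Monitor
         * (the case this patch newly allows). */
        return !is_eoir0 && (!secure || el3_or_mon);
    }
    return false;
}

int main(void)
{
    /* The two behavioural changes listed above: */
    assert(eoir_completes(G1NS, false, true, true, true, false));
    assert(!eoir_completes(G0, true, false, true, false, false));
    return 0;
}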

Reported-by: Chan Kim <ckim@etri.re.kr>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510150016.24910-1-peter.maydell@linaro.org
---
 hw/intc/arm_gicv3_cpuif.c | 48 ++++++++++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 16 deletions(-)

diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
     GICv3CPUState *cs = icc_cs_from_env(env);
     int irq = value & 0xffffff;
     int grp;
+    bool is_eoir0 = ri->crm == 8;
 
-    if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
+    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
         icv_eoir_write(env, ri, value);
         return;
     }
 
-    trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
+    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                                gicv3_redist_affid(cs), value);
 
-    if (ri->crm == 8) {
-        /* EOIR0 */
-        grp = GICV3_G0;
-    } else {
-        /* EOIR1 */
-        if (arm_is_secure(env)) {
-            grp = GICV3_G1;
-        } else {
-            grp = GICV3_G1NS;
-        }
-    }
-
     if (irq >= cs->gic->num_irq) {
         /* This handles two cases:
          * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
@@ -XXX,XX +XXX,XX @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    if (icc_highest_active_group(cs) != grp) {
-        return;
+    grp = icc_highest_active_group(cs);
+    switch (grp) {
+    case GICV3_G0:
+        if (!is_eoir0) {
+            return;
+        }
+        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
+            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
+            return;
+        }
+        break;
+    case GICV3_G1:
+        if (is_eoir0) {
+            return;
+        }
+        if (!arm_is_secure(env)) {
+            return;
+        }
+        break;
+    case GICV3_G1NS:
+        if (is_eoir0) {
+            return;
+        }
+        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
     }
 
     icc_drop_prio(cs, grp);
--
2.20.1
Deleted patch

The SRAM at 0x2000_0000 is part of the SSE-200 itself, and we model
it that way in hw/arm/armsse.c (along with the associated MPCs). We
incorrectly also added an entry to the RAMInfo array for the AN524 in
hw/arm/mps2-tz.c, which was pointless because the CPU would never see
it. Delete it.

The bug had no guest-visible effect because devices in the SSE-200
take priority over those in the board model (armsse.c maps
s->board_memory at priority -2).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-2-peter.maydell@linaro.org
---
 hw/arm/mps2-tz.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ static const RAMInfo an524_raminfo[] = { {
         .size = 512 * KiB,
         .mpc = 0,
         .mrindex = 0,
-    }, {
-        .name = "sram",
-        .base = 0x20000000,
-        .size = 32 * 4 * KiB,
-        .mpc = -1,
-        .mrindex = 1,
     }, {
         /* We don't model QSPI flash yet; for now expose it as simple ROM */
         .name = "QSPI",
         .base = 0x28000000,
         .size = 8 * MiB,
         .mpc = 1,
-        .mrindex = 2,
+        .mrindex = 1,
         .flags = IS_ROM,
     }, {
         .name = "DDR",
--
2.20.1
Deleted patch

The AN547 sets the SRAM_ADDR_WIDTH for the SSE-300 to 21;
since this is not the default value for the SSE-300, model this
in mps2-tz.c as a per-board value.

Reported-by: Devaraj Ranganna <devaraj.ranganna@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-3-peter.maydell@linaro.org
---
 hw/arm/mps2-tz.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass {
     int numirq; /* Number of external interrupts */
     int uart_overflow_irq; /* number of the combined UART overflow IRQ */
     uint32_t init_svtor; /* init-svtor setting for SSE */
+    uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
     const RAMInfo *raminfo;
     const char *armsse_type;
 };
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
                              OBJECT(system_memory), &error_abort);
     qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq);
     qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor);
+    qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
     qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
     qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
     sysbus_realize(SYS_BUS_DEVICE(&mms->iotkit), &error_fatal);
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
     mmc->numirq = 92;
     mmc->uart_overflow_irq = 47;
     mmc->init_svtor = 0x10000000;
+    mmc->sram_addr_width = 15;
     mmc->raminfo = an505_raminfo;
     mmc->armsse_type = TYPE_IOTKIT;
     mps2tz_set_default_ram_info(mmc);
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
     mmc->numirq = 92;
     mmc->uart_overflow_irq = 47;
     mmc->init_svtor = 0x10000000;
+    mmc->sram_addr_width = 15;
     mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
     mmc->armsse_type = TYPE_SSE200;
     mps2tz_set_default_ram_info(mmc);
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
     mmc->numirq = 95;
     mmc->uart_overflow_irq = 47;
     mmc->init_svtor = 0x10000000;
+    mmc->sram_addr_width = 15;
     mmc->raminfo = an524_raminfo;
     mmc->armsse_type = TYPE_SSE200;
     mps2tz_set_default_ram_info(mmc);
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
     mmc->numirq = 96;
     mmc->uart_overflow_irq = 48;
     mmc->init_svtor = 0x00000000;
+    mmc->sram_addr_width = 21;
     mmc->raminfo = an547_raminfo;
     mmc->armsse_type = TYPE_SSE300;
     mps2tz_set_default_ram_info(mmc);
--
2.20.1
Deleted patch

The SSE-300 was not correctly modelling its internal SRAMs:
 * the SRAM address width default is 18
 * the SRAM is mapped at 0x2100_0000, not 0x2000_0000 like
   the SSE-200 and IoTKit

The default address width is no longer guest-visible since
our only SSE-300 board sets it explicitly to a non-default
value, but following the hardware's default will help for
any future boards we need to model.
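
To make the resulting layout concrete, here is a small standalone
calculation. The bank counts and bases are taken from the variant
table in the diff below; the 18-bit width is the new SSE-300 default,
while the 15-bit widths for the IoTKit and SSE-200 are their defaults
elsewhere in armsse.c and are an assumption here, not shown in this
diff:

#include <stdio.h>

int main(void)
{
    const struct {
        const char *name;
        unsigned banks;
        unsigned base;
        unsigned addr_width;    /* bank size is 1 << addr_width */
    } variants[] = {
        { "IoTKit",  1, 0x20000000, 15 },
        { "SSE-200", 4, 0x20000000, 15 },
        { "SSE-300", 2, 0x21000000, 18 },
    };

    for (unsigned v = 0; v < 3; v++) {
        unsigned bank_size = 1u << variants[v].addr_width;

        for (unsigned i = 0; i < variants[v].banks; i++) {
            /* Mirrors armsse_realize(): base + i * sram_bank_size */
            printf("%-8s bank %u: 0x%08x (%u KiB)\n", variants[v].name,
                   i, variants[v].base + i * bank_size, bank_size >> 10);
        }
    }
    return 0;
}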

Reported-by: Devaraj Ranganna <devaraj.ranganna@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-4-peter.maydell@linaro.org
---
 hw/arm/armsse.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -XXX,XX +XXX,XX @@ struct ARMSSEInfo {
     const char *cpu_type;
     uint32_t sse_version;
     int sram_banks;
+    uint32_t sram_bank_base;
     int num_cpus;
     uint32_t sys_version;
     uint32_t iidr;
@@ -XXX,XX +XXX,XX @@ static Property sse300_properties[] = {
     DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
                      MemoryRegion *),
     DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
-    DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
+    DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 18),
     DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
     DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
     DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .sse_version = ARMSSE_IOTKIT,
         .cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"),
         .sram_banks = 1,
+        .sram_bank_base = 0x20000000,
         .num_cpus = 1,
         .sys_version = 0x41743,
         .iidr = 0,
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .sse_version = ARMSSE_SSE200,
         .cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"),
         .sram_banks = 4,
+        .sram_bank_base = 0x20000000,
         .num_cpus = 2,
         .sys_version = 0x22041743,
         .iidr = 0,
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .sse_version = ARMSSE_SSE300,
         .cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"),
         .sram_banks = 2,
+        .sram_bank_base = 0x21000000,
         .num_cpus = 1,
         .sys_version = 0x7e00043b,
         .iidr = 0x74a0043b,
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
         /* Map the upstream end of the MPC into the right place... */
         sbd_mpc = SYS_BUS_DEVICE(&s->mpc[i]);
         memory_region_add_subregion(&s->container,
-                                    0x20000000 + i * sram_bank_size,
+                                    info->sram_bank_base + i * sram_bank_size,
                                     sysbus_mmio_get_region(sbd_mpc, 1));
         /* ...and its register interface */
         memory_region_add_subregion(&s->container, 0x50083000 + i * 0x1000,
--
2.20.1
Deleted patch

Convert armsse_realize() to use ERRP_GUARD(), following
the rules in include/qapi/error.h.
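
For reference, a sketch of the pattern being applied (ERRP_GUARD(),
error_propagate() and Error come from include/qapi/error.h;
my_obj_realize() and do_init() are placeholder names). Once the guard
has run, errp is guaranteed to be non-NULL, so *errp can be tested
directly and the local Error * plus error_propagate() dance goes away:

/* Before: */
static void my_obj_realize(DeviceState *dev, Error **errp)
{
    Error *err = NULL;

    do_init(dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
}

/* After: */
static void my_obj_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();

    do_init(dev, errp);
    if (*errp) {    /* safe: the guard guarantees errp != NULL */
        return;
    }
}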

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-5-peter.maydell@linaro.org
---
 hw/arm/armsse.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
     const ARMSSEDeviceInfo *devinfo;
     int i;
     MemoryRegion *mr;
-    Error *err = NULL;
     SysBusDevice *sbd_apb_ppc0;
     SysBusDevice *sbd_secctl;
     DeviceState *dev_apb_ppc0;
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
     DeviceState *dev_splitter;
     uint32_t addr_width_max;
 
+    ERRP_GUARD();
+
     if (!s->board_memory) {
         error_setg(errp, "memory property was not set");
         return;
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
         uint32_t sram_bank_size = 1 << s->sram_addr_width;
 
         memory_region_init_ram(&s->sram[i], NULL, ramname,
-                               sram_bank_size, &err);
+                               sram_bank_size, errp);
         g_free(ramname);
-        if (err) {
-            error_propagate(errp, err);
+        if (*errp) {
             return;
         }
         object_property_set_link(OBJECT(&s->mpc[i]), "downstream",
--
2.20.1
Deleted patch

Currently we model the ITCM in the AN547's RAMInfo list. This is incorrect
because this RAM is really a part of the SSE-300. We can't just delete
it from the RAMInfo list, though, because this would make boot_ram_size()
assert because it wouldn't be able to find an entry in the list covering
guest address 0.

Allow a board to specify a boot RAM size manually if it doesn't have
any RAM itself at address 0 and is relying on the SSE for that, and
set the correct value for the AN547. The other boards can continue
to use the "look it up from the RAMInfo list" logic.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-6-peter.maydell@linaro.org
---
 hw/arm/mps2-tz.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass {
     uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
     const RAMInfo *raminfo;
     const char *armsse_type;
+    uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */
 };
 
 struct MPS2TZMachineState {
@@ -XXX,XX +XXX,XX @@ static uint32_t boot_ram_size(MPS2TZMachineState *mms)
     const RAMInfo *p;
     MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
 
+    /*
+     * Use a per-board specification (for when the boot RAM is in
+     * the SSE and so doesn't have a RAMInfo list entry)
+     */
+    if (mmc->boot_ram_size) {
+        return mmc->boot_ram_size;
+    }
+
     for (p = mmc->raminfo; p->name; p++) {
         if (p->base == boot_mem_base(mms)) {
             return p->size;
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
     mmc->sram_addr_width = 15;
     mmc->raminfo = an505_raminfo;
     mmc->armsse_type = TYPE_IOTKIT;
+    mmc->boot_ram_size = 0;
     mps2tz_set_default_ram_info(mmc);
 }
 
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
     mmc->sram_addr_width = 15;
     mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
     mmc->armsse_type = TYPE_SSE200;
+    mmc->boot_ram_size = 0;
     mps2tz_set_default_ram_info(mmc);
 }
 
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
     mmc->sram_addr_width = 15;
     mmc->raminfo = an524_raminfo;
     mmc->armsse_type = TYPE_SSE200;
+    mmc->boot_ram_size = 0;
     mps2tz_set_default_ram_info(mmc);
 
     object_class_property_add_str(oc, "remap", mps2_get_remap, mps2_set_remap);
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
     mmc->sram_addr_width = 21;
     mmc->raminfo = an547_raminfo;
     mmc->armsse_type = TYPE_SSE300;
+    mmc->boot_ram_size = 512 * KiB;
     mps2tz_set_default_ram_info(mmc);
 }
 
--
2.20.1
Deleted patch

The SSE-300 has an ITCM at 0x0000_0000 and a DTCM at 0x2000_0000.
Currently we model these in the AN547 board, but this is conceptually
wrong, because they are a part of the SSE-300 itself. Move the
modelling of the TCMs out of mps2-tz.c into armsse.c.

This has no guest-visible effects.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510190844.17799-7-peter.maydell@linaro.org
---
 include/hw/arm/armsse.h | 2 ++
 hw/arm/armsse.c | 19 +++++++++++++++++++
 hw/arm/mps2-tz.c | 12 ------------
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/armsse.h
+++ b/include/hw/arm/armsse.h
@@ -XXX,XX +XXX,XX @@ struct ARMSSE {
     MemoryRegion alias2;
     MemoryRegion alias3[SSE_MAX_CPUS];
     MemoryRegion sram[MAX_SRAM_BANKS];
+    MemoryRegion itcm;
+    MemoryRegion dtcm;
 
     qemu_irq *exp_irqs[SSE_MAX_CPUS];
     qemu_irq ppc0_irq;
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/log.h"
 #include "qemu/module.h"
 #include "qemu/bitops.h"
+#include "qemu/units.h"
 #include "qapi/error.h"
 #include "trace.h"
 #include "hw/sysbus.h"
@@ -XXX,XX +XXX,XX @@ struct ARMSSEInfo {
     bool has_cpuid;
     bool has_cpu_pwrctrl;
     bool has_sse_counter;
+    bool has_tcms;
     Property *props;
     const ARMSSEDeviceInfo *devinfo;
     const bool *irq_is_common;
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .has_cpuid = false,
         .has_cpu_pwrctrl = false,
         .has_sse_counter = false,
+        .has_tcms = false,
         .props = iotkit_properties,
         .devinfo = iotkit_devices,
         .irq_is_common = sse200_irq_is_common,
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .has_cpuid = true,
         .has_cpu_pwrctrl = false,
         .has_sse_counter = false,
+        .has_tcms = false,
         .props = sse200_properties,
         .devinfo = sse200_devices,
         .irq_is_common = sse200_irq_is_common,
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
         .has_cpuid = true,
         .has_cpu_pwrctrl = true,
         .has_sse_counter = true,
+        .has_tcms = true,
         .props = sse300_properties,
         .devinfo = sse300_devices,
         .irq_is_common = sse300_irq_is_common,
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
                                     sysbus_mmio_get_region(sbd, 1));
     }
 
+    if (info->has_tcms) {
+        /* The SSE-300 has an ITCM at 0x0000_0000 and a DTCM at 0x2000_0000 */
+        memory_region_init_ram(&s->itcm, NULL, "sse300-itcm", 512 * KiB, errp);
+        if (*errp) {
+            return;
+        }
+        memory_region_init_ram(&s->dtcm, NULL, "sse300-dtcm", 512 * KiB, errp);
+        if (*errp) {
+            return;
+        }
+        memory_region_add_subregion(&s->container, 0x00000000, &s->itcm);
+        memory_region_add_subregion(&s->container, 0x20000000, &s->dtcm);
+    }
+
     /* Devices behind APB PPC0:
      * 0x40000000: timer0
      * 0x40001000: timer1
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ static const RAMInfo an524_raminfo[] = { {
 };
 
 static const RAMInfo an547_raminfo[] = { {
-        .name = "itcm",
-        .base = 0x00000000,
-        .size = 512 * KiB,
-        .mpc = -1,
-        .mrindex = 0,
-    }, {
         .name = "sram",
         .base = 0x01000000,
         .size = 2 * MiB,
         .mpc = 0,
         .mrindex = 1,
-    }, {
-        .name = "dtcm",
-        .base = 0x20000000,
-        .size = 4 * 128 * KiB,
-        .mpc = -1,
-        .mrindex = 2,
     }, {
         .name = "sram 2",
         .base = 0x21000000,
--
2.20.1
Deleted patch

When an M-profile CPU is restoring registers from the stack on
exception return, the stack pointer to use is determined based on
bits in the magic exception return type value. We were not getting
this logic entirely correct.

Whether we use one of the Secure stack pointers or one of the
Non-Secure stack pointers depends on the EXCRET.S bit. However,
whether we use the MSP or the PSP then depends on the SPSEL bit in
either the CONTROL_S or CONTROL_NS register. We were incorrectly
selecting MSP vs PSP based on the EXCRET.SPSEL bit.

(In the pseudocode this is in the PopStack() function, which calls
LookUpSp_with_security_mode() which in turn looks at the relevant
CONTROL.SPSEL bit.)

The buggy behaviour wasn't noticeable in most cases, because we write
EXCRET.SPSEL to the CONTROL.SPSEL bit for the S/NS register selected
by EXCRET.ES, so we only do the wrong thing when EXCRET.S and
EXCRET.ES are different. This will happen when secure code takes a
secure exception, which then tail-chains to a non-secure exception
which finally returns to the original secure code.
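
The selection can be condensed into a few lines; this is only an
illustrative sketch (the names are hypothetical, not QEMU's):

#include <stdbool.h>
#include <stdio.h>

/* Pick the stack pointer used to pop the exception frame. The
 * Secure/Non-secure bank comes from EXCRET.S; MSP vs PSP comes from
 * that bank's CONTROL.SPSEL -- not from EXCRET.SPSEL, which is what
 * the buggy code effectively used. */
static const char *frame_sp(bool excret_s, bool spsel_s, bool spsel_ns)
{
    bool spsel = excret_s ? spsel_s : spsel_ns;

    if (excret_s) {
        return spsel ? "PSP_S" : "MSP_S";
    }
    return spsel ? "PSP_NS" : "MSP_NS";
}

int main(void)
{
    /* The tail-chain case above: returning to Secure (EXCRET.S = 1)
     * while the two banks' SPSEL bits disagree; CONTROL_S.SPSEL must
     * win, giving PSP_S. */
    printf("%s\n", frame_sp(true, true, false));
    return 0;
}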

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210520130905.2049-1-peter.maydell@linaro.org
---
 target/arm/m_helper.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
          * We use this limited C variable scope so we don't accidentally
          * use 'frame_sp_p' after we do something that makes it invalid.
          */
+        bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                               return_to_secure,
                                               !return_to_handler,
-                                              return_to_sp_process);
+                                              spsel);
         uint32_t frameptr = *frame_sp_p;
         bool pop_ok = true;
         ARMMMUIdx mmu_idx;
--
2.20.1
Deleted patch

From: Richard Henderson <richard.henderson@linaro.org>

Using g_memdup is a bit more compact than g_new + memcpy.
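
The transformation in isolation, as a minimal GLib sketch (Data is a
made-up example type):

#include <glib.h>

typedef struct { int a, b; } Data;

int main(void)
{
    Data d = { 1, 2 };

    /* Old shape: allocate, then copy. */
    Data *p = g_new(Data, 1);
    *p = d;

    /* New shape: g_memdup() allocates and copies in one call. */
    Data *q = g_memdup(&d, sizeof(d));

    g_assert(p->a == q->a && p->b == q->b);
    g_free(p);
    g_free(q);
    return 0;
}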

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-2-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
     } else if (encode_pbm_to_runon(&runon, d)) {
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
-        TLBFlushPageBitsByMMUIdxData *p
-            = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-
         /* Otherwise allocate a structure, freed by the worker. */
-        *p = d;
+        TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                          RUN_ON_CPU_HOST_PTR(p));
     }
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
         flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
         CPUState *dst_cpu;
-        TLBFlushPageBitsByMMUIdxData *p;
 
         /* Allocate a separate data block for each destination cpu. */
         CPU_FOREACH(dst_cpu) {
             if (dst_cpu != src_cpu) {
-                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-                *p = d;
+                TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
                 async_run_on_cpu(dst_cpu,
                                  tlb_flush_page_bits_by_mmuidx_async_2,
                                  RUN_ON_CPU_HOST_PTR(p));
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
         /* Allocate a separate data block for each destination cpu. */
         CPU_FOREACH(dst_cpu) {
             if (dst_cpu != src_cpu) {
-                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-                *p = d;
+                p = g_memdup(&d, sizeof(d));
                 async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                  RUN_ON_CPU_HOST_PTR(p));
             }
         }
 
-        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-        *p = d;
+        p = g_memdup(&d, sizeof(d));
         async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                               RUN_ON_CPU_HOST_PTR(p));
     }
--
2.20.1
Deleted patch

From: Richard Henderson <richard.henderson@linaro.org>

Rename tlb_flush_page_bits_locked() -> tlb_flush_range_locked(), and
have callers pass a length argument (currently TARGET_PAGE_SIZE) via
the TLBFlushPageBitsByMMUIdxData structure.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-3-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/cputlb.c | 48 +++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }
 
-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
-                                       target_ulong page, unsigned bits)
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+                                   target_ulong addr, target_ulong len,
+                                   unsigned bits)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[midx];
     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
      * If @bits is smaller than the tlb size, there may be multiple entries
      * within the TLB; otherwise all addresses that match under @mask hit
      * the same TLB entry.
-     *
      * TODO: Perhaps allow bits to be a few bits less than the size.
      * For now, just flush the entire TLB.
+     *
+     * If @len is larger than the tlb size, then it will take longer to
+     * test all of the entries in the TLB than it will to flush it all.
      */
-    if (mask < f->mask) {
+    if (mask < f->mask || len > f->mask) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
-                  midx, page, mask);
+                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+                  midx, addr, mask, len);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
     }
 
-    /* Check if we need to flush due to large pages. */
-    if ((page & d->large_page_mask) == d->large_page_addr) {
+    /*
+     * Check if we need to flush due to large pages.
+     * Because large_page_mask contains all 1's from the msb,
+     * we only need to test the end of the range.
+     */
+    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, d->large_page_addr, d->large_page_mask);
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
         return;
     }
 
-    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
-        tlb_n_used_entries_dec(env, midx);
+    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+        target_ulong page = addr + i;
+        CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+            tlb_n_used_entries_dec(env, midx);
+        }
+        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
     }
-    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
 }
 
 typedef struct {
     target_ulong addr;
+    target_ulong len;
     uint16_t idxmap;
     uint16_t bits;
 } TLBFlushPageBitsByMMUIdxData;
@@ -XXX,XX +XXX,XX @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
 
     assert_cpu_is_self(cpu);
 
-    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
-              d.addr, d.bits, d.idxmap);
+    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+              d.addr, d.bits, d.len, d.idxmap);
 
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if ((d.idxmap >> mmu_idx) & 1) {
-            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
         }
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, d.addr);
+    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+        tb_flush_jmp_cache(cpu, d.addr + i);
+    }
 }
 
 static bool encode_pbm_to_runon(run_on_cpu_data *out,
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
--
2.20.1
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Rename the structure to match the rename of tlb_flush_range_locked. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Message-id: 20210509151618.2331764-4-f4bug@amsat.org | ||
8 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
9 | [PMD: Split from bigger patch] | ||
10 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | --- | ||
14 | accel/tcg/cputlb.c | 24 ++++++++++++------------ | ||
15 | 1 file changed, 12 insertions(+), 12 deletions(-) | ||
16 | |||
17 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/accel/tcg/cputlb.c | ||
20 | +++ b/accel/tcg/cputlb.c | ||
21 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
22 | target_ulong len; | ||
23 | uint16_t idxmap; | ||
24 | uint16_t bits; | ||
25 | -} TLBFlushPageBitsByMMUIdxData; | ||
26 | +} TLBFlushRangeData; | ||
27 | |||
28 | static void | ||
29 | tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu, | ||
30 | - TLBFlushPageBitsByMMUIdxData d) | ||
31 | + TLBFlushRangeData d) | ||
32 | { | ||
33 | CPUArchState *env = cpu->env_ptr; | ||
34 | int mmu_idx; | ||
35 | @@ -XXX,XX +XXX,XX @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu, | ||
36 | } | ||
37 | |||
38 | static bool encode_pbm_to_runon(run_on_cpu_data *out, | ||
39 | - TLBFlushPageBitsByMMUIdxData d) | ||
40 | + TLBFlushRangeData d) | ||
41 | { | ||
42 | /* We need 6 bits to hold to hold @bits up to 63. */ | ||
43 | if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) { | ||
44 | @@ -XXX,XX +XXX,XX @@ static bool encode_pbm_to_runon(run_on_cpu_data *out, | ||
45 | return false; | ||
46 | } | ||
47 | |||
48 | -static TLBFlushPageBitsByMMUIdxData | ||
49 | +static TLBFlushRangeData | ||
50 | decode_runon_to_pbm(run_on_cpu_data data) | ||
51 | { | ||
52 | target_ulong addr_map_bits = (target_ulong) data.target_ptr; | ||
53 | - return (TLBFlushPageBitsByMMUIdxData){ | ||
54 | + return (TLBFlushRangeData){ | ||
55 | .addr = addr_map_bits & TARGET_PAGE_MASK, | ||
56 | .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6, | ||
57 | .bits = addr_map_bits & 0x3f | ||
58 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu, | ||
59 | static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
60 | run_on_cpu_data data) | ||
61 | { | ||
62 | - TLBFlushPageBitsByMMUIdxData *d = data.host_ptr; | ||
63 | + TLBFlushRangeData *d = data.host_ptr; | ||
64 | tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d); | ||
65 | g_free(d); | ||
66 | } | ||
67 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
68 | void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
69 | uint16_t idxmap, unsigned bits) | ||
70 | { | ||
71 | - TLBFlushPageBitsByMMUIdxData d; | ||
72 | + TLBFlushRangeData d; | ||
73 | run_on_cpu_data runon; | ||
74 | |||
75 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
76 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
77 | async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon); | ||
78 | } else { | ||
79 | /* Otherwise allocate a structure, freed by the worker. */ | ||
80 | - TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d)); | ||
81 | + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
82 | async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
83 | RUN_ON_CPU_HOST_PTR(p)); | ||
84 | } | ||
85 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
86 | uint16_t idxmap, | ||
87 | unsigned bits) | ||
88 | { | ||
89 | - TLBFlushPageBitsByMMUIdxData d; | ||
90 | + TLBFlushRangeData d; | ||
91 | run_on_cpu_data runon; | ||
92 | |||
93 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
94 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
95 | /* Allocate a separate data block for each destination cpu. */ | ||
96 | CPU_FOREACH(dst_cpu) { | ||
97 | if (dst_cpu != src_cpu) { | ||
98 | - TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d)); | ||
99 | + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
100 | async_run_on_cpu(dst_cpu, | ||
101 | tlb_flush_page_bits_by_mmuidx_async_2, | ||
102 | RUN_ON_CPU_HOST_PTR(p)); | ||
103 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
104 | uint16_t idxmap, | ||
105 | unsigned bits) | ||
106 | { | ||
107 | - TLBFlushPageBitsByMMUIdxData d; | ||
108 | + TLBFlushRangeData d; | ||
109 | run_on_cpu_data runon; | ||
110 | |||
111 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
112 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
113 | runon); | ||
114 | } else { | ||
115 | CPUState *dst_cpu; | ||
116 | - TLBFlushPageBitsByMMUIdxData *p; | ||
117 | + TLBFlushRangeData *p; | ||
118 | |||
119 | /* Allocate a separate data block for each destination cpu. */ | ||
120 | CPU_FOREACH(dst_cpu) { | ||
121 | -- | ||
122 | 2.20.1 | ||
123 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | We will not be able to fit address + length into a 64-bit packet. | ||
4 | Drop this optimization before re-organizing this code. | ||
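For context, the optimization being dropped packed all three operands into the single pointer-sized word that run_on_cpu_data can carry. A minimal sketch of that encoding (illustrative only, assuming 4 KiB pages so the low 12 address bits are free; the real code is the encode_pbm_to_runon() removed below):

    /*
     * Sketch of the dropped packing, not QEMU code:
     *   bits [63:12]  page-aligned virtual address
     *   bits [11:6]   idxmap (only legal while it fits in 6 bits)
     *   bits [5:0]    significant-bit count @bits, 0..63
     */
    uint64_t packed = (addr & TARGET_PAGE_MASK)
                    | ((uint64_t)idxmap << 6)
                    | bits;

Every bit below the page mask is already spoken for, so a target_ulong length has nowhere to go; once ranges arrive, the request must always travel via the g_memdup()'d TLBFlushRangeData, which is what this patch arranges.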
5 | |||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Message-id: 20210509151618.2331764-10-f4bug@amsat.org | ||
9 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
10 | [PMD: Split from bigger patch] | ||
11 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
12 | [PMM: Moved patch earlier in the series] | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | --- | ||
15 | accel/tcg/cputlb.c | 86 +++++++++++----------------------------------- | ||
16 | 1 file changed, 20 insertions(+), 66 deletions(-) | ||
17 | |||
18 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/accel/tcg/cputlb.c | ||
21 | +++ b/accel/tcg/cputlb.c | ||
22 | @@ -XXX,XX +XXX,XX @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu, | ||
23 | } | ||
24 | } | ||
25 | |||
26 | -static bool encode_pbm_to_runon(run_on_cpu_data *out, | ||
27 | - TLBFlushRangeData d) | ||
28 | -{ | ||
29 | - /* We need 6 bits to hold @bits up to 63. */ | ||
30 | - if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) { | ||
31 | - *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits); | ||
32 | - return true; | ||
33 | - } | ||
34 | - return false; | ||
35 | -} | ||
36 | - | ||
37 | -static TLBFlushRangeData | ||
38 | -decode_runon_to_pbm(run_on_cpu_data data) | ||
39 | -{ | ||
40 | - target_ulong addr_map_bits = (target_ulong) data.target_ptr; | ||
41 | - return (TLBFlushRangeData){ | ||
42 | - .addr = addr_map_bits & TARGET_PAGE_MASK, | ||
43 | - .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6, | ||
44 | - .bits = addr_map_bits & 0x3f | ||
45 | - }; | ||
46 | -} | ||
47 | - | ||
48 | -static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu, | ||
49 | - run_on_cpu_data runon) | ||
50 | -{ | ||
51 | - tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon)); | ||
52 | -} | ||
53 | - | ||
54 | static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
55 | run_on_cpu_data data) | ||
56 | { | ||
57 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
58 | uint16_t idxmap, unsigned bits) | ||
59 | { | ||
60 | TLBFlushRangeData d; | ||
61 | - run_on_cpu_data runon; | ||
62 | |||
63 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
64 | if (bits >= TARGET_LONG_BITS) { | ||
65 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
66 | |||
67 | if (qemu_cpu_is_self(cpu)) { | ||
68 | tlb_flush_page_bits_by_mmuidx_async_0(cpu, d); | ||
69 | - } else if (encode_pbm_to_runon(&runon, d)) { | ||
70 | - async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon); | ||
71 | } else { | ||
72 | /* Otherwise allocate a structure, freed by the worker. */ | ||
73 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
74 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
75 | unsigned bits) | ||
76 | { | ||
77 | TLBFlushRangeData d; | ||
78 | - run_on_cpu_data runon; | ||
79 | + CPUState *dst_cpu; | ||
80 | |||
81 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
82 | if (bits >= TARGET_LONG_BITS) { | ||
83 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
84 | d.idxmap = idxmap; | ||
85 | d.bits = bits; | ||
86 | |||
87 | - if (encode_pbm_to_runon(&runon, d)) { | ||
88 | - flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon); | ||
89 | - } else { | ||
90 | - CPUState *dst_cpu; | ||
91 | - | ||
92 | - /* Allocate a separate data block for each destination cpu. */ | ||
93 | - CPU_FOREACH(dst_cpu) { | ||
94 | - if (dst_cpu != src_cpu) { | ||
95 | - TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
96 | - async_run_on_cpu(dst_cpu, | ||
97 | - tlb_flush_page_bits_by_mmuidx_async_2, | ||
98 | - RUN_ON_CPU_HOST_PTR(p)); | ||
99 | - } | ||
100 | + /* Allocate a separate data block for each destination cpu. */ | ||
101 | + CPU_FOREACH(dst_cpu) { | ||
102 | + if (dst_cpu != src_cpu) { | ||
103 | + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
104 | + async_run_on_cpu(dst_cpu, | ||
105 | + tlb_flush_page_bits_by_mmuidx_async_2, | ||
106 | + RUN_ON_CPU_HOST_PTR(p)); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
111 | uint16_t idxmap, | ||
112 | unsigned bits) | ||
113 | { | ||
114 | - TLBFlushRangeData d; | ||
115 | - run_on_cpu_data runon; | ||
116 | + TLBFlushRangeData d, *p; | ||
117 | + CPUState *dst_cpu; | ||
118 | |||
119 | /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
120 | if (bits >= TARGET_LONG_BITS) { | ||
121 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
122 | d.idxmap = idxmap; | ||
123 | d.bits = bits; | ||
124 | |||
125 | - if (encode_pbm_to_runon(&runon, d)) { | ||
126 | - flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon); | ||
127 | - async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, | ||
128 | - runon); | ||
129 | - } else { | ||
130 | - CPUState *dst_cpu; | ||
131 | - TLBFlushRangeData *p; | ||
132 | - | ||
133 | - /* Allocate a separate data block for each destination cpu. */ | ||
134 | - CPU_FOREACH(dst_cpu) { | ||
135 | - if (dst_cpu != src_cpu) { | ||
136 | - p = g_memdup(&d, sizeof(d)); | ||
137 | - async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
138 | - RUN_ON_CPU_HOST_PTR(p)); | ||
139 | - } | ||
140 | + /* Allocate a separate data block for each destination cpu. */ | ||
141 | + CPU_FOREACH(dst_cpu) { | ||
142 | + if (dst_cpu != src_cpu) { | ||
143 | + p = g_memdup(&d, sizeof(d)); | ||
144 | + async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
145 | + RUN_ON_CPU_HOST_PTR(p)); | ||
146 | } | ||
147 | - | ||
148 | - p = g_memdup(&d, sizeof(d)); | ||
149 | - async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
150 | - RUN_ON_CPU_HOST_PTR(p)); | ||
151 | } | ||
152 | + | ||
153 | + p = g_memdup(&d, sizeof(d)); | ||
154 | + async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
155 | + RUN_ON_CPU_HOST_PTR(p)); | ||
156 | } | ||
157 | |||
158 | /* update the TLBs so that writes to code in the virtual page 'addr' | ||
159 | -- | ||
160 | 2.20.1 | ||
161 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Forward tlb_flush_page_bits_by_mmuidx to tlb_flush_range_by_mmuidx | ||
4 | passing TARGET_PAGE_SIZE. | ||
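With the range entry point in place, a caller can flush a multi-page window in one request; the old function keeps its behaviour as the len == TARGET_PAGE_SIZE special case, as the final hunk shows. A hypothetical use (the constants here are invented for illustration):

    /* Flush a 16-page range in MMU index 0, comparing all address bits. */
    tlb_flush_range_by_mmuidx(cpu, addr, 16 * TARGET_PAGE_SIZE,
                              1 << 0, TARGET_LONG_BITS);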
5 | |||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Message-id: 20210509151618.2331764-5-f4bug@amsat.org | ||
9 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
10 | [PMD: Split from bigger patch] | ||
11 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | --- | ||
15 | include/exec/exec-all.h | 19 +++++++++++++++++++ | ||
16 | accel/tcg/cputlb.c | 20 +++++++++++++++----- | ||
17 | 2 files changed, 34 insertions(+), 5 deletions(-) | ||
18 | |||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, | ||
24 | void tlb_flush_page_bits_by_mmuidx_all_cpus_synced | ||
25 | (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits); | ||
26 | |||
27 | +/** | ||
28 | + * tlb_flush_range_by_mmuidx | ||
29 | + * @cpu: CPU whose TLB should be flushed | ||
30 | + * @addr: virtual address of the start of the range to be flushed | ||
31 | + * @len: length of range to be flushed | ||
32 | + * @idxmap: bitmap of mmu indexes to flush | ||
33 | + * @bits: number of significant bits in address | ||
34 | + * | ||
35 | + * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len), | ||
36 | + * comparing only the low @bits worth of each virtual page. | ||
37 | + */ | ||
38 | +void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
39 | + target_ulong len, uint16_t idxmap, | ||
40 | + unsigned bits); | ||
41 | /** | ||
42 | * tlb_set_page_with_attrs: | ||
43 | * @cpu: CPU to add this TLB entry for | ||
44 | @@ -XXX,XX +XXX,XX @@ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr, | ||
45 | uint16_t idxmap, unsigned bits) | ||
46 | { | ||
47 | } | ||
48 | +static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
49 | + target_ulong len, uint16_t idxmap, | ||
50 | + unsigned bits) | ||
51 | +{ | ||
52 | +} | ||
53 | #endif | ||
54 | /** | ||
55 | * probe_access: | ||
56 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/accel/tcg/cputlb.c | ||
59 | +++ b/accel/tcg/cputlb.c | ||
60 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
61 | g_free(d); | ||
62 | } | ||
63 | |||
64 | -void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
65 | - uint16_t idxmap, unsigned bits) | ||
66 | +void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
67 | + target_ulong len, uint16_t idxmap, | ||
68 | + unsigned bits) | ||
69 | { | ||
70 | TLBFlushRangeData d; | ||
71 | |||
72 | - /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
73 | - if (bits >= TARGET_LONG_BITS) { | ||
74 | + /* | ||
75 | + * If all bits are significant, and len is small, | ||
76 | + * this devolves to tlb_flush_page. | ||
77 | + */ | ||
78 | + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { | ||
79 | tlb_flush_page_by_mmuidx(cpu, addr, idxmap); | ||
80 | return; | ||
81 | } | ||
82 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
83 | |||
84 | /* This should already be page aligned */ | ||
85 | d.addr = addr & TARGET_PAGE_MASK; | ||
86 | - d.len = TARGET_PAGE_SIZE; | ||
87 | + d.len = len; | ||
88 | d.idxmap = idxmap; | ||
89 | d.bits = bits; | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
92 | } | ||
93 | } | ||
94 | |||
95 | +void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
96 | + uint16_t idxmap, unsigned bits) | ||
97 | +{ | ||
98 | + tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); | ||
99 | +} | ||
100 | + | ||
101 | void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
102 | target_ulong addr, | ||
103 | uint16_t idxmap, | ||
104 | -- | ||
105 | 2.20.1 | ||
106 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Forward tlb_flush_page_bits_by_mmuidx_all_cpus to | ||
4 | tlb_flush_range_by_mmuidx_all_cpus passing TARGET_PAGE_SIZE. | ||
5 | |||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Message-id: 20210509151618.2331764-6-f4bug@amsat.org | ||
9 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
10 | [PMD: Split from bigger patch] | ||
11 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | --- | ||
15 | include/exec/exec-all.h | 13 +++++++++++++ | ||
16 | accel/tcg/cputlb.c | 24 +++++++++++++++++------- | ||
17 | 2 files changed, 30 insertions(+), 7 deletions(-) | ||
18 | |||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced | ||
24 | void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
25 | target_ulong len, uint16_t idxmap, | ||
26 | unsigned bits); | ||
27 | + | ||
28 | +/* Similarly, with broadcast and syncing. */ | ||
29 | +void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, | ||
30 | + target_ulong len, uint16_t idxmap, | ||
31 | + unsigned bits); | ||
32 | + | ||
33 | /** | ||
34 | * tlb_set_page_with_attrs: | ||
35 | * @cpu: CPU to add this TLB entry for | ||
36 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
37 | unsigned bits) | ||
38 | { | ||
39 | } | ||
40 | +static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, | ||
41 | + target_ulong addr, | ||
42 | + target_ulong len, | ||
43 | + uint16_t idxmap, | ||
44 | + unsigned bits) | ||
45 | +{ | ||
46 | +} | ||
47 | #endif | ||
48 | /** | ||
49 | * probe_access: | ||
50 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
51 | index XXXXXXX..XXXXXXX 100644 | ||
52 | --- a/accel/tcg/cputlb.c | ||
53 | +++ b/accel/tcg/cputlb.c | ||
54 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
55 | tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); | ||
56 | } | ||
57 | |||
58 | -void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
59 | - target_ulong addr, | ||
60 | - uint16_t idxmap, | ||
61 | - unsigned bits) | ||
62 | +void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
63 | + target_ulong addr, target_ulong len, | ||
64 | + uint16_t idxmap, unsigned bits) | ||
65 | { | ||
66 | TLBFlushRangeData d; | ||
67 | CPUState *dst_cpu; | ||
68 | |||
69 | - /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
70 | - if (bits >= TARGET_LONG_BITS) { | ||
71 | + /* | ||
72 | + * If all bits are significant, and len is small, | ||
73 | + * this devolves to tlb_flush_page. | ||
74 | + */ | ||
75 | + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { | ||
76 | tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); | ||
77 | return; | ||
78 | } | ||
79 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
80 | |||
81 | /* This should already be page aligned */ | ||
82 | d.addr = addr & TARGET_PAGE_MASK; | ||
83 | - d.len = TARGET_PAGE_SIZE; | ||
84 | + d.len = len; | ||
85 | d.idxmap = idxmap; | ||
86 | d.bits = bits; | ||
87 | |||
88 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
89 | tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d); | ||
90 | } | ||
91 | |||
92 | +void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
93 | + target_ulong addr, | ||
94 | + uint16_t idxmap, unsigned bits) | ||
95 | +{ | ||
96 | + tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, | ||
97 | + idxmap, bits); | ||
98 | +} | ||
99 | + | ||
100 | void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
101 | target_ulong addr, | ||
102 | uint16_t idxmap, | ||
103 | -- | ||
104 | 2.20.1 | ||
105 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Forward tlb_flush_page_bits_by_mmuidx_all_cpus_synced to | ||
4 | tlb_flush_range_by_mmuidx_all_cpus_synced passing TARGET_PAGE_SIZE. | ||
5 | |||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Message-id: 20210509151618.2331764-7-f4bug@amsat.org | ||
9 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
10 | [PMD: Split from bigger patch] | ||
11 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | --- | ||
15 | include/exec/exec-all.h | 12 ++++++++++++ | ||
16 | accel/tcg/cputlb.c | 27 ++++++++++++++++++++------- | ||
17 | 2 files changed, 32 insertions(+), 7 deletions(-) | ||
18 | |||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
24 | void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, | ||
25 | target_ulong len, uint16_t idxmap, | ||
26 | unsigned bits); | ||
27 | +void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, | ||
28 | + target_ulong addr, | ||
29 | + target_ulong len, | ||
30 | + uint16_t idxmap, | ||
31 | + unsigned bits); | ||
32 | |||
33 | /** | ||
34 | * tlb_set_page_with_attrs: | ||
35 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, | ||
36 | unsigned bits) | ||
37 | { | ||
38 | } | ||
39 | +static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, | ||
40 | + target_ulong addr, | ||
41 | + target_ulong len, | ||
42 | + uint16_t idxmap, | ||
43 | + unsigned bits) | ||
44 | +{ | ||
45 | +} | ||
46 | #endif | ||
47 | /** | ||
48 | * probe_access: | ||
49 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/accel/tcg/cputlb.c | ||
52 | +++ b/accel/tcg/cputlb.c | ||
53 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
54 | idxmap, bits); | ||
55 | } | ||
56 | |||
57 | -void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
58 | - target_ulong addr, | ||
59 | - uint16_t idxmap, | ||
60 | - unsigned bits) | ||
61 | +void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
62 | + target_ulong addr, | ||
63 | + target_ulong len, | ||
64 | + uint16_t idxmap, | ||
65 | + unsigned bits) | ||
66 | { | ||
67 | TLBFlushRangeData d, *p; | ||
68 | CPUState *dst_cpu; | ||
69 | |||
70 | - /* If all bits are significant, this devolves to tlb_flush_page. */ | ||
71 | - if (bits >= TARGET_LONG_BITS) { | ||
72 | + /* | ||
73 | + * If all bits are significant, and len is small, | ||
74 | + * this devolves to tlb_flush_page. | ||
75 | + */ | ||
76 | + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { | ||
77 | tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); | ||
78 | return; | ||
79 | } | ||
80 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
81 | |||
82 | /* This should already be page aligned */ | ||
83 | d.addr = addr & TARGET_PAGE_MASK; | ||
84 | - d.len = TARGET_PAGE_SIZE; | ||
85 | + d.len = len; | ||
86 | d.idxmap = idxmap; | ||
87 | d.bits = bits; | ||
88 | |||
89 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
90 | RUN_ON_CPU_HOST_PTR(p)); | ||
91 | } | ||
92 | |||
93 | +void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
94 | + target_ulong addr, | ||
95 | + uint16_t idxmap, | ||
96 | + unsigned bits) | ||
97 | +{ | ||
98 | + tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, | ||
99 | + idxmap, bits); | ||
100 | +} | ||
101 | + | ||
102 | /* update the TLBs so that writes to code in the virtual page 'addr' | ||
103 | can be detected */ | ||
104 | void tlb_protect_code(ram_addr_t ram_addr) | ||
105 | -- | ||
106 | 2.20.1 | ||
107 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Rename to match tlb_flush_range_locked. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Message-id: 20210509151618.2331764-8-f4bug@amsat.org | ||
8 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
9 | [PMD: Split from bigger patch] | ||
10 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | --- | ||
14 | accel/tcg/cputlb.c | 11 +++++------ | ||
15 | 1 file changed, 5 insertions(+), 6 deletions(-) | ||
16 | |||
17 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/accel/tcg/cputlb.c | ||
20 | +++ b/accel/tcg/cputlb.c | ||
21 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
22 | uint16_t bits; | ||
23 | } TLBFlushRangeData; | ||
24 | |||
25 | -static void | ||
26 | -tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu, | ||
27 | - TLBFlushRangeData d) | ||
28 | +static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, | ||
29 | + TLBFlushRangeData d) | ||
30 | { | ||
31 | CPUArchState *env = cpu->env_ptr; | ||
32 | int mmu_idx; | ||
33 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
34 | run_on_cpu_data data) | ||
35 | { | ||
36 | TLBFlushRangeData *d = data.host_ptr; | ||
37 | - tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d); | ||
38 | + tlb_flush_range_by_mmuidx_async_0(cpu, *d); | ||
39 | g_free(d); | ||
40 | } | ||
41 | |||
42 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
43 | d.bits = bits; | ||
44 | |||
45 | if (qemu_cpu_is_self(cpu)) { | ||
46 | - tlb_flush_page_bits_by_mmuidx_async_0(cpu, d); | ||
47 | + tlb_flush_range_by_mmuidx_async_0(cpu, d); | ||
48 | } else { | ||
49 | /* Otherwise allocate a structure, freed by the worker. */ | ||
50 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
51 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
52 | } | ||
53 | } | ||
54 | |||
55 | - tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d); | ||
56 | + tlb_flush_range_by_mmuidx_async_0(src_cpu, d); | ||
57 | } | ||
58 | |||
59 | void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
60 | -- | ||
61 | 2.20.1 | ||
62 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Rename to match tlb_flush_range_locked. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Message-id: 20210509151618.2331764-9-f4bug@amsat.org | ||
8 | Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org> | ||
9 | [PMD: Split from bigger patch] | ||
10 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | --- | ||
14 | accel/tcg/cputlb.c | 12 ++++++------ | ||
15 | 1 file changed, 6 insertions(+), 6 deletions(-) | ||
16 | |||
17 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/accel/tcg/cputlb.c | ||
20 | +++ b/accel/tcg/cputlb.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, | ||
22 | } | ||
23 | } | ||
24 | |||
25 | -static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu, | ||
26 | - run_on_cpu_data data) | ||
27 | +static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, | ||
28 | + run_on_cpu_data data) | ||
29 | { | ||
30 | TLBFlushRangeData *d = data.host_ptr; | ||
31 | tlb_flush_range_by_mmuidx_async_0(cpu, *d); | ||
32 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, | ||
33 | } else { | ||
34 | /* Otherwise allocate a structure, freed by the worker. */ | ||
35 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
36 | - async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
37 | + async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, | ||
38 | RUN_ON_CPU_HOST_PTR(p)); | ||
39 | } | ||
40 | } | ||
41 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, | ||
42 | if (dst_cpu != src_cpu) { | ||
43 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); | ||
44 | async_run_on_cpu(dst_cpu, | ||
45 | - tlb_flush_page_bits_by_mmuidx_async_2, | ||
46 | + tlb_flush_range_by_mmuidx_async_1, | ||
47 | RUN_ON_CPU_HOST_PTR(p)); | ||
48 | } | ||
49 | } | ||
50 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
51 | CPU_FOREACH(dst_cpu) { | ||
52 | if (dst_cpu != src_cpu) { | ||
53 | p = g_memdup(&d, sizeof(d)); | ||
54 | - async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
55 | + async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, | ||
56 | RUN_ON_CPU_HOST_PTR(p)); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | p = g_memdup(&d, sizeof(d)); | ||
61 | - async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2, | ||
62 | + async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, | ||
63 | RUN_ON_CPU_HOST_PTR(p)); | ||
64 | } | ||
65 | |||
66 | -- | ||
67 | 2.20.1 | ||
68 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Rebecca Cran <rebecca@nuviainc.com> | ||
2 | 1 | ||
3 | ARMv8.4 adds the mandatory FEAT_TLBIRANGE. It provides TLBI | ||
4 | maintenance instructions that apply to a range of input addresses. | ||
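As a worked example of the length decode implemented below (the operand values here are invented for illustration): an R-form TLBI whose operand carries SCALE == 0 and NUM == 3 gives

    exponent = (5 * 0) + 1;                        /* = 1 */
    length = (3 + 1) << (exponent + page_shift);   /* 8 << page_shift */

i.e. eight translation granules starting at the base address; NUM and SCALE can only express ranges that are a multiple of two granules, never an arbitrary byte count.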
5 | |||
6 | Signed-off-by: Rebecca Cran <rebecca@nuviainc.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20210512182337.18563-2-rebecca@nuviainc.com | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/cpu.h | 5 + | ||
12 | target/arm/helper.c | 281 ++++++++++++++++++++++++++++++++++++++++++++ | ||
13 | 2 files changed, 286 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/cpu.h | ||
18 | +++ b/target/arm/cpu.h | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id) | ||
20 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0; | ||
21 | } | ||
22 | |||
23 | +static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) | ||
24 | +{ | ||
25 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; | ||
26 | +} | ||
27 | + | ||
28 | static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) | ||
29 | { | ||
30 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; | ||
31 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/helper.c | ||
34 | +++ b/target/arm/helper.c | ||
35 | @@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, | ||
36 | ARMMMUIdxBit_SE3, bits); | ||
37 | } | ||
38 | |||
39 | +#ifdef TARGET_AARCH64 | ||
40 | +static uint64_t tlbi_aa64_range_get_length(CPUARMState *env, | ||
41 | + uint64_t value) | ||
42 | +{ | ||
43 | + unsigned int page_shift; | ||
44 | + unsigned int page_size_granule; | ||
45 | + uint64_t num; | ||
46 | + uint64_t scale; | ||
47 | + uint64_t exponent; | ||
48 | + uint64_t length; | ||
49 | + | ||
50 | + num = extract64(value, 39, 4); | ||
51 | + scale = extract64(value, 44, 2); | ||
52 | + page_size_granule = extract64(value, 46, 2); | ||
53 | + | ||
54 | + page_shift = page_size_granule * 2 + 12; | ||
55 | + | ||
56 | + if (page_size_granule == 0) { | ||
57 | + qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n", | ||
58 | + page_size_granule); | ||
59 | + return 0; | ||
60 | + } | ||
61 | + | ||
62 | + exponent = (5 * scale) + 1; | ||
63 | + length = (num + 1) << (exponent + page_shift); | ||
64 | + | ||
65 | + return length; | ||
66 | +} | ||
67 | + | ||
68 | +static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value, | ||
69 | + bool two_ranges) | ||
70 | +{ | ||
71 | + /* TODO: ARMv8.7 FEAT_LPA2 */ | ||
72 | + uint64_t pageaddr; | ||
73 | + | ||
74 | + if (two_ranges) { | ||
75 | + pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS; | ||
76 | + } else { | ||
77 | + pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS; | ||
78 | + } | ||
79 | + | ||
80 | + return pageaddr; | ||
81 | +} | ||
82 | + | ||
83 | +static void do_rvae_write(CPUARMState *env, uint64_t value, | ||
84 | + int idxmap, bool synced) | ||
85 | +{ | ||
86 | + ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); | ||
87 | + bool two_ranges = regime_has_2_ranges(one_idx); | ||
88 | + uint64_t baseaddr, length; | ||
89 | + int bits; | ||
90 | + | ||
91 | + baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges); | ||
92 | + length = tlbi_aa64_range_get_length(env, value); | ||
93 | + bits = tlbbits_for_regime(env, one_idx, baseaddr); | ||
94 | + | ||
95 | + if (synced) { | ||
96 | + tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), | ||
97 | + baseaddr, | ||
98 | + length, | ||
99 | + idxmap, | ||
100 | + bits); | ||
101 | + } else { | ||
102 | + tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr, | ||
103 | + length, idxmap, bits); | ||
104 | + } | ||
105 | +} | ||
106 | + | ||
107 | +static void tlbi_aa64_rvae1_write(CPUARMState *env, | ||
108 | + const ARMCPRegInfo *ri, | ||
109 | + uint64_t value) | ||
110 | +{ | ||
111 | + /* | ||
112 | + * Invalidate by VA range, EL1&0. | ||
113 | + * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, | ||
114 | + * since we don't support flush-for-specific-ASID-only or | ||
115 | + * flush-last-level-only. | ||
116 | + */ | ||
117 | + | ||
118 | + do_rvae_write(env, value, vae1_tlbmask(env), | ||
119 | + tlb_force_broadcast(env)); | ||
120 | +} | ||
121 | + | ||
122 | +static void tlbi_aa64_rvae1is_write(CPUARMState *env, | ||
123 | + const ARMCPRegInfo *ri, | ||
124 | + uint64_t value) | ||
125 | +{ | ||
126 | + /* | ||
127 | + * Invalidate by VA range, Inner/Outer Shareable EL1&0. | ||
128 | + * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, | ||
129 | + * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support | ||
130 | + * flush-for-specific-ASID-only, flush-last-level-only or inner/outer | ||
131 | + * shareable specific flushes. | ||
132 | + */ | ||
133 | + | ||
134 | + do_rvae_write(env, value, vae1_tlbmask(env), true); | ||
135 | +} | ||
136 | + | ||
137 | +static int vae2_tlbmask(CPUARMState *env) | ||
138 | +{ | ||
139 | + return (arm_is_secure_below_el3(env) | ||
140 | + ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2); | ||
141 | +} | ||
142 | + | ||
143 | +static void tlbi_aa64_rvae2_write(CPUARMState *env, | ||
144 | + const ARMCPRegInfo *ri, | ||
145 | + uint64_t value) | ||
146 | +{ | ||
147 | + /* | ||
148 | + * Invalidate by VA range, EL2. | ||
149 | + * Currently handles all of RVAE2 and RVALE2, | ||
150 | + * since we don't support flush-for-specific-ASID-only or | ||
151 | + * flush-last-level-only. | ||
152 | + */ | ||
153 | + | ||
154 | + do_rvae_write(env, value, vae2_tlbmask(env), | ||
155 | + tlb_force_broadcast(env)); | ||
156 | + | ||
157 | + | ||
158 | +} | ||
159 | + | ||
160 | +static void tlbi_aa64_rvae2is_write(CPUARMState *env, | ||
161 | + const ARMCPRegInfo *ri, | ||
162 | + uint64_t value) | ||
163 | +{ | ||
164 | + /* | ||
165 | + * Invalidate by VA range, Inner/Outer Shareable, EL2. | ||
166 | + * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, | ||
167 | + * since we don't support flush-for-specific-ASID-only, | ||
168 | + * flush-last-level-only or inner/outer shareable specific flushes. | ||
169 | + */ | ||
170 | + | ||
171 | + do_rvae_write(env, value, vae2_tlbmask(env), true); | ||
172 | + | ||
173 | +} | ||
174 | + | ||
175 | +static void tlbi_aa64_rvae3_write(CPUARMState *env, | ||
176 | + const ARMCPRegInfo *ri, | ||
177 | + uint64_t value) | ||
178 | +{ | ||
179 | + /* | ||
180 | + * Invalidate by VA range, EL3. | ||
181 | + * Currently handles all of RVAE3 and RVALE3, | ||
182 | + * since we don't support flush-for-specific-ASID-only or | ||
183 | + * flush-last-level-only. | ||
184 | + */ | ||
185 | + | ||
186 | + do_rvae_write(env, value, ARMMMUIdxBit_SE3, | ||
187 | + tlb_force_broadcast(env)); | ||
188 | +} | ||
189 | + | ||
190 | +static void tlbi_aa64_rvae3is_write(CPUARMState *env, | ||
191 | + const ARMCPRegInfo *ri, | ||
192 | + uint64_t value) | ||
193 | +{ | ||
194 | + /* | ||
195 | + * Invalidate by VA range, EL3, Inner/Outer Shareable. | ||
196 | + * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, | ||
197 | + * since we don't support flush-for-specific-ASID-only, | ||
198 | + * flush-last-level-only or inner/outer specific flushes. | ||
199 | + */ | ||
200 | + | ||
201 | + do_rvae_write(env, value, ARMMMUIdxBit_SE3, true); | ||
202 | +} | ||
203 | +#endif | ||
204 | + | ||
205 | static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, | ||
206 | bool isread) | ||
207 | { | ||
208 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pauth_reginfo[] = { | ||
209 | REGINFO_SENTINEL | ||
210 | }; | ||
211 | |||
212 | +static const ARMCPRegInfo tlbirange_reginfo[] = { | ||
213 | + { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, | ||
214 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, | ||
215 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
216 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
217 | + { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, | ||
218 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, | ||
219 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
220 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
221 | + { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, | ||
222 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, | ||
223 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
224 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
225 | + { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, | ||
226 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, | ||
227 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
228 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
229 | + { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, | ||
230 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, | ||
231 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
232 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
233 | + { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, | ||
234 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, | ||
235 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
236 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
237 | + { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, | ||
238 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, | ||
239 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
240 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
241 | + { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, | ||
242 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, | ||
243 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
244 | + .writefn = tlbi_aa64_rvae1is_write }, | ||
245 | + { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, | ||
246 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, | ||
247 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
248 | + .writefn = tlbi_aa64_rvae1_write }, | ||
249 | + { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, | ||
250 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, | ||
251 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
252 | + .writefn = tlbi_aa64_rvae1_write }, | ||
253 | + { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, | ||
254 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, | ||
255 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
256 | + .writefn = tlbi_aa64_rvae1_write }, | ||
257 | + { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, | ||
258 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, | ||
259 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
260 | + .writefn = tlbi_aa64_rvae1_write }, | ||
261 | + { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, | ||
262 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, | ||
263 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
264 | + { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, | ||
265 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, | ||
266 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
267 | + { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, | ||
268 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, | ||
269 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
270 | + .writefn = tlbi_aa64_rvae2is_write }, | ||
271 | + { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, | ||
272 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, | ||
273 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
274 | + .writefn = tlbi_aa64_rvae2is_write }, | ||
275 | + { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, | ||
276 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, | ||
277 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
278 | + { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, | ||
279 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, | ||
280 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
281 | + { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, | ||
282 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, | ||
283 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
284 | + .writefn = tlbi_aa64_rvae2is_write }, | ||
285 | + { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, | ||
286 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, | ||
287 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
288 | + .writefn = tlbi_aa64_rvae2is_write }, | ||
289 | + { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, | ||
290 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, | ||
291 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
292 | + .writefn = tlbi_aa64_rvae2_write }, | ||
293 | + { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, | ||
294 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, | ||
295 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
296 | + .writefn = tlbi_aa64_rvae2_write }, | ||
297 | + { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, | ||
298 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, | ||
299 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
300 | + .writefn = tlbi_aa64_rvae3is_write }, | ||
301 | + { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, | ||
302 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, | ||
303 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
304 | + .writefn = tlbi_aa64_rvae3is_write }, | ||
305 | + { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, | ||
306 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, | ||
307 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
308 | + .writefn = tlbi_aa64_rvae3is_write }, | ||
309 | + { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, | ||
310 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, | ||
311 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
312 | + .writefn = tlbi_aa64_rvae3is_write }, | ||
313 | + { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, | ||
314 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, | ||
315 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
316 | + .writefn = tlbi_aa64_rvae3_write }, | ||
317 | + { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, | ||
318 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, | ||
319 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
320 | + .writefn = tlbi_aa64_rvae3_write }, | ||
321 | + REGINFO_SENTINEL | ||
322 | +}; | ||
323 | + | ||
324 | static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) | ||
325 | { | ||
326 | Error *err = NULL; | ||
327 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | ||
328 | if (cpu_isar_feature(aa64_rndr, cpu)) { | ||
329 | define_arm_cp_regs(cpu, rndr_reginfo); | ||
330 | } | ||
331 | + if (cpu_isar_feature(aa64_tlbirange, cpu)) { | ||
332 | + define_arm_cp_regs(cpu, tlbirange_reginfo); | ||
333 | + } | ||
334 | #ifndef CONFIG_USER_ONLY | ||
335 | /* Data Cache clean instructions up to PoP */ | ||
336 | if (cpu_isar_feature(aa64_dcpop, cpu)) { | ||
337 | -- | ||
338 | 2.20.1 | ||
339 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Rebecca Cran <rebecca@nuviainc.com> | ||
2 | 1 | ||
3 | ARMv8.4 adds the mandatory FEAT_TLBIOS. It provides TLBI | ||
4 | maintenance instructions that extend to the Outer Shareable domain. | ||
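This check and the previous patch's isar_feature_aa64_tlbirange read the same ID register field; the different comparisons follow the architected encoding:

    /*
     * ID_AA64ISAR0_EL1.TLB (per the Arm ARM):
     *   0b0000  no Outer Shareable or range TLBI instructions
     *   0b0001  Outer Shareable TLBI (FEAT_TLBIOS)
     *   0b0010  Outer Shareable plus range TLBI (FEAT_TLBIOS and FEAT_TLBIRANGE)
     */

so TLB != 0 is the TLBIOS test, while TLB == 2 is the stricter FEAT_TLBIRANGE test.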
5 | |||
6 | Signed-off-by: Rebecca Cran <rebecca@nuviainc.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20210512182337.18563-3-rebecca@nuviainc.com | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/cpu.h | 5 +++++ | ||
12 | target/arm/helper.c | 43 +++++++++++++++++++++++++++++++++++++++++++ | ||
13 | 2 files changed, 48 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/cpu.h | ||
18 | +++ b/target/arm/cpu.h | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) | ||
20 | return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; | ||
21 | } | ||
22 | |||
23 | +static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id) | ||
24 | +{ | ||
25 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0; | ||
26 | +} | ||
27 | + | ||
28 | static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) | ||
29 | { | ||
30 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; | ||
31 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/helper.c | ||
34 | +++ b/target/arm/helper.c | ||
35 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = { | ||
36 | REGINFO_SENTINEL | ||
37 | }; | ||
38 | |||
39 | +static const ARMCPRegInfo tlbios_reginfo[] = { | ||
40 | + { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, | ||
41 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, | ||
42 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
43 | + .writefn = tlbi_aa64_vmalle1is_write }, | ||
44 | + { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, | ||
45 | + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, | ||
46 | + .access = PL1_W, .type = ARM_CP_NO_RAW, | ||
47 | + .writefn = tlbi_aa64_vmalle1is_write }, | ||
48 | + { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, | ||
49 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, | ||
50 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
51 | + .writefn = tlbi_aa64_alle2is_write }, | ||
52 | + { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, | ||
53 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, | ||
54 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
55 | + .writefn = tlbi_aa64_alle1is_write }, | ||
56 | + { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, | ||
57 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, | ||
58 | + .access = PL2_W, .type = ARM_CP_NO_RAW, | ||
59 | + .writefn = tlbi_aa64_alle1is_write }, | ||
60 | + { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64, | ||
61 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0, | ||
62 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
63 | + { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64, | ||
64 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3, | ||
65 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
66 | + { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64, | ||
67 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4, | ||
68 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
69 | + { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64, | ||
70 | + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7, | ||
71 | + .access = PL2_W, .type = ARM_CP_NOP }, | ||
72 | + { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64, | ||
73 | + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, | ||
74 | + .access = PL3_W, .type = ARM_CP_NO_RAW, | ||
75 | + .writefn = tlbi_aa64_alle3is_write }, | ||
76 | + REGINFO_SENTINEL | ||
77 | +}; | ||
78 | + | ||
79 | static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) | ||
80 | { | ||
81 | Error *err = NULL; | ||
82 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | ||
83 | if (cpu_isar_feature(aa64_tlbirange, cpu)) { | ||
84 | define_arm_cp_regs(cpu, tlbirange_reginfo); | ||
85 | } | ||
86 | + if (cpu_isar_feature(aa64_tlbios, cpu)) { | ||
87 | + define_arm_cp_regs(cpu, tlbios_reginfo); | ||
88 | + } | ||
89 | #ifndef CONFIG_USER_ONLY | ||
90 | /* Data Cache clean instructions up to PoP */ | ||
91 | if (cpu_isar_feature(aa64_dcpop, cpu)) { | ||
92 | -- | ||
93 | 2.20.1 | ||
94 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Dump SVCR, plus use the correct access check for Streaming Mode. | ||
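For example (the register value is invented for illustration): SVCR.SM is bit 0 and SVCR.ZA is bit 1, so a CPU with both streaming mode and ZA storage enabled has env->svcr == 3, and the hunk below appends " SVCR=00000003 ZS" to the PSTATE line; a clear bit prints as '-', so ZA-only shows "Z-".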
2 | 4 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-58-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-2-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 9 | --- |
8 | target/arm/helper-sve.h | 5 +++++ | 10 | target/arm/cpu.c | 17 ++++++++++++++++- |
9 | target/arm/sve.decode | 12 ++++++++++++ | 11 | 1 file changed, 16 insertions(+), 1 deletion(-) |
10 | target/arm/sve_helper.c | 20 ++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 14 ++++++++++++++ | ||
12 | 4 files changed, 51 insertions(+) | ||
13 | 12 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
15 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 15 | --- a/target/arm/cpu.c |
17 | +++ b/target/arm/helper-sve.h | 16 | +++ b/target/arm/cpu.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_s, TCG_CALL_NO_RWG, | 17 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
19 | void, ptr, ptr, ptr, ptr, i32) | 18 | int i; |
20 | DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_d, TCG_CALL_NO_RWG, | 19 | int el = arm_current_el(env); |
21 | void, ptr, ptr, ptr, ptr, i32) | 20 | const char *ns_status; |
22 | + | 21 | + bool sve; |
23 | +DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_s, TCG_CALL_NO_RWG, | 22 | |
24 | + void, ptr, ptr, ptr, i32) | 23 | qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); |
25 | +DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_d, TCG_CALL_NO_RWG, | 24 | for (i = 0; i < 32; i++) { |
26 | + void, ptr, ptr, ptr, i32) | 25 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
27 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 26 | el, |
28 | index XXXXXXX..XXXXXXX 100644 | 27 | psr & PSTATE_SP ? 'h' : 't'); |
29 | --- a/target/arm/sve.decode | 28 | |
30 | +++ b/target/arm/sve.decode | 29 | + if (cpu_isar_feature(aa64_sme, cpu)) { |
31 | @@ -XXX,XX +XXX,XX @@ | 30 | + qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c", |
32 | @rrx_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 &rrx_esz | 31 | + env->svcr, |
33 | @rrx_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 &rrx_esz | 32 | + (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'), |
34 | 33 | + (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-')); | |
35 | +# Two registers and a scalar by N-bit index, alternate | 34 | + } |
36 | +@rrx_3a ........ .. . .. rm:3 ...... rn:5 rd:5 \ | 35 | if (cpu_isar_feature(aa64_bti, cpu)) { |
37 | + &rrx_esz index=%index3_19_11 | 36 | qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); |
38 | +@rrx_2a ........ .. . . rm:4 ...... rn:5 rd:5 \ | 37 | } |
39 | + &rrx_esz index=%index2_20_11 | 38 | @@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) |
40 | + | 39 | qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n", |
41 | # Three registers and a scalar by N-bit index | 40 | vfp_get_fpcr(env), vfp_get_fpsr(env)); |
42 | @rrxr_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \ | 41 | |
43 | &rrxr_esz ra=%reg_movprfx index=%index3_22_19 | 42 | - if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { |
44 | @@ -XXX,XX +XXX,XX @@ SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3 | 43 | + if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) { |
45 | SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2 | 44 | + sve = sme_exception_el(env, el) == 0; |
46 | SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3 | 45 | + } else if (cpu_isar_feature(aa64_sve, cpu)) { |
47 | 46 | + sve = sve_exception_el(env, el) == 0; | |
48 | +# SVE2 saturating multiply (indexed) | 47 | + } else { |
49 | +SQDMULLB_zzx_s 01000100 10 1 ..... 1110.0 ..... ..... @rrx_3a esz=2 | 48 | + sve = false; |
50 | +SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3 | ||
51 | +SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2 | ||
52 | +SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3 | ||
53 | + | ||
54 | # SVE2 integer multiply (indexed) | ||
55 | MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | ||
56 | MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | ||
57 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/target/arm/sve_helper.c | ||
60 | +++ b/target/arm/sve_helper.c | ||
61 | @@ -XXX,XX +XXX,XX @@ DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D) | ||
62 | |||
63 | #undef DO_ZZXW | ||
64 | |||
65 | +#define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
66 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
67 | +{ \ | ||
68 | + intptr_t i, j, oprsz = simd_oprsz(desc); \ | ||
69 | + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \ | ||
70 | + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \ | ||
71 | + for (i = 0; i < oprsz; i += 16) { \ | ||
72 | + TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \ | ||
73 | + for (j = 0; j < 16; j += sizeof(TYPEW)) { \ | ||
74 | + TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \ | ||
75 | + *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm); \ | ||
76 | + } \ | ||
77 | + } \ | ||
78 | +} | ||
79 | + | ||
80 | +DO_ZZX(sve2_sqdmull_idx_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s) | ||
81 | +DO_ZZX(sve2_sqdmull_idx_d, int64_t, int32_t, , H1_4, do_sqdmull_d) | ||
82 | + | ||
83 | +#undef DO_ZZX | ||
84 | + | ||
85 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
86 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
87 | { \ | ||
88 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
89 | index XXXXXXX..XXXXXXX 100644 | ||
90 | --- a/target/arm/translate-sve.c | ||
91 | +++ b/target/arm/translate-sve.c | ||
92 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d) | ||
93 | |||
94 | #undef DO_SVE2_RRX | ||
95 | |||
96 | +#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ | ||
97 | + static bool NAME(DisasContext *s, arg_rrx_esz *a) \ | ||
98 | + { \ | ||
99 | + return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, \ | ||
100 | + (a->index << 1) | TOP, FUNC); \ | ||
101 | + } | 49 | + } |
102 | + | 50 | + |
103 | +DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) | 51 | + if (sve) { |
104 | +DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) | 52 | int j, zcr_len = sve_vqm1_for_el(env, el); |
105 | +DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) | 53 | |
106 | +DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) | 54 | for (i = 0; i <= FFR_PRED_NUM; i++) { |
107 | + | ||
108 | +#undef DO_SVE2_RRX_TB | ||
109 | + | ||
110 | static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra, | ||
111 | int data, gen_helper_gvec_4 *fn) | ||
112 | { | ||
113 | -- | 55 | -- |
114 | 2.20.1 | 56 | 2.25.1 |
115 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For SVE, we potentially have a 4th argument coming from the | 3 | This includes the build rules for the decoder, and the |
4 | movprfx instruction. Currently we do not optimize movprfx, | 4 | new file for translation, but excludes any instructions. |
5 | so the problem is not visible. | ||
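Two notes on the messages above. On the left, the extra data pointer is the addend operand: MOVPRFX lets the accumulator come from a different register than the destination, which is why the dot-product DEF_HELPER_FLAGS_4 declarations below grow to DEF_HELPER_FLAGS_5. On the right, QEMU wires a decodetree file in through meson; the rule this patch adds looks roughly like the following sketch (paraphrased — the meson.build hunk itself is not shown in this excerpt):

    gen = [
      decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
      decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
    ]

decodetree generates the disas_sme() entry point named by --decode, and the new translate-sme.c will hold the trans_* callbacks (none yet, per the commit message).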
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-50-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-3-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/helper.h | 20 +++--- | 11 | target/arm/translate-a64.h | 1 + |
13 | target/arm/sve.decode | 7 ++- | 12 | target/arm/sme.decode | 20 ++++++++++++++++++++ |
14 | target/arm/translate-a64.c | 15 ++++- | 13 | target/arm/translate-a64.c | 7 ++++++- |
15 | target/arm/translate-neon.c | 10 +-- | 14 | target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++ |
16 | target/arm/translate-sve.c | 13 ++-- | 15 | target/arm/meson.build | 2 ++ |
17 | target/arm/vec_helper.c | 120 ++++++++++++++++++++---------------- | 16 | 5 files changed, 64 insertions(+), 1 deletion(-) |
18 | 6 files changed, 109 insertions(+), 76 deletions(-) | 17 | create mode 100644 target/arm/sme.decode |
18 | create mode 100644 target/arm/translate-sme.c | ||
19 | 19 | ||
20 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 20 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
21 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/target/arm/helper.h | 22 | --- a/target/arm/translate-a64.h |
23 | +++ b/target/arm/helper.h | 23 | +++ b/target/arm/translate-a64.h |
24 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG, | 24 | @@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s) |
25 | DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG, | 25 | } |
26 | void, ptr, ptr, ptr, ptr, i32) | 26 | |
27 | 27 | bool disas_sve(DisasContext *, uint32_t); | |
28 | -DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 28 | +bool disas_sme(DisasContext *, uint32_t); |
29 | -DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 29 | |
30 | -DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 30 | void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, |
31 | -DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 31 | uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); |
32 | +DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 32 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
33 | +DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 33 | new file mode 100644 |
34 | +DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 34 | index XXXXXXX..XXXXXXX |
35 | +DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 35 | --- /dev/null |
36 | 36 | +++ b/target/arm/sme.decode | |
37 | -DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 37 | @@ -XXX,XX +XXX,XX @@ |
38 | -DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 38 | +# AArch64 SME instruction descriptions |
39 | -DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 39 | +# |
40 | -DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 40 | +# Copyright (c) 2022 Linaro, Ltd |
41 | +DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG, | 41 | +# |
42 | + void, ptr, ptr, ptr, ptr, i32) | 42 | +# This library is free software; you can redistribute it and/or |
43 | +DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG, | 43 | +# modify it under the terms of the GNU Lesser General Public |
44 | + void, ptr, ptr, ptr, ptr, i32) | 44 | +# License as published by the Free Software Foundation; either |
45 | +DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG, | 45 | +# version 2.1 of the License, or (at your option) any later version. |
46 | + void, ptr, ptr, ptr, ptr, i32) | 46 | +# |
47 | +DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG, | 47 | +# This library is distributed in the hope that it will be useful, |
48 | + void, ptr, ptr, ptr, ptr, i32) | 48 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
49 | 49 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
50 | DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, | 50 | +# Lesser General Public License for more details. |
51 | void, ptr, ptr, ptr, ptr, i32) | 51 | +# |
52 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 52 | +# You should have received a copy of the GNU Lesser General Public |
53 | index XXXXXXX..XXXXXXX 100644 | 53 | +# License along with this library; if not, see <http://www.gnu.org/licenses/>. |
54 | --- a/target/arm/sve.decode | 54 | + |
55 | +++ b/target/arm/sve.decode | 55 | +# |
56 | @@ -XXX,XX +XXX,XX @@ UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u | 56 | +# This file is processed by scripts/decodetree.py |
57 | MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s | 57 | +# |
58 | |||
59 | # SVE integer dot product (unpredicated) | ||
60 | -DOT_zzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx | ||
61 | +DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \ | ||
62 | + ra=%reg_movprfx | ||
63 | |||
64 | # SVE integer dot product (indexed) | ||
65 | -DOT_zzx 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \ | ||
66 | +DOT_zzxw 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \ | ||
67 | sz=0 ra=%reg_movprfx | ||
68 | -DOT_zzx 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \ | ||
69 | +DOT_zzxw 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \ | ||
70 | sz=1 ra=%reg_movprfx | ||
71 | |||
72 | # SVE floating-point complex add (predicated) | ||
73 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 58 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
74 | index XXXXXXX..XXXXXXX 100644 | 59 | index XXXXXXX..XXXXXXX 100644 |
75 | --- a/target/arm/translate-a64.c | 60 | --- a/target/arm/translate-a64.c |
76 | +++ b/target/arm/translate-a64.c | 61 | +++ b/target/arm/translate-a64.c |
77 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn, | 62 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) |
78 | tcg_temp_free_ptr(qc_ptr); | 63 | } |
79 | } | 64 | |
80 | 65 | switch (extract32(insn, 25, 4)) { | |
81 | +/* Expand a 4-operand operation using an out-of-line helper. */ | 66 | - case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ |
82 | +static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn, | 67 | + case 0x0: |
83 | + int rm, int ra, int data, gen_helper_gvec_4 *fn) | 68 | + if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { |
84 | +{ | 69 | + unallocated_encoding(s); |
85 | + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), | 70 | + } |
86 | + vec_full_reg_offset(s, rn), | 71 | + break; |
87 | + vec_full_reg_offset(s, rm), | 72 | + case 0x1: case 0x3: /* UNALLOCATED */ |
88 | + vec_full_reg_offset(s, ra), | 73 | unallocated_encoding(s); |
89 | + is_q ? 16 : 8, vec_full_reg_size(s), data, fn); | 74 | break; |
90 | +} | 75 | case 0x2: |
76 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
77 | new file mode 100644 | ||
78 | index XXXXXXX..XXXXXXX | ||
79 | --- /dev/null | ||
80 | +++ b/target/arm/translate-sme.c | ||
81 | @@ -XXX,XX +XXX,XX @@ | ||
82 | +/* | ||
83 | + * AArch64 SME translation | ||
84 | + * | ||
85 | + * Copyright (c) 2022 Linaro, Ltd | ||
86 | + * | ||
87 | + * This library is free software; you can redistribute it and/or | ||
88 | + * modify it under the terms of the GNU Lesser General Public | ||
89 | + * License as published by the Free Software Foundation; either | ||
90 | + * version 2.1 of the License, or (at your option) any later version. | ||
91 | + * | ||
92 | + * This library is distributed in the hope that it will be useful, | ||
93 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
94 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
95 | + * Lesser General Public License for more details. | ||
96 | + * | ||
97 | + * You should have received a copy of the GNU Lesser General Public | ||
98 | + * License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
99 | + */ | ||
91 | + | 100 | + |
92 | /* Set ZF and NF based on a 64 bit result. This is alas fiddlier | 101 | +#include "qemu/osdep.h" |
93 | * than the 32 bit equivalent. | 102 | +#include "cpu.h" |
94 | */ | 103 | +#include "tcg/tcg-op.h" |
95 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | 104 | +#include "tcg/tcg-op-gvec.h" |
96 | return; | 105 | +#include "tcg/tcg-gvec-desc.h" |
97 | 106 | +#include "translate.h" | |
98 | case 0x2: /* SDOT / UDOT */ | 107 | +#include "exec/helper-gen.h" |
99 | - gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, | 108 | +#include "translate-a64.h" |
100 | + gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, | 109 | +#include "fpu/softfloat.h" |
101 | u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b); | 110 | + |
102 | return; | 111 | + |
103 | 112 | +/* | |
104 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | 113 | + * Include the generated decoder. |
105 | switch (16 * u + opcode) { | 114 | + */ |
106 | case 0x0e: /* SDOT */ | 115 | + |
107 | case 0x1e: /* UDOT */ | 116 | +#include "decode-sme.c.inc" |
108 | - gen_gvec_op3_ool(s, is_q, rd, rn, rm, index, | 117 | diff --git a/target/arm/meson.build b/target/arm/meson.build |
109 | + gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index, | ||
110 | u ? gen_helper_gvec_udot_idx_b | ||
111 | : gen_helper_gvec_sdot_idx_b); | ||
112 | return; | ||
113 | diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c | ||
114 | index XXXXXXX..XXXXXXX 100644 | 118 | index XXXXXXX..XXXXXXX 100644 |
115 | --- a/target/arm/translate-neon.c | 119 | --- a/target/arm/meson.build |
116 | +++ b/target/arm/translate-neon.c | 120 | +++ b/target/arm/meson.build |
117 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a) | 121 | @@ -XXX,XX +XXX,XX @@ |
118 | static bool trans_VDOT(DisasContext *s, arg_VDOT *a) | 122 | gen = [ |
119 | { | 123 | decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), |
120 | int opr_sz; | 124 | + decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), |
121 | - gen_helper_gvec_3 *fn_gvec; | 125 | decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), |
122 | + gen_helper_gvec_4 *fn_gvec; | 126 | decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), |
123 | 127 | decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), | |
124 | if (!dc_isar_feature(aa32_dp, s)) { | 128 | @@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files( |
125 | return false; | 129 | 'sme_helper.c', |
126 | @@ -XXX,XX +XXX,XX @@ static bool trans_VDOT(DisasContext *s, arg_VDOT *a) | 130 | 'translate-a64.c', |
127 | 131 | 'translate-sve.c', | |
128 | opr_sz = (1 + a->q) * 8; | 132 | + 'translate-sme.c', |
129 | fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b; | 133 | )) |
130 | - tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd), | 134 | |
131 | + tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd), | 135 | arm_softmmu_ss = ss.source_set() |
132 | vfp_reg_offset(1, a->vn), | ||
133 | vfp_reg_offset(1, a->vm), | ||
134 | + vfp_reg_offset(1, a->vd), | ||
135 | opr_sz, opr_sz, 0, fn_gvec); | ||
136 | return true; | ||
137 | } | ||
138 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a) | ||
139 | |||
140 | static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a) | ||
141 | { | ||
142 | - gen_helper_gvec_3 *fn_gvec; | ||
143 | + gen_helper_gvec_4 *fn_gvec; | ||
144 | int opr_sz; | ||
145 | TCGv_ptr fpst; | ||
146 | |||
147 | @@ -XXX,XX +XXX,XX @@ static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a) | ||
148 | fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; | ||
149 | opr_sz = (1 + a->q) * 8; | ||
150 | fpst = fpstatus_ptr(FPST_STD); | ||
151 | - tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd), | ||
152 | + tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd), | ||
153 | vfp_reg_offset(1, a->vn), | ||
154 | vfp_reg_offset(1, a->rm), | ||
155 | + vfp_reg_offset(1, a->vd), | ||
156 | opr_sz, opr_sz, a->index, fn_gvec); | ||
157 | tcg_temp_free_ptr(fpst); | ||
158 | return true; | ||
159 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
160 | index XXXXXXX..XXXXXXX 100644 | ||
161 | --- a/target/arm/translate-sve.c | ||
162 | +++ b/target/arm/translate-sve.c | ||
163 | @@ -XXX,XX +XXX,XX @@ DO_ZZI(UMIN, umin) | ||
164 | |||
165 | #undef DO_ZZI | ||
166 | |||
167 | -static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a) | ||
168 | +static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a) | ||
169 | { | ||
170 | - static gen_helper_gvec_3 * const fns[2][2] = { | ||
171 | + static gen_helper_gvec_4 * const fns[2][2] = { | ||
172 | { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, | ||
173 | { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } | ||
174 | }; | ||
175 | |||
176 | if (sve_access_check(s)) { | ||
177 | - gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, 0); | ||
178 | + gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0); | ||
179 | } | ||
180 | return true; | ||
181 | } | ||
182 | |||
183 | -static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a) | ||
184 | +static bool trans_DOT_zzxw(DisasContext *s, arg_DOT_zzxw *a) | ||
185 | { | ||
186 | - static gen_helper_gvec_3 * const fns[2][2] = { | ||
187 | + static gen_helper_gvec_4 * const fns[2][2] = { | ||
188 | { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h }, | ||
189 | { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h } | ||
190 | }; | ||
191 | |||
192 | if (sve_access_check(s)) { | ||
193 | - gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->index); | ||
194 | + gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, | ||
195 | + a->ra, a->index); | ||
196 | } | ||
197 | return true; | ||
198 | } | ||
199 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
200 | index XXXXXXX..XXXXXXX 100644 | ||
201 | --- a/target/arm/vec_helper.c | ||
202 | +++ b/target/arm/vec_helper.c | ||
203 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm, | ||
204 | * All elements are treated equally, no matter where they are. | ||
205 | */ | ||
206 | |||
207 | -void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
208 | +void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
209 | { | ||
210 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
211 | - int32_t *d = vd; | ||
212 | + int32_t *d = vd, *a = va; | ||
213 | int8_t *n = vn, *m = vm; | ||
214 | |||
215 | for (i = 0; i < opr_sz / 4; ++i) { | ||
216 | - d[i] += n[i * 4 + 0] * m[i * 4 + 0] | ||
217 | - + n[i * 4 + 1] * m[i * 4 + 1] | ||
218 | - + n[i * 4 + 2] * m[i * 4 + 2] | ||
219 | - + n[i * 4 + 3] * m[i * 4 + 3]; | ||
220 | + d[i] = (a[i] + | ||
221 | + n[i * 4 + 0] * m[i * 4 + 0] + | ||
222 | + n[i * 4 + 1] * m[i * 4 + 1] + | ||
223 | + n[i * 4 + 2] * m[i * 4 + 2] + | ||
224 | + n[i * 4 + 3] * m[i * 4 + 3]); | ||
225 | } | ||
226 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
227 | } | ||
228 | |||
229 | -void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
230 | +void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
231 | { | ||
232 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
233 | - uint32_t *d = vd; | ||
234 | + uint32_t *d = vd, *a = va; | ||
235 | uint8_t *n = vn, *m = vm; | ||
236 | |||
237 | for (i = 0; i < opr_sz / 4; ++i) { | ||
238 | - d[i] += n[i * 4 + 0] * m[i * 4 + 0] | ||
239 | - + n[i * 4 + 1] * m[i * 4 + 1] | ||
240 | - + n[i * 4 + 2] * m[i * 4 + 2] | ||
241 | - + n[i * 4 + 3] * m[i * 4 + 3]; | ||
242 | + d[i] = (a[i] + | ||
243 | + n[i * 4 + 0] * m[i * 4 + 0] + | ||
244 | + n[i * 4 + 1] * m[i * 4 + 1] + | ||
245 | + n[i * 4 + 2] * m[i * 4 + 2] + | ||
246 | + n[i * 4 + 3] * m[i * 4 + 3]); | ||
247 | } | ||
248 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
249 | } | ||
250 | |||
251 | -void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
252 | +void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
253 | { | ||
254 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
255 | - int64_t *d = vd; | ||
256 | + int64_t *d = vd, *a = va; | ||
257 | int16_t *n = vn, *m = vm; | ||
258 | |||
259 | for (i = 0; i < opr_sz / 8; ++i) { | ||
260 | - d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0] | ||
261 | - + (int64_t)n[i * 4 + 1] * m[i * 4 + 1] | ||
262 | - + (int64_t)n[i * 4 + 2] * m[i * 4 + 2] | ||
263 | - + (int64_t)n[i * 4 + 3] * m[i * 4 + 3]; | ||
264 | + d[i] = (a[i] + | ||
265 | + (int64_t)n[i * 4 + 0] * m[i * 4 + 0] + | ||
266 | + (int64_t)n[i * 4 + 1] * m[i * 4 + 1] + | ||
267 | + (int64_t)n[i * 4 + 2] * m[i * 4 + 2] + | ||
268 | + (int64_t)n[i * 4 + 3] * m[i * 4 + 3]); | ||
269 | } | ||
270 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
271 | } | ||
272 | |||
273 | -void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
274 | +void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
275 | { | ||
276 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
277 | - uint64_t *d = vd; | ||
278 | + uint64_t *d = vd, *a = va; | ||
279 | uint16_t *n = vn, *m = vm; | ||
280 | |||
281 | for (i = 0; i < opr_sz / 8; ++i) { | ||
282 | - d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] | ||
283 | - + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] | ||
284 | - + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] | ||
285 | - + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]; | ||
286 | + d[i] = (a[i] + | ||
287 | + (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] + | ||
288 | + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] + | ||
289 | + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] + | ||
290 | + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]); | ||
291 | } | ||
292 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
293 | } | ||
294 | |||
295 | -void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
296 | +void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, | ||
297 | + void *va, uint32_t desc) | ||
298 | { | ||
299 | intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; | ||
300 | intptr_t index = simd_data(desc); | ||
301 | - int32_t *d = vd; | ||
302 | + int32_t *d = vd, *a = va; | ||
303 | int8_t *n = vn; | ||
304 | int8_t *m_indexed = (int8_t *)vm + H4(index) * 4; | ||
305 | |||
306 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
307 | int8_t m3 = m_indexed[i * 4 + 3]; | ||
308 | |||
309 | do { | ||
310 | - d[i] += n[i * 4 + 0] * m0 | ||
311 | - + n[i * 4 + 1] * m1 | ||
312 | - + n[i * 4 + 2] * m2 | ||
313 | - + n[i * 4 + 3] * m3; | ||
314 | + d[i] = (a[i] + | ||
315 | + n[i * 4 + 0] * m0 + | ||
316 | + n[i * 4 + 1] * m1 + | ||
317 | + n[i * 4 + 2] * m2 + | ||
318 | + n[i * 4 + 3] * m3); | ||
319 | } while (++i < segend); | ||
320 | segend = i + 4; | ||
321 | } while (i < opr_sz_4); | ||
322 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
323 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
324 | } | ||
325 | |||
326 | -void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
327 | +void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, | ||
328 | + void *va, uint32_t desc) | ||
329 | { | ||
330 | intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; | ||
331 | intptr_t index = simd_data(desc); | ||
332 | - uint32_t *d = vd; | ||
333 | + uint32_t *d = vd, *a = va; | ||
334 | uint8_t *n = vn; | ||
335 | uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4; | ||
336 | |||
337 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
338 | uint8_t m3 = m_indexed[i * 4 + 3]; | ||
339 | |||
340 | do { | ||
341 | - d[i] += n[i * 4 + 0] * m0 | ||
342 | - + n[i * 4 + 1] * m1 | ||
343 | - + n[i * 4 + 2] * m2 | ||
344 | - + n[i * 4 + 3] * m3; | ||
345 | + d[i] = (a[i] + | ||
346 | + n[i * 4 + 0] * m0 + | ||
347 | + n[i * 4 + 1] * m1 + | ||
348 | + n[i * 4 + 2] * m2 + | ||
349 | + n[i * 4 + 3] * m3); | ||
350 | } while (++i < segend); | ||
351 | segend = i + 4; | ||
352 | } while (i < opr_sz_4); | ||
353 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
354 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
355 | } | ||
356 | |||
357 | -void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
358 | +void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, | ||
359 | + void *va, uint32_t desc) | ||
360 | { | ||
361 | intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; | ||
362 | intptr_t index = simd_data(desc); | ||
363 | - int64_t *d = vd; | ||
364 | + int64_t *d = vd, *a = va; | ||
365 | int16_t *n = vn; | ||
366 | int16_t *m_indexed = (int16_t *)vm + index * 4; | ||
367 | |||
368 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
369 | * Process the entire segment all at once, writing back the results | ||
370 | * only after we've consumed all of the inputs. | ||
371 | */ | ||
372 | - for (i = 0; i < opr_sz_8 ; i += 2) { | ||
373 | - uint64_t d0, d1; | ||
374 | + for (i = 0; i < opr_sz_8; i += 2) { | ||
375 | + int64_t d0, d1; | ||
376 | |||
377 | - d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; | ||
378 | + d0 = a[i + 0]; | ||
379 | + d0 += n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; | ||
380 | d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1]; | ||
381 | d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2]; | ||
382 | d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3]; | ||
383 | - d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; | ||
384 | + | ||
385 | + d1 = a[i + 1]; | ||
386 | + d1 += n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; | ||
387 | d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1]; | ||
388 | d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2]; | ||
389 | d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3]; | ||
390 | |||
391 | - d[i + 0] += d0; | ||
392 | - d[i + 1] += d1; | ||
393 | + d[i + 0] = d0; | ||
394 | + d[i + 1] = d1; | ||
395 | } | ||
396 | - | ||
397 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
398 | } | ||
399 | |||
400 | -void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
401 | +void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, | ||
402 | + void *va, uint32_t desc) | ||
403 | { | ||
404 | intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; | ||
405 | intptr_t index = simd_data(desc); | ||
406 | - uint64_t *d = vd; | ||
407 | + uint64_t *d = vd, *a = va; | ||
408 | uint16_t *n = vn; | ||
409 | uint16_t *m_indexed = (uint16_t *)vm + index * 4; | ||
410 | |||
411 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
412 | * Process the entire segment all at once, writing back the results | ||
413 | * only after we've consumed all of the inputs. | ||
414 | */ | ||
415 | - for (i = 0; i < opr_sz_8 ; i += 2) { | ||
416 | + for (i = 0; i < opr_sz_8; i += 2) { | ||
417 | uint64_t d0, d1; | ||
418 | |||
419 | - d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; | ||
420 | + d0 = a[i + 0]; | ||
421 | + d0 += n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; | ||
422 | d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1]; | ||
423 | d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2]; | ||
424 | d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3]; | ||
425 | - d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; | ||
426 | + | ||
427 | + d1 = a[i + 1]; | ||
428 | + d1 += n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; | ||
429 | d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1]; | ||
430 | d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2]; | ||
431 | d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3]; | ||
432 | |||
433 | - d[i + 0] += d0; | ||
434 | - d[i + 1] += d1; | ||
435 | + d[i + 0] = d0; | ||
436 | + d[i + 1] = d1; | ||
437 | } | ||
438 | - | ||
439 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
440 | } | ||
441 | |||
442 | -- | 136 | -- |
443 | 2.20.1 | 137 | 2.25.1 |
444 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | This new behaviour is in the ARM pseudocode function | ||
4 | AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32 | ||
5 | via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which | ||
6 | the trap would be delivered is in AArch64 mode. | ||
7 | |||
8 | Given that ARMv9 drops support for AArch32 outside EL0, the trap EL | ||
9 | detection ought to be trivially true, but the pseudocode still contains | ||
10 | a number of conditions, and QEMU has not yet committed to dropping A32 | ||
11 | support for EL[12] when v9 features are present. | ||
12 | |||
13 | Since the computation of SME_TRAP_NONSTREAMING is necessarily different | ||
14 | for the two modes, we might as well preserve bits within TBFLAG_ANY and | ||
15 | allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead. | ||
16 | |||
17 | Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table | ||
18 | of instructions illegal in streaming mode. | ||
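
For the AArch64 case, the flag added here reduces to a one-line predicate
(a condensed sketch; the function name is invented for illustration, while
FIELD_EX64, SVCR and sme_fa64() are the names used in the diff below — the
AArch32 case additionally conditions on EL0, HCR_EL2.TGE and EL1 being
AArch64):

    /* Condensed sketch: the trap is armed only when streaming mode is
     * active and FA64 is not enabled at every applicable EL. */
    static bool a64_sme_trap_nonstreaming(CPUARMState *env, int el)
    {
        return FIELD_EX64(env->svcr, SVCR, SM) && !sme_fa64(env, el);
    }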
2 | 19 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 20 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 21 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-18-richard.henderson@linaro.org | 22 | Message-id: 20220708151540.18136-4-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 23 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 24 | --- |
8 | target/arm/cpu.h | 5 +++ | 25 | target/arm/cpu.h | 7 +++ |
9 | target/arm/helper-sve.h | 15 ++++++++ | 26 | target/arm/translate.h | 4 ++ |
10 | target/arm/sve.decode | 6 ++++ | 27 | target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++ |
11 | target/arm/sve_helper.c | 73 ++++++++++++++++++++++++++++++++++++++ | 28 | target/arm/helper.c | 41 +++++++++++++++++ |
12 | target/arm/translate-sve.c | 36 +++++++++++++++++++ | 29 | target/arm/translate-a64.c | 40 ++++++++++++++++- |
13 | 5 files changed, 135 insertions(+) | 30 | target/arm/translate-vfp.c | 12 +++++ |
31 | target/arm/translate.c | 2 + | ||
32 | target/arm/meson.build | 1 + | ||
33 | 8 files changed, 195 insertions(+), 2 deletions(-) | ||
34 | create mode 100644 target/arm/sme-fa64.decode | ||
14 | 35 | ||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 36 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
16 | index XXXXXXX..XXXXXXX 100644 | 37 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/cpu.h | 38 | --- a/target/arm/cpu.h |
18 | +++ b/target/arm/cpu.h | 39 | +++ b/target/arm/cpu.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) | 40 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) |
20 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; | 41 | * the same thing as the current security state of the processor! |
42 | */ | ||
43 | FIELD(TBFLAG_A32, NS, 10, 1) | ||
44 | +/* | ||
45 | + * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. | ||
46 | + * This requires an SME trap from AArch32 mode when using NEON. | ||
47 | + */ | ||
48 | +FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1) | ||
49 | |||
50 | /* | ||
51 | * Bit usage when in AArch32 state, for M-profile only. | ||
52 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2) | ||
53 | FIELD(TBFLAG_A64, PSTATE_SM, 22, 1) | ||
54 | FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) | ||
55 | FIELD(TBFLAG_A64, SVL, 24, 4) | ||
56 | +/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ | ||
57 | +FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) | ||
58 | |||
59 | /* | ||
60 | * Helpers for using the above. | ||
61 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/target/arm/translate.h | ||
64 | +++ b/target/arm/translate.h | ||
65 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
66 | bool pstate_sm; | ||
67 | /* True if PSTATE.ZA is set. */ | ||
68 | bool pstate_za; | ||
69 | + /* True if non-streaming insns should raise an SME Streaming exception. */ | ||
70 | + bool sme_trap_nonstreaming; | ||
71 | + /* True if the current instruction is non-streaming. */ | ||
72 | + bool is_nonstreaming; | ||
73 | /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */ | ||
74 | bool mve_no_pred; | ||
75 | /* | ||
76 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode | ||
77 | new file mode 100644 | ||
78 | index XXXXXXX..XXXXXXX | ||
79 | --- /dev/null | ||
80 | +++ b/target/arm/sme-fa64.decode | ||
81 | @@ -XXX,XX +XXX,XX @@ | ||
82 | +# AArch64 SME allowed instruction decoding | ||
83 | +# | ||
84 | +# Copyright (c) 2022 Linaro, Ltd | ||
85 | +# | ||
86 | +# This library is free software; you can redistribute it and/or | ||
87 | +# modify it under the terms of the GNU Lesser General Public | ||
88 | +# License as published by the Free Software Foundation; either | ||
89 | +# version 2.1 of the License, or (at your option) any later version. | ||
90 | +# | ||
91 | +# This library is distributed in the hope that it will be useful, | ||
92 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
93 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
94 | +# Lesser General Public License for more details. | ||
95 | +# | ||
96 | +# You should have received a copy of the GNU Lesser General Public | ||
97 | +# License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
98 | + | ||
99 | +# | ||
100 | +# This file is processed by scripts/decodetree.py | ||
101 | +# | ||
102 | + | ||
103 | +# These patterns are taken from Appendix E1.1 of DDI0616 A.a, | ||
104 | +# Arm Architecture Reference Manual Supplement, | ||
105 | +# The Scalable Matrix Extension (SME), for Armv9-A | ||
106 | + | ||
107 | +{ | ||
108 | + [ | ||
109 | + OK 0-00 1110 0000 0001 0010 11-- ---- ---- # SMOV W|Xd,Vn.B[0] | ||
110 | + OK 0-00 1110 0000 0010 0010 11-- ---- ---- # SMOV W|Xd,Vn.H[0] | ||
111 | + OK 0100 1110 0000 0100 0010 11-- ---- ---- # SMOV Xd,Vn.S[0] | ||
112 | + OK 0000 1110 0000 0001 0011 11-- ---- ---- # UMOV Wd,Vn.B[0] | ||
113 | + OK 0000 1110 0000 0010 0011 11-- ---- ---- # UMOV Wd,Vn.H[0] | ||
114 | + OK 0000 1110 0000 0100 0011 11-- ---- ---- # UMOV Wd,Vn.S[0] | ||
115 | + OK 0100 1110 0000 1000 0011 11-- ---- ---- # UMOV Xd,Vn.D[0] | ||
116 | + ] | ||
117 | + FAIL 0--0 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD vector operations | ||
118 | +} | ||
119 | + | ||
120 | +{ | ||
121 | + [ | ||
122 | + OK 0101 1110 --1- ---- 11-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar) | ||
123 | + OK 0101 1110 -10- ---- 00-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar, FP16) | ||
124 | + OK 01-1 1110 1-10 0001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar) | ||
125 | + OK 01-1 1110 1111 1001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar, FP16) | ||
126 | + ] | ||
127 | + FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations | ||
128 | +} | ||
129 | + | ||
130 | +FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store | ||
131 | +FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions | ||
132 | +FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
133 | + | ||
134 | +# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions | ||
135 | +# We don't actually need to include these, as the default is OK. | ||
136 | +# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations | ||
137 | +# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers | ||
138 | +# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal) | ||
139 | +# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) | ||
140 | +# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
141 | +# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
142 | + | ||
143 | +FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR | ||
144 | +FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA | ||
145 | +FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT | ||
146 | +FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS | ||
147 | +FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR | ||
148 | +FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP | ||
149 | +FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) | ||
150 | +FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA | ||
151 | +FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL | ||
152 | +FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD | ||
153 | +FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA | ||
154 | +FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA | ||
155 | +FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions | ||
156 | +FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) | ||
157 | +FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) | ||
158 | +FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) | ||
159 | +FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm) | ||
160 | +FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector) | ||
161 | +FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector) | ||
162 | +FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector) | ||
163 | +FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) | ||
164 | +FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) | ||
165 | +FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) | ||
166 | +FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) | ||
167 | +FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch | ||
168 | +FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar) | ||
169 | +FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar) | ||
170 | +FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector) | ||
171 | +FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc) | ||
172 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
173 | index XXXXXXX..XXXXXXX 100644 | ||
174 | --- a/target/arm/helper.c | ||
175 | +++ b/target/arm/helper.c | ||
176 | @@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el) | ||
177 | return 0; | ||
21 | } | 178 | } |
22 | 179 | ||
23 | +static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) | 180 | +/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */ |
24 | +{ | 181 | +static bool sme_fa64(CPUARMState *env, int el) |
25 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; | 182 | +{ |
183 | + if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) { | ||
184 | + return false; | ||
185 | + } | ||
186 | + | ||
187 | + if (el <= 1 && !el_is_in_host(env, el)) { | ||
188 | + if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) { | ||
189 | + return false; | ||
190 | + } | ||
191 | + } | ||
192 | + if (el <= 2 && arm_is_el2_enabled(env)) { | ||
193 | + if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) { | ||
194 | + return false; | ||
195 | + } | ||
196 | + } | ||
197 | + if (arm_feature(env, ARM_FEATURE_EL3)) { | ||
198 | + if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) { | ||
199 | + return false; | ||
200 | + } | ||
201 | + } | ||
202 | + | ||
203 | + return true; | ||
26 | +} | 204 | +} |
27 | + | 205 | + |
28 | /* | 206 | /* |
29 | * Feature tests for "does this exist in either 32-bit or 64-bit?" | 207 | * Given that SVE is enabled, return the vector length for EL. |
30 | */ | 208 | */ |
31 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 209 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, |
32 | index XXXXXXX..XXXXXXX 100644 | 210 | DP_TBFLAG_ANY(flags, PSTATE__IL, 1); |
33 | --- a/target/arm/helper-sve.h | 211 | } |
34 | +++ b/target/arm/helper-sve.h | 212 | |
35 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 213 | + /* |
36 | DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 214 | + * The SME exception we are testing for is raised via |
37 | DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 215 | + * AArch64.CheckFPAdvSIMDEnabled(), as called from |
38 | DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 216 | + * AArch32.CheckAdvSIMDOrFPEnabled(). |
39 | + | 217 | + */ |
40 | +DEF_HELPER_FLAGS_4(sve2_bext_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 218 | + if (el == 0 |
41 | +DEF_HELPER_FLAGS_4(sve2_bext_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 219 | + && FIELD_EX64(env->svcr, SVCR, SM) |
42 | +DEF_HELPER_FLAGS_4(sve2_bext_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 220 | + && (!arm_is_el2_enabled(env) |
43 | +DEF_HELPER_FLAGS_4(sve2_bext_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 221 | + || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE))) |
44 | + | 222 | + && arm_el_is_aa64(env, 1) |
45 | +DEF_HELPER_FLAGS_4(sve2_bdep_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 223 | + && !sme_fa64(env, el)) { |
46 | +DEF_HELPER_FLAGS_4(sve2_bdep_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 224 | + DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1); |
47 | +DEF_HELPER_FLAGS_4(sve2_bdep_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 225 | + } |
48 | +DEF_HELPER_FLAGS_4(sve2_bdep_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 226 | + |
49 | + | 227 | return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); |
50 | +DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 228 | } |
51 | +DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 229 | |
52 | +DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 230 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, |
53 | +DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 231 | } |
54 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 232 | if (FIELD_EX64(env->svcr, SVCR, SM)) { |
55 | index XXXXXXX..XXXXXXX 100644 | 233 | DP_TBFLAG_A64(flags, PSTATE_SM, 1); |
56 | --- a/target/arm/sve.decode | 234 | + DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); |
57 | +++ b/target/arm/sve.decode | 235 | } |
58 | @@ -XXX,XX +XXX,XX @@ USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl | 236 | DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA)); |
59 | 237 | } | |
60 | EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm | 238 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
61 | EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm | 239 | index XXXXXXX..XXXXXXX 100644 |
62 | + | 240 | --- a/target/arm/translate-a64.c |
63 | +## SVE2 bitwise permute | 241 | +++ b/target/arm/translate-a64.c |
64 | + | 242 | @@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element, |
65 | +BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm | 243 | * unallocated-encoding checks (otherwise the syndrome information |
66 | +BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm | 244 | * for the resulting exception will be incorrect). |
67 | +BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm | 245 | */ |
68 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 246 | -static bool fp_access_check(DisasContext *s) |
69 | index XXXXXXX..XXXXXXX 100644 | 247 | +static bool fp_access_check_only(DisasContext *s) |
70 | --- a/target/arm/sve_helper.c | ||
71 | +++ b/target/arm/sve_helper.c | ||
72 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR) | ||
73 | |||
74 | #undef DO_ZZZ_NTB | ||
75 | |||
76 | +#define DO_BITPERM(NAME, TYPE, OP) \ | ||
77 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
78 | +{ \ | ||
79 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
80 | + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ | ||
81 | + TYPE nn = *(TYPE *)(vn + i); \ | ||
82 | + TYPE mm = *(TYPE *)(vm + i); \ | ||
83 | + *(TYPE *)(vd + i) = OP(nn, mm, sizeof(TYPE) * 8); \ | ||
84 | + } \ | ||
85 | +} | ||
86 | + | ||
87 | +static uint64_t bitextract(uint64_t data, uint64_t mask, int n) | ||
88 | +{ | ||
89 | + uint64_t res = 0; | ||
90 | + int db, rb = 0; | ||
91 | + | ||
92 | + for (db = 0; db < n; ++db) { | ||
93 | + if ((mask >> db) & 1) { | ||
94 | + res |= ((data >> db) & 1) << rb; | ||
95 | + ++rb; | ||
96 | + } | ||
97 | + } | ||
98 | + return res; | ||
99 | +} | ||
100 | + | ||
101 | +DO_BITPERM(sve2_bext_b, uint8_t, bitextract) | ||
102 | +DO_BITPERM(sve2_bext_h, uint16_t, bitextract) | ||
103 | +DO_BITPERM(sve2_bext_s, uint32_t, bitextract) | ||
104 | +DO_BITPERM(sve2_bext_d, uint64_t, bitextract) | ||
105 | + | ||
106 | +static uint64_t bitdeposit(uint64_t data, uint64_t mask, int n) | ||
107 | +{ | ||
108 | + uint64_t res = 0; | ||
109 | + int rb, db = 0; | ||
110 | + | ||
111 | + for (rb = 0; rb < n; ++rb) { | ||
112 | + if ((mask >> rb) & 1) { | ||
113 | + res |= ((data >> db) & 1) << rb; | ||
114 | + ++db; | ||
115 | + } | ||
116 | + } | ||
117 | + return res; | ||
118 | +} | ||
119 | + | ||
120 | +DO_BITPERM(sve2_bdep_b, uint8_t, bitdeposit) | ||
121 | +DO_BITPERM(sve2_bdep_h, uint16_t, bitdeposit) | ||
122 | +DO_BITPERM(sve2_bdep_s, uint32_t, bitdeposit) | ||
123 | +DO_BITPERM(sve2_bdep_d, uint64_t, bitdeposit) | ||
124 | + | ||
125 | +static uint64_t bitgroup(uint64_t data, uint64_t mask, int n) | ||
126 | +{ | ||
127 | + uint64_t resm = 0, resu = 0; | ||
128 | + int db, rbm = 0, rbu = 0; | ||
129 | + | ||
130 | + for (db = 0; db < n; ++db) { | ||
131 | + uint64_t val = (data >> db) & 1; | ||
132 | + if ((mask >> db) & 1) { | ||
133 | + resm |= val << rbm++; | ||
134 | + } else { | ||
135 | + resu |= val << rbu++; | ||
136 | + } | ||
137 | + } | ||
138 | + | ||
139 | + return resm | (resu << rbm); | ||
140 | +} | ||
141 | + | ||
142 | +DO_BITPERM(sve2_bgrp_b, uint8_t, bitgroup) | ||
143 | +DO_BITPERM(sve2_bgrp_h, uint16_t, bitgroup) | ||
144 | +DO_BITPERM(sve2_bgrp_s, uint32_t, bitgroup) | ||
145 | +DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup) | ||
146 | + | ||
147 | +#undef DO_BITPERM | ||
148 | + | ||
149 | #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \ | ||
150 | void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
151 | { \ | ||
152 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
153 | index XXXXXXX..XXXXXXX 100644 | ||
154 | --- a/target/arm/translate-sve.c | ||
155 | +++ b/target/arm/translate-sve.c | ||
156 | @@ -XXX,XX +XXX,XX @@ static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a) | ||
157 | { | 248 | { |
158 | return do_sve2_shll_tb(s, a, true, true); | 249 | if (s->fp_excp_el) { |
250 | assert(!s->fp_access_checked); | ||
251 | @@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s) | ||
252 | return true; | ||
159 | } | 253 | } |
160 | + | 254 | |
161 | +static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a) | 255 | +static bool fp_access_check(DisasContext *s) |
162 | +{ | 256 | +{ |
163 | + static gen_helper_gvec_3 * const fns[4] = { | 257 | + if (!fp_access_check_only(s)) { |
164 | + gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, | ||
165 | + gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, | ||
166 | + }; | ||
167 | + if (!dc_isar_feature(aa64_sve2_bitperm, s)) { | ||
168 | + return false; | 258 | + return false; |
169 | + } | 259 | + } |
170 | + return do_sve2_zzw_ool(s, a, fns[a->esz], 0); | 260 | + if (s->sme_trap_nonstreaming && s->is_nonstreaming) { |
171 | +} | 261 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, |
172 | + | 262 | + syn_smetrap(SME_ET_Streaming, false)); |
173 | +static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a) | ||
174 | +{ | ||
175 | + static gen_helper_gvec_3 * const fns[4] = { | ||
176 | + gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, | ||
177 | + gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, | ||
178 | + }; | ||
179 | + if (!dc_isar_feature(aa64_sve2_bitperm, s)) { | ||
180 | + return false; | 263 | + return false; |
181 | + } | 264 | + } |
182 | + return do_sve2_zzw_ool(s, a, fns[a->esz], 0); | 265 | + return true; |
183 | +} | 266 | +} |
184 | + | 267 | + |
185 | +static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a) | 268 | /* Check that SVE access is enabled. If it is, return true. |
186 | +{ | 269 | * If not, emit code to generate an appropriate exception and return false. |
187 | + static gen_helper_gvec_3 * const fns[4] = { | 270 | */ |
188 | + gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, | 271 | @@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, |
189 | + gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, | 272 | default: |
190 | + }; | 273 | g_assert_not_reached(); |
191 | + if (!dc_isar_feature(aa64_sve2_bitperm, s)) { | 274 | } |
275 | - if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { | ||
276 | + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { | ||
277 | return; | ||
278 | } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { | ||
279 | return; | ||
280 | @@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) | ||
281 | } | ||
282 | } | ||
283 | |||
284 | +/* | ||
285 | + * Include the generated SME FA64 decoder. | ||
286 | + */ | ||
287 | + | ||
288 | +#include "decode-sme-fa64.c.inc" | ||
289 | + | ||
290 | +static bool trans_OK(DisasContext *s, arg_OK *a) | ||
291 | +{ | ||
292 | + return true; | ||
293 | +} | ||
294 | + | ||
295 | +static bool trans_FAIL(DisasContext *s, arg_OK *a) | ||
296 | +{ | ||
297 | + s->is_nonstreaming = true; | ||
298 | + return true; | ||
299 | +} | ||
300 | + | ||
301 | /** | ||
302 | * is_guarded_page: | ||
303 | * @env: The cpu environment | ||
304 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
305 | dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); | ||
306 | dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); | ||
307 | dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); | ||
308 | + dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); | ||
309 | dc->vec_len = 0; | ||
310 | dc->vec_stride = 0; | ||
311 | dc->cp_regs = arm_cpu->cp_regs; | ||
312 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) | ||
313 | } | ||
314 | } | ||
315 | |||
316 | + s->is_nonstreaming = false; | ||
317 | + if (s->sme_trap_nonstreaming) { | ||
318 | + disas_sme_fa64(s, insn); | ||
319 | + } | ||
320 | + | ||
321 | switch (extract32(insn, 25, 4)) { | ||
322 | case 0x0: | ||
323 | if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { | ||
324 | diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c | ||
325 | index XXXXXXX..XXXXXXX 100644 | ||
326 | --- a/target/arm/translate-vfp.c | ||
327 | +++ b/target/arm/translate-vfp.c | ||
328 | @@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled) | ||
329 | return false; | ||
330 | } | ||
331 | |||
332 | + /* | ||
333 | + * Note that rebuild_hflags_a32 has already accounted for being in EL0 | ||
334 | + * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not | ||
335 | + * appear to be any insns which touch VFP which are allowed. | ||
336 | + */ | ||
337 | + if (s->sme_trap_nonstreaming) { | ||
338 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
339 | + syn_smetrap(SME_ET_Streaming, | ||
340 | + s->base.pc_next - s->pc_curr == 2)); | ||
192 | + return false; | 341 | + return false; |
193 | + } | 342 | + } |
194 | + return do_sve2_zzw_ool(s, a, fns[a->esz], 0); | 343 | + |
195 | +} | 344 | if (!s->vfp_enabled && !ignore_vfp_enabled) { |
345 | assert(!arm_dc_feature(s, ARM_FEATURE_M)); | ||
346 | unallocated_encoding(s); | ||
347 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
348 | index XXXXXXX..XXXXXXX 100644 | ||
349 | --- a/target/arm/translate.c | ||
350 | +++ b/target/arm/translate.c | ||
351 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
352 | dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); | ||
353 | dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); | ||
354 | } | ||
355 | + dc->sme_trap_nonstreaming = | ||
356 | + EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); | ||
357 | } | ||
358 | dc->cp_regs = cpu->cp_regs; | ||
359 | dc->features = env->features; | ||
360 | diff --git a/target/arm/meson.build b/target/arm/meson.build | ||
361 | index XXXXXXX..XXXXXXX 100644 | ||
362 | --- a/target/arm/meson.build | ||
363 | +++ b/target/arm/meson.build | ||
364 | @@ -XXX,XX +XXX,XX @@ | ||
365 | gen = [ | ||
366 | decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), | ||
367 | decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), | ||
368 | + decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'), | ||
369 | decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), | ||
370 | decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), | ||
371 | decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), | ||
196 | -- | 372 | -- |
197 | 2.20.1 | 373 | 2.25.1 |
198 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | This completes the section "SVE2 bitwise shift right narrow". | 3 | Mark ADR as a non-streaming instruction, which should trap |
4 | if full A64 support is not enabled in streaming mode. |
5 | |||
6 | Removing entries from sme-fa64.decode is an easy way to see | ||
7 | what remains to be done. | ||
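
For reference, the first of these uses expands to roughly the following
(a sketch of the TRANS_FEAT_NONSTREAMING macro from translate.h applied
to ADR_p32, per the diff below):

    /* Sketch of TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a,
     * gen_helper_sve_adr_p32) after expansion: mark the insn
     * non-streaming before the feature test, so that the
     * fp_access_check() reached via sve_access_check() can raise the
     * SME streaming trap when FA64 is disabled. */
    static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
    {
        s->is_nonstreaming = true;
        return dc_isar_feature(aa64_sve, s)
            && do_adr(s, a, gen_helper_sve_adr_p32);
    }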
4 | 8 | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20210525010358.152808-30-richard.henderson@linaro.org | 11 | Message-id: 20220708151540.18136-5-richard.henderson@linaro.org |
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 13 | --- |
10 | target/arm/helper-sve.h | 16 ++++++ | 14 | target/arm/translate.h | 7 +++++++ |
11 | target/arm/sve.decode | 4 ++ | 15 | target/arm/sme-fa64.decode | 1 - |
12 | target/arm/sve_helper.c | 24 +++++++++ | 16 | target/arm/translate-sve.c | 8 ++++---- |
13 | target/arm/translate-sve.c | 105 +++++++++++++++++++++++++++++++++++++ | 17 | 3 files changed, 11 insertions(+), 5 deletions(-) |
14 | 4 files changed, 149 insertions(+) | ||
15 | 18 | ||
16 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 19 | diff --git a/target/arm/translate.h b/target/arm/translate.h |
17 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/helper-sve.h | 21 | --- a/target/arm/translate.h |
19 | +++ b/target/arm/helper-sve.h | 22 | +++ b/target/arm/translate.h |
20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 23 | @@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); |
21 | DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 24 | static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ |
22 | DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 25 | { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); } |
23 | 26 | ||
24 | +DEF_HELPER_FLAGS_3(sve2_sqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 27 | +#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \ |
25 | +DEF_HELPER_FLAGS_3(sve2_sqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 28 | + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ |
26 | +DEF_HELPER_FLAGS_3(sve2_sqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 29 | + { \ |
30 | + s->is_nonstreaming = true; \ | ||
31 | + return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \ | ||
32 | + } | ||
27 | + | 33 | + |
28 | +DEF_HELPER_FLAGS_3(sve2_sqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 34 | #endif /* TARGET_ARM_TRANSLATE_H */ |
29 | +DEF_HELPER_FLAGS_3(sve2_sqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 35 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
30 | +DEF_HELPER_FLAGS_3(sve2_sqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
31 | + | ||
32 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
35 | + | ||
36 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_3(sve2_sqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
39 | + | ||
40 | DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
41 | DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
42 | DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
43 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
44 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/target/arm/sve.decode | 37 | --- a/target/arm/sme-fa64.decode |
46 | +++ b/target/arm/sve.decode | 38 | +++ b/target/arm/sme-fa64.decode |
47 | @@ -XXX,XX +XXX,XX @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr | 39 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
48 | SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr | 40 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
49 | RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr | 41 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
50 | RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr | 42 | |
51 | +SQSHRNB 01000101 .. 1 ..... 00 1000 ..... ..... @rd_rn_tszimm_shr | 43 | -FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR |
52 | +SQSHRNT 01000101 .. 1 ..... 00 1001 ..... ..... @rd_rn_tszimm_shr | 44 | FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
53 | +SQRSHRNB 01000101 .. 1 ..... 00 1010 ..... ..... @rd_rn_tszimm_shr | 45 | FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT |
54 | +SQRSHRNT 01000101 .. 1 ..... 00 1011 ..... ..... @rd_rn_tszimm_shr | 46 | FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS |
55 | UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr | ||
56 | UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr | ||
57 | UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr | ||
58 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/target/arm/sve_helper.c | ||
61 | +++ b/target/arm/sve_helper.c | ||
62 | @@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H) | ||
63 | DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S) | ||
64 | DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D) | ||
65 | |||
66 | +#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX) | ||
67 | +#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX) | ||
68 | +#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX) | ||
69 | + | ||
70 | +DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H) | ||
71 | +DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S) | ||
72 | +DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D) | ||
73 | + | ||
74 | +DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H) | ||
75 | +DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S) | ||
76 | +DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, , H1_4, DO_SQSHRN_D) | ||
77 | + | ||
78 | +#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX) | ||
79 | +#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX) | ||
80 | +#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX) | ||
81 | + | ||
82 | +DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H) | ||
83 | +DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S) | ||
84 | +DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D) | ||
85 | + | ||
86 | +DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H) | ||
87 | +DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S) | ||
88 | +DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRN_D) | ||
89 | + | ||
90 | #define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX) | ||
91 | #define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX) | ||
92 | #define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX) | ||
93 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 47 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
94 | index XXXXXXX..XXXXXXX 100644 | 48 | index XXXXXXX..XXXXXXX 100644 |
95 | --- a/target/arm/translate-sve.c | 49 | --- a/target/arm/translate-sve.c |
96 | +++ b/target/arm/translate-sve.c | 50 | +++ b/target/arm/translate-sve.c |
97 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a) | 51 | @@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) |
98 | return do_sve2_shr_narrow(s, a, ops); | 52 | return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); |
99 | } | 53 | } |
100 | 54 | ||
101 | +static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, | 55 | -TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) |
102 | + TCGv_vec n, int64_t shr) | 56 | -TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) |
103 | +{ | 57 | -TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) |
104 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 58 | -TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) |
105 | + int halfbits = 4 << vece; | 59 | +TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) |
106 | + int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); | 60 | +TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) |
107 | + int64_t min = -max - 1; | 61 | +TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) |
108 | + | 62 | +TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) |
109 | + tcg_gen_sari_vec(vece, n, n, shr); | 63 | |
110 | + tcg_gen_dupi_vec(vece, t, min); | 64 | /* |
111 | + tcg_gen_smax_vec(vece, n, n, t); | 65 | *** SVE Integer Misc - Unpredicated Group |
112 | + tcg_gen_dupi_vec(vece, t, max); | ||
113 | + tcg_gen_smin_vec(vece, n, n, t); | ||
114 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
115 | + tcg_gen_and_vec(vece, d, n, t); | ||
116 | + tcg_temp_free_vec(t); | ||
117 | +} | ||
118 | + | ||
119 | +static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a) | ||
120 | +{ | ||
121 | + static const TCGOpcode vec_list[] = { | ||
122 | + INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0 | ||
123 | + }; | ||
124 | + static const GVecGen2i ops[3] = { | ||
125 | + { .fniv = gen_sqshrnb_vec, | ||
126 | + .opt_opc = vec_list, | ||
127 | + .fno = gen_helper_sve2_sqshrnb_h, | ||
128 | + .vece = MO_16 }, | ||
129 | + { .fniv = gen_sqshrnb_vec, | ||
130 | + .opt_opc = vec_list, | ||
131 | + .fno = gen_helper_sve2_sqshrnb_s, | ||
132 | + .vece = MO_32 }, | ||
133 | + { .fniv = gen_sqshrnb_vec, | ||
134 | + .opt_opc = vec_list, | ||
135 | + .fno = gen_helper_sve2_sqshrnb_d, | ||
136 | + .vece = MO_64 }, | ||
137 | + }; | ||
138 | + return do_sve2_shr_narrow(s, a, ops); | ||
139 | +} | ||
140 | + | ||
141 | +static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, | ||
142 | + TCGv_vec n, int64_t shr) | ||
143 | +{ | ||
144 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
145 | + int halfbits = 4 << vece; | ||
146 | + int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); | ||
147 | + int64_t min = -max - 1; | ||
148 | + | ||
149 | + tcg_gen_sari_vec(vece, n, n, shr); | ||
150 | + tcg_gen_dupi_vec(vece, t, min); | ||
151 | + tcg_gen_smax_vec(vece, n, n, t); | ||
152 | + tcg_gen_dupi_vec(vece, t, max); | ||
153 | + tcg_gen_smin_vec(vece, n, n, t); | ||
154 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
155 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
156 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
157 | + tcg_temp_free_vec(t); | ||
158 | +} | ||
159 | + | ||
160 | +static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a) | ||
161 | +{ | ||
162 | + static const TCGOpcode vec_list[] = { | ||
163 | + INDEX_op_shli_vec, INDEX_op_sari_vec, | ||
164 | + INDEX_op_smax_vec, INDEX_op_smin_vec, 0 | ||
165 | + }; | ||
166 | + static const GVecGen2i ops[3] = { | ||
167 | + { .fniv = gen_sqshrnt_vec, | ||
168 | + .opt_opc = vec_list, | ||
169 | + .load_dest = true, | ||
170 | + .fno = gen_helper_sve2_sqshrnt_h, | ||
171 | + .vece = MO_16 }, | ||
172 | + { .fniv = gen_sqshrnt_vec, | ||
173 | + .opt_opc = vec_list, | ||
174 | + .load_dest = true, | ||
175 | + .fno = gen_helper_sve2_sqshrnt_s, | ||
176 | + .vece = MO_32 }, | ||
177 | + { .fniv = gen_sqshrnt_vec, | ||
178 | + .opt_opc = vec_list, | ||
179 | + .load_dest = true, | ||
180 | + .fno = gen_helper_sve2_sqshrnt_d, | ||
181 | + .vece = MO_64 }, | ||
182 | + }; | ||
183 | + return do_sve2_shr_narrow(s, a, ops); | ||
184 | +} | ||
185 | + | ||
186 | +static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a) | ||
187 | +{ | ||
188 | + static const GVecGen2i ops[3] = { | ||
189 | + { .fno = gen_helper_sve2_sqrshrnb_h }, | ||
190 | + { .fno = gen_helper_sve2_sqrshrnb_s }, | ||
191 | + { .fno = gen_helper_sve2_sqrshrnb_d }, | ||
192 | + }; | ||
193 | + return do_sve2_shr_narrow(s, a, ops); | ||
194 | +} | ||
195 | + | ||
196 | +static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a) | ||
197 | +{ | ||
198 | + static const GVecGen2i ops[3] = { | ||
199 | + { .fno = gen_helper_sve2_sqrshrnt_h }, | ||
200 | + { .fno = gen_helper_sve2_sqrshrnt_s }, | ||
201 | + { .fno = gen_helper_sve2_sqrshrnt_d }, | ||
202 | + }; | ||
203 | + return do_sve2_shr_narrow(s, a, ops); | ||
204 | +} | ||
205 | + | ||
206 | static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, | ||
207 | TCGv_vec n, int64_t shr) | ||
208 | { | ||
209 | -- | 66 | -- |
210 | 2.20.1 | 67 | 2.25.1 |
211 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
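[ Note for readers following the series: TRANS_FEAT_NONSTREAMING, used
throughout these patches, is a thin variant of the TRANS_FEAT wrapper
whose tail is visible at the top of this mail. A minimal sketch of the
idea; the exact macro body lives in target/arm/translate.h and may
differ in detail:

    /* Sketch only: like TRANS_FEAT, but first mark the instruction as
     * non-streaming, so that sve_access_check() can raise the SME trap
     * when streaming mode is active without FEAT_SME_FA64. */
    #define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)           \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a)     \
        {                                                            \
            s->is_nonstreaming = true;                               \
            return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
        }
]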
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-59-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-6-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper.h | 10 +++++ | 11 | target/arm/sme-fa64.decode | 2 -- |
9 | target/arm/sve.decode | 4 ++ | 12 | target/arm/translate-sve.c | 9 ++++++--- |
10 | target/arm/translate-sve.c | 18 ++++++++ | 13 | 2 files changed, 6 insertions(+), 5 deletions(-) |
11 | target/arm/vec_helper.c | 84 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 116 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/helper.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG, | 20 | |
20 | void, ptr, ptr, ptr, ptr, i32) | 21 | FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
21 | 22 | FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT | |
22 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 23 | -FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS |
23 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 24 | -FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR |
24 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 25 | FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP |
25 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 26 | FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) |
26 | + | 27 | FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA |
27 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | + | ||
32 | DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | |||
34 | #ifdef TARGET_AARCH64 | ||
35 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/target/arm/sve.decode | ||
38 | +++ b/target/arm/sve.decode | ||
39 | @@ -XXX,XX +XXX,XX @@ SMULH_zzz 00000100 .. 1 ..... 0110 10 ..... ..... @rd_rn_rm | ||
40 | UMULH_zzz 00000100 .. 1 ..... 0110 11 ..... ..... @rd_rn_rm | ||
41 | PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0 | ||
42 | |||
43 | +# SVE2 signed saturating doubling multiply high (unpredicated) | ||
44 | +SQDMULH_zzz 00000100 .. 1 ..... 0111 00 ..... ..... @rd_rn_rm | ||
45 | +SQRDMULH_zzz 00000100 .. 1 ..... 0111 01 ..... ..... @rd_rn_rm | ||
46 | + | ||
47 | ### SVE2 Integer - Predicated | ||
48 | |||
49 | SADALP_zpzz 01000100 .. 000 100 101 ... ..... ..... @rdm_pg_rn | ||
50 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 28 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
51 | index XXXXXXX..XXXXXXX 100644 | 29 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/target/arm/translate-sve.c | 30 | --- a/target/arm/translate-sve.c |
53 | +++ b/target/arm/translate-sve.c | 31 | +++ b/target/arm/translate-sve.c |
54 | @@ -XXX,XX +XXX,XX @@ static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a) | 32 | @@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) |
55 | return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b); | 33 | TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) |
34 | |||
35 | /* Note pat == 31 is #all, to set all elements. */ | ||
36 | -TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false) | ||
37 | +TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, | ||
38 | + do_predset, 0, FFR_PRED_NUM, 31, false) | ||
39 | |||
40 | /* Note pat == 32 is #unimp, to set no elements. */ | ||
41 | TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) | ||
42 | @@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) | ||
43 | .rd = a->rd, .pg = a->pg, .s = a->s, | ||
44 | .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, | ||
45 | }; | ||
46 | + | ||
47 | + s->is_nonstreaming = true; | ||
48 | return trans_AND_pppp(s, &alt_a); | ||
56 | } | 49 | } |
57 | 50 | ||
58 | +static bool trans_SQDMULH_zzz(DisasContext *s, arg_rrr_esz *a) | 51 | -TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) |
59 | +{ | 52 | -TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) |
60 | + static gen_helper_gvec_3 * const fns[4] = { | 53 | +TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) |
61 | + gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, | 54 | +TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) |
62 | + gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, | 55 | |
63 | + }; | 56 | static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, |
64 | + return do_sve2_zzz_ool(s, a, fns[a->esz]); | 57 | void (*gen_fn)(TCGv_i32, TCGv_ptr, |
65 | +} | ||
66 | + | ||
67 | +static bool trans_SQRDMULH_zzz(DisasContext *s, arg_rrr_esz *a) | ||
68 | +{ | ||
69 | + static gen_helper_gvec_3 * const fns[4] = { | ||
70 | + gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, | ||
71 | + gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, | ||
72 | + }; | ||
73 | + return do_sve2_zzz_ool(s, a, fns[a->esz]); | ||
74 | +} | ||
75 | + | ||
76 | /* | ||
77 | * SVE2 Integer - Predicated | ||
78 | */ | ||
79 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/vec_helper.c | ||
82 | +++ b/target/arm/vec_helper.c | ||
83 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm, | ||
84 | } | ||
85 | } | ||
86 | |||
87 | +void HELPER(sve2_sqdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
88 | +{ | ||
89 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
90 | + int8_t *d = vd, *n = vn, *m = vm; | ||
91 | + | ||
92 | + for (i = 0; i < opr_sz; ++i) { | ||
93 | + d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, false); | ||
94 | + } | ||
95 | +} | ||
96 | + | ||
97 | +void HELPER(sve2_sqrdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
98 | +{ | ||
99 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
100 | + int8_t *d = vd, *n = vn, *m = vm; | ||
101 | + | ||
102 | + for (i = 0; i < opr_sz; ++i) { | ||
103 | + d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, true); | ||
104 | + } | ||
105 | +} | ||
106 | + | ||
107 | /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ | ||
108 | int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3, | ||
109 | bool neg, bool round, uint32_t *sat) | ||
110 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm, | ||
111 | } | ||
112 | } | ||
113 | |||
114 | +void HELPER(sve2_sqdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
115 | +{ | ||
116 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
117 | + int16_t *d = vd, *n = vn, *m = vm; | ||
118 | + uint32_t discard; | ||
119 | + | ||
120 | + for (i = 0; i < opr_sz / 2; ++i) { | ||
121 | + d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, &discard); | ||
122 | + } | ||
123 | +} | ||
124 | + | ||
125 | +void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
126 | +{ | ||
127 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
128 | + int16_t *d = vd, *n = vn, *m = vm; | ||
129 | + uint32_t discard; | ||
130 | + | ||
131 | + for (i = 0; i < opr_sz / 2; ++i) { | ||
132 | + d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, &discard); | ||
133 | + } | ||
134 | +} | ||
135 | + | ||
136 | /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ | ||
137 | int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, | ||
138 | bool neg, bool round, uint32_t *sat) | ||
139 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_s)(void *vd, void *vn, void *vm, | ||
140 | } | ||
141 | } | ||
142 | |||
143 | +void HELPER(sve2_sqdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc) | ||
144 | +{ | ||
145 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
146 | + int32_t *d = vd, *n = vn, *m = vm; | ||
147 | + uint32_t discard; | ||
148 | + | ||
149 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
150 | + d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, &discard); | ||
151 | + } | ||
152 | +} | ||
153 | + | ||
154 | +void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc) | ||
155 | +{ | ||
156 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
157 | + int32_t *d = vd, *n = vn, *m = vm; | ||
158 | + uint32_t discard; | ||
159 | + | ||
160 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
161 | + d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, &discard); | ||
162 | + } | ||
163 | +} | ||
164 | + | ||
165 | /* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */ | ||
166 | static int64_t do_sat128_d(Int128 r) | ||
167 | { | ||
168 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm, | ||
169 | } | ||
170 | } | ||
171 | |||
172 | +void HELPER(sve2_sqdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
173 | +{ | ||
174 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
175 | + int64_t *d = vd, *n = vn, *m = vm; | ||
176 | + | ||
177 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
178 | + d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, false); | ||
179 | + } | ||
180 | +} | ||
181 | + | ||
182 | +void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
183 | +{ | ||
184 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
185 | + int64_t *d = vd, *n = vn, *m = vm; | ||
186 | + | ||
187 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
188 | + d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, true); | ||
189 | + } | ||
190 | +} | ||
191 | + | ||
192 | /* Integer 8 and 16-bit dot-product. | ||
193 | * | ||
194 | * Note that for the loops herein, host endianness does not matter | ||
195 | -- | 58 | -- |
196 | 2.20.1 | 59 | 2.25.1 |
197 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
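[ As a plain-C illustration of the rule these patches enforce (a toy
model for exposition, not QEMU code; the function and parameter names
here are invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    /* A non-streaming instruction traps only when streaming SVE mode
     * is active and full a64 (FEAT_SME_FA64) is not enabled. */
    static bool sme_traps(bool streaming, bool nonstreaming_insn, bool fa64)
    {
        return streaming && nonstreaming_insn && !fa64;
    }

    int main(void)
    {
        printf("%d\n", sme_traps(true, true, false));  /* 1: traps */
        printf("%d\n", sme_traps(true, true, true));   /* 0: FA64 set */
        printf("%d\n", sme_traps(false, true, false)); /* 0: not streaming */
        return 0;
    }
]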
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-56-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-7-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 14 ++++++++++++++ | 11 | target/arm/sme-fa64.decode | 3 --- |
9 | target/arm/sve.decode | 8 ++++++++ | 12 | target/arm/translate-sve.c | 22 ++++++++++++---------- |
10 | target/arm/sve_helper.c | 36 ++++++++++++++++++++++++++++++++++++ | 13 | 2 files changed, 12 insertions(+), 13 deletions(-) |
11 | target/arm/translate-sve.c | 8 ++++++++ | ||
12 | 4 files changed, 66 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/helper-sve.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | |
20 | DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
21 | DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) | 22 | |
22 | + | 23 | -FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA |
23 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_h, TCG_CALL_NO_RWG, | 24 | -FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT |
24 | + void, ptr, ptr, ptr, ptr, i32) | 25 | -FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP |
25 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_s, TCG_CALL_NO_RWG, | 26 | FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) |
26 | + void, ptr, ptr, ptr, ptr, i32) | 27 | FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA |
27 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_d, TCG_CALL_NO_RWG, | 28 | FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL |
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_h, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_s, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ MLS_zzxz_h 01000100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=1 | ||
41 | MLS_zzxz_s 01000100 10 1 ..... 000011 ..... ..... @rrxr_2 esz=2 | ||
42 | MLS_zzxz_d 01000100 11 1 ..... 000011 ..... ..... @rrxr_1 esz=3 | ||
43 | |||
44 | +# SVE2 saturating multiply-add high (indexed) | ||
45 | +SQRDMLAH_zzxz_h 01000100 0. 1 ..... 000100 ..... ..... @rrxr_3 esz=1 | ||
46 | +SQRDMLAH_zzxz_s 01000100 10 1 ..... 000100 ..... ..... @rrxr_2 esz=2 | ||
47 | +SQRDMLAH_zzxz_d 01000100 11 1 ..... 000100 ..... ..... @rrxr_1 esz=3 | ||
48 | +SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1 | ||
49 | +SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2 | ||
50 | +SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3 | ||
51 | + | ||
52 | # SVE2 integer multiply (indexed) | ||
53 | MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | ||
54 | MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | ||
55 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/target/arm/sve_helper.c | ||
58 | +++ b/target/arm/sve_helper.c | ||
59 | @@ -XXX,XX +XXX,XX @@ DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D) | ||
60 | #undef DO_SQRDMLAH_S | ||
61 | #undef DO_SQRDMLAH_D | ||
62 | |||
63 | +#define DO_ZZXZ(NAME, TYPE, H, OP) \ | ||
64 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
65 | +{ \ | ||
66 | + intptr_t oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ | ||
67 | + intptr_t i, j, idx = simd_data(desc); \ | ||
68 | + TYPE *d = vd, *a = va, *n = vn, *m = (TYPE *)vm + H(idx); \ | ||
69 | + for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ | ||
70 | + TYPE mm = m[i]; \ | ||
71 | + for (j = 0; j < segment; j++) { \ | ||
72 | + d[i + j] = OP(n[i + j], mm, a[i + j]); \ | ||
73 | + } \ | ||
74 | + } \ | ||
75 | +} | ||
76 | + | ||
77 | +#define DO_SQRDMLAH_H(N, M, A) \ | ||
78 | + ({ uint32_t discard; do_sqrdmlah_h(N, M, A, false, true, &discard); }) | ||
79 | +#define DO_SQRDMLAH_S(N, M, A) \ | ||
80 | + ({ uint32_t discard; do_sqrdmlah_s(N, M, A, false, true, &discard); }) | ||
81 | +#define DO_SQRDMLAH_D(N, M, A) do_sqrdmlah_d(N, M, A, false, true) | ||
82 | + | ||
83 | +DO_ZZXZ(sve2_sqrdmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H) | ||
84 | +DO_ZZXZ(sve2_sqrdmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S) | ||
85 | +DO_ZZXZ(sve2_sqrdmlah_idx_d, int64_t, , DO_SQRDMLAH_D) | ||
86 | + | ||
87 | +#define DO_SQRDMLSH_H(N, M, A) \ | ||
88 | + ({ uint32_t discard; do_sqrdmlah_h(N, M, A, true, true, &discard); }) | ||
89 | +#define DO_SQRDMLSH_S(N, M, A) \ | ||
90 | + ({ uint32_t discard; do_sqrdmlah_s(N, M, A, true, true, &discard); }) | ||
91 | +#define DO_SQRDMLSH_D(N, M, A) do_sqrdmlah_d(N, M, A, true, true) | ||
92 | + | ||
93 | +DO_ZZXZ(sve2_sqrdmlsh_idx_h, int16_t, H2, DO_SQRDMLSH_H) | ||
94 | +DO_ZZXZ(sve2_sqrdmlsh_idx_s, int32_t, H4, DO_SQRDMLSH_S) | ||
95 | +DO_ZZXZ(sve2_sqrdmlsh_idx_d, int64_t, , DO_SQRDMLSH_D) | ||
96 | + | ||
97 | +#undef DO_ZZXZ | ||
98 | + | ||
99 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
100 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
101 | { \ | ||
102 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 29 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
103 | index XXXXXXX..XXXXXXX 100644 | 30 | index XXXXXXX..XXXXXXX 100644 |
104 | --- a/target/arm/translate-sve.c | 31 | --- a/target/arm/translate-sve.c |
105 | +++ b/target/arm/translate-sve.c | 32 | +++ b/target/arm/translate-sve.c |
106 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h) | 33 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = { |
107 | DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s) | 34 | NULL, gen_helper_sve_fexpa_h, |
108 | DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d) | 35 | gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, |
109 | 36 | }; | |
110 | +DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) | 37 | -TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz, |
111 | +DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) | 38 | - fexpa_fns[a->esz], a->rd, a->rn, 0) |
112 | +DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) | 39 | +TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, |
113 | + | 40 | + fexpa_fns[a->esz], a->rd, a->rn, 0) |
114 | +DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) | 41 | |
115 | +DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) | 42 | static gen_helper_gvec_3 * const ftssel_fns[4] = { |
116 | +DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) | 43 | NULL, gen_helper_sve_ftssel_h, |
117 | + | 44 | gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, |
118 | #undef DO_SVE2_RRXR | 45 | }; |
46 | -TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0) | ||
47 | +TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, | ||
48 | + ftssel_fns[a->esz], a, 0) | ||
119 | 49 | ||
120 | /* | 50 | /* |
51 | *** SVE Predicate Logical Operations Group | ||
52 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, | ||
53 | static gen_helper_gvec_3 * const compact_fns[4] = { | ||
54 | NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d | ||
55 | }; | ||
56 | -TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0) | ||
57 | +TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, | ||
58 | + compact_fns[a->esz], a, 0) | ||
59 | |||
60 | /* Call the helper that computes the ARM LastActiveElement pseudocode | ||
61 | * function, scaled by the element size. This includes the not found | ||
62 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = { | ||
63 | gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, | ||
64 | gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, | ||
65 | }; | ||
66 | -TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
67 | - bext_fns[a->esz], a, 0) | ||
68 | +TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
69 | + bext_fns[a->esz], a, 0) | ||
70 | |||
71 | static gen_helper_gvec_3 * const bdep_fns[4] = { | ||
72 | gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, | ||
73 | gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, | ||
74 | }; | ||
75 | -TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
76 | - bdep_fns[a->esz], a, 0) | ||
77 | +TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
78 | + bdep_fns[a->esz], a, 0) | ||
79 | |||
80 | static gen_helper_gvec_3 * const bgrp_fns[4] = { | ||
81 | gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, | ||
82 | gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, | ||
83 | }; | ||
84 | -TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
85 | - bgrp_fns[a->esz], a, 0) | ||
86 | +TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, | ||
87 | + bgrp_fns[a->esz], a, 0) | ||
88 | |||
89 | static gen_helper_gvec_3 * const cadd_fns[4] = { | ||
90 | gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, | ||
121 | -- | 91 | -- |
122 | 2.20.1 | 92 | 2.25.1 |
123 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
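[ Note the asymmetry in the do_trans_pmull() hunk below: only the
128-bit-result PMULL form is non-streaming, so the marker is set in
the esz == 0 branch alone. Condensed from the hunk, with comments
added; not new code:

    if (a->esz == 0) {
        if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
            return false;
        }
        /* 128-bit PMULLB/PMULLT is the non-streaming form. */
        s->is_nonstreaming = true;
    } else if (!dc_isar_feature(aa64_sve, s)) {
        /* Other element sizes stay legal in streaming mode. */
        return false;
    }
]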
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-28-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-8-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 16 +++++++ | 11 | target/arm/sme-fa64.decode | 2 -- |
9 | target/arm/sve.decode | 4 ++ | 12 | target/arm/translate-sve.c | 24 +++++++++++++++--------- |
10 | target/arm/sve_helper.c | 35 ++++++++++++++ | 13 | 2 files changed, 15 insertions(+), 11 deletions(-) |
11 | target/arm/translate-sve.c | 98 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 153 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/helper-sve.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
20 | DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
21 | 22 | ||
22 | +DEF_HELPER_FLAGS_3(sve2_sqshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 23 | -FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result) |
23 | +DEF_HELPER_FLAGS_3(sve2_sqshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 24 | -FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA |
24 | +DEF_HELPER_FLAGS_3(sve2_sqshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 25 | FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL |
25 | + | 26 | FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD |
26 | +DEF_HELPER_FLAGS_3(sve2_sqshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 27 | FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA |
27 | +DEF_HELPER_FLAGS_3(sve2_sqshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_3(sve2_sqshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
37 | + | ||
38 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
39 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
40 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
41 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/target/arm/sve.decode | ||
44 | +++ b/target/arm/sve.decode | ||
45 | @@ -XXX,XX +XXX,XX @@ SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl | ||
46 | ## SVE2 bitwise shift right narrow | ||
47 | |||
48 | # Bit 23 == 0 is handled by esz > 0 in the translator. | ||
49 | +SQSHRUNB 01000101 .. 1 ..... 00 0000 ..... ..... @rd_rn_tszimm_shr | ||
50 | +SQSHRUNT 01000101 .. 1 ..... 00 0001 ..... ..... @rd_rn_tszimm_shr | ||
51 | +SQRSHRUNB 01000101 .. 1 ..... 00 0010 ..... ..... @rd_rn_tszimm_shr | ||
52 | +SQRSHRUNT 01000101 .. 1 ..... 00 0011 ..... ..... @rd_rn_tszimm_shr | ||
53 | SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr | ||
54 | SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr | ||
55 | RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr | ||
56 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/target/arm/sve_helper.c | ||
59 | +++ b/target/arm/sve_helper.c | ||
60 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh) | ||
61 | } | ||
62 | } | ||
63 | |||
64 | +static inline int64_t do_srshr(int64_t x, unsigned sh) | ||
65 | +{ | ||
66 | + if (likely(sh < 64)) { | ||
67 | + return (x >> sh) + ((x >> (sh - 1)) & 1); | ||
68 | + } else { | ||
69 | + /* Rounding the sign bit always produces 0. */ | ||
70 | + return 0; | ||
71 | + } | ||
72 | +} | ||
73 | + | ||
74 | DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR) | ||
75 | DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR) | ||
76 | DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR) | ||
77 | @@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr) | ||
78 | DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr) | ||
79 | DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr) | ||
80 | |||
81 | +#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX) | ||
82 | +#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX) | ||
83 | +#define DO_SQSHRUN_D(x, sh) \ | ||
84 | + do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX) | ||
85 | + | ||
86 | +DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H) | ||
87 | +DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S) | ||
88 | +DO_SHRNB(sve2_sqshrunb_d, int64_t, uint32_t, DO_SQSHRUN_D) | ||
89 | + | ||
90 | +DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H) | ||
91 | +DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S) | ||
92 | +DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, , H1_4, DO_SQSHRUN_D) | ||
93 | + | ||
94 | +#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX) | ||
95 | +#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX) | ||
96 | +#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX) | ||
97 | + | ||
98 | +DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H) | ||
99 | +DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S) | ||
100 | +DO_SHRNB(sve2_sqrshrunb_d, int64_t, uint32_t, DO_SQRSHRUN_D) | ||
101 | + | ||
102 | +DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H) | ||
103 | +DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S) | ||
104 | +DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D) | ||
105 | + | ||
106 | #undef DO_SHRNB | ||
107 | #undef DO_SHRNT | ||
108 | |||
109 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 28 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
110 | index XXXXXXX..XXXXXXX 100644 | 29 | index XXXXXXX..XXXXXXX 100644 |
111 | --- a/target/arm/translate-sve.c | 30 | --- a/target/arm/translate-sve.c |
112 | +++ b/target/arm/translate-sve.c | 31 | +++ b/target/arm/translate-sve.c |
113 | @@ -XXX,XX +XXX,XX @@ static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a) | 32 | @@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) |
114 | return do_sve2_shr_narrow(s, a, ops); | 33 | gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, |
115 | } | 34 | NULL, gen_helper_sve2_pmull_d, |
116 | 35 | }; | |
117 | +static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, | 36 | - if (a->esz == 0 |
118 | + TCGv_vec n, int64_t shr) | 37 | - ? !dc_isar_feature(aa64_sve2_pmull128, s) |
119 | +{ | 38 | - : !dc_isar_feature(aa64_sve, s)) { |
120 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
121 | + int halfbits = 4 << vece; | ||
122 | + | 39 | + |
123 | + tcg_gen_sari_vec(vece, n, n, shr); | 40 | + if (a->esz == 0) { |
124 | + tcg_gen_dupi_vec(vece, t, 0); | 41 | + if (!dc_isar_feature(aa64_sve2_pmull128, s)) { |
125 | + tcg_gen_smax_vec(vece, n, n, t); | 42 | + return false; |
126 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | 43 | + } |
127 | + tcg_gen_umin_vec(vece, d, n, t); | 44 | + s->is_nonstreaming = true; |
128 | + tcg_temp_free_vec(t); | 45 | + } else if (!dc_isar_feature(aa64_sve, s)) { |
129 | +} | 46 | return false; |
130 | + | 47 | } |
131 | +static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a) | 48 | return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); |
132 | +{ | 49 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) |
133 | + static const TCGOpcode vec_list[] = { | 50 | * SVE Integer Multiply-Add (unpredicated) |
134 | + INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0 | 51 | */ |
135 | + }; | 52 | |
136 | + static const GVecGen2i ops[3] = { | 53 | -TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s, |
137 | + { .fniv = gen_sqshrunb_vec, | 54 | - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) |
138 | + .opt_opc = vec_list, | 55 | -TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d, |
139 | + .fno = gen_helper_sve2_sqshrunb_h, | 56 | - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) |
140 | + .vece = MO_16 }, | 57 | +TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, |
141 | + { .fniv = gen_sqshrunb_vec, | 58 | + gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, |
142 | + .opt_opc = vec_list, | 59 | + 0, FPST_FPCR) |
143 | + .fno = gen_helper_sve2_sqshrunb_s, | 60 | +TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, |
144 | + .vece = MO_32 }, | 61 | + gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, |
145 | + { .fniv = gen_sqshrunb_vec, | 62 | + 0, FPST_FPCR) |
146 | + .opt_opc = vec_list, | 63 | |
147 | + .fno = gen_helper_sve2_sqshrunb_d, | 64 | static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { |
148 | + .vece = MO_64 }, | 65 | NULL, gen_helper_sve2_sqdmlal_zzzw_h, |
149 | + }; | 66 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, |
150 | + return do_sve2_shr_narrow(s, a, ops); | 67 | TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz, |
151 | +} | 68 | gen_helper_gvec_bfdot_idx, a) |
152 | + | 69 | |
153 | +static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, | 70 | -TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, |
154 | + TCGv_vec n, int64_t shr) | 71 | - gen_helper_gvec_bfmmla, a, 0) |
155 | +{ | 72 | +TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, |
156 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 73 | + gen_helper_gvec_bfmmla, a, 0) |
157 | + int halfbits = 4 << vece; | 74 | |
158 | + | 75 | static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) |
159 | + tcg_gen_sari_vec(vece, n, n, shr); | ||
160 | + tcg_gen_dupi_vec(vece, t, 0); | ||
161 | + tcg_gen_smax_vec(vece, n, n, t); | ||
162 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
163 | + tcg_gen_umin_vec(vece, n, n, t); | ||
164 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
165 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
166 | + tcg_temp_free_vec(t); | ||
167 | +} | ||
168 | + | ||
169 | +static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a) | ||
170 | +{ | ||
171 | + static const TCGOpcode vec_list[] = { | ||
172 | + INDEX_op_shli_vec, INDEX_op_sari_vec, | ||
173 | + INDEX_op_smax_vec, INDEX_op_umin_vec, 0 | ||
174 | + }; | ||
175 | + static const GVecGen2i ops[3] = { | ||
176 | + { .fniv = gen_sqshrunt_vec, | ||
177 | + .opt_opc = vec_list, | ||
178 | + .load_dest = true, | ||
179 | + .fno = gen_helper_sve2_sqshrunt_h, | ||
180 | + .vece = MO_16 }, | ||
181 | + { .fniv = gen_sqshrunt_vec, | ||
182 | + .opt_opc = vec_list, | ||
183 | + .load_dest = true, | ||
184 | + .fno = gen_helper_sve2_sqshrunt_s, | ||
185 | + .vece = MO_32 }, | ||
186 | + { .fniv = gen_sqshrunt_vec, | ||
187 | + .opt_opc = vec_list, | ||
188 | + .load_dest = true, | ||
189 | + .fno = gen_helper_sve2_sqshrunt_d, | ||
190 | + .vece = MO_64 }, | ||
191 | + }; | ||
192 | + return do_sve2_shr_narrow(s, a, ops); | ||
193 | +} | ||
194 | + | ||
195 | +static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a) | ||
196 | +{ | ||
197 | + static const GVecGen2i ops[3] = { | ||
198 | + { .fno = gen_helper_sve2_sqrshrunb_h }, | ||
199 | + { .fno = gen_helper_sve2_sqrshrunb_s }, | ||
200 | + { .fno = gen_helper_sve2_sqrshrunb_d }, | ||
201 | + }; | ||
202 | + return do_sve2_shr_narrow(s, a, ops); | ||
203 | +} | ||
204 | + | ||
205 | +static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a) | ||
206 | +{ | ||
207 | + static const GVecGen2i ops[3] = { | ||
208 | + { .fno = gen_helper_sve2_sqrshrunt_h }, | ||
209 | + { .fno = gen_helper_sve2_sqrshrunt_s }, | ||
210 | + { .fno = gen_helper_sve2_sqrshrunt_d }, | ||
211 | + }; | ||
212 | + return do_sve2_shr_narrow(s, a, ops); | ||
213 | +} | ||
214 | + | ||
215 | static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
216 | gen_helper_gvec_4_ptr *fn) | ||
217 | { | 76 | { |
218 | -- | 77 | -- |
219 | 2.20.1 | 78 | 2.25.1 |
220 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
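[ FADDA is hand-coded rather than macro-generated, so the hunk below
sets the flag inline; the general shape for such open-coded trans_*
functions is roughly:

    /* Mark the insn before the access check, so that the check can
     * raise the streaming-mode trap instead of translating the body. */
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;    /* access check has emitted the trap */
    }
    /* ... normal code generation continues ... */
]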
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-57-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-9-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 9 +++++++++ | 11 | target/arm/sme-fa64.decode | 3 --- |
9 | target/arm/sve.decode | 18 ++++++++++++++++++ | 12 | target/arm/translate-sve.c | 15 +++++++++++---- |
10 | target/arm/sve_helper.c | 30 ++++++++++++++++++++++++++++++ | 13 | 2 files changed, 11 insertions(+), 7 deletions(-) |
11 | target/arm/translate-sve.c | 19 +++++++++++++++++++ | ||
12 | 4 files changed, 76 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/helper-sve.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_s, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | void, ptr, ptr, ptr, ptr, i32) | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
20 | DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_d, TCG_CALL_NO_RWG, | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
21 | void, ptr, ptr, ptr, ptr, i32) | 22 | |
22 | + | 23 | -FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL |
23 | +DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_s, TCG_CALL_NO_RWG, | 24 | -FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD |
24 | + void, ptr, ptr, ptr, ptr, i32) | 25 | -FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA |
25 | +DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_d, TCG_CALL_NO_RWG, | 26 | FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA |
26 | + void, ptr, ptr, ptr, ptr, i32) | 27 | FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions |
27 | +DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_s, TCG_CALL_NO_RWG, | 28 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_d, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, i32) | ||
31 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/sve.decode | ||
34 | +++ b/target/arm/sve.decode | ||
35 | @@ -XXX,XX +XXX,XX @@ | ||
36 | %size_23 23:2 | ||
37 | %dtype_23_13 23:2 13:2 | ||
38 | %index3_22_19 22:1 19:2 | ||
39 | +%index3_19_11 19:2 11:1 | ||
40 | +%index2_20_11 20:1 11:1 | ||
41 | |||
42 | # A combination of tsz:imm3 -- extract esize. | ||
43 | %tszimm_esz 22:2 5:5 !function=tszimm_esz | ||
44 | @@ -XXX,XX +XXX,XX @@ | ||
45 | @rrxr_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 \ | ||
46 | &rrxr_esz ra=%reg_movprfx | ||
47 | |||
48 | +# Three registers and a scalar by N-bit index, alternate | ||
49 | +@rrxr_3a ........ .. ... rm:3 ...... rn:5 rd:5 \ | ||
50 | + &rrxr_esz ra=%reg_movprfx index=%index3_19_11 | ||
51 | +@rrxr_2a ........ .. .. rm:4 ...... rn:5 rd:5 \ | ||
52 | + &rrxr_esz ra=%reg_movprfx index=%index2_20_11 | ||
53 | + | ||
54 | ########################################################################### | ||
55 | # Instruction patterns. Grouped according to the SVE encodingindex.xhtml. | ||
56 | |||
57 | @@ -XXX,XX +XXX,XX @@ SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1 | ||
58 | SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2 | ||
59 | SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3 | ||
60 | |||
61 | +# SVE2 saturating multiply-add (indexed) | ||
62 | +SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2 | ||
63 | +SQDMLALB_zzxw_d 01000100 11 1 ..... 0010.0 ..... ..... @rrxr_2a esz=3 | ||
64 | +SQDMLALT_zzxw_s 01000100 10 1 ..... 0010.1 ..... ..... @rrxr_3a esz=2 | ||
65 | +SQDMLALT_zzxw_d 01000100 11 1 ..... 0010.1 ..... ..... @rrxr_2a esz=3 | ||
66 | +SQDMLSLB_zzxw_s 01000100 10 1 ..... 0011.0 ..... ..... @rrxr_3a esz=2 | ||
67 | +SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3 | ||
68 | +SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2 | ||
69 | +SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3 | ||
70 | + | ||
71 | # SVE2 integer multiply (indexed) | ||
72 | MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | ||
73 | MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | ||
74 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/target/arm/sve_helper.c | ||
77 | +++ b/target/arm/sve_helper.c | ||
78 | @@ -XXX,XX +XXX,XX @@ DO_ZZXZ(sve2_sqrdmlsh_idx_d, int64_t, , DO_SQRDMLSH_D) | ||
79 | |||
80 | #undef DO_ZZXZ | ||
81 | |||
82 | +#define DO_ZZXW(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
83 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
84 | +{ \ | ||
85 | + intptr_t i, j, oprsz = simd_oprsz(desc); \ | ||
86 | + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \ | ||
87 | + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \ | ||
88 | + for (i = 0; i < oprsz; i += 16) { \ | ||
89 | + TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \ | ||
90 | + for (j = 0; j < 16; j += sizeof(TYPEW)) { \ | ||
91 | + TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \ | ||
92 | + TYPEW aa = *(TYPEW *)(va + HW(i + j)); \ | ||
93 | + *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm, aa); \ | ||
94 | + } \ | ||
95 | + } \ | ||
96 | +} | ||
97 | + | ||
98 | +#define DO_SQDMLAL_S(N, M, A) DO_SQADD_S(A, do_sqdmull_s(N, M)) | ||
99 | +#define DO_SQDMLAL_D(N, M, A) do_sqadd_d(A, do_sqdmull_d(N, M)) | ||
100 | + | ||
101 | +DO_ZZXW(sve2_sqdmlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLAL_S) | ||
102 | +DO_ZZXW(sve2_sqdmlal_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLAL_D) | ||
103 | + | ||
104 | +#define DO_SQDMLSL_S(N, M, A) DO_SQSUB_S(A, do_sqdmull_s(N, M)) | ||
105 | +#define DO_SQDMLSL_D(N, M, A) do_sqsub_d(A, do_sqdmull_d(N, M)) | ||
106 | + | ||
107 | +DO_ZZXW(sve2_sqdmlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLSL_S) | ||
108 | +DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D) | ||
109 | + | ||
110 | +#undef DO_ZZXW | ||
111 | + | ||
112 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
113 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
114 | { \ | ||
115 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 29 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
116 | index XXXXXXX..XXXXXXX 100644 | 30 | index XXXXXXX..XXXXXXX 100644 |
117 | --- a/target/arm/translate-sve.c | 31 | --- a/target/arm/translate-sve.c |
118 | +++ b/target/arm/translate-sve.c | 32 | +++ b/target/arm/translate-sve.c |
119 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) | 33 | @@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { |
120 | 34 | NULL, gen_helper_sve_ftmad_h, | |
121 | #undef DO_SVE2_RRXR | 35 | gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, |
122 | 36 | }; | |
123 | +#define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \ | 37 | -TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz, |
124 | + static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ | 38 | - ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, |
125 | + { \ | 39 | - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) |
126 | + return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->rd, \ | 40 | +TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, |
127 | + (a->index << 1) | TOP, FUNC); \ | 41 | + ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, |
128 | + } | 42 | + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) |
129 | + | 43 | |
130 | +DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) | 44 | /* |
131 | +DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) | 45 | *** SVE Floating Point Accumulating Reduction Group |
132 | +DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) | 46 | @@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) |
133 | +DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) | 47 | if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { |
134 | + | 48 | return false; |
135 | +DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) | 49 | } |
136 | +DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) | 50 | + s->is_nonstreaming = true; |
137 | +DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) | 51 | if (!sve_access_check(s)) { |
138 | +DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) | 52 | return true; |
139 | + | 53 | } |
140 | +#undef DO_SVE2_RRXR_TB | 54 | @@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) |
55 | DO_FP3(FADD_zzz, fadd) | ||
56 | DO_FP3(FSUB_zzz, fsub) | ||
57 | DO_FP3(FMUL_zzz, fmul) | ||
58 | -DO_FP3(FTSMUL, ftsmul) | ||
59 | DO_FP3(FRECPS, recps) | ||
60 | DO_FP3(FRSQRTS, rsqrts) | ||
61 | |||
62 | #undef DO_FP3 | ||
63 | |||
64 | +static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { | ||
65 | + NULL, gen_helper_gvec_ftsmul_h, | ||
66 | + gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d | ||
67 | +}; | ||
68 | +TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, | ||
69 | + ftsmul_fns[a->esz], a, 0) | ||
141 | + | 70 | + |
142 | /* | 71 | /* |
143 | *** SVE Floating Point Multiply-Add Indexed Group | 72 | *** SVE Floating Point Arithmetic - Predicated Group |
144 | */ | 73 | */ |
145 | -- | 74 | -- |
146 | 2.20.1 | 75 | 2.25.1 |
147 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-71-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-10-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/cpu.h | 5 +++++ | 11 | target/arm/sme-fa64.decode | 1 - |
9 | target/arm/sve.decode | 4 ++++ | 12 | target/arm/translate-sve.c | 12 ++++++------ |
10 | target/arm/translate-sve.c | 16 ++++++++++++++++ | 13 | 2 files changed, 6 insertions(+), 7 deletions(-) |
11 | 3 files changed, 25 insertions(+) | ||
12 | 14 | ||
13 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
14 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/sme-fa64.decode |
16 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/sme-fa64.decode |
17 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
18 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
19 | } | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
20 | 22 | ||
21 | +static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id) | 23 | -FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA |
22 | +{ | 24 | FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions |
23 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0; | 25 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
24 | +} | 26 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) |
25 | + | ||
26 | static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) | ||
27 | { | ||
28 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; | ||
29 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/arm/sve.decode | ||
32 | +++ b/target/arm/sve.decode | ||
33 | @@ -XXX,XX +XXX,XX @@ AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5 | ||
34 | AESE 01000101 00 10001 0 11100 0 ..... ..... @rdn_rm_e0 | ||
35 | AESD 01000101 00 10001 0 11100 1 ..... ..... @rdn_rm_e0 | ||
36 | SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0 | ||
37 | + | ||
38 | +# SVE2 crypto constructive binary operations | ||
39 | +SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0 | ||
40 | +RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0 | ||
41 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
42 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
43 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
44 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
45 | @@ -XXX,XX +XXX,XX @@ static bool trans_SM4E(DisasContext *s, arg_rrr_esz *a) | 31 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) |
46 | { | 32 | TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) |
47 | return do_sm4(s, a, gen_helper_crypto_sm4e); | 33 | TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) |
48 | } | 34 | |
49 | + | 35 | -TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
50 | +static bool trans_SM4EKEY(DisasContext *s, arg_rrr_esz *a) | 36 | - gen_helper_gvec_smmla_b, a, 0) |
51 | +{ | 37 | -TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
52 | + return do_sm4(s, a, gen_helper_crypto_sm4ekey); | 38 | - gen_helper_gvec_usmmla_b, a, 0) |
53 | +} | 39 | -TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
54 | + | 40 | - gen_helper_gvec_ummla_b, a, 0) |
55 | +static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a) | 41 | +TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
56 | +{ | 42 | + gen_helper_gvec_smmla_b, a, 0) |
57 | + if (!dc_isar_feature(aa64_sve2_sha3, s)) { | 43 | +TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
58 | + return false; | 44 | + gen_helper_gvec_usmmla_b, a, 0) |
59 | + } | 45 | +TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
60 | + if (sve_access_check(s)) { | 46 | + gen_helper_gvec_ummla_b, a, 0) |
61 | + gen_gvec_fn_zzz(s, gen_gvec_rax1, MO_64, a->rd, a->rn, a->rm); | 47 | |
62 | + } | 48 | TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, |
63 | + return true; | 49 | gen_helper_gvec_bfdot, a, 0) |
64 | +} | ||
65 | -- | 50 | -- |
66 | 2.20.1 | 51 | 2.25.1 |
67 | |||
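The TRANS_FEAT_NONSTREAMING lines above are the mechanical heart of this part of the series: each converted instruction keeps its feature gate but is additionally flagged as non-streaming at decode time. The macro itself is defined elsewhere in the series; a minimal sketch, assuming it mirrors TRANS_FEAT with one extra store into the DisasContext:

    /*
     * Sketch of TRANS_FEAT_NONSTREAMING (assumed definition, modelled
     * on TRANS_FEAT): record that the instruction is illegal in
     * streaming mode, then run the usual feature check and translator.
     */
    #define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)              \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a)        \
        {                                                               \
            s->is_nonstreaming = true;                                  \
            return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__);    \
        }

The flag itself has no effect until the instruction performs its SVE/FP access check, which is where the streaming-mode trap is actually raised.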
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-4-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-11-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 14 ++++++++++++ | 11 | target/arm/sme-fa64.decode | 1 - |
9 | target/arm/sve.decode | 5 +++++ | 12 | target/arm/translate-sve.c | 35 ++++++++++++++++++----------------- |
10 | target/arm/sve_helper.c | 44 ++++++++++++++++++++++++++++++++++++++ | 13 | 2 files changed, 18 insertions(+), 18 deletions(-) |
11 | target/arm/translate-sve.c | 39 +++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 102 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 17 | --- a/target/arm/sme-fa64.decode |
17 | +++ b/target/arm/helper-sve.h | 18 | +++ b/target/arm/sme-fa64.decode |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
19 | DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG, | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
20 | void, ptr, ptr, ptr, ptr, i32) | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
21 | 22 | ||
22 | +DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_h, TCG_CALL_NO_RWG, | 23 | -FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions |
23 | + void, ptr, ptr, ptr, ptr, i32) | 24 | FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
24 | +DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_s, TCG_CALL_NO_RWG, | 25 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) |
25 | + void, ptr, ptr, ptr, ptr, i32) | 26 | FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) |
26 | +DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_d, TCG_CALL_NO_RWG, | ||
27 | + void, ptr, ptr, ptr, ptr, i32) | ||
28 | + | ||
29 | +DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_h, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG, | ||
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, ptr, i32) | ||
35 | + | ||
36 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG, | ||
37 | void, ptr, ptr, ptr, ptr, i32) | ||
38 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG, | ||
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/sve.decode | ||
42 | +++ b/target/arm/sve.decode | ||
43 | @@ -XXX,XX +XXX,XX @@ MUL_zzz 00000100 .. 1 ..... 0110 00 ..... ..... @rd_rn_rm | ||
44 | SMULH_zzz 00000100 .. 1 ..... 0110 10 ..... ..... @rd_rn_rm | ||
45 | UMULH_zzz 00000100 .. 1 ..... 0110 11 ..... ..... @rd_rn_rm | ||
46 | PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0 | ||
47 | + | ||
48 | +### SVE2 Integer - Predicated | ||
49 | + | ||
50 | +SADALP_zpzz 01000100 .. 000 100 101 ... ..... ..... @rdm_pg_rn | ||
51 | +UADALP_zpzz 01000100 .. 000 101 101 ... ..... ..... @rdm_pg_rn | ||
52 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/target/arm/sve_helper.c | ||
55 | +++ b/target/arm/sve_helper.c | ||
56 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR) | ||
57 | DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR) | ||
58 | DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL) | ||
59 | |||
60 | +static inline uint16_t do_sadalp_h(int16_t n, int16_t m) | ||
61 | +{ | ||
62 | + int8_t n1 = n, n2 = n >> 8; | ||
63 | + return m + n1 + n2; | ||
64 | +} | ||
65 | + | ||
66 | +static inline uint32_t do_sadalp_s(int32_t n, int32_t m) | ||
67 | +{ | ||
68 | + int16_t n1 = n, n2 = n >> 16; | ||
69 | + return m + n1 + n2; | ||
70 | +} | ||
71 | + | ||
72 | +static inline uint64_t do_sadalp_d(int64_t n, int64_t m) | ||
73 | +{ | ||
74 | + int32_t n1 = n, n2 = n >> 32; | ||
75 | + return m + n1 + n2; | ||
76 | +} | ||
77 | + | ||
78 | +DO_ZPZZ(sve2_sadalp_zpzz_h, int16_t, H1_2, do_sadalp_h) | ||
79 | +DO_ZPZZ(sve2_sadalp_zpzz_s, int32_t, H1_4, do_sadalp_s) | ||
80 | +DO_ZPZZ_D(sve2_sadalp_zpzz_d, int64_t, do_sadalp_d) | ||
81 | + | ||
82 | +static inline uint16_t do_uadalp_h(uint16_t n, uint16_t m) | ||
83 | +{ | ||
84 | + uint8_t n1 = n, n2 = n >> 8; | ||
85 | + return m + n1 + n2; | ||
86 | +} | ||
87 | + | ||
88 | +static inline uint32_t do_uadalp_s(uint32_t n, uint32_t m) | ||
89 | +{ | ||
90 | + uint16_t n1 = n, n2 = n >> 16; | ||
91 | + return m + n1 + n2; | ||
92 | +} | ||
93 | + | ||
94 | +static inline uint64_t do_uadalp_d(uint64_t n, uint64_t m) | ||
95 | +{ | ||
96 | + uint32_t n1 = n, n2 = n >> 32; | ||
97 | + return m + n1 + n2; | ||
98 | +} | ||
99 | + | ||
100 | +DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h) | ||
101 | +DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s) | ||
102 | +DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d) | ||
103 | + | ||
104 | #undef DO_ZPZZ | ||
105 | #undef DO_ZPZZ_D | ||
106 | |||
107 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
108 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
109 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
110 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
111 | @@ -XXX,XX +XXX,XX @@ static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a) | 31 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) |
112 | { | 32 | static gen_helper_gvec_flags_4 * const match_fns[4] = { |
113 | return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b); | 33 | gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL |
114 | } | 34 | }; |
115 | + | 35 | -TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) |
116 | +/* | 36 | +TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) |
117 | + * SVE2 Integer - Predicated | 37 | |
118 | + */ | 38 | static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { |
119 | + | 39 | gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL |
120 | +static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a, | 40 | }; |
121 | + gen_helper_gvec_4 *fn) | 41 | -TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) |
122 | +{ | 42 | +TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) |
123 | + if (!dc_isar_feature(aa64_sve2, s)) { | 43 | |
124 | + return false; | 44 | static gen_helper_gvec_4 * const histcnt_fns[4] = { |
125 | + } | 45 | NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d |
126 | + return do_zpzz_ool(s, a, fn); | 46 | }; |
127 | +} | 47 | -TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, |
128 | + | 48 | - histcnt_fns[a->esz], a, 0) |
129 | +static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a) | 49 | +TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, |
130 | +{ | 50 | + histcnt_fns[a->esz], a, 0) |
131 | + static gen_helper_gvec_4 * const fns[3] = { | 51 | |
132 | + gen_helper_sve2_sadalp_zpzz_h, | 52 | -TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, |
133 | + gen_helper_sve2_sadalp_zpzz_s, | 53 | - a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) |
134 | + gen_helper_sve2_sadalp_zpzz_d, | 54 | +TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, |
135 | + }; | 55 | + a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) |
136 | + if (a->esz == 0) { | 56 | |
137 | + return false; | 57 | DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) |
138 | + } | 58 | DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) |
139 | + return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]); | 59 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, |
140 | +} | 60 | TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, |
141 | + | 61 | a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0) |
142 | +static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a) | 62 | |
143 | +{ | 63 | -TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, |
144 | + static gen_helper_gvec_4 * const fns[3] = { | 64 | - gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) |
145 | + gen_helper_sve2_uadalp_zpzz_h, | 65 | +TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, |
146 | + gen_helper_sve2_uadalp_zpzz_s, | 66 | + gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) |
147 | + gen_helper_sve2_uadalp_zpzz_d, | 67 | |
148 | + }; | 68 | -TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, |
149 | + if (a->esz == 0) { | 69 | - gen_helper_crypto_aese, a, false) |
150 | + return false; | 70 | -TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, |
151 | + } | 71 | - gen_helper_crypto_aese, a, true) |
152 | + return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]); | 72 | +TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, |
153 | +} | 73 | + gen_helper_crypto_aese, a, false) |
74 | +TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, | ||
75 | + gen_helper_crypto_aese, a, true) | ||
76 | |||
77 | -TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
78 | - gen_helper_crypto_sm4e, a, 0) | ||
79 | -TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
80 | - gen_helper_crypto_sm4ekey, a, 0) | ||
81 | +TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
82 | + gen_helper_crypto_sm4e, a, 0) | ||
83 | +TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, | ||
84 | + gen_helper_crypto_sm4ekey, a, 0) | ||
85 | |||
86 | -TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a) | ||
87 | +TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, | ||
88 | + gen_gvec_rax1, a) | ||
89 | |||
90 | TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, | ||
91 | gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR) | ||
154 | -- | 92 | -- |
155 | 2.20.1 | 93 | 2.25.1 |
156 | |||
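Each FAIL pattern deleted from sme-fa64.decode in these patches is replaced by the per-instruction flag, so coverage moves from a blanket encoding table to the individual translators. The blanket table presumably works by running a generated side decoder whose FAIL handler sets the same flag; a sketch under that assumption (the argument type name is illustrative):

    /*
     * Sketch of the handler behind the FAIL patterns in sme-fa64.decode.
     * The FA64 decoder is assumed to run as a side pass ahead of the
     * main A64 decoder: matching a FAIL pattern only records the
     * non-streaming property, and the instruction is still decoded
     * normally afterwards.
     */
    static bool trans_FAIL(DisasContext *s, arg_FAIL *a)
    {
        s->is_nonstreaming = true;
        return true;
    }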
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Rename the existing sve_while (less-than) helper to sve_whilel | 3 | Mark these as non-streaming instructions, which should trap |
4 | to make room for a new sve_whileg helper for greater-than. | 4 | if full a64 support is not enabled in streaming mode. |
5 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210525010358.152808-31-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-12-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/helper-sve.h | 3 +- | 11 | target/arm/sme-fa64.decode | 9 --------- |
12 | target/arm/sve.decode | 2 +- | 12 | target/arm/translate-sve.c | 6 ++++++ |
13 | target/arm/sve_helper.c | 38 +++++++++++++++++++++++++- | 13 | 2 files changed, 6 insertions(+), 9 deletions(-) |
14 | target/arm/translate-sve.c | 56 ++++++++++++++++++++++++++++---------- | ||
15 | 4 files changed, 82 insertions(+), 17 deletions(-) | ||
16 | 14 | ||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/helper-sve.h | 17 | --- a/target/arm/sme-fa64.decode |
20 | +++ b/target/arm/helper-sve.h | 18 | +++ b/target/arm/sme-fa64.decode |
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
22 | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | |
23 | DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
24 | 22 | ||
25 | -DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32) | 23 | -FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar) |
26 | +DEF_HELPER_FLAGS_3(sve_whilel, TCG_CALL_NO_RWG, i32, ptr, i32, i32) | 24 | FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) |
27 | +DEF_HELPER_FLAGS_3(sve_whileg, TCG_CALL_NO_RWG, i32, ptr, i32, i32) | 25 | FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) |
28 | 26 | -FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm) | |
29 | DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | 27 | -FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector) |
30 | DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | 28 | -FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector) |
31 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 29 | -FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector) |
32 | index XXXXXXX..XXXXXXX 100644 | 30 | FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) |
33 | --- a/target/arm/sve.decode | 31 | FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) |
34 | +++ b/target/arm/sve.decode | 32 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) |
35 | @@ -XXX,XX +XXX,XX @@ SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred | 33 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) |
36 | CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000 | 34 | FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch |
37 | 35 | -FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar) | |
38 | # SVE integer compare scalar count and limit | 36 | -FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar) |
39 | -WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 1 rn:5 eq:1 rd:4 | 37 | -FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector) |
40 | +WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4 | 38 | -FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc) |
41 | |||
42 | ### SVE Integer Wide Immediate - Unpredicated Group | ||
43 | |||
44 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/target/arm/sve_helper.c | ||
47 | +++ b/target/arm/sve_helper.c | ||
48 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc) | ||
49 | return sum; | ||
50 | } | ||
51 | |||
52 | -uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc) | ||
53 | +uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc) | ||
54 | { | ||
55 | intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); | ||
56 | intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); | ||
57 | @@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc) | ||
58 | return predtest_ones(d, oprsz, esz_mask); | ||
59 | } | ||
60 | |||
61 | +uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc) | ||
62 | +{ | ||
63 | + intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); | ||
64 | + intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); | ||
65 | + uint64_t esz_mask = pred_esz_masks[esz]; | ||
66 | + ARMPredicateReg *d = vd; | ||
67 | + intptr_t i, invcount, oprbits; | ||
68 | + uint64_t bits; | ||
69 | + | ||
70 | + if (count == 0) { | ||
71 | + return do_zero(d, oprsz); | ||
72 | + } | ||
73 | + | ||
74 | + oprbits = oprsz * 8; | ||
75 | + tcg_debug_assert(count <= oprbits); | ||
76 | + | ||
77 | + bits = esz_mask; | ||
78 | + if (oprbits & 63) { | ||
79 | + bits &= MAKE_64BIT_MASK(0, oprbits & 63); | ||
80 | + } | ||
81 | + | ||
82 | + invcount = oprbits - count; | ||
83 | + for (i = (oprsz - 1) / 8; i > invcount / 64; --i) { | ||
84 | + d->p[i] = bits; | ||
85 | + bits = esz_mask; | ||
86 | + } | ||
87 | + | ||
88 | + d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64); | ||
89 | + | ||
90 | + while (--i >= 0) { | ||
91 | + d->p[i] = 0; | ||
92 | + } | ||
93 | + | ||
94 | + return predtest_ones(d, oprsz, esz_mask); | ||
95 | +} | ||
96 | + | ||
97 | /* Recursive reduction on a function; | ||
98 | * C.f. the ARM ARM function ReducePredicated. | ||
99 | * | ||
100 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 39 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
101 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
102 | --- a/target/arm/translate-sve.c | 41 | --- a/target/arm/translate-sve.c |
103 | +++ b/target/arm/translate-sve.c | 42 | +++ b/target/arm/translate-sve.c |
104 | @@ -XXX,XX +XXX,XX @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) | 43 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) |
105 | unsigned vsz = vec_full_reg_size(s); | 44 | if (!dc_isar_feature(aa64_sve, s)) { |
106 | unsigned desc = 0; | 45 | return false; |
107 | TCGCond cond; | 46 | } |
108 | + uint64_t maxval; | 47 | + s->is_nonstreaming = true; |
109 | + /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */ | ||
110 | + bool eq = a->eq == a->lt; | ||
111 | |||
112 | + /* The greater-than conditions are all SVE2. */ | ||
113 | + if (!a->lt && !dc_isar_feature(aa64_sve2, s)) { | ||
114 | + return false; | ||
115 | + } | ||
116 | if (!sve_access_check(s)) { | 48 | if (!sve_access_check(s)) { |
117 | return true; | 49 | return true; |
118 | } | 50 | } |
119 | @@ -XXX,XX +XXX,XX @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) | 51 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) |
120 | */ | 52 | if (!dc_isar_feature(aa64_sve, s)) { |
121 | t0 = tcg_temp_new_i64(); | 53 | return false; |
122 | t1 = tcg_temp_new_i64(); | ||
123 | - tcg_gen_sub_i64(t0, op1, op0); | ||
124 | + | ||
125 | + if (a->lt) { | ||
126 | + tcg_gen_sub_i64(t0, op1, op0); | ||
127 | + if (a->u) { | ||
128 | + maxval = a->sf ? UINT64_MAX : UINT32_MAX; | ||
129 | + cond = eq ? TCG_COND_LEU : TCG_COND_LTU; | ||
130 | + } else { | ||
131 | + maxval = a->sf ? INT64_MAX : INT32_MAX; | ||
132 | + cond = eq ? TCG_COND_LE : TCG_COND_LT; | ||
133 | + } | ||
134 | + } else { | ||
135 | + tcg_gen_sub_i64(t0, op0, op1); | ||
136 | + if (a->u) { | ||
137 | + maxval = 0; | ||
138 | + cond = eq ? TCG_COND_GEU : TCG_COND_GTU; | ||
139 | + } else { | ||
140 | + maxval = a->sf ? INT64_MIN : INT32_MIN; | ||
141 | + cond = eq ? TCG_COND_GE : TCG_COND_GT; | ||
142 | + } | ||
143 | + } | ||
144 | |||
145 | tmax = tcg_const_i64(vsz >> a->esz); | ||
146 | - if (a->eq) { | ||
147 | + if (eq) { | ||
148 | /* Equality means one more iteration. */ | ||
149 | tcg_gen_addi_i64(t0, t0, 1); | ||
150 | |||
151 | - /* If op1 is max (un)signed integer (and the only time the addition | ||
152 | - * above could overflow), then we produce an all-true predicate by | ||
153 | - * setting the count to the vector length. This is because the | ||
154 | - * pseudocode is described as an increment + compare loop, and the | ||
155 | - * max integer would always compare true. | ||
156 | + /* | ||
157 | + * For the less-than while, if op1 is maxval (and the only time | ||
158 | + * the addition above could overflow), then we produce an all-true | ||
159 | + * predicate by setting the count to the vector length. This is | ||
160 | + * because the pseudocode is described as an increment + compare | ||
161 | + * loop, and the maximum integer would always compare true. | ||
162 | + * Similarly, the greater-than while has the same issue with the | ||
163 | + * minimum integer due to the decrement + compare loop. | ||
164 | */ | ||
165 | - tcg_gen_movi_i64(t1, (a->sf | ||
166 | - ? (a->u ? UINT64_MAX : INT64_MAX) | ||
167 | - : (a->u ? UINT32_MAX : INT32_MAX))); | ||
168 | + tcg_gen_movi_i64(t1, maxval); | ||
169 | tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0); | ||
170 | } | 54 | } |
171 | 55 | + s->is_nonstreaming = true; | |
172 | @@ -XXX,XX +XXX,XX @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) | 56 | if (!sve_access_check(s)) { |
173 | tcg_temp_free_i64(tmax); | 57 | return true; |
174 | 58 | } | |
175 | /* Set the count to zero if the condition is false. */ | 59 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) |
176 | - cond = (a->u | 60 | if (!dc_isar_feature(aa64_sve2, s)) { |
177 | - ? (a->eq ? TCG_COND_LEU : TCG_COND_LTU) | 61 | return false; |
178 | - : (a->eq ? TCG_COND_LE : TCG_COND_LT)); | 62 | } |
179 | tcg_gen_movi_i64(t1, 0); | 63 | + s->is_nonstreaming = true; |
180 | tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); | 64 | if (!sve_access_check(s)) { |
181 | tcg_temp_free_i64(t1); | 65 | return true; |
182 | @@ -XXX,XX +XXX,XX @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) | 66 | } |
183 | ptr = tcg_temp_new_ptr(); | 67 | @@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) |
184 | tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); | 68 | if (!dc_isar_feature(aa64_sve, s)) { |
185 | 69 | return false; | |
186 | - gen_helper_sve_while(t2, ptr, t2, t3); | 70 | } |
187 | + if (a->lt) { | 71 | + s->is_nonstreaming = true; |
188 | + gen_helper_sve_whilel(t2, ptr, t2, t3); | 72 | if (!sve_access_check(s)) { |
189 | + } else { | 73 | return true; |
190 | + gen_helper_sve_whileg(t2, ptr, t2, t3); | 74 | } |
191 | + } | 75 | @@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) |
192 | do_pred_flags(t2); | 76 | if (!dc_isar_feature(aa64_sve, s)) { |
193 | 77 | return false; | |
194 | tcg_temp_free_ptr(ptr); | 78 | } |
79 | + s->is_nonstreaming = true; | ||
80 | if (!sve_access_check(s)) { | ||
81 | return true; | ||
82 | } | ||
83 | @@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) | ||
84 | if (!dc_isar_feature(aa64_sve2, s)) { | ||
85 | return false; | ||
86 | } | ||
87 | + s->is_nonstreaming = true; | ||
88 | if (!sve_access_check(s)) { | ||
89 | return true; | ||
90 | } | ||
195 | -- | 91 | -- |
196 | 2.20.1 | 92 | 2.25.1 |
197 | |||
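For the gather/scatter loads and stores the trans_* functions are hand-written, so the flag cannot come from a macro and is set inline instead. Condensing trans_LD1_zprz from the hunk above, the idiom is:

    static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
    {
        if (!dc_isar_feature(aa64_sve, s)) {
            return false;            /* decode fails: insn not present */
        }
        s->is_nonstreaming = true;   /* must precede the access check */
        if (!sve_access_check(s)) {
            return true;             /* access check emitted a trap */
        }
        /* ... normal TCG code generation for the gather load ... */
        return true;
    }

The ordering matters: the flag has to be set before sve_access_check() runs, because the access check is the point at which the streaming-mode trap is generated.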
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap if full | ||
4 | a64 support is not enabled in streaming mode. In this case, introduce | ||
5 | PRF_ns (prefetch non-streaming) to handle the checks. | ||
2 | 6 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-54-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-13-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 11 | --- |
8 | target/arm/sve.decode | 7 +++++++ | 12 | target/arm/sme-fa64.decode | 3 --- |
9 | target/arm/translate-sve.c | 30 ++++++++++++++++++++++++++++++ | 13 | target/arm/sve.decode | 10 +++++----- |
10 | 2 files changed, 37 insertions(+) | 14 | target/arm/translate-sve.c | 11 +++++++++++ |
15 | 3 files changed, 16 insertions(+), 8 deletions(-) | ||
11 | 16 | ||
17 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/sme-fa64.decode | ||
20 | +++ b/target/arm/sme-fa64.decode | ||
21 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS | ||
22 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) | ||
23 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | ||
24 | |||
25 | -FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm) | ||
26 | -FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector) | ||
27 | FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) | ||
28 | FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) | ||
29 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) | ||
30 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) | ||
31 | -FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch | ||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 32 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
13 | index XXXXXXX..XXXXXXX 100644 | 33 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/sve.decode | 34 | --- a/target/arm/sve.decode |
15 | +++ b/target/arm/sve.decode | 35 | +++ b/target/arm/sve.decode |
16 | @@ -XXX,XX +XXX,XX @@ MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s | 36 | @@ -XXX,XX +XXX,XX @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \ |
17 | DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \ | 37 | @rpri_load_msz nreg=0 |
18 | ra=%reg_movprfx | 38 | |
19 | 39 | # SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets) | |
20 | +#### SVE Multiply - Indexed | 40 | -PRF 1000010 00 -1 ----- 0-- --- ----- 0 ---- |
21 | + | 41 | +PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ---- |
22 | # SVE integer dot product (indexed) | 42 | |
23 | SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 | 43 | # SVE 32-bit gather prefetch (vector plus immediate) |
24 | SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 | 44 | -PRF 1000010 -- 00 ----- 111 --- ----- 0 ---- |
25 | UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 | 45 | +PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ---- |
26 | UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 | 46 | |
27 | 47 | # SVE contiguous prefetch (scalar plus immediate) | |
28 | +# SVE2 integer multiply (indexed) | 48 | PRF 1000010 11 1- ----- 0-- --- ----- 0 ---- |
29 | +MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | 49 | @@ -XXX,XX +XXX,XX @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ |
30 | +MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | 50 | @rpri_g_load esz=3 |
31 | +MUL_zzx_d 01000100 11 1 ..... 111110 ..... ..... @rrx_1 esz=3 | 51 | |
32 | + | 52 | # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets) |
33 | # SVE floating-point complex add (predicated) | 53 | -PRF 1100010 00 11 ----- 1-- --- ----- 0 ---- |
34 | FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \ | 54 | +PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ---- |
35 | rn=%reg_movprfx | 55 | |
56 | # SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets) | ||
57 | -PRF 1100010 00 -1 ----- 0-- --- ----- 0 ---- | ||
58 | +PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ---- | ||
59 | |||
60 | # SVE 64-bit gather prefetch (vector plus immediate) | ||
61 | -PRF 1100010 -- 00 ----- 111 --- ----- 0 ---- | ||
62 | +PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ---- | ||
63 | |||
64 | ### SVE Memory Store Group | ||
65 | |||
36 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 66 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
37 | index XXXXXXX..XXXXXXX 100644 | 67 | index XXXXXXX..XXXXXXX 100644 |
38 | --- a/target/arm/translate-sve.c | 68 | --- a/target/arm/translate-sve.c |
39 | +++ b/target/arm/translate-sve.c | 69 | +++ b/target/arm/translate-sve.c |
40 | @@ -XXX,XX +XXX,XX @@ static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a) | 70 | @@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) |
41 | return true; | 71 | return true; |
42 | } | 72 | } |
43 | 73 | ||
44 | +/* | 74 | +static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) |
45 | + * SVE Multiply - Indexed | ||
46 | + */ | ||
47 | + | ||
48 | static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a, | ||
49 | gen_helper_gvec_4 *fn) | ||
50 | { | ||
51 | @@ -XXX,XX +XXX,XX @@ DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h) | ||
52 | |||
53 | #undef DO_RRXR | ||
54 | |||
55 | +static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data, | ||
56 | + gen_helper_gvec_3 *fn) | ||
57 | +{ | 75 | +{ |
58 | + if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { | 76 | + if (!dc_isar_feature(aa64_sve, s)) { |
59 | + return false; | 77 | + return false; |
60 | + } | 78 | + } |
61 | + if (sve_access_check(s)) { | 79 | + /* Prefetch is a nop within QEMU. */ |
62 | + unsigned vsz = vec_full_reg_size(s); | 80 | + s->is_nonstreaming = true; |
63 | + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), | 81 | + (void)sve_access_check(s); |
64 | + vec_full_reg_offset(s, rn), | ||
65 | + vec_full_reg_offset(s, rm), | ||
66 | + vsz, vsz, data, fn); | ||
67 | + } | ||
68 | + return true; | 82 | + return true; |
69 | +} | 83 | +} |
70 | + | 84 | + |
71 | +#define DO_SVE2_RRX(NAME, FUNC) \ | ||
72 | + static bool NAME(DisasContext *s, arg_rrx_esz *a) \ | ||
73 | + { return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, a->index, FUNC); } | ||
74 | + | ||
75 | +DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h) | ||
76 | +DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s) | ||
77 | +DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d) | ||
78 | + | ||
79 | +#undef DO_SVE2_RRX | ||
80 | + | ||
81 | /* | 85 | /* |
82 | *** SVE Floating Point Multiply-Add Indexed Group | 86 | * Move Prefix |
83 | */ | 87 | * |
84 | -- | 88 | -- |
85 | 2.20.1 | 89 | 2.25.1 |
86 | |||
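Splitting the prefetch patterns into PRF and PRF_ns keeps the streaming/non-streaming distinction in the decode tables rather than in C code: only the gather prefetch forms are renamed, while the contiguous prefetches keep using the existing handler. By analogy with the new trans_PRF_ns above, the pre-existing trans_PRF presumably reads (reconstructed from context, not quoted from the patch):

    static bool trans_PRF(DisasContext *s, arg_PRF *a)
    {
        if (!dc_isar_feature(aa64_sve, s)) {
            return false;
        }
        /* Prefetch is a nop within QEMU. */
        (void)sve_access_check(s);
        return true;
    }

The (void) cast makes explicit that the access check is run purely for its side effect of raising any pending trap; since the prefetch generates no code, the check's result is irrelevant.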
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as a non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-67-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-14-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/cpu.h | 5 +++++ | 11 | target/arm/sme-fa64.decode | 2 -- |
9 | target/arm/helper.h | 4 ++++ | 12 | target/arm/translate-sve.c | 2 ++ |
10 | target/arm/sve.decode | 4 ++++ | 13 | 2 files changed, 2 insertions(+), 2 deletions(-) |
11 | target/arm/translate-sve.c | 16 ++++++++++++++++ | ||
12 | target/arm/vec_helper.c | 2 ++ | ||
13 | 5 files changed, 31 insertions(+) | ||
14 | 14 | ||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
16 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/sme-fa64.decode |
18 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/sme-fa64.decode |
19 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
20 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; | 20 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
21 | } | 21 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) |
22 | 22 | ||
23 | +static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) | 23 | -FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar) |
24 | +{ | 24 | -FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm) |
25 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; | 25 | FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) |
26 | +} | 26 | FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) |
27 | + | ||
28 | static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) | ||
29 | { | ||
30 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; | ||
31 | diff --git a/target/arm/helper.h b/target/arm/helper.h | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/helper.h | ||
34 | +++ b/target/arm/helper.h | ||
35 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG, | ||
36 | void, ptr, ptr, ptr, ptr, i32) | ||
37 | DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG, | ||
38 | void, ptr, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG, | ||
40 | + void, ptr, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG, | ||
42 | + void, ptr, ptr, ptr, ptr, i32) | ||
43 | |||
44 | DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, | ||
45 | void, ptr, ptr, ptr, ptr, i32) | ||
46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/target/arm/sve.decode | ||
49 | +++ b/target/arm/sve.decode | ||
50 | @@ -XXX,XX +XXX,XX @@ SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1 | ||
51 | SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2 | ||
52 | SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3 | ||
53 | |||
54 | +# SVE mixed sign dot product (indexed) | ||
55 | +USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2 | ||
56 | +SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2 | ||
57 | + | ||
58 | # SVE2 saturating multiply-add (indexed) | ||
59 | SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2 | ||
60 | SQDMLALB_zzxw_d 01000100 11 1 ..... 0010.0 ..... ..... @rrxr_2a esz=3 | ||
61 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
62 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
63 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
64 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
65 | @@ -XXX,XX +XXX,XX @@ DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h) | 31 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) |
66 | DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b) | 32 | if (!dc_isar_feature(aa64_sve, s)) { |
67 | DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h) | 33 | return false; |
68 | 34 | } | |
69 | +static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a) | 35 | + s->is_nonstreaming = true; |
70 | +{ | 36 | if (sve_access_check(s)) { |
71 | + if (!dc_isar_feature(aa64_sve_i8mm, s)) { | 37 | TCGv_i64 addr = new_tmp_a64(s); |
72 | + return false; | 38 | tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); |
73 | + } | 39 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) |
74 | + return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b); | 40 | if (!dc_isar_feature(aa64_sve, s)) { |
75 | +} | 41 | return false; |
76 | + | 42 | } |
77 | +static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a) | 43 | + s->is_nonstreaming = true; |
78 | +{ | 44 | if (sve_access_check(s)) { |
79 | + if (!dc_isar_feature(aa64_sve_i8mm, s)) { | 45 | int vsz = vec_full_reg_size(s); |
80 | + return false; | 46 | int elements = vsz >> dtype_esz[a->dtype]; |
81 | + } | ||
82 | + return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b); | ||
83 | +} | ||
84 | + | ||
85 | #undef DO_RRXR | ||
86 | |||
87 | static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data, | ||
88 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
89 | index XXXXXXX..XXXXXXX 100644 | ||
90 | --- a/target/arm/vec_helper.c | ||
91 | +++ b/target/arm/vec_helper.c | ||
92 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
93 | |||
94 | DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4) | ||
95 | DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4) | ||
96 | +DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4) | ||
97 | +DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4) | ||
98 | DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, ) | ||
99 | DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, ) | ||
100 | |||
101 | -- | 47 | -- |
102 | 2.20.1 | 48 | 2.25.1 |
103 | |||
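As with the gather loads, the first-fault and no-fault loads set the flag by hand before their access check. The consumer of s->is_nonstreaming lives elsewhere in the series, inside the FP/SVE access check; roughly of this shape (a sketch: the helper names gen_exception_insn and syn_smetrap are assumed from context, and the surrounding function is elided):

    /*
     * Sketch of where the flag is consumed: when PSTATE.SM is set and
     * full A64 is not available, a non-streaming instruction raises the
     * SME exception with the "streaming" syndrome instead of being
     * translated.
     */
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }

s->sme_trap_nonstreaming is assumed to be precomputed from the TB flags, so the whole decision is made once, at translation time.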
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Mark these as non-streaming instructions, which should trap | ||
4 | if full a64 support is not enabled in streaming mode. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-55-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-15-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/sve.decode | 8 ++++++++ | 11 | target/arm/sme-fa64.decode | 3 --- |
9 | target/arm/translate-sve.c | 31 +++++++++++++++++++++++++++++++ | 12 | target/arm/translate-sve.c | 2 ++ |
10 | 2 files changed, 39 insertions(+) | 13 | 2 files changed, 2 insertions(+), 3 deletions(-) |
11 | 14 | ||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 15 | diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode |
13 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/sve.decode | 17 | --- a/target/arm/sme-fa64.decode |
15 | +++ b/target/arm/sve.decode | 18 | +++ b/target/arm/sme-fa64.decode |
16 | @@ -XXX,XX +XXX,XX @@ SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 | 19 | @@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS |
17 | UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 | 20 | # --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) |
18 | UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 | 21 | # --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) |
19 | 22 | # --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) | |
20 | +# SVE2 integer multiply-add (indexed) | 23 | - |
21 | +MLA_zzxz_h 01000100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=1 | 24 | -FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar) |
22 | +MLA_zzxz_s 01000100 10 1 ..... 000010 ..... ..... @rrxr_2 esz=2 | 25 | -FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm) |
23 | +MLA_zzxz_d 01000100 11 1 ..... 000010 ..... ..... @rrxr_1 esz=3 | ||
24 | +MLS_zzxz_h 01000100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=1 | ||
25 | +MLS_zzxz_s 01000100 10 1 ..... 000011 ..... ..... @rrxr_2 esz=2 | ||
26 | +MLS_zzxz_d 01000100 11 1 ..... 000011 ..... ..... @rrxr_1 esz=3 | ||
27 | + | ||
28 | # SVE2 integer multiply (indexed) | ||
29 | MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | ||
30 | MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | ||
31 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
32 | index XXXXXXX..XXXXXXX 100644 | 27 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/target/arm/translate-sve.c | 28 | --- a/target/arm/translate-sve.c |
34 | +++ b/target/arm/translate-sve.c | 29 | +++ b/target/arm/translate-sve.c |
35 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d) | 30 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) |
36 | 31 | if (a->rm == 31) { | |
37 | #undef DO_SVE2_RRX | 32 | return false; |
38 | 33 | } | |
39 | +static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra, | 34 | + s->is_nonstreaming = true; |
40 | + int data, gen_helper_gvec_4 *fn) | 35 | if (sve_access_check(s)) { |
41 | +{ | 36 | TCGv_i64 addr = new_tmp_a64(s); |
42 | + if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { | 37 | tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); |
43 | + return false; | 38 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) |
44 | + } | 39 | if (!dc_isar_feature(aa64_sve_f64mm, s)) { |
45 | + if (sve_access_check(s)) { | 40 | return false; |
46 | + unsigned vsz = vec_full_reg_size(s); | 41 | } |
47 | + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), | 42 | + s->is_nonstreaming = true; |
48 | + vec_full_reg_offset(s, rn), | 43 | if (sve_access_check(s)) { |
49 | + vec_full_reg_offset(s, rm), | 44 | TCGv_i64 addr = new_tmp_a64(s); |
50 | + vec_full_reg_offset(s, ra), | 45 | tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); |
51 | + vsz, vsz, data, fn); | ||
52 | + } | ||
53 | + return true; | ||
54 | +} | ||
55 | + | ||
56 | +#define DO_SVE2_RRXR(NAME, FUNC) \ | ||
57 | + static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ | ||
58 | + { return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, a->index, FUNC); } | ||
59 | + | ||
60 | +DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h) | ||
61 | +DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s) | ||
62 | +DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d) | ||
63 | + | ||
64 | +DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h) | ||
65 | +DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s) | ||
66 | +DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d) | ||
67 | + | ||
68 | +#undef DO_SVE2_RRXR | ||
69 | + | ||
70 | /* | ||
71 | *** SVE Floating Point Multiply-Add Indexed Group | ||
72 | */ | ||
73 | -- | 46 | -- |
74 | 2.20.1 | 47 | 2.25.1 |
75 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | In addition, use the same vector generator interface for AdvSIMD. | 3 | These functions will be used to verify that the CPU |
4 | This fixes a bug in which the AdvSIMD insn failed to clear the | 4 | is in the correct state for a given instruction. |
5 | high bits of the SVE register. | ||
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-44-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-16-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/helper-sve.h | 4 ++ | 11 | target/arm/translate-a64.h | 21 +++++++++++++++++++++ |
13 | target/arm/helper.h | 2 + | 12 | target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++ |
14 | target/arm/translate-a64.h | 3 ++ | 13 | 2 files changed, 55 insertions(+) |
15 | target/arm/sve.decode | 4 ++ | ||
16 | target/arm/sve_helper.c | 39 ++++++++++++++ | ||
17 | target/arm/translate-a64.c | 25 ++------- | ||
18 | target/arm/translate-sve.c | 104 +++++++++++++++++++++++++++++++++++++ | ||
19 | target/arm/vec_helper.c | 12 +++++ | ||
20 | 8 files changed, 172 insertions(+), 21 deletions(-) | ||
21 | 14 | ||
22 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/target/arm/helper-sve.h | ||
25 | +++ b/target/arm/helper-sve.h | ||
26 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG, | ||
27 | |||
28 | DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | |||
30 | +DEF_HELPER_FLAGS_4(sve2_xar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | + | ||
34 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
35 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
36 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
37 | diff --git a/target/arm/helper.h b/target/arm/helper.h | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/target/arm/helper.h | ||
40 | +++ b/target/arm/helper.h | ||
41 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG, | ||
42 | DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG, | ||
43 | void, ptr, ptr, ptr, ptr, i32) | ||
44 | |||
45 | +DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
46 | + | ||
47 | #ifdef TARGET_AARCH64 | ||
48 | #include "helper-a64.h" | ||
49 | #include "helper-sve.h" | ||
50 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | 15 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
51 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/target/arm/translate-a64.h | 17 | --- a/target/arm/translate-a64.h |
53 | +++ b/target/arm/translate-a64.h | 18 | +++ b/target/arm/translate-a64.h |
54 | @@ -XXX,XX +XXX,XX @@ bool disas_sve(DisasContext *, uint32_t); | 19 | @@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v); |
55 | 20 | bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, | |
56 | void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, | 21 | unsigned int imms, unsigned int immr); |
57 | uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); | 22 | bool sve_access_check(DisasContext *s); |
58 | +void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, | 23 | +bool sme_enabled_check(DisasContext *s); |
59 | + uint32_t rm_ofs, int64_t shift, | 24 | +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); |
60 | + uint32_t opr_sz, uint32_t max_sz); | ||
61 | |||
62 | #endif /* TARGET_ARM_TRANSLATE_A64_H */ | ||
63 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/target/arm/sve.decode | ||
66 | +++ b/target/arm/sve.decode | ||
67 | @@ -XXX,XX +XXX,XX @@ | ||
68 | &rr_dbm rd rn dbm | ||
69 | &rrri rd rn rm imm | ||
70 | &rri_esz rd rn imm esz | ||
71 | +&rrri_esz rd rn rm imm esz | ||
72 | &rrr_esz rd rn rm esz | ||
73 | &rpr_esz rd pg rn esz | ||
74 | &rpr_s rd pg rn s | ||
75 | @@ -XXX,XX +XXX,XX @@ ORR_zzz 00000100 01 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
76 | EOR_zzz 00000100 10 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
77 | BIC_zzz 00000100 11 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
78 | |||
79 | +XAR 00000100 .. 1 ..... 001 101 rm:5 rd:5 &rrri_esz \ | ||
80 | + rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr | ||
81 | + | 25 | + |
82 | # SVE2 bitwise ternary operations | 26 | +/* This function corresponds to CheckStreamingSVEEnabled. */ |
83 | EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0 | 27 | +static inline bool sme_sm_enabled_check(DisasContext *s) |
84 | BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 | ||
85 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/target/arm/sve_helper.c | ||
88 | +++ b/target/arm/sve_helper.c | ||
89 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc) | ||
90 | *(uint64_t *)(vd + i + 8) = out1; | ||
91 | } | ||
92 | } | ||
93 | + | ||
94 | +void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
95 | +{ | 28 | +{ |
96 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 29 | + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK); |
97 | + int shr = simd_data(desc); | ||
98 | + int shl = 8 - shr; | ||
99 | + uint64_t mask = dup_const(MO_8, 0xff >> shr); | ||
100 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
101 | + | ||
102 | + for (i = 0; i < opr_sz; ++i) { | ||
103 | + uint64_t t = n[i] ^ m[i]; | ||
104 | + d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask); | ||
105 | + } | ||
106 | +} | 30 | +} |
107 | + | 31 | + |
108 | +void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc) | 32 | +/* This function corresponds to CheckSMEAndZAEnabled. */ |
33 | +static inline bool sme_za_enabled_check(DisasContext *s) | ||
109 | +{ | 34 | +{ |
110 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 35 | + return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK); |
111 | + int shr = simd_data(desc); | ||
112 | + int shl = 16 - shr; | ||
113 | + uint64_t mask = dup_const(MO_16, 0xffff >> shr); | ||
114 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
115 | + | ||
116 | + for (i = 0; i < opr_sz; ++i) { | ||
117 | + uint64_t t = n[i] ^ m[i]; | ||
118 | + d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask); | ||
119 | + } | ||
120 | +} | 36 | +} |
121 | + | 37 | + |
122 | +void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc) | 38 | +/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */ |
39 | +static inline bool sme_smza_enabled_check(DisasContext *s) | ||
123 | +{ | 40 | +{ |
124 | + intptr_t i, opr_sz = simd_oprsz(desc) / 4; | 41 | + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); |
125 | + int shr = simd_data(desc); | 42 | +} |
126 | + uint32_t *d = vd, *n = vn, *m = vm; | ||
127 | + | 43 | + |
128 | + for (i = 0; i < opr_sz; ++i) { | 44 | TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); |
129 | + d[i] = ror32(n[i] ^ m[i], shr); | 45 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, |
130 | + } | 46 | bool tag_checked, int log2_size); |
131 | +} | ||
132 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 47 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
133 | index XXXXXXX..XXXXXXX 100644 | 48 | index XXXXXXX..XXXXXXX 100644 |
134 | --- a/target/arm/translate-a64.c | 49 | --- a/target/arm/translate-a64.c |
135 | +++ b/target/arm/translate-a64.c | 50 | +++ b/target/arm/translate-a64.c |
136 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn) | 51 | @@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s) |
137 | int imm6 = extract32(insn, 10, 6); | 52 | return true; |
138 | int rn = extract32(insn, 5, 5); | ||
139 | int rd = extract32(insn, 0, 5); | ||
140 | - TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; | ||
141 | - int pass; | ||
142 | |||
143 | if (!dc_isar_feature(aa64_sha3, s)) { | ||
144 | unallocated_encoding(s); | ||
145 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn) | ||
146 | return; | ||
147 | } | ||
148 | |||
149 | - tcg_op1 = tcg_temp_new_i64(); | ||
150 | - tcg_op2 = tcg_temp_new_i64(); | ||
151 | - tcg_res[0] = tcg_temp_new_i64(); | ||
152 | - tcg_res[1] = tcg_temp_new_i64(); | ||
153 | - | ||
154 | - for (pass = 0; pass < 2; pass++) { | ||
155 | - read_vec_element(s, tcg_op1, rn, pass, MO_64); | ||
156 | - read_vec_element(s, tcg_op2, rm, pass, MO_64); | ||
157 | - | ||
158 | - tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2); | ||
159 | - tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6); | ||
160 | - } | ||
161 | - write_vec_element(s, tcg_res[0], rd, 0, MO_64); | ||
162 | - write_vec_element(s, tcg_res[1], rd, 1, MO_64); | ||
163 | - | ||
164 | - tcg_temp_free_i64(tcg_op1); | ||
165 | - tcg_temp_free_i64(tcg_op2); | ||
166 | - tcg_temp_free_i64(tcg_res[0]); | ||
167 | - tcg_temp_free_i64(tcg_res[1]); | ||
168 | + gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd), | ||
169 | + vec_full_reg_offset(s, rn), | ||
170 | + vec_full_reg_offset(s, rm), imm6, 16, | ||
171 | + vec_full_reg_size(s)); | ||
172 | } | 53 | } |
173 | 54 | ||
174 | /* Crypto three-reg imm2 | 55 | +/* This function corresponds to CheckSMEEnabled. */ |
175 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 56 | +bool sme_enabled_check(DisasContext *s) |
176 | index XXXXXXX..XXXXXXX 100644 | ||
177 | --- a/target/arm/translate-sve.c | ||
178 | +++ b/target/arm/translate-sve.c | ||
179 | @@ -XXX,XX +XXX,XX @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a) | ||
180 | return do_zzz_fn(s, a, tcg_gen_gvec_andc); | ||
181 | } | ||
182 | |||
183 | +static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) | ||
184 | +{ | 57 | +{ |
185 | + TCGv_i64 t = tcg_temp_new_i64(); | 58 | + /* |
186 | + uint64_t mask = dup_const(MO_8, 0xff >> sh); | 59 | + * Note that unlike sve_excp_el, we have not constrained sme_excp_el |
187 | + | 60 | + * to be zero when fp_excp_el has priority. This is because we need |
188 | + tcg_gen_xor_i64(t, n, m); | 61 | + * sme_excp_el by itself for cpregs access checks. |
189 | + tcg_gen_shri_i64(d, t, sh); | 62 | + */ |
190 | + tcg_gen_shli_i64(t, t, 8 - sh); | 63 | + if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { |
191 | + tcg_gen_andi_i64(d, d, mask); | 64 | + s->fp_access_checked = true; |
192 | + tcg_gen_andi_i64(t, t, ~mask); | 65 | + return sme_access_check(s); |
193 | + tcg_gen_or_i64(d, d, t); | 66 | + } |
194 | + tcg_temp_free_i64(t); | 67 | + return fp_access_check_only(s); |
195 | +} | 68 | +} |
196 | + | 69 | + |
197 | +static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) | 70 | +/* Common subroutine for CheckSMEAnd*Enabled. */ |
71 | +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req) | ||
198 | +{ | 72 | +{ |
199 | + TCGv_i64 t = tcg_temp_new_i64(); | 73 | + if (!sme_enabled_check(s)) { |
200 | + uint64_t mask = dup_const(MO_16, 0xffff >> sh); | ||
201 | + | ||
202 | + tcg_gen_xor_i64(t, n, m); | ||
203 | + tcg_gen_shri_i64(d, t, sh); | ||
204 | + tcg_gen_shli_i64(t, t, 16 - sh); | ||
205 | + tcg_gen_andi_i64(d, d, mask); | ||
206 | + tcg_gen_andi_i64(t, t, ~mask); | ||
207 | + tcg_gen_or_i64(d, d, t); | ||
208 | + tcg_temp_free_i64(t); | ||
209 | +} | ||
210 | + | ||
211 | +static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh) | ||
212 | +{ | ||
213 | + tcg_gen_xor_i32(d, n, m); | ||
214 | + tcg_gen_rotri_i32(d, d, sh); | ||
215 | +} | ||
216 | + | ||
217 | +static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) | ||
218 | +{ | ||
219 | + tcg_gen_xor_i64(d, n, m); | ||
220 | + tcg_gen_rotri_i64(d, d, sh); | ||
221 | +} | ||
222 | + | ||
223 | +static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
224 | + TCGv_vec m, int64_t sh) | ||
225 | +{ | ||
226 | + tcg_gen_xor_vec(vece, d, n, m); | ||
227 | + tcg_gen_rotri_vec(vece, d, d, sh); | ||
228 | +} | ||
229 | + | ||
230 | +void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, | ||
231 | + uint32_t rm_ofs, int64_t shift, | ||
232 | + uint32_t opr_sz, uint32_t max_sz) | ||
233 | +{ | ||
234 | + static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 }; | ||
235 | + static const GVecGen3i ops[4] = { | ||
236 | + { .fni8 = gen_xar8_i64, | ||
237 | + .fniv = gen_xar_vec, | ||
238 | + .fno = gen_helper_sve2_xar_b, | ||
239 | + .opt_opc = vecop, | ||
240 | + .vece = MO_8 }, | ||
241 | + { .fni8 = gen_xar16_i64, | ||
242 | + .fniv = gen_xar_vec, | ||
243 | + .fno = gen_helper_sve2_xar_h, | ||
244 | + .opt_opc = vecop, | ||
245 | + .vece = MO_16 }, | ||
246 | + { .fni4 = gen_xar_i32, | ||
247 | + .fniv = gen_xar_vec, | ||
248 | + .fno = gen_helper_sve2_xar_s, | ||
249 | + .opt_opc = vecop, | ||
250 | + .vece = MO_32 }, | ||
251 | + { .fni8 = gen_xar_i64, | ||
252 | + .fniv = gen_xar_vec, | ||
253 | + .fno = gen_helper_gvec_xar_d, | ||
254 | + .opt_opc = vecop, | ||
255 | + .vece = MO_64 } | ||
256 | + }; | ||
257 | + int esize = 8 << vece; | ||
258 | + | ||
259 | + /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */ | ||
260 | + tcg_debug_assert(shift >= 0); | ||
261 | + tcg_debug_assert(shift <= esize); | ||
262 | + shift &= esize - 1; | ||
263 | + | ||
264 | + if (shift == 0) { | ||
265 | + /* xar with no rotate devolves to xor. */ | ||
266 | + tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz); | ||
267 | + } else { | ||
268 | + tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, | ||
269 | + shift, &ops[vece]); | ||
270 | + } | ||
271 | +} | ||
272 | + | ||
273 | +static bool trans_XAR(DisasContext *s, arg_rrri_esz *a) | ||
274 | +{ | ||
275 | + if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { | ||
276 | + return false; | 74 | + return false; |
277 | + } | 75 | + } |
278 | + if (sve_access_check(s)) { | 76 | + if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) { |
279 | + unsigned vsz = vec_full_reg_size(s); | 77 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, |
280 | + gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd), | 78 | + syn_smetrap(SME_ET_NotStreaming, false)); |
281 | + vec_full_reg_offset(s, a->rn), | 79 | + return false; |
282 | + vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz); | 80 | + } |
81 | + if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) { | ||
82 | + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, | ||
83 | + syn_smetrap(SME_ET_InactiveZA, false)); | ||
84 | + return false; | ||
283 | + } | 85 | + } |
284 | + return true; | 86 | + return true; |
285 | +} | 87 | +} |
286 | + | 88 | + |
287 | static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn) | 89 | /* |
288 | { | 90 | * This utility function is for doing register extension with an |
289 | if (!dc_isar_feature(aa64_sve2, s)) { | 91 | * optional shift. You will likely want to pass a temporary for the |
290 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
291 | index XXXXXXX..XXXXXXX 100644 | ||
292 | --- a/target/arm/vec_helper.c | ||
293 | +++ b/target/arm/vec_helper.c | ||
294 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
295 | } | ||
296 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
297 | } | ||
298 | + | ||
299 | +void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
300 | +{ | ||
301 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | ||
302 | + int shr = simd_data(desc); | ||
303 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
304 | + | ||
305 | + for (i = 0; i < opr_sz; ++i) { | ||
306 | + d[i] = ror64(n[i] ^ m[i], shr); | ||
307 | + } | ||
308 | + clear_tail(d, opr_sz * 8, simd_maxsz(desc)); | ||
309 | +} | ||
310 | -- | 92 | -- |
311 | 2.20.1 | 93 | 2.25.1 |
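
The per-lane rotate that gen_xar8_i64 builds out of TCG shift and mask ops is easy to check against a plain C model: rotating every byte lane of a 64-bit word right by sh is a whole-word right shift masked to the surviving low bits of each lane, OR'd with a left shift masked to the displaced high bits. A minimal standalone sketch, where dup8() stands in for QEMU's dup_const(MO_8, ...) and sh is 1..7 because gen_gvec_xar routes shift == 0 to the plain XOR path:

```c
#include <stdint.h>

/* Broadcast one byte into all eight lanes; models dup_const(MO_8, x). */
static uint64_t dup8(uint8_t x)
{
    return x * 0x0101010101010101ull;
}

/* XAR on packed 8-bit lanes: d = ror8(n ^ m, sh) per lane, 1 <= sh <= 7.
 * Mirrors the shift/mask sequence emitted by gen_xar8_i64 above. */
static uint64_t xar8_lanes(uint64_t n, uint64_t m, int sh)
{
    uint64_t t = n ^ m;
    uint64_t mask = dup8(0xff >> sh);  /* lane bits kept from the right shift */

    return ((t >> sh) & mask) | ((t << (8 - sh)) & ~mask);
}
```
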
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For SVE, we potentially have a 4th argument coming from the | 3 | The pseudocode for CheckSVEEnabled gains a check for Streaming |
4 | movprfx instruction. Currently we do not optimize movprfx, | 4 | SVE mode, and for SME present but SVE absent. |
5 | so the problem is not visible. | ||
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-51-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-17-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/helper.h | 20 +++++++-------- | 11 | target/arm/translate-a64.c | 22 ++++++++++++++++------ |
13 | target/arm/translate-a64.c | 28 +++++++++++++++++---- | 12 | 1 file changed, 16 insertions(+), 6 deletions(-) |
14 | target/arm/translate-neon.c | 10 +++++--- | ||
15 | target/arm/translate-sve.c | 5 ++-- | ||
16 | target/arm/vec_helper.c | 50 +++++++++++++++---------------------- | ||
17 | 5 files changed, 62 insertions(+), 51 deletions(-) | ||
18 | 13 | ||
19 | diff --git a/target/arm/helper.h b/target/arm/helper.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/target/arm/helper.h | ||
22 | +++ b/target/arm/helper.h | ||
23 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG, | ||
24 | DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG, | ||
25 | void, ptr, ptr, ptr, ptr, i32) | ||
26 | |||
27 | -DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG, | ||
28 | - void, ptr, ptr, ptr, ptr, i32) | ||
29 | -DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG, | ||
30 | - void, ptr, ptr, ptr, ptr, i32) | ||
31 | -DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG, | ||
32 | - void, ptr, ptr, ptr, ptr, i32) | ||
33 | -DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG, | ||
34 | - void, ptr, ptr, ptr, ptr, i32) | ||
35 | -DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG, | ||
36 | - void, ptr, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG, | ||
40 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG, | ||
42 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
43 | +DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG, | ||
44 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
45 | +DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG, | ||
46 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
47 | |||
48 | DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
49 | DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
50 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 14 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
51 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/target/arm/translate-a64.c | 16 | --- a/target/arm/translate-a64.c |
53 | +++ b/target/arm/translate-a64.c | 17 | +++ b/target/arm/translate-a64.c |
54 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn, | 18 | @@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s) |
55 | is_q ? 16 : 8, vec_full_reg_size(s), data, fn); | 19 | return true; |
56 | } | 20 | } |
57 | 21 | ||
22 | -/* Check that SVE access is enabled. If it is, return true. | ||
58 | +/* | 23 | +/* |
59 | + * Expand a 4-operand + fpstatus pointer + simd data value operation using | 24 | + * Check that SVE access is enabled. If it is, return true. |
60 | + * an out-of-line helper. | 25 | * If not, emit code to generate an appropriate exception and return false. |
61 | + */ | 26 | + * This function corresponds to CheckSVEEnabled(). |
62 | +static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, | 27 | */ |
63 | + int rm, int ra, bool is_fp16, int data, | 28 | bool sve_access_check(DisasContext *s) |
64 | + gen_helper_gvec_4_ptr *fn) | 29 | { |
65 | +{ | 30 | - if (s->sve_excp_el) { |
66 | + TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); | 31 | - assert(!s->sve_access_checked); |
67 | + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), | 32 | - s->sve_access_checked = true; |
68 | + vec_full_reg_offset(s, rn), | 33 | - |
69 | + vec_full_reg_offset(s, rm), | 34 | + if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { |
70 | + vec_full_reg_offset(s, ra), fpst, | 35 | + assert(dc_isar_feature(aa64_sme, s)); |
71 | + is_q ? 16 : 8, vec_full_reg_size(s), data, fn); | 36 | + if (!sme_sm_enabled_check(s)) { |
72 | + tcg_temp_free_ptr(fpst); | 37 | + goto fail_exit; |
73 | +} | 38 | + } |
39 | + } else if (s->sve_excp_el) { | ||
40 | gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF, | ||
41 | syn_sve_access_trap(), s->sve_excp_el); | ||
42 | - return false; | ||
43 | + goto fail_exit; | ||
44 | } | ||
45 | s->sve_access_checked = true; | ||
46 | return fp_access_check(s); | ||
74 | + | 47 | + |
75 | /* Set ZF and NF based on a 64 bit result. This is alas fiddlier | 48 | + fail_exit: |
76 | * than the 32 bit equivalent. | 49 | + /* Assert that we only raise one exception per instruction. */ |
77 | */ | 50 | + assert(!s->sve_access_checked); |
78 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | 51 | + s->sve_access_checked = true; |
79 | rot = extract32(opcode, 0, 2); | 52 | + return false; |
80 | switch (size) { | ||
81 | case 1: | ||
82 | - gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot, | ||
83 | + gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot, | ||
84 | gen_helper_gvec_fcmlah); | ||
85 | break; | ||
86 | case 2: | ||
87 | - gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, | ||
88 | + gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot, | ||
89 | gen_helper_gvec_fcmlas); | ||
90 | break; | ||
91 | case 3: | ||
92 | - gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, | ||
93 | + gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot, | ||
94 | gen_helper_gvec_fcmlad); | ||
95 | break; | ||
96 | default: | ||
97 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
98 | { | ||
99 | int rot = extract32(insn, 13, 2); | ||
100 | int data = (index << 2) | rot; | ||
101 | - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), | ||
102 | + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), | ||
103 | vec_full_reg_offset(s, rn), | ||
104 | - vec_full_reg_offset(s, rm), fpst, | ||
105 | + vec_full_reg_offset(s, rm), | ||
106 | + vec_full_reg_offset(s, rd), fpst, | ||
107 | is_q ? 16 : 8, vec_full_reg_size(s), data, | ||
108 | size == MO_64 | ||
109 | ? gen_helper_gvec_fcmlas_idx | ||
110 | diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c | ||
111 | index XXXXXXX..XXXXXXX 100644 | ||
112 | --- a/target/arm/translate-neon.c | ||
113 | +++ b/target/arm/translate-neon.c | ||
114 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a) | ||
115 | { | ||
116 | int opr_sz; | ||
117 | TCGv_ptr fpst; | ||
118 | - gen_helper_gvec_3_ptr *fn_gvec_ptr; | ||
119 | + gen_helper_gvec_4_ptr *fn_gvec_ptr; | ||
120 | |||
121 | if (!dc_isar_feature(aa32_vcma, s) | ||
122 | || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) { | ||
123 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a) | ||
124 | fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD); | ||
125 | fn_gvec_ptr = (a->size == MO_16) ? | ||
126 | gen_helper_gvec_fcmlah : gen_helper_gvec_fcmlas; | ||
127 | - tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd), | ||
128 | + tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd), | ||
129 | vfp_reg_offset(1, a->vn), | ||
130 | vfp_reg_offset(1, a->vm), | ||
131 | + vfp_reg_offset(1, a->vd), | ||
132 | fpst, opr_sz, opr_sz, a->rot, | ||
133 | fn_gvec_ptr); | ||
134 | tcg_temp_free_ptr(fpst); | ||
135 | @@ -XXX,XX +XXX,XX @@ static bool trans_VFML(DisasContext *s, arg_VFML *a) | ||
136 | |||
137 | static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a) | ||
138 | { | ||
139 | - gen_helper_gvec_3_ptr *fn_gvec_ptr; | ||
140 | + gen_helper_gvec_4_ptr *fn_gvec_ptr; | ||
141 | int opr_sz; | ||
142 | TCGv_ptr fpst; | ||
143 | |||
144 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a) | ||
145 | gen_helper_gvec_fcmlah_idx : gen_helper_gvec_fcmlas_idx; | ||
146 | opr_sz = (1 + a->q) * 8; | ||
147 | fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD); | ||
148 | - tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd), | ||
149 | + tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd), | ||
150 | vfp_reg_offset(1, a->vn), | ||
151 | vfp_reg_offset(1, a->vm), | ||
152 | + vfp_reg_offset(1, a->vd), | ||
153 | fpst, opr_sz, opr_sz, | ||
154 | (a->index << 2) | a->rot, fn_gvec_ptr); | ||
155 | tcg_temp_free_ptr(fpst); | ||
156 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
157 | index XXXXXXX..XXXXXXX 100644 | ||
158 | --- a/target/arm/translate-sve.c | ||
159 | +++ b/target/arm/translate-sve.c | ||
160 | @@ -XXX,XX +XXX,XX @@ static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a) | ||
161 | |||
162 | static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a) | ||
163 | { | ||
164 | - static gen_helper_gvec_3_ptr * const fns[2] = { | ||
165 | + static gen_helper_gvec_4_ptr * const fns[2] = { | ||
166 | gen_helper_gvec_fcmlah_idx, | ||
167 | gen_helper_gvec_fcmlas_idx, | ||
168 | }; | ||
169 | @@ -XXX,XX +XXX,XX @@ static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a) | ||
170 | if (sve_access_check(s)) { | ||
171 | unsigned vsz = vec_full_reg_size(s); | ||
172 | TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); | ||
173 | - tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), | ||
174 | + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), | ||
175 | vec_full_reg_offset(s, a->rn), | ||
176 | vec_full_reg_offset(s, a->rm), | ||
177 | + vec_full_reg_offset(s, a->ra), | ||
178 | status, vsz, vsz, | ||
179 | a->index * 4 + a->rot, | ||
180 | fns[a->esz - 1]); | ||
181 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
182 | index XXXXXXX..XXXXXXX 100644 | ||
183 | --- a/target/arm/vec_helper.c | ||
184 | +++ b/target/arm/vec_helper.c | ||
185 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, | ||
186 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
187 | } | 53 | } |
188 | 54 | ||
189 | -void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, | 55 | /* |
190 | +void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va, | ||
191 | void *vfpst, uint32_t desc) | ||
192 | { | ||
193 | uintptr_t opr_sz = simd_oprsz(desc); | ||
194 | - float16 *d = vd; | ||
195 | - float16 *n = vn; | ||
196 | - float16 *m = vm; | ||
197 | + float16 *d = vd, *n = vn, *m = vm, *a = va; | ||
198 | float_status *fpst = vfpst; | ||
199 | intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
200 | uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
201 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, | ||
202 | float16 e4 = e2; | ||
203 | float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; | ||
204 | |||
205 | - d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst); | ||
206 | - d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst); | ||
207 | + d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst); | ||
208 | + d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst); | ||
209 | } | ||
210 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
211 | } | ||
212 | |||
213 | -void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, | ||
214 | +void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va, | ||
215 | void *vfpst, uint32_t desc) | ||
216 | { | ||
217 | uintptr_t opr_sz = simd_oprsz(desc); | ||
218 | - float16 *d = vd; | ||
219 | - float16 *n = vn; | ||
220 | - float16 *m = vm; | ||
221 | + float16 *d = vd, *n = vn, *m = vm, *a = va; | ||
222 | float_status *fpst = vfpst; | ||
223 | intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
224 | uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
225 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, | ||
226 | float16 e2 = n[H2(j + flip)]; | ||
227 | float16 e4 = e2; | ||
228 | |||
229 | - d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst); | ||
230 | - d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst); | ||
231 | + d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst); | ||
232 | + d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst); | ||
233 | } | ||
234 | } | ||
235 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
236 | } | ||
237 | |||
238 | -void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, | ||
239 | +void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va, | ||
240 | void *vfpst, uint32_t desc) | ||
241 | { | ||
242 | uintptr_t opr_sz = simd_oprsz(desc); | ||
243 | - float32 *d = vd; | ||
244 | - float32 *n = vn; | ||
245 | - float32 *m = vm; | ||
246 | + float32 *d = vd, *n = vn, *m = vm, *a = va; | ||
247 | float_status *fpst = vfpst; | ||
248 | intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
249 | uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
250 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, | ||
251 | float32 e4 = e2; | ||
252 | float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; | ||
253 | |||
254 | - d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst); | ||
255 | - d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst); | ||
256 | + d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst); | ||
257 | + d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst); | ||
258 | } | ||
259 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
260 | } | ||
261 | |||
262 | -void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, | ||
263 | +void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va, | ||
264 | void *vfpst, uint32_t desc) | ||
265 | { | ||
266 | uintptr_t opr_sz = simd_oprsz(desc); | ||
267 | - float32 *d = vd; | ||
268 | - float32 *n = vn; | ||
269 | - float32 *m = vm; | ||
270 | + float32 *d = vd, *n = vn, *m = vm, *a = va; | ||
271 | float_status *fpst = vfpst; | ||
272 | intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
273 | uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
274 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, | ||
275 | float32 e2 = n[H4(j + flip)]; | ||
276 | float32 e4 = e2; | ||
277 | |||
278 | - d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst); | ||
279 | - d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst); | ||
280 | + d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst); | ||
281 | + d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst); | ||
282 | } | ||
283 | } | ||
284 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
285 | } | ||
286 | |||
287 | -void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, | ||
288 | +void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va, | ||
289 | void *vfpst, uint32_t desc) | ||
290 | { | ||
291 | uintptr_t opr_sz = simd_oprsz(desc); | ||
292 | - float64 *d = vd; | ||
293 | - float64 *n = vn; | ||
294 | - float64 *m = vm; | ||
295 | + float64 *d = vd, *n = vn, *m = vm, *a = va; | ||
296 | float_status *fpst = vfpst; | ||
297 | intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
298 | uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
299 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, | ||
300 | float64 e4 = e2; | ||
301 | float64 e3 = m[i + 1 - flip] ^ neg_imag; | ||
302 | |||
303 | - d[i] = float64_muladd(e2, e1, d[i], 0, fpst); | ||
304 | - d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst); | ||
305 | + d[i] = float64_muladd(e2, e1, a[i], 0, fpst); | ||
306 | + d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst); | ||
307 | } | ||
308 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
309 | } | ||
310 | -- | 56 | -- |
311 | 2.20.1 | 57 | 2.25.1 |
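
The reason the fcmla helpers above grow a fourth operand is easiest to see in scalar form: once movprfx is honoured, the accumulator input can live in a different register from the destination, which the three-operand form cannot express. A hedged sketch with illustrative names, ignoring the complex-rotation details the real helpers also handle:

```c
#include <math.h>

/* Old shape: the addend is implicitly the destination register. */
static void fmla3(float *d, const float *n, const float *m, int len)
{
    for (int i = 0; i < len; i++) {
        d[i] = fmaf(n[i], m[i], d[i]);
    }
}

/* New shape: the addend 'a' is explicit, so a movprfx'd source that
 * differs from 'd' can be accumulated correctly. */
static void fmla4(float *d, const float *n, const float *m,
                  const float *a, int len)
{
    for (int i = 0; i < len; i++) {
        d[i] = fmaf(n[i], m[i], a[i]);
    }
}
```
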
1 | From: Stephen Long <steplong@quicinc.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | These SME instructions are nominally within the SVE decode space, | ||
4 | so we add them to sve.decode and translate-sve.c. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20210525010358.152808-48-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-18-richard.henderson@linaro.org |
7 | Message-Id: <20200423180347.9403-1-steplong@quicinc.com> | ||
8 | [rth: Rename the trans_* functions to *_sve2.] | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/sve.decode | 11 +++++++++-- | 11 | target/arm/translate-a64.h | 12 ++++++++++++ |
13 | target/arm/translate-sve.c | 35 ++++++++++++++++++++++++++++++----- | 12 | target/arm/sve.decode | 5 ++++- |
14 | 2 files changed, 39 insertions(+), 7 deletions(-) | 13 | target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++ |
14 | 3 files changed, 54 insertions(+), 1 deletion(-) | ||
15 | 15 | ||
16 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/target/arm/translate-a64.h | ||
19 | +++ b/target/arm/translate-a64.h | ||
20 | @@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s) | ||
21 | return s->vl; | ||
22 | } | ||
23 | |||
24 | +/* Return the byte size of the vector register, SVL / 8. */ | ||
25 | +static inline int streaming_vec_reg_size(DisasContext *s) | ||
26 | +{ | ||
27 | + return s->svl; | ||
28 | +} | ||
29 | + | ||
30 | /* | ||
31 | * Return the offset info CPUARMState of the predicate vector register Pn. | ||
32 | * Note for this purpose, FFR is P16. | ||
33 | @@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s) | ||
34 | return s->vl >> 3; | ||
35 | } | ||
36 | |||
37 | +/* Return the byte size of the predicate register, SVL / 64. */ | ||
38 | +static inline int streaming_pred_reg_size(DisasContext *s) | ||
39 | +{ | ||
40 | + return s->svl >> 3; | ||
41 | +} | ||
42 | + | ||
43 | /* | ||
44 | * Round up the size of a register to a size allowed by | ||
45 | * the tcg vector infrastructure. Any operation which uses this | ||
16 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
17 | index XXXXXXX..XXXXXXX 100644 | 47 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/sve.decode | 48 | --- a/target/arm/sve.decode |
19 | +++ b/target/arm/sve.decode | 49 | +++ b/target/arm/sve.decode |
20 | @@ -XXX,XX +XXX,XX @@ CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s | 50 | @@ -XXX,XX +XXX,XX @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5 |
21 | 51 | # SVE index generation (register start, register increment) | |
22 | ### SVE Permute - Extract Group | 52 | INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm |
23 | 53 | ||
24 | -# SVE extract vector (immediate offset) | 54 | -### SVE Stack Allocation Group |
25 | +# SVE extract vector (destructive) | 55 | +### SVE / Streaming SVE Stack Allocation Group |
26 | EXT 00000101 001 ..... 000 ... rm:5 rd:5 \ | 56 | |
27 | &rrri rn=%reg_movprfx imm=%imm8_16_10 | 57 | # SVE stack frame adjustment |
28 | 58 | ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6 | |
29 | +# SVE2 extract vector (constructive) | 59 | +ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6 |
30 | +EXT_sve2 00000101 011 ..... 000 ... rn:5 rd:5 \ | 60 | ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6 |
31 | + &rri imm=%imm8_16_10 | 61 | +ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6 |
32 | + | 62 | |
33 | ### SVE Permute - Unpredicated Group | 63 | # SVE stack frame size |
34 | 64 | RDVL 00000100 101 11111 01010 imm:s6 rd:5 | |
35 | # SVE broadcast general register | 65 | +RDSVL 00000100 101 11111 01011 imm:s6 rd:5 |
36 | @@ -XXX,XX +XXX,XX @@ REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn | 66 | |
37 | REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn | 67 | ### SVE Bitwise Shift - Unpredicated Group |
38 | RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn | 68 | |
39 | |||
40 | -# SVE vector splice (predicated) | ||
41 | +# SVE vector splice (predicated, destructive) | ||
42 | SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm | ||
43 | |||
44 | +# SVE2 vector splice (predicated, constructive) | ||
45 | +SPLICE_sve2 00000101 .. 101 101 100 ... ..... ..... @rd_pg_rn | ||
46 | + | ||
47 | ### SVE Select Vectors Group | ||
48 | |||
49 | # SVE select vector elements (predicated) | ||
50 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 69 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
51 | index XXXXXXX..XXXXXXX 100644 | 70 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/target/arm/translate-sve.c | 71 | --- a/target/arm/translate-sve.c |
53 | +++ b/target/arm/translate-sve.c | 72 | +++ b/target/arm/translate-sve.c |
54 | @@ -XXX,XX +XXX,XX @@ static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) | 73 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) |
55 | *** SVE Permute Extract Group | ||
56 | */ | ||
57 | |||
58 | -static bool trans_EXT(DisasContext *s, arg_EXT *a) | ||
59 | +static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) | ||
60 | { | ||
61 | if (!sve_access_check(s)) { | ||
62 | return true; | ||
63 | } | ||
64 | |||
65 | unsigned vsz = vec_full_reg_size(s); | ||
66 | - unsigned n_ofs = a->imm >= vsz ? 0 : a->imm; | ||
67 | + unsigned n_ofs = imm >= vsz ? 0 : imm; | ||
68 | unsigned n_siz = vsz - n_ofs; | ||
69 | - unsigned d = vec_full_reg_offset(s, a->rd); | ||
70 | - unsigned n = vec_full_reg_offset(s, a->rn); | ||
71 | - unsigned m = vec_full_reg_offset(s, a->rm); | ||
72 | + unsigned d = vec_full_reg_offset(s, rd); | ||
73 | + unsigned n = vec_full_reg_offset(s, rn); | ||
74 | + unsigned m = vec_full_reg_offset(s, rm); | ||
75 | |||
76 | /* Use host vector move insns if we have appropriate sizes | ||
77 | * and no unfortunate overlap. | ||
78 | @@ -XXX,XX +XXX,XX @@ static bool trans_EXT(DisasContext *s, arg_EXT *a) | ||
79 | return true; | 74 | return true; |
80 | } | 75 | } |
81 | 76 | ||
82 | +static bool trans_EXT(DisasContext *s, arg_EXT *a) | 77 | +static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) |
83 | +{ | 78 | +{ |
84 | + return do_EXT(s, a->rd, a->rn, a->rm, a->imm); | 79 | + if (!dc_isar_feature(aa64_sme, s)) { |
80 | + return false; | ||
81 | + } | ||
82 | + if (sme_enabled_check(s)) { | ||
83 | + TCGv_i64 rd = cpu_reg_sp(s, a->rd); | ||
84 | + TCGv_i64 rn = cpu_reg_sp(s, a->rn); | ||
85 | + tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); | ||
86 | + } | ||
87 | + return true; | ||
85 | +} | 88 | +} |
86 | + | 89 | + |
87 | +static bool trans_EXT_sve2(DisasContext *s, arg_rri *a) | 90 | static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) |
91 | { | ||
92 | if (!dc_isar_feature(aa64_sve, s)) { | ||
93 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) | ||
94 | return true; | ||
95 | } | ||
96 | |||
97 | +static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) | ||
88 | +{ | 98 | +{ |
89 | + if (!dc_isar_feature(aa64_sve2, s)) { | 99 | + if (!dc_isar_feature(aa64_sme, s)) { |
90 | + return false; | 100 | + return false; |
91 | + } | 101 | + } |
92 | + return do_EXT(s, a->rd, a->rn, (a->rn + 1) % 32, a->imm); | 102 | + if (sme_enabled_check(s)) { |
103 | + TCGv_i64 rd = cpu_reg_sp(s, a->rd); | ||
104 | + TCGv_i64 rn = cpu_reg_sp(s, a->rn); | ||
105 | + tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); | ||
106 | + } | ||
107 | + return true; | ||
93 | +} | 108 | +} |
94 | + | 109 | + |
95 | /* | 110 | static bool trans_RDVL(DisasContext *s, arg_RDVL *a) |
96 | *** SVE Permute - Unpredicated Group | 111 | { |
97 | */ | 112 | if (!dc_isar_feature(aa64_sve, s)) { |
98 | @@ -XXX,XX +XXX,XX @@ static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a) | 113 | @@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a) |
99 | return true; | 114 | return true; |
100 | } | 115 | } |
101 | 116 | ||
102 | +static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a) | 117 | +static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) |
103 | +{ | 118 | +{ |
104 | + if (!dc_isar_feature(aa64_sve2, s)) { | 119 | + if (!dc_isar_feature(aa64_sme, s)) { |
105 | + return false; | 120 | + return false; |
106 | + } | 121 | + } |
107 | + if (sve_access_check(s)) { | 122 | + if (sme_enabled_check(s)) { |
108 | + gen_gvec_ool_zzzp(s, gen_helper_sve_splice, | 123 | + TCGv_i64 reg = cpu_reg(s, a->rd); |
109 | + a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz); | 124 | + tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); |
110 | + } | 125 | + } |
111 | + return true; | 126 | + return true; |
112 | +} | 127 | +} |
113 | + | 128 | + |
114 | /* | 129 | /* |
115 | *** SVE Integer Compare - Vectors Group | 130 | *** SVE Compute Vector Address Group |
116 | */ | 131 | */ |
117 | -- | 132 | -- |
118 | 2.20.1 | 133 | 2.25.1 |
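
Byte-wise, the operation that do_EXT above implements with vector moves is a window extracted from the concatenation of two vectors; the constructive SVE2 form simply sources the second half from the next register, (rn + 1) % 32. A small model, with ext_model() an illustrative name rather than anything in QEMU:

```c
#include <stdint.h>

/* EXT: take vsz bytes starting imm bytes into n:m; an out-of-range
 * imm (>= vsz) is treated as offset 0, as in do_EXT above. */
static void ext_model(uint8_t *d, const uint8_t *n, const uint8_t *m,
                      unsigned vsz, unsigned imm)
{
    unsigned ofs = imm >= vsz ? 0 : imm;

    for (unsigned i = 0; i < vsz; i++) {
        d[i] = (i + ofs < vsz) ? n[i + ofs] : m[i + ofs - vsz];
    }
}
```
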
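The three new instructions in the right-hand patch are plain arithmetic on the streaming vector length. Assuming svl_bytes holds what streaming_vec_reg_size() returns, their effects reduce to the sketch below; the function names are illustrative only:

```c
#include <stdint.h>

/* ADDSVL Xd, Xn, #imm: step by imm streaming vector registers. */
static uint64_t addsvl(uint64_t xn, int64_t imm, uint32_t svl_bytes)
{
    return xn + (uint64_t)(imm * (int64_t)svl_bytes);
}

/* ADDSPL Xd, Xn, #imm: step by imm streaming predicate registers,
 * each SVL / 8 bytes (streaming_pred_reg_size() above). */
static uint64_t addspl(uint64_t xn, int64_t imm, uint32_t svl_bytes)
{
    return xn + (uint64_t)(imm * (int64_t)(svl_bytes / 8));
}

/* RDSVL Xd, #imm: imm times the streaming vector length in bytes. */
static uint64_t rdsvl(int64_t imm, uint32_t svl_bytes)
{
    return (uint64_t)(imm * (int64_t)svl_bytes);
}
```
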
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-16-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-19-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 8 ++ | 8 | target/arm/helper-sme.h | 2 ++ |
9 | target/arm/sve.decode | 8 ++ | 9 | target/arm/sme.decode | 4 ++++ |
10 | target/arm/sve_helper.c | 22 +++++ | 10 | target/arm/sme_helper.c | 25 +++++++++++++++++++++++++ |
11 | target/arm/translate-sve.c | 159 +++++++++++++++++++++++++++++++++++++ | 11 | target/arm/translate-sme.c | 13 +++++++++++++ |
12 | 4 files changed, 197 insertions(+) | 12 | 4 files changed, 44 insertions(+) |
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/target/arm/helper-sme.h |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/target/arm/helper-sme.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 18 | @@ -XXX,XX +XXX,XX @@ |
19 | 19 | ||
20 | DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 20 | DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) |
21 | DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 21 | DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) |
22 | + | 22 | + |
23 | +DEF_HELPER_FLAGS_3(sve2_sshll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 23 | +DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) |
24 | +DEF_HELPER_FLAGS_3(sve2_sshll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 24 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
25 | +DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 25 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/target/arm/sme.decode | ||
27 | +++ b/target/arm/sme.decode | ||
28 | @@ -XXX,XX +XXX,XX @@ | ||
29 | # | ||
30 | # This file is processed by scripts/decodetree.py | ||
31 | # | ||
26 | + | 32 | + |
27 | +DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 33 | +### SME Misc |
28 | +DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 34 | + |
29 | +DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 35 | +ZERO 11000000 00 001 00000000000 imm:8 |
30 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 36 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c |
31 | index XXXXXXX..XXXXXXX 100644 | 37 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/target/arm/sve.decode | 38 | --- a/target/arm/sme_helper.c |
33 | +++ b/target/arm/sve.decode | 39 | +++ b/target/arm/sme_helper.c |
34 | @@ -XXX,XX +XXX,XX @@ SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm | 40 | @@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i) |
35 | SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm | 41 | memset(env->zarray, 0, sizeof(env->zarray)); |
36 | UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm | 42 | } |
37 | UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm | 43 | } |
38 | + | 44 | + |
39 | +## SVE2 bitwise shift left long | 45 | +void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) |
46 | +{ | ||
47 | + uint32_t i; | ||
40 | + | 48 | + |
41 | +# Note bit23 == 0 is handled by esz > 0 in do_sve2_shll_tb. | 49 | + /* |
42 | +SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl | 50 | + * Special case clearing the entire ZA space. |
43 | +SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl | 51 | + * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any |
44 | +USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl | 52 | + * parts of the ZA storage outside of SVL. |
45 | +USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl | 53 | + */ |
46 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 54 | + if (imm == 0xff) { |
47 | index XXXXXXX..XXXXXXX 100644 | 55 | + memset(env->zarray, 0, sizeof(env->zarray)); |
48 | --- a/target/arm/sve_helper.c | 56 | + return; |
49 | +++ b/target/arm/sve_helper.c | 57 | + } |
50 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB) | ||
51 | |||
52 | #undef DO_ZZZ_WTB | ||
53 | |||
54 | +#define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \ | ||
55 | +void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
56 | +{ \ | ||
57 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
58 | + intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \ | ||
59 | + int shift = simd_data(desc) >> 1; \ | ||
60 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
61 | + TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \ | ||
62 | + *(TYPEW *)(vd + HW(i)) = nn << shift; \ | ||
63 | + } \ | ||
64 | +} | ||
65 | + | 58 | + |
66 | +DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1) | 59 | + /* |
67 | +DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2) | 60 | + * Recall that ZAnH.D[m] is spread across ZA[n+8*m], |
68 | +DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, , H1_4) | 61 | + * so each row is discontiguous within ZA[]. |
69 | + | 62 | + */ |
70 | +DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1) | 63 | + for (i = 0; i < svl; i++) { |
71 | +DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2) | 64 | + if (imm & (1 << (i % 8))) { |
72 | +DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, , H1_4) | 65 | + memset(&env->zarray[i], 0, svl); |
73 | + | ||
74 | +#undef DO_ZZI_SHLL | ||
75 | + | ||
76 | /* Two-operand reduction expander, controlled by a predicate. | ||
77 | * The difference between TYPERED and TYPERET has to do with | ||
78 | * sign-extension. E.g. for SMAX, TYPERED must be signed, | ||
79 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/translate-sve.c | ||
82 | +++ b/target/arm/translate-sve.c | ||
83 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false) | ||
84 | DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true) | ||
85 | DO_SVE2_ZZZ_WTB(USUBWB, usubw, false) | ||
86 | DO_SVE2_ZZZ_WTB(USUBWT, usubw, true) | ||
87 | + | ||
88 | +static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) | ||
89 | +{ | ||
90 | + int top = imm & 1; | ||
91 | + int shl = imm >> 1; | ||
92 | + int halfbits = 4 << vece; | ||
93 | + | ||
94 | + if (top) { | ||
95 | + if (shl == halfbits) { | ||
96 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
97 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); | ||
98 | + tcg_gen_and_vec(vece, d, n, t); | ||
99 | + tcg_temp_free_vec(t); | ||
100 | + } else { | ||
101 | + tcg_gen_sari_vec(vece, d, n, halfbits); | ||
102 | + tcg_gen_shli_vec(vece, d, d, shl); | ||
103 | + } | ||
104 | + } else { | ||
105 | + tcg_gen_shli_vec(vece, d, n, halfbits); | ||
106 | + tcg_gen_sari_vec(vece, d, d, halfbits - shl); | ||
107 | + } | ||
108 | +} | ||
109 | + | ||
110 | +static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm) | ||
111 | +{ | ||
112 | + int halfbits = 4 << vece; | ||
113 | + int top = imm & 1; | ||
114 | + int shl = (imm >> 1); | ||
115 | + int shift; | ||
116 | + uint64_t mask; | ||
117 | + | ||
118 | + mask = MAKE_64BIT_MASK(0, halfbits); | ||
119 | + mask <<= shl; | ||
120 | + mask = dup_const(vece, mask); | ||
121 | + | ||
122 | + shift = shl - top * halfbits; | ||
123 | + if (shift < 0) { | ||
124 | + tcg_gen_shri_i64(d, n, -shift); | ||
125 | + } else { | ||
126 | + tcg_gen_shli_i64(d, n, shift); | ||
127 | + } | ||
128 | + tcg_gen_andi_i64(d, d, mask); | ||
129 | +} | ||
130 | + | ||
131 | +static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) | ||
132 | +{ | ||
133 | + gen_ushll_i64(MO_16, d, n, imm); | ||
134 | +} | ||
135 | + | ||
136 | +static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) | ||
137 | +{ | ||
138 | + gen_ushll_i64(MO_32, d, n, imm); | ||
139 | +} | ||
140 | + | ||
141 | +static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) | ||
142 | +{ | ||
143 | + gen_ushll_i64(MO_64, d, n, imm); | ||
144 | +} | ||
145 | + | ||
146 | +static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) | ||
147 | +{ | ||
148 | + int halfbits = 4 << vece; | ||
149 | + int top = imm & 1; | ||
150 | + int shl = imm >> 1; | ||
151 | + | ||
152 | + if (top) { | ||
153 | + if (shl == halfbits) { | ||
154 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
155 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); | ||
156 | + tcg_gen_and_vec(vece, d, n, t); | ||
157 | + tcg_temp_free_vec(t); | ||
158 | + } else { | ||
159 | + tcg_gen_shri_vec(vece, d, n, halfbits); | ||
160 | + tcg_gen_shli_vec(vece, d, d, shl); | ||
161 | + } | ||
162 | + } else { | ||
163 | + if (shl == 0) { | ||
164 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
165 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
166 | + tcg_gen_and_vec(vece, d, n, t); | ||
167 | + tcg_temp_free_vec(t); | ||
168 | + } else { | ||
169 | + tcg_gen_shli_vec(vece, d, n, halfbits); | ||
170 | + tcg_gen_shri_vec(vece, d, d, halfbits - shl); | ||
171 | + } | 66 | + } |
172 | + } | 67 | + } |
173 | +} | 68 | +} |
69 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/target/arm/translate-sme.c | ||
72 | +++ b/target/arm/translate-sme.c | ||
73 | @@ -XXX,XX +XXX,XX @@ | ||
74 | */ | ||
75 | |||
76 | #include "decode-sme.c.inc" | ||
174 | + | 77 | + |
175 | +static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a, | 78 | + |
176 | + bool sel, bool uns) | 79 | +static bool trans_ZERO(DisasContext *s, arg_ZERO *a) |
177 | +{ | 80 | +{ |
178 | + static const TCGOpcode sshll_list[] = { | 81 | + if (!dc_isar_feature(aa64_sme, s)) { |
179 | + INDEX_op_shli_vec, INDEX_op_sari_vec, 0 | ||
180 | + }; | ||
181 | + static const TCGOpcode ushll_list[] = { | ||
182 | + INDEX_op_shli_vec, INDEX_op_shri_vec, 0 | ||
183 | + }; | ||
184 | + static const GVecGen2i ops[2][3] = { | ||
185 | + { { .fniv = gen_sshll_vec, | ||
186 | + .opt_opc = sshll_list, | ||
187 | + .fno = gen_helper_sve2_sshll_h, | ||
188 | + .vece = MO_16 }, | ||
189 | + { .fniv = gen_sshll_vec, | ||
190 | + .opt_opc = sshll_list, | ||
191 | + .fno = gen_helper_sve2_sshll_s, | ||
192 | + .vece = MO_32 }, | ||
193 | + { .fniv = gen_sshll_vec, | ||
194 | + .opt_opc = sshll_list, | ||
195 | + .fno = gen_helper_sve2_sshll_d, | ||
196 | + .vece = MO_64 } }, | ||
197 | + { { .fni8 = gen_ushll16_i64, | ||
198 | + .fniv = gen_ushll_vec, | ||
199 | + .opt_opc = ushll_list, | ||
200 | + .fno = gen_helper_sve2_ushll_h, | ||
201 | + .vece = MO_16 }, | ||
202 | + { .fni8 = gen_ushll32_i64, | ||
203 | + .fniv = gen_ushll_vec, | ||
204 | + .opt_opc = ushll_list, | ||
205 | + .fno = gen_helper_sve2_ushll_s, | ||
206 | + .vece = MO_32 }, | ||
207 | + { .fni8 = gen_ushll64_i64, | ||
208 | + .fniv = gen_ushll_vec, | ||
209 | + .opt_opc = ushll_list, | ||
210 | + .fno = gen_helper_sve2_ushll_d, | ||
211 | + .vece = MO_64 } }, | ||
212 | + }; | ||
213 | + | ||
214 | + if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) { | ||
215 | + return false; | 82 | + return false; |
216 | + } | 83 | + } |
217 | + if (sve_access_check(s)) { | 84 | + if (sme_za_enabled_check(s)) { |
218 | + unsigned vsz = vec_full_reg_size(s); | 85 | + gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm), |
219 | + tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), | 86 | + tcg_constant_i32(streaming_vec_reg_size(s))); |
220 | + vec_full_reg_offset(s, a->rn), | ||
221 | + vsz, vsz, (a->imm << 1) | sel, | ||
222 | + &ops[uns][a->esz]); | ||
223 | + } | 87 | + } |
224 | + return true; | 88 | + return true; |
225 | +} | 89 | +} |
226 | + | ||
227 | +static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a) | ||
228 | +{ | ||
229 | + return do_sve2_shll_tb(s, a, false, false); | ||
230 | +} | ||
231 | + | ||
232 | +static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a) | ||
233 | +{ | ||
234 | + return do_sve2_shll_tb(s, a, true, false); | ||
235 | +} | ||
236 | + | ||
237 | +static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a) | ||
238 | +{ | ||
239 | + return do_sve2_shll_tb(s, a, false, true); | ||
240 | +} | ||
241 | + | ||
242 | +static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a) | ||
243 | +{ | ||
244 | + return do_sve2_shll_tb(s, a, true, true); | ||
245 | +} | ||
246 | -- | 90 | -- |
247 | 2.20.1 | 91 | 2.25.1 |
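
Per element, the widening shifts in the left-hand patch read the even (bottom) or odd (top) narrow elements, extend them, and shift left; the vectorised gen_*shll_* expansions are shift-and-mask encodings of exactly this. A scalar reference for the 8-to-16-bit signed case, with sshll_h_model() an illustrative name:

```c
#include <stdint.h>

/* SSHLLB (top = 0) / SSHLLT (top = 1), 8-bit -> 16-bit elements:
 * sign-extend every other source element and shift left by shl (0..7),
 * matching the DO_ZZI_SHLL expansion above. */
static void sshll_h_model(int16_t *d, const int8_t *n,
                          int elems, int shl, int top)
{
    for (int i = 0; i < elems; i++) {
        d[i] = (int16_t)((int16_t)n[2 * i + top] << shl);
    }
}
```
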
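In the right-hand patch, helper_sme_zero depends on the tile interleaving called out in its comment: bit i of the immediate names 64-bit tile ZA i, and that tile owns every eighth row of the ZA storage starting at row i. The architectural effect on a dense svl_bytes x svl_bytes byte array looks like this sketch; the real QEMU storage instead strides rows by sizeof(ARMVectorReg):

```c
#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* Zero the 64-bit tiles selected by the 8-bit immediate: row r of the
 * ZA storage belongs to tile (r % 8), so test that bit of imm. */
static void sme_zero_model(uint8_t *za, unsigned svl_bytes, unsigned imm)
{
    for (unsigned row = 0; row < svl_bytes; row++) {
        if (imm & (1u << (row % 8))) {
            memset(za + (size_t)row * svl_bytes, 0, svl_bytes);
        }
    }
}
```
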
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We can reuse the SVE functions for implementing moves to/from | ||
4 | horizontal tile slices, but we need new ones for moves to/from | ||
5 | vertical tile slices. | ||
6 | |||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20210525010358.152808-73-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-20-richard.henderson@linaro.org |
7 | Message-Id: <20200428174332.17162-2-steplong@quicinc.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 11 | --- |
11 | target/arm/helper-sve.h | 5 +++++ | 12 | target/arm/helper-sme.h | 12 +++ |
12 | target/arm/sve.decode | 4 ++++ | 13 | target/arm/helper-sve.h | 2 + |
13 | target/arm/sve_helper.c | 20 ++++++++++++++++++++ | 14 | target/arm/translate-a64.h | 8 ++ |
14 | target/arm/translate-sve.c | 16 ++++++++++++++++ | 15 | target/arm/translate.h | 5 ++ |
15 | 4 files changed, 45 insertions(+) | 16 | target/arm/sme.decode | 15 ++++ |
17 | target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++- | ||
18 | target/arm/sve_helper.c | 12 +++ | ||
19 | target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++ | ||
20 | 8 files changed, 331 insertions(+), 1 deletion(-) | ||
16 | 21 | ||
22 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/target/arm/helper-sme.h | ||
25 | +++ b/target/arm/helper-sme.h | ||
26 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) | ||
27 | DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) | ||
28 | |||
29 | DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) | ||
30 | + | ||
31 | +/* Move to/from vertical array slices, i.e. columns, so 'c'. */ | ||
32 | +DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 42 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h |
18 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/helper-sve.h | 44 | --- a/target/arm/helper-sve.h |
20 | +++ b/target/arm/helper-sve.h | 45 | +++ b/target/arm/helper-sve.h |
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_cdot_idx_s, TCG_CALL_NO_RWG, | 46 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, |
22 | void, ptr, ptr, ptr, ptr, i32) | 47 | void, ptr, ptr, ptr, ptr, i32) |
23 | DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG, | 48 | DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, |
24 | void, ptr, ptr, ptr, ptr, i32) | 49 | void, ptr, ptr, ptr, ptr, i32) |
25 | + | 50 | +DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG, |
26 | +DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG, | ||
27 | + void, ptr, ptr, ptr, ptr, i32) | 51 | + void, ptr, ptr, ptr, ptr, i32) |
28 | +DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG, | 52 | |
29 | + void, ptr, ptr, ptr, ptr, i32) | 53 | DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG, |
30 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 54 | void, ptr, ptr, ptr, ptr, i32) |
31 | index XXXXXXX..XXXXXXX 100644 | 55 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
32 | --- a/target/arm/sve.decode | 56 | index XXXXXXX..XXXXXXX 100644 |
33 | +++ b/target/arm/sve.decode | 57 | --- a/target/arm/translate-a64.h |
34 | @@ -XXX,XX +XXX,XX @@ SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0 | 58 | +++ b/target/arm/translate-a64.h |
35 | # SVE2 crypto constructive binary operations | 59 | @@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s) |
36 | SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0 | 60 | return size_for_gvec(pred_full_reg_size(s)); |
37 | RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0 | 61 | } |
38 | + | 62 | |
39 | +### SVE2 floating-point convert precision odd elements | 63 | +/* Return a newly allocated pointer to the predicate register. */ |
40 | +FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0 | 64 | +static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) |
41 | +FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0 | 65 | +{ |
66 | + TCGv_ptr ret = tcg_temp_new_ptr(); | ||
67 | + tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno)); | ||
68 | + return ret; | ||
69 | +} | ||
70 | + | ||
71 | bool disas_sve(DisasContext *, uint32_t); | ||
72 | bool disas_sme(DisasContext *, uint32_t); | ||
73 | |||
74 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/target/arm/translate.h | ||
77 | +++ b/target/arm/translate.h | ||
78 | @@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x) | ||
79 | return x + 2; | ||
80 | } | ||
81 | |||
82 | +static inline int plus_12(DisasContext *s, int x) | ||
83 | +{ | ||
84 | + return x + 12; | ||
85 | +} | ||
86 | + | ||
87 | static inline int times_2(DisasContext *s, int x) | ||
88 | { | ||
89 | return x * 2; | ||
90 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/target/arm/sme.decode | ||
93 | +++ b/target/arm/sme.decode | ||
94 | @@ -XXX,XX +XXX,XX @@ | ||
95 | ### SME Misc | ||
96 | |||
97 | ZERO 11000000 00 001 00000000000 imm:8 | ||
98 | + | ||
99 | +### SME Move into/from Array | ||
100 | + | ||
101 | +%mova_rs 13:2 !function=plus_12 | ||
102 | +&mova esz rs pg zr za_imm v:bool to_vec:bool | ||
103 | + | ||
104 | +MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \ | ||
105 | + &mova to_vec=0 rs=%mova_rs | ||
106 | +MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \ | ||
107 | + &mova to_vec=0 rs=%mova_rs esz=4 | ||
108 | + | ||
109 | +MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ | ||
110 | + &mova to_vec=1 rs=%mova_rs | ||
111 | +MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ | ||
112 | + &mova to_vec=1 rs=%mova_rs esz=4 | ||
113 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
114 | index XXXXXXX..XXXXXXX 100644 | ||
115 | --- a/target/arm/sme_helper.c | ||
116 | +++ b/target/arm/sme_helper.c | ||
117 | @@ -XXX,XX +XXX,XX @@ | ||
118 | |||
119 | #include "qemu/osdep.h" | ||
120 | #include "cpu.h" | ||
121 | -#include "internals.h" | ||
122 | +#include "tcg/tcg-gvec-desc.h" | ||
123 | #include "exec/helper-proto.h" | ||
124 | +#include "qemu/int128.h" | ||
125 | +#include "vec_internal.h" | ||
126 | |||
127 | /* ResetSVEState */ | ||
128 | void arm_reset_sve_state(CPUARMState *env) | ||
129 | @@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) | ||
130 | } | ||
131 | } | ||
132 | } | ||
133 | + | ||
134 | + | ||
135 | +/* | ||
136 | + * When considering the ZA storage as an array of elements of | ||
137 | + * type T, the index within that array of the Nth element of | ||
138 | + * a vertical slice of a tile can be calculated like this, | ||
139 | + * regardless of the size of type T. This is because the tiles | ||
140 | + * are interleaved, so if type T is size N bytes then row 1 of | ||
141 | + * the tile is N rows away from row 0. The division by N to | ||
142 | + * convert a byte offset into an array index and the multiplication | ||
143 | + * by N to convert from vslice-index-within-the-tile to | ||
144 | + * the index within the ZA storage cancel out. | ||
145 | + */ | ||
146 | +#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg)) | ||
147 | + | ||
148 | +/* | ||
149 | + * When doing byte arithmetic on the ZA storage, the element | ||
150 | + * byteoff bytes away in a tile vertical slice is always this | ||
151 | + * many bytes away in the ZA storage, regardless of the | ||
152 | + * size of the tile element, assuming that byteoff is a multiple | ||
153 | + * of the element size. Again this is because of the interleaving | ||
154 | + * of the tiles. For instance if we have 1 byte per element then | ||
155 | + * each row of the ZA storage has one byte of the vslice data, | ||
156 | + * and (counting from 0) byte 8 goes in row 8 of the storage | ||
157 | + * at offset (8 * row-size-in-bytes). | ||
158 | + * If we have 8 bytes per element then each row of the ZA storage | ||
159 | + * has 8 bytes of the data, but there are 8 interleaved tiles and | ||
160 | + * so byte 8 of the data goes into row 1 of the tile, | ||
161 | + * which is again row 8 of the storage, so the offset is still | ||
162 | + * (8 * row-size-in-bytes). Similarly for other element sizes. | ||
163 | + */ | ||
164 | +#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg)) | ||
165 | + | ||
166 | + | ||
167 | +/* | ||
168 | + * Move Zreg vector to ZArray column. | ||
169 | + */ | ||
170 | +#define DO_MOVA_C(NAME, TYPE, H) \ | ||
171 | +void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \ | ||
172 | +{ \ | ||
173 | + int i, oprsz = simd_oprsz(desc); \ | ||
174 | + for (i = 0; i < oprsz; ) { \ | ||
175 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
176 | + do { \ | ||
177 | + if (pg & 1) { \ | ||
178 | + *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \ | ||
179 | + } \ | ||
180 | + i += sizeof(TYPE); \ | ||
181 | + pg >>= sizeof(TYPE); \ | ||
182 | + } while (i & 15); \ | ||
183 | + } \ | ||
184 | +} | ||
185 | + | ||
186 | +DO_MOVA_C(sme_mova_cz_b, uint8_t, H1) | ||
187 | +DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2) | ||
188 | +DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4) | ||
189 | + | ||
190 | +void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc) | ||
191 | +{ | ||
192 | + int i, oprsz = simd_oprsz(desc) / 8; | ||
193 | + uint8_t *pg = vg; | ||
194 | + uint64_t *n = vn; | ||
195 | + uint64_t *a = za; | ||
196 | + | ||
197 | + for (i = 0; i < oprsz; i++) { | ||
198 | + if (pg[H1(i)] & 1) { | ||
199 | + a[tile_vslice_index(i)] = n[i]; | ||
200 | + } | ||
201 | + } | ||
202 | +} | ||
203 | + | ||
204 | +void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc) | ||
205 | +{ | ||
206 | + int i, oprsz = simd_oprsz(desc) / 16; | ||
207 | + uint16_t *pg = vg; | ||
208 | + Int128 *n = vn; | ||
209 | + Int128 *a = za; | ||
210 | + | ||
211 | + /* | ||
212 | + * Int128 is used here simply to copy 16 bytes, and to simplify | ||
213 | + * the address arithmetic. | ||
214 | + */ | ||
215 | + for (i = 0; i < oprsz; i++) { | ||
216 | + if (pg[H2(i)] & 1) { | ||
217 | + a[tile_vslice_index(i)] = n[i]; | ||
218 | + } | ||
219 | + } | ||
220 | +} | ||
221 | + | ||
222 | +#undef DO_MOVA_C | ||
223 | + | ||
224 | +/* | ||
225 | + * Move ZArray column to Zreg vector. | ||
226 | + */ | ||
227 | +#define DO_MOVA_Z(NAME, TYPE, H) \ | ||
228 | +void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \ | ||
229 | +{ \ | ||
230 | + int i, oprsz = simd_oprsz(desc); \ | ||
231 | + for (i = 0; i < oprsz; ) { \ | ||
232 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
233 | + do { \ | ||
234 | + if (pg & 1) { \ | ||
235 | + *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \ | ||
236 | + } \ | ||
237 | + i += sizeof(TYPE); \ | ||
238 | + pg >>= sizeof(TYPE); \ | ||
239 | + } while (i & 15); \ | ||
240 | + } \ | ||
241 | +} | ||
242 | + | ||
243 | +DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1) | ||
244 | +DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2) | ||
245 | +DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4) | ||
246 | + | ||
247 | +void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc) | ||
248 | +{ | ||
249 | + int i, oprsz = simd_oprsz(desc) / 8; | ||
250 | + uint8_t *pg = vg; | ||
251 | + uint64_t *d = vd; | ||
252 | + uint64_t *a = za; | ||
253 | + | ||
254 | + for (i = 0; i < oprsz; i++) { | ||
255 | + if (pg[H1(i)] & 1) { | ||
256 | + d[i] = a[tile_vslice_index(i)]; | ||
257 | + } | ||
258 | + } | ||
259 | +} | ||
260 | + | ||
261 | +void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) | ||
262 | +{ | ||
263 | + int i, oprsz = simd_oprsz(desc) / 16; | ||
264 | + uint16_t *pg = vg; | ||
265 | + Int128 *d = vd; | ||
266 | + Int128 *a = za; | ||
267 | + | ||
268 | + /* | ||
269 | + * Int128 is used here simply to copy 16 bytes, and to simplify | ||
270 | + * the address arithmetic. | ||
271 | + */ | ||
272 | + for (i = 0; i < oprsz; i++) { | |
273 | + if (pg[H2(i)] & 1) { | ||
274 | + d[i] = a[tile_vslice_index(i)]; | ||
275 | + } | ||
276 | + } | ||
277 | +} | ||
278 | + | ||
279 | +#undef DO_MOVA_Z | ||
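For the 8- and 16-byte specializations, each element spans 8 or 16 predicate bits, so the governing (lowest) bit can be tested directly as the low bit of a predicate byte; H1()/H2() only fix up the byte index on big-endian hosts. A rough little-endian-host illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Predicate bits 0 and 24 set: .d elements 0 and 3 active. */
        uint64_t pred = (1ull << 0) | (1ull << 24);
        uint8_t pg[8];

        memcpy(pg, &pred, 8);        /* little-endian byte view assumed */
        for (int i = 0; i < 8; i++) {
            if (pg[i] & 1) {         /* pg[H1(i)] & 1 in the helper */
                printf("element %d active\n", i);
            }
        }
        return 0;
    }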
42 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 280 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c |
43 | index XXXXXXX..XXXXXXX 100644 | 281 | index XXXXXXX..XXXXXXX 100644 |
44 | --- a/target/arm/sve_helper.c | 282 | --- a/target/arm/sve_helper.c |
45 | +++ b/target/arm/sve_helper.c | 283 | +++ b/target/arm/sve_helper.c |
46 | @@ -XXX,XX +XXX,XX @@ void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va, | 284 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, |
47 | d[3] = float64_add(a[3], float64_add(p0, p1, status), status); | ||
48 | } | 285 | } |
49 | } | 286 | } |
50 | + | 287 | |
51 | +#define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \ | 288 | +void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm, |
52 | +void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ | 289 | + void *vg, uint32_t desc) |
53 | +{ \ | 290 | +{ |
54 | + intptr_t i = simd_oprsz(desc); \ | 291 | + intptr_t i, opr_sz = simd_oprsz(desc) / 16; |
55 | + uint64_t *g = vg; \ | 292 | + Int128 *d = vd, *n = vn, *m = vm; |
56 | + do { \ | 293 | + uint16_t *pg = vg; |
57 | + uint64_t pg = g[(i - 1) >> 6]; \ | 294 | + |
58 | + do { \ | 295 | + for (i = 0; i < opr_sz; i += 1) { |
59 | + i -= sizeof(TYPEW); \ | 296 | + d[i] = (pg[H2(i)] & 1 ? n : m)[i]; |
60 | + if (likely((pg >> (i & 63)) & 1)) { \ | 297 | + } |
61 | + TYPEW nn = *(TYPEW *)(vn + HW(i)); \ | 298 | +} |
62 | + *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, status); \ | 299 | + |
63 | + } \ | 300 | /* Two operand comparison controlled by a predicate. |
64 | + } while (i & 63); \ | 301 | * ??? It is very tempting to want to be able to expand this inline |
65 | + } while (i != 0); \ | 302 | * with x86 instructions, e.g. |
66 | +} | 303 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c |
67 | + | 304 | index XXXXXXX..XXXXXXX 100644 |
68 | +DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16) | 305 | --- a/target/arm/translate-sme.c |
69 | +DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32) | 306 | +++ b/target/arm/translate-sme.c |
70 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 307 | @@ -XXX,XX +XXX,XX @@ |
71 | index XXXXXXX..XXXXXXX 100644 | 308 | #include "decode-sme.c.inc" |
72 | --- a/target/arm/translate-sve.c | 309 | |
73 | +++ b/target/arm/translate-sve.c | 310 | |
74 | @@ -XXX,XX +XXX,XX @@ static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a) | 311 | +/* |
312 | + * Resolve tile.size[index] to a host pointer, where tile and index | ||
313 | + * are always decoded together, dependent on the element size. | ||
314 | + */ | ||
315 | +static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, | ||
316 | + int tile_index, bool vertical) | ||
317 | +{ | ||
318 | + int tile = tile_index >> (4 - esz); | ||
319 | + int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz); | ||
320 | + int pos, len, offset; | ||
321 | + TCGv_i32 tmp; | ||
322 | + TCGv_ptr addr; | ||
323 | + | ||
324 | + /* Compute the final index, which is Rs+imm. */ | ||
325 | + tmp = tcg_temp_new_i32(); | ||
326 | + tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs)); | ||
327 | + tcg_gen_addi_i32(tmp, tmp, index); | ||
328 | + | ||
329 | + /* Prepare a power-of-two modulo via extraction of @len bits. */ | ||
330 | + len = ctz32(streaming_vec_reg_size(s)) - esz; | ||
331 | + | ||
332 | + if (vertical) { | ||
333 | + /* | ||
334 | + * Compute the byte offset of the index within the tile: | ||
335 | + * (index % (svl / size)) * size | ||
336 | + * = (index % (svl >> esz)) << esz | ||
337 | + * Perform the power-of-two modulo via extraction of the low @len bits. | ||
338 | + * Perform the multiply by shifting left by @pos bits. | ||
339 | + * Perform these operations simultaneously via deposit into zero. | ||
340 | + */ | ||
341 | + pos = esz; | ||
342 | + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); | ||
343 | + | ||
344 | + /* | ||
345 | + * For big-endian, adjust the indexed column byte offset within | ||
346 | + * the uint64_t host words that make up env->zarray[]. | ||
347 | + */ | ||
348 | + if (HOST_BIG_ENDIAN && esz < MO_64) { | ||
349 | + tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz)); | ||
350 | + } | ||
351 | + } else { | ||
352 | + /* | ||
353 | + * Compute the byte offset of the index within the tile: | ||
354 | + * (index % (svl / size)) * (size * sizeof(row)) | ||
355 | + * = (index % (svl >> esz)) << (esz + log2(sizeof(row))) | ||
356 | + */ | ||
357 | + pos = esz + ctz32(sizeof(ARMVectorReg)); | ||
358 | + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); | ||
359 | + | ||
360 | + /* Row slices are always aligned and need no endian adjustment. */ | ||
361 | + } | ||
362 | + | ||
363 | + /* The tile byte offset within env->zarray is the row. */ | ||
364 | + offset = tile * sizeof(ARMVectorReg); | ||
365 | + | ||
366 | + /* Include the byte offset of zarray to make this relative to env. */ | ||
367 | + offset += offsetof(CPUARMState, zarray); | ||
368 | + tcg_gen_addi_i32(tmp, tmp, offset); | ||
369 | + | ||
370 | + /* Add the byte offset to env to produce the final pointer. */ | ||
371 | + addr = tcg_temp_new_ptr(); | ||
372 | + tcg_gen_ext_i32_ptr(addr, tmp); | ||
373 | + tcg_temp_free_i32(tmp); | ||
374 | + tcg_gen_add_ptr(addr, addr, cpu_env); | ||
375 | + | ||
376 | + return addr; | ||
377 | +} | ||
378 | + | ||
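tcg_gen_deposit_z_i32(t, t, pos, len) deposits the low len bits of t into a zeroed result at bit position pos, which is exactly the fused modulo-then-multiply described in the comments above. A plain-C equivalent, with example numbers (an SVL of 64 bytes, .s elements, vertical slice) that are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t deposit_z(uint32_t val, int pos, int len)
    {
        return (val & ((1u << len) - 1)) << pos;  /* (val % 2^len) << pos */
    }

    int main(void)
    {
        int esz = 2;                 /* MO_32 */
        int len = 6 - esz;           /* ctz32(64) - esz = 4 */
        int pos = esz;               /* vertical: multiply by the esize */
        uint32_t index = 19;

        /* (19 % 16) << 2 == 12: byte offset of column 3 within the row */
        printf("offset = %u\n", deposit_z(index, pos, len));
        return 0;
    }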
379 | static bool trans_ZERO(DisasContext *s, arg_ZERO *a) | ||
380 | { | ||
381 | if (!dc_isar_feature(aa64_sme, s)) { | ||
382 | @@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a) | ||
75 | } | 383 | } |
76 | return true; | 384 | return true; |
77 | } | 385 | } |
78 | + | 386 | + |
79 | +static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a) | 387 | +static bool trans_MOVA(DisasContext *s, arg_MOVA *a) |
80 | +{ | 388 | +{ |
81 | + if (!dc_isar_feature(aa64_sve2, s)) { | 389 | + static gen_helper_gvec_4 * const h_fns[5] = { |
390 | + gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, | ||
391 | + gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d, | ||
392 | + gen_helper_sve_sel_zpzz_q | ||
393 | + }; | ||
394 | + static gen_helper_gvec_3 * const cz_fns[5] = { | ||
395 | + gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h, | ||
396 | + gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d, | ||
397 | + gen_helper_sme_mova_cz_q, | ||
398 | + }; | ||
399 | + static gen_helper_gvec_3 * const zc_fns[5] = { | ||
400 | + gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h, | ||
401 | + gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d, | ||
402 | + gen_helper_sme_mova_zc_q, | ||
403 | + }; | ||
404 | + | ||
405 | + TCGv_ptr t_za, t_zr, t_pg; | ||
406 | + TCGv_i32 t_desc; | ||
407 | + int svl; | ||
408 | + | ||
409 | + if (!dc_isar_feature(aa64_sme, s)) { | ||
82 | + return false; | 410 | + return false; |
83 | + } | 411 | + } |
84 | + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh); | 412 | + if (!sme_smza_enabled_check(s)) { |
85 | +} | 413 | + return true; |
86 | + | 414 | + } |
87 | +static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a) | 415 | + |
88 | +{ | 416 | + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); |
89 | + if (!dc_isar_feature(aa64_sve2, s)) { | 417 | + t_zr = vec_full_reg_ptr(s, a->zr); |
90 | + return false; | 418 | + t_pg = pred_full_reg_ptr(s, a->pg); |
91 | + } | 419 | + |
92 | + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds); | 420 | + svl = streaming_vec_reg_size(s); |
421 | + t_desc = tcg_constant_i32(simd_desc(svl, svl, 0)); | ||
422 | + | ||
423 | + if (a->v) { | ||
424 | + /* Vertical slice -- use sme mova helpers. */ | ||
425 | + if (a->to_vec) { | ||
426 | + zc_fns[a->esz](t_zr, t_za, t_pg, t_desc); | ||
427 | + } else { | ||
428 | + cz_fns[a->esz](t_za, t_zr, t_pg, t_desc); | ||
429 | + } | ||
430 | + } else { | ||
431 | + /* Horizontal slice -- reuse sve sel helpers. */ | ||
432 | + if (a->to_vec) { | ||
433 | + h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc); | ||
434 | + } else { | ||
435 | + h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); | ||
436 | + } | ||
437 | + } | ||
438 | + | ||
439 | + tcg_temp_free_ptr(t_za); | ||
440 | + tcg_temp_free_ptr(t_zr); | ||
441 | + tcg_temp_free_ptr(t_pg); | ||
442 | + | ||
443 | + return true; | ||
93 | +} | 444 | +} |
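For horizontal slices the rows of ZA have the same layout as a Zreg, so a predicated move is exactly SEL with the destination doubling as the false operand: d[i] = pg[i] ? n[i] : m[i] leaves inactive elements untouched when m == d. A small sketch of that trick:

    #include <stdint.h>
    #include <stdio.h>

    static void sel(uint32_t *d, const uint32_t *n, const uint32_t *m,
                    const int *pg, int elems)
    {
        for (int i = 0; i < elems; i++) {
            d[i] = pg[i] ? n[i] : m[i];
        }
    }

    int main(void)
    {
        uint32_t za_row[4] = {1, 2, 3, 4};
        uint32_t zr[4] = {9, 9, 9, 9};
        int pg[4] = {1, 0, 1, 0};

        sel(zr, za_row, zr, pg, 4);  /* MOVA tile->vector, horizontal */
        printf("%u %u %u %u\n", zr[0], zr[1], zr[2], zr[3]);  /* 1 9 3 9 */
        return 0;
    }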
94 | -- | 445 | -- |
95 | 2.20.1 | 446 | 2.25.1 |
96 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | We cannot reuse the SVE functions for LD[1-4] and ST[1-4], | ||
4 | because those functions accept only a Zreg register number. | ||
5 | For SME, we want to pass a pointer into ZA storage. | ||
2 | 6 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-69-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-21-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 11 | --- |
8 | target/arm/sve.decode | 6 ++++++ | 12 | target/arm/helper-sme.h | 82 +++++ |
9 | target/arm/translate-sve.c | 11 +++++++++++ | 13 | target/arm/sme.decode | 9 + |
10 | 2 files changed, 17 insertions(+) | 14 | target/arm/sme_helper.c | 595 +++++++++++++++++++++++++++++++++++++ |
15 | target/arm/translate-sme.c | 70 +++++ | ||
16 | 4 files changed, 756 insertions(+) | ||
11 | 17 | ||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 18 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
13 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/sve.decode | 20 | --- a/target/arm/helper-sme.h |
15 | +++ b/target/arm/sve.decode | 21 | +++ b/target/arm/helper-sme.h |
16 | @@ -XXX,XX +XXX,XX @@ STNT1_zprz 1110010 .. 00 ..... 001 ... ..... ..... \ | 22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
17 | # SVE2 32-bit scatter non-temporal store (vector plus scalar) | 23 | DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
18 | STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \ | 24 | DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
19 | @rprr_scatter_store xs=0 esz=2 scale=0 | 25 | DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
20 | + | 26 | + |
21 | +### SVE2 Crypto Extensions | 27 | +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
22 | + | 28 | +DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
23 | +# SVE2 crypto unary operations | 29 | +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
24 | +# AESMC and AESIMC | 30 | +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
25 | +AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5 | 31 | + |
26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 32 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
33 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
34 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
36 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
38 | +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
39 | +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
40 | + | ||
41 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
42 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
43 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
44 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
45 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
47 | +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
48 | +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
49 | + | ||
50 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
51 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
52 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
54 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
56 | +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
57 | +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
58 | + | ||
59 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
61 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
63 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
65 | +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
66 | +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
67 | + | ||
68 | +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
69 | +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
70 | +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
71 | +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
72 | + | ||
73 | +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
74 | +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
75 | +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
76 | +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
77 | +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
78 | +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
79 | +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
80 | +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
81 | + | ||
82 | +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
83 | +DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
84 | +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
85 | +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
86 | +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
87 | +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
88 | +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
89 | +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
90 | + | ||
91 | +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
92 | +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
93 | +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
94 | +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
95 | +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
96 | +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
97 | +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
98 | +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
99 | + | ||
100 | +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
101 | +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
102 | +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
103 | +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
104 | +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
105 | +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
106 | +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
107 | +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) | ||
108 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
27 | index XXXXXXX..XXXXXXX 100644 | 109 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/target/arm/translate-sve.c | 110 | --- a/target/arm/sme.decode |
29 | +++ b/target/arm/translate-sve.c | 111 | +++ b/target/arm/sme.decode |
30 | @@ -XXX,XX +XXX,XX @@ static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a) | 112 | @@ -XXX,XX +XXX,XX @@ MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ |
31 | } | 113 | &mova to_vec=1 rs=%mova_rs |
114 | MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ | ||
115 | &mova to_vec=1 rs=%mova_rs esz=4 | ||
116 | + | ||
117 | +### SME Memory | ||
118 | + | ||
119 | +&ldst esz rs pg rn rm za_imm v:bool st:bool | ||
120 | + | ||
121 | +LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ | ||
122 | + &ldst rs=%mova_rs | ||
123 | +LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ | ||
124 | + &ldst esz=4 rs=%mova_rs | ||
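Reading the first pattern off the bit diagram, a hand decoder would extract roughly the following fields (a sketch, not the generated decoder; the raw rs bits at [14:13] are mapped through %mova_rs, which is defined earlier in the file, and the example insn word is made up):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & ((1u << len) - 1);
    }

    /* Hypothetical hand-decode of the first LDST1 pattern above. */
    static void decode_ldst1(uint32_t insn)
    {
        printf("esz=%u st=%u rm=%u v=%u rs=%u pg=%u rn=%u za_imm=%u\n",
               extract32(insn, 22, 2), extract32(insn, 21, 1),
               extract32(insn, 16, 5), extract32(insn, 15, 1),
               extract32(insn, 13, 2),  /* raw rs bits, see %mova_rs */
               extract32(insn, 10, 3), extract32(insn, 5, 5),
               extract32(insn, 0, 4));
    }

    int main(void)
    {
        decode_ldst1(0xe0400000u);   /* made-up example word */
        return 0;
    }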
125 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
126 | index XXXXXXX..XXXXXXX 100644 | ||
127 | --- a/target/arm/sme_helper.c | ||
128 | +++ b/target/arm/sme_helper.c | ||
129 | @@ -XXX,XX +XXX,XX @@ | ||
130 | |||
131 | #include "qemu/osdep.h" | ||
132 | #include "cpu.h" | ||
133 | +#include "internals.h" | ||
134 | #include "tcg/tcg-gvec-desc.h" | ||
135 | #include "exec/helper-proto.h" | ||
136 | +#include "exec/cpu_ldst.h" | ||
137 | +#include "exec/exec-all.h" | ||
138 | #include "qemu/int128.h" | ||
139 | #include "vec_internal.h" | ||
140 | +#include "sve_ldst_internal.h" | ||
141 | |||
142 | /* ResetSVEState */ | ||
143 | void arm_reset_sve_state(CPUARMState *env) | ||
144 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) | ||
145 | } | ||
146 | |||
147 | #undef DO_MOVA_Z | ||
148 | + | ||
149 | +/* | ||
150 | + * Clear elements in a tile slice comprising len bytes. | ||
151 | + */ | ||
152 | + | ||
153 | +typedef void ClearFn(void *ptr, size_t off, size_t len); | ||
154 | + | ||
155 | +static void clear_horizontal(void *ptr, size_t off, size_t len) | ||
156 | +{ | ||
157 | + memset(ptr + off, 0, len); | ||
158 | +} | ||
159 | + | ||
160 | +static void clear_vertical_b(void *vptr, size_t off, size_t len) | ||
161 | +{ | ||
162 | + for (size_t i = 0; i < len; ++i) { | ||
163 | + *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
164 | + } | ||
165 | +} | ||
166 | + | ||
167 | +static void clear_vertical_h(void *vptr, size_t off, size_t len) | ||
168 | +{ | ||
169 | + for (size_t i = 0; i < len; i += 2) { | ||
170 | + *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
171 | + } | ||
172 | +} | ||
173 | + | ||
174 | +static void clear_vertical_s(void *vptr, size_t off, size_t len) | ||
175 | +{ | ||
176 | + for (size_t i = 0; i < len; i += 4) { | ||
177 | + *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
178 | + } | ||
179 | +} | ||
180 | + | ||
181 | +static void clear_vertical_d(void *vptr, size_t off, size_t len) | ||
182 | +{ | ||
183 | + for (size_t i = 0; i < len; i += 8) { | ||
184 | + *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0; | ||
185 | + } | ||
186 | +} | ||
187 | + | ||
188 | +static void clear_vertical_q(void *vptr, size_t off, size_t len) | ||
189 | +{ | ||
190 | + for (size_t i = 0; i < len; i += 16) { | ||
191 | + memset(vptr + tile_vslice_offset(i + off), 0, 16); | ||
192 | + } | ||
193 | +} | ||
194 | + | ||
195 | +/* | ||
196 | + * Copy elements from an array into a tile slice comprising len bytes. | ||
197 | + */ | ||
198 | + | ||
199 | +typedef void CopyFn(void *dst, const void *src, size_t len); | ||
200 | + | ||
201 | +static void copy_horizontal(void *dst, const void *src, size_t len) | ||
202 | +{ | ||
203 | + memcpy(dst, src, len); | ||
204 | +} | ||
205 | + | ||
206 | +static void copy_vertical_b(void *vdst, const void *vsrc, size_t len) | ||
207 | +{ | ||
208 | + const uint8_t *src = vsrc; | ||
209 | + uint8_t *dst = vdst; | ||
210 | + size_t i; | ||
211 | + | ||
212 | + for (i = 0; i < len; ++i) { | ||
213 | + dst[tile_vslice_index(i)] = src[i]; | ||
214 | + } | ||
215 | +} | ||
216 | + | ||
217 | +static void copy_vertical_h(void *vdst, const void *vsrc, size_t len) | ||
218 | +{ | ||
219 | + const uint16_t *src = vsrc; | ||
220 | + uint16_t *dst = vdst; | ||
221 | + size_t i; | ||
222 | + | ||
223 | + for (i = 0; i < len / 2; ++i) { | ||
224 | + dst[tile_vslice_index(i)] = src[i]; | ||
225 | + } | ||
226 | +} | ||
227 | + | ||
228 | +static void copy_vertical_s(void *vdst, const void *vsrc, size_t len) | ||
229 | +{ | ||
230 | + const uint32_t *src = vsrc; | ||
231 | + uint32_t *dst = vdst; | ||
232 | + size_t i; | ||
233 | + | ||
234 | + for (i = 0; i < len / 4; ++i) { | ||
235 | + dst[tile_vslice_index(i)] = src[i]; | ||
236 | + } | ||
237 | +} | ||
238 | + | ||
239 | +static void copy_vertical_d(void *vdst, const void *vsrc, size_t len) | ||
240 | +{ | ||
241 | + const uint64_t *src = vsrc; | ||
242 | + uint64_t *dst = vdst; | ||
243 | + size_t i; | ||
244 | + | ||
245 | + for (i = 0; i < len / 8; ++i) { | ||
246 | + dst[tile_vslice_index(i)] = src[i]; | ||
247 | + } | ||
248 | +} | ||
249 | + | ||
250 | +static void copy_vertical_q(void *vdst, const void *vsrc, size_t len) | ||
251 | +{ | ||
252 | + for (size_t i = 0; i < len; i += 16) { | ||
253 | + memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16); | ||
254 | + } | ||
255 | +} | ||
256 | + | ||
257 | +/* | ||
258 | + * Host and TLB primitives for vertical tile slice addressing. | ||
259 | + */ | ||
260 | + | ||
261 | +#define DO_LD(NAME, TYPE, HOST, TLB) \ | ||
262 | +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ | ||
263 | +{ \ | ||
264 | + TYPE val = HOST(host); \ | ||
265 | + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ | ||
266 | +} \ | ||
267 | +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ | ||
268 | + intptr_t off, target_ulong addr, uintptr_t ra) \ | ||
269 | +{ \ | ||
270 | + TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \ | ||
271 | + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ | ||
272 | +} | ||
273 | + | ||
274 | +#define DO_ST(NAME, TYPE, HOST, TLB) \ | ||
275 | +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ | ||
276 | +{ \ | ||
277 | + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ | ||
278 | + HOST(host, val); \ | ||
279 | +} \ | ||
280 | +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ | ||
281 | + intptr_t off, target_ulong addr, uintptr_t ra) \ | ||
282 | +{ \ | ||
283 | + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ | ||
284 | + TLB(env, useronly_clean_ptr(addr), val, ra); \ | ||
285 | +} | ||
286 | + | ||
287 | +/* | ||
288 | + * The ARMVectorReg elements are stored in host-endian 64-bit units. | ||
289 | + * For 128-bit quantities, the sequence defined by the Elem[] pseudocode | ||
290 | + * corresponds to storing the two 64-bit pieces in little-endian order. | ||
291 | + */ | ||
292 | +#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \ | ||
293 | +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ | ||
294 | +{ \ | ||
295 | + uint64_t val0 = HOST(host), val1 = HOST(host + 8); \ | ||
296 | + uint64_t *ptr = za + off; \ | ||
297 | + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ | ||
298 | +} \ | ||
299 | +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ | ||
300 | +{ \ | ||
301 | + HNAME##_host(za, tile_vslice_offset(off), host); \ | ||
302 | +} \ | ||
303 | +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
304 | + target_ulong addr, uintptr_t ra) \ | ||
305 | +{ \ | ||
306 | + uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \ | ||
307 | + uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \ | ||
308 | + uint64_t *ptr = za + off; \ | ||
309 | + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ | ||
310 | +} \ | ||
311 | +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
312 | + target_ulong addr, uintptr_t ra) \ | ||
313 | +{ \ | ||
314 | + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ | ||
315 | +} | ||
316 | + | ||
317 | +#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \ | ||
318 | +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ | ||
319 | +{ \ | ||
320 | + uint64_t *ptr = za + off; \ | ||
321 | + HOST(host, ptr[BE]); \ | ||
322 | + HOST(host + 8, ptr[!BE]); \ | |
323 | +} \ | ||
324 | +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ | ||
325 | +{ \ | ||
326 | + HNAME##_host(za, tile_vslice_offset(off), host); \ | ||
327 | +} \ | ||
328 | +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
329 | + target_ulong addr, uintptr_t ra) \ | ||
330 | +{ \ | ||
331 | + uint64_t *ptr = za + off; \ | ||
332 | + TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \ | ||
333 | + TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \ | ||
334 | +} \ | ||
335 | +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ | ||
336 | + target_ulong addr, uintptr_t ra) \ | ||
337 | +{ \ | ||
338 | + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ | ||
339 | +} | ||
340 | + | ||
341 | +DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra) | ||
342 | +DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra) | ||
343 | +DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra) | ||
344 | +DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra) | ||
345 | +DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra) | ||
346 | +DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra) | ||
347 | +DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra) | ||
348 | + | ||
349 | +DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra) | ||
350 | +DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra) | ||
351 | + | ||
352 | +DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra) | ||
353 | +DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra) | ||
354 | +DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra) | ||
355 | +DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra) | ||
356 | +DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra) | ||
357 | +DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra) | ||
358 | +DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra) | ||
359 | + | ||
360 | +DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra) | ||
361 | +DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra) | ||
362 | + | ||
363 | +#undef DO_LD | ||
364 | +#undef DO_ST | ||
365 | +#undef DO_LDQ | ||
366 | +#undef DO_STQ | ||
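Per the comment above the DO_LDQ macro, a 128-bit ZA element is stored as two host-endian uint64_t in little-endian order, so a big-endian load only has to swap which memory half lands in which storage half. A toy version of the selection logic:

    #include <stdint.h>
    #include <stdio.h>

    /* val0/val1 are the lower/higher-addressed 8-byte halves in memory. */
    static void ldq(uint64_t *ptr, uint64_t val0, uint64_t val1, int be)
    {
        /* BE: the higher-addressed half is the low 64 bits, i.e. ptr[0]. */
        ptr[0] = be ? val1 : val0;
        ptr[1] = be ? val0 : val1;
    }

    int main(void)
    {
        uint64_t q[2];

        ldq(q, 0x0011223344556677ull, 0x8899aabbccddeeffull, 1);
        printf("%016llx %016llx\n",
               (unsigned long long)q[0], (unsigned long long)q[1]);
        return 0;
    }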
367 | + | ||
368 | +/* | ||
369 | + * Common helper for all contiguous predicated loads. | ||
370 | + */ | ||
371 | + | ||
372 | +static inline QEMU_ALWAYS_INLINE | ||
373 | +void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, | ||
374 | + const target_ulong addr, uint32_t desc, const uintptr_t ra, | ||
375 | + const int esz, uint32_t mtedesc, bool vertical, | ||
376 | + sve_ldst1_host_fn *host_fn, | ||
377 | + sve_ldst1_tlb_fn *tlb_fn, | ||
378 | + ClearFn *clr_fn, | ||
379 | + CopyFn *cpy_fn) | ||
380 | +{ | ||
381 | + const intptr_t reg_max = simd_oprsz(desc); | ||
382 | + const intptr_t esize = 1 << esz; | ||
383 | + intptr_t reg_off, reg_last; | ||
384 | + SVEContLdSt info; | ||
385 | + void *host; | ||
386 | + int flags; | ||
387 | + | ||
388 | + /* Find the active elements. */ | ||
389 | + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { | ||
390 | + /* The entire predicate was false; no load occurs. */ | ||
391 | + clr_fn(za, 0, reg_max); | ||
392 | + return; | ||
393 | + } | ||
394 | + | ||
395 | + /* Probe the page(s). Exit with exception for any invalid page. */ | ||
396 | + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra); | ||
397 | + | ||
398 | + /* Handle watchpoints for all active elements. */ | ||
399 | + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, | ||
400 | + BP_MEM_READ, ra); | ||
401 | + | ||
402 | + /* | ||
403 | + * Handle mte checks for all active elements. | ||
404 | + * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
405 | + */ | ||
406 | + if (mtedesc) { | ||
407 | + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, | ||
408 | + mtedesc, ra); | ||
409 | + } | ||
410 | + | ||
411 | + flags = info.page[0].flags | info.page[1].flags; | ||
412 | + if (unlikely(flags != 0)) { | ||
413 | +#ifdef CONFIG_USER_ONLY | ||
414 | + g_assert_not_reached(); | ||
415 | +#else | ||
416 | + /* | ||
417 | + * At least one page includes MMIO. | ||
418 | + * Any bus operation can fail with cpu_transaction_failed, | ||
419 | + * which for ARM will raise SyncExternal. Perform the load | ||
420 | + * into scratch memory to preserve register state until the end. | ||
421 | + */ | ||
422 | + ARMVectorReg scratch = { }; | ||
423 | + | ||
424 | + reg_off = info.reg_off_first[0]; | ||
425 | + reg_last = info.reg_off_last[1]; | ||
426 | + if (reg_last < 0) { | ||
427 | + reg_last = info.reg_off_split; | ||
428 | + if (reg_last < 0) { | ||
429 | + reg_last = info.reg_off_last[0]; | ||
430 | + } | ||
431 | + } | ||
432 | + | ||
433 | + do { | ||
434 | + uint64_t pg = vg[reg_off >> 6]; | ||
435 | + do { | ||
436 | + if ((pg >> (reg_off & 63)) & 1) { | ||
437 | + tlb_fn(env, &scratch, reg_off, addr + reg_off, ra); | ||
438 | + } | ||
439 | + reg_off += esize; | ||
440 | + } while (reg_off & 63); | ||
441 | + } while (reg_off <= reg_last); | ||
442 | + | ||
443 | + cpy_fn(za, &scratch, reg_max); | ||
444 | + return; | ||
445 | +#endif | ||
446 | + } | ||
447 | + | ||
448 | + /* The entire operation is in RAM, on valid pages. */ | ||
449 | + | ||
450 | + reg_off = info.reg_off_first[0]; | ||
451 | + reg_last = info.reg_off_last[0]; | ||
452 | + host = info.page[0].host; | ||
453 | + | ||
454 | + if (!vertical) { | ||
455 | + memset(za, 0, reg_max); | ||
456 | + } else if (reg_off) { | ||
457 | + clr_fn(za, 0, reg_off); | ||
458 | + } | ||
459 | + | ||
460 | + while (reg_off <= reg_last) { | ||
461 | + uint64_t pg = vg[reg_off >> 6]; | ||
462 | + do { | ||
463 | + if ((pg >> (reg_off & 63)) & 1) { | ||
464 | + host_fn(za, reg_off, host + reg_off); | ||
465 | + } else if (vertical) { | ||
466 | + clr_fn(za, reg_off, esize); | ||
467 | + } | ||
468 | + reg_off += esize; | ||
469 | + } while (reg_off <= reg_last && (reg_off & 63)); | ||
470 | + } | ||
471 | + | ||
472 | + /* | ||
473 | + * Use the slow path to manage the cross-page misalignment. | ||
474 | + * But we know this is RAM and cannot trap. | ||
475 | + */ | ||
476 | + reg_off = info.reg_off_split; | ||
477 | + if (unlikely(reg_off >= 0)) { | ||
478 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
479 | + } | ||
480 | + | ||
481 | + reg_off = info.reg_off_first[1]; | ||
482 | + if (unlikely(reg_off >= 0)) { | ||
483 | + reg_last = info.reg_off_last[1]; | ||
484 | + host = info.page[1].host; | ||
485 | + | ||
486 | + do { | ||
487 | + uint64_t pg = vg[reg_off >> 6]; | ||
488 | + do { | ||
489 | + if ((pg >> (reg_off & 63)) & 1) { | ||
490 | + host_fn(za, reg_off, host + reg_off); | ||
491 | + } else if (vertical) { | ||
492 | + clr_fn(za, reg_off, esize); | ||
493 | + } | ||
494 | + reg_off += esize; | ||
495 | + } while (reg_off & 63); | ||
496 | + } while (reg_off <= reg_last); | ||
497 | + } | ||
498 | +} | ||
499 | + | ||
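Setting aside the paging, watchpoint, MTE and MMIO machinery, the net per-element semantics the helper implements are: active elements come from memory, inactive elements of the destination slice become zero (which is why loads need clr_fn/cpy_fn while the store path below does not). A reference model, illustrative only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void ref_ld1(uint8_t *slice, const uint8_t *mem,
                        const int *pg, int elems, int esize)
    {
        for (int i = 0; i < elems; i++) {
            if (pg[i]) {
                memcpy(slice + i * esize, mem + i * esize, esize);
            } else {
                memset(slice + i * esize, 0, esize);
            }
        }
    }

    int main(void)
    {
        uint8_t mem[8] = {1, 2, 3, 4, 5, 6, 7, 8}, slice[8];
        int pg[4] = {1, 0, 0, 1};

        ref_ld1(slice, mem, pg, 4, 2);
        for (int i = 0; i < 8; i++) {
            printf("%d ", slice[i]);  /* 1 2 0 0 0 0 7 8 */
        }
        printf("\n");
        return 0;
    }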
500 | +static inline QEMU_ALWAYS_INLINE | ||
501 | +void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, | ||
502 | + target_ulong addr, uint32_t desc, uintptr_t ra, | ||
503 | + const int esz, bool vertical, | ||
504 | + sve_ldst1_host_fn *host_fn, | ||
505 | + sve_ldst1_tlb_fn *tlb_fn, | ||
506 | + ClearFn *clr_fn, | ||
507 | + CopyFn *cpy_fn) | ||
508 | +{ | ||
509 | + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
510 | + int bit55 = extract64(addr, 55, 1); | ||
511 | + | ||
512 | + /* Remove mtedesc from the normal sve descriptor. */ | ||
513 | + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
514 | + | ||
515 | + /* Perform gross MTE suppression early. */ | ||
516 | + if (!tbi_check(desc, bit55) || | ||
517 | + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { | ||
518 | + mtedesc = 0; | ||
519 | + } | ||
520 | + | ||
521 | + sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical, | ||
522 | + host_fn, tlb_fn, clr_fn, cpy_fn); | ||
523 | +} | ||
524 | + | ||
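The "gross suppression" step keys off two address-derived values: bit 55 selects the TBI regime, and allocation_tag_from_addr() pulls the logical tag from bits [59:56] of the tagged address. A rough sketch of those extractions (the address is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x0a00123456789abcull | (1ull << 55); /* hypothetical */
        int bit55 = (addr >> 55) & 1;
        int tag = (addr >> 56) & 0xf;   /* allocation_tag_from_addr() */

        printf("bit55=%d tag=%d\n", bit55, tag);
        return 0;
    }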
525 | +#define DO_LD(L, END, ESZ) \ | ||
526 | +void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ | ||
527 | + target_ulong addr, uint32_t desc) \ | ||
528 | +{ \ | ||
529 | + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ | ||
530 | + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ | ||
531 | + clear_horizontal, copy_horizontal); \ | ||
532 | +} \ | ||
533 | +void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ | ||
534 | + target_ulong addr, uint32_t desc) \ | ||
535 | +{ \ | ||
536 | + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ | ||
537 | + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ | ||
538 | + clear_vertical_##L, copy_vertical_##L); \ | ||
539 | +} \ | ||
540 | +void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ | ||
541 | + target_ulong addr, uint32_t desc) \ | ||
542 | +{ \ | ||
543 | + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ | ||
544 | + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ | ||
545 | + clear_horizontal, copy_horizontal); \ | ||
546 | +} \ | ||
547 | +void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ | ||
548 | + target_ulong addr, uint32_t desc) \ | ||
549 | +{ \ | ||
550 | + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ | ||
551 | + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ | ||
552 | + clear_vertical_##L, copy_vertical_##L); \ | ||
553 | +} | ||
554 | + | ||
555 | +DO_LD(b, , MO_8) | ||
556 | +DO_LD(h, _be, MO_16) | ||
557 | +DO_LD(h, _le, MO_16) | ||
558 | +DO_LD(s, _be, MO_32) | ||
559 | +DO_LD(s, _le, MO_32) | ||
560 | +DO_LD(d, _be, MO_64) | ||
561 | +DO_LD(d, _le, MO_64) | ||
562 | +DO_LD(q, _be, MO_128) | ||
563 | +DO_LD(q, _le, MO_128) | ||
564 | + | ||
565 | +#undef DO_LD | ||
566 | + | ||
567 | +/* | ||
568 | + * Common helper for all contiguous predicated stores. | ||
569 | + */ | ||
570 | + | ||
571 | +static inline QEMU_ALWAYS_INLINE | ||
572 | +void sme_st1(CPUARMState *env, void *za, uint64_t *vg, | ||
573 | + const target_ulong addr, uint32_t desc, const uintptr_t ra, | ||
574 | + const int esz, uint32_t mtedesc, bool vertical, | ||
575 | + sve_ldst1_host_fn *host_fn, | ||
576 | + sve_ldst1_tlb_fn *tlb_fn) | ||
577 | +{ | ||
578 | + const intptr_t reg_max = simd_oprsz(desc); | ||
579 | + const intptr_t esize = 1 << esz; | ||
580 | + intptr_t reg_off, reg_last; | ||
581 | + SVEContLdSt info; | ||
582 | + void *host; | ||
583 | + int flags; | ||
584 | + | ||
585 | + /* Find the active elements. */ | ||
586 | + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { | ||
587 | + /* The entire predicate was false; no store occurs. */ | ||
588 | + return; | ||
589 | + } | ||
590 | + | ||
591 | + /* Probe the page(s). Exit with exception for any invalid page. */ | ||
592 | + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra); | ||
593 | + | ||
594 | + /* Handle watchpoints for all active elements. */ | ||
595 | + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, | ||
596 | + BP_MEM_WRITE, ra); | ||
597 | + | ||
598 | + /* | ||
599 | + * Handle mte checks for all active elements. | ||
600 | + * Since TBI must be set for MTE, !mtedesc => !mte_active. | ||
601 | + */ | ||
602 | + if (mtedesc) { | ||
603 | + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, | ||
604 | + mtedesc, ra); | ||
605 | + } | ||
606 | + | ||
607 | + flags = info.page[0].flags | info.page[1].flags; | ||
608 | + if (unlikely(flags != 0)) { | ||
609 | +#ifdef CONFIG_USER_ONLY | ||
610 | + g_assert_not_reached(); | ||
611 | +#else | ||
612 | + /* | ||
613 | + * At least one page includes MMIO. | ||
614 | + * Any bus operation can fail with cpu_transaction_failed, | ||
615 | + * which for ARM will raise SyncExternal. We cannot avoid | ||
616 | + * this fault and will leave with the store incomplete. | ||
617 | + */ | ||
618 | + reg_off = info.reg_off_first[0]; | ||
619 | + reg_last = info.reg_off_last[1]; | ||
620 | + if (reg_last < 0) { | ||
621 | + reg_last = info.reg_off_split; | ||
622 | + if (reg_last < 0) { | ||
623 | + reg_last = info.reg_off_last[0]; | ||
624 | + } | ||
625 | + } | ||
626 | + | ||
627 | + do { | ||
628 | + uint64_t pg = vg[reg_off >> 6]; | ||
629 | + do { | ||
630 | + if ((pg >> (reg_off & 63)) & 1) { | ||
631 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
632 | + } | ||
633 | + reg_off += esize; | ||
634 | + } while (reg_off & 63); | ||
635 | + } while (reg_off <= reg_last); | ||
636 | + return; | ||
637 | +#endif | ||
638 | + } | ||
639 | + | ||
640 | + reg_off = info.reg_off_first[0]; | ||
641 | + reg_last = info.reg_off_last[0]; | ||
642 | + host = info.page[0].host; | ||
643 | + | ||
644 | + while (reg_off <= reg_last) { | ||
645 | + uint64_t pg = vg[reg_off >> 6]; | ||
646 | + do { | ||
647 | + if ((pg >> (reg_off & 63)) & 1) { | ||
648 | + host_fn(za, reg_off, host + reg_off); | ||
649 | + } | ||
650 | + reg_off += 1 << esz; | ||
651 | + } while (reg_off <= reg_last && (reg_off & 63)); | ||
652 | + } | ||
653 | + | ||
654 | + /* | ||
655 | + * Use the slow path to manage the cross-page misalignment. | ||
656 | + * But we know this is RAM and cannot trap. | ||
657 | + */ | ||
658 | + reg_off = info.reg_off_split; | ||
659 | + if (unlikely(reg_off >= 0)) { | ||
660 | + tlb_fn(env, za, reg_off, addr + reg_off, ra); | ||
661 | + } | ||
662 | + | ||
663 | + reg_off = info.reg_off_first[1]; | ||
664 | + if (unlikely(reg_off >= 0)) { | ||
665 | + reg_last = info.reg_off_last[1]; | ||
666 | + host = info.page[1].host; | ||
667 | + | ||
668 | + do { | ||
669 | + uint64_t pg = vg[reg_off >> 6]; | ||
670 | + do { | ||
671 | + if ((pg >> (reg_off & 63)) & 1) { | ||
672 | + host_fn(za, reg_off, host + reg_off); | ||
673 | + } | ||
674 | + reg_off += 1 << esz; | ||
675 | + } while (reg_off & 63); | ||
676 | + } while (reg_off <= reg_last); | ||
677 | + } | ||
678 | +} | ||
679 | + | ||
680 | +static inline QEMU_ALWAYS_INLINE | ||
681 | +void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, | ||
682 | + uint32_t desc, uintptr_t ra, int esz, bool vertical, | ||
683 | + sve_ldst1_host_fn *host_fn, | ||
684 | + sve_ldst1_tlb_fn *tlb_fn) | ||
685 | +{ | ||
686 | + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
687 | + int bit55 = extract64(addr, 55, 1); | ||
688 | + | ||
689 | + /* Remove mtedesc from the normal sve descriptor. */ | ||
690 | + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); | ||
691 | + | ||
692 | + /* Perform gross MTE suppression early. */ | ||
693 | + if (!tbi_check(desc, bit55) || | ||
694 | + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { | ||
695 | + mtedesc = 0; | ||
696 | + } | ||
697 | + | ||
698 | + sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc, | ||
699 | + vertical, host_fn, tlb_fn); | ||
700 | +} | ||
701 | + | ||
702 | +#define DO_ST(L, END, ESZ) \ | ||
703 | +void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ | ||
704 | + target_ulong addr, uint32_t desc) \ | ||
705 | +{ \ | ||
706 | + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ | ||
707 | + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ | ||
708 | +} \ | ||
709 | +void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ | ||
710 | + target_ulong addr, uint32_t desc) \ | ||
711 | +{ \ | ||
712 | + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ | ||
713 | + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ | ||
714 | +} \ | ||
715 | +void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ | ||
716 | + target_ulong addr, uint32_t desc) \ | ||
717 | +{ \ | ||
718 | + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ | ||
719 | + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ | ||
720 | +} \ | ||
721 | +void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ | ||
722 | + target_ulong addr, uint32_t desc) \ | ||
723 | +{ \ | ||
724 | + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ | ||
725 | + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ | ||
726 | +} | ||
727 | + | ||
728 | +DO_ST(b, , MO_8) | ||
729 | +DO_ST(h, _be, MO_16) | ||
730 | +DO_ST(h, _le, MO_16) | ||
731 | +DO_ST(s, _be, MO_32) | ||
732 | +DO_ST(s, _le, MO_32) | ||
733 | +DO_ST(d, _be, MO_64) | ||
734 | +DO_ST(d, _le, MO_64) | ||
735 | +DO_ST(q, _be, MO_128) | ||
736 | +DO_ST(q, _le, MO_128) | ||
737 | + | ||
738 | +#undef DO_ST | ||
739 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
740 | index XXXXXXX..XXXXXXX 100644 | ||
741 | --- a/target/arm/translate-sme.c | ||
742 | +++ b/target/arm/translate-sme.c | ||
743 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) | ||
744 | |||
32 | return true; | 745 | return true; |
33 | } | 746 | } |
34 | + | 747 | + |
35 | +static bool trans_AESMC(DisasContext *s, arg_AESMC *a) | 748 | +static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) |
36 | +{ | 749 | +{ |
37 | + if (!dc_isar_feature(aa64_sve2_aes, s)) { | 750 | + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); |
751 | + | ||
752 | + /* | ||
753 | + * Indexed by [esz][be][v][mte][st], which is (except for load/store) | ||
754 | + * also the order in which the elements appear in the function names, | ||
755 | + * and so how we must concatenate the pieces. | ||
756 | + */ | ||
757 | + | ||
758 | +#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F } | ||
759 | +#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) } | ||
760 | +#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) } | ||
761 | +#define FN_END(L, B) { FN_HV(L), FN_HV(B) } | ||
762 | + | ||
763 | + static GenLdSt1 * const fns[5][2][2][2][2] = { | ||
764 | + FN_END(b, b), | ||
765 | + FN_END(h_le, h_be), | ||
766 | + FN_END(s_le, s_be), | ||
767 | + FN_END(d_le, d_be), | ||
768 | + FN_END(q_le, q_be), | ||
769 | + }; | ||
770 | + | ||
771 | +#undef FN_LS | ||
772 | +#undef FN_MTE | ||
773 | +#undef FN_HV | ||
774 | +#undef FN_END | ||
775 | + | ||
776 | + TCGv_ptr t_za, t_pg; | ||
777 | + TCGv_i64 addr; | ||
778 | + int svl, desc = 0; | ||
779 | + bool be = s->be_data == MO_BE; | ||
780 | + bool mte = s->mte_active[0]; | ||
781 | + | ||
782 | + if (!dc_isar_feature(aa64_sme, s)) { | ||
38 | + return false; | 783 | + return false; |
39 | + } | 784 | + } |
40 | + if (sve_access_check(s)) { | 785 | + if (!sme_smza_enabled_check(s)) { |
41 | + gen_gvec_ool_zz(s, gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt); | 786 | + return true; |
42 | + } | 787 | + } |
788 | + | ||
789 | + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); | ||
790 | + t_pg = pred_full_reg_ptr(s, a->pg); | ||
791 | + addr = tcg_temp_new_i64(); | ||
792 | + | ||
793 | + tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz); | ||
794 | + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); | ||
795 | + | ||
796 | + if (mte) { | ||
797 | + desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); | ||
798 | + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
799 | + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
800 | + desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st); | ||
801 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1); | ||
802 | + desc <<= SVE_MTEDESC_SHIFT; | ||
803 | + } else { | ||
804 | + addr = clean_data_tbi(s, addr); | ||
805 | + } | ||
806 | + svl = streaming_vec_reg_size(s); | ||
807 | + desc = simd_desc(svl, svl, desc); | ||
808 | + | ||
809 | + fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, | ||
810 | + tcg_constant_i32(desc)); | ||
811 | + | ||
812 | + tcg_temp_free_ptr(t_za); | ||
813 | + tcg_temp_free_ptr(t_pg); | ||
814 | + tcg_temp_free_i64(addr); | ||
43 | + return true; | 815 | + return true; |
44 | +} | 816 | +} |
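Following the [esz][be][v][mte][st] comment above, an index tuple maps directly onto the helper-name pieces declared in helper-sme.h. A throwaway sketch that rebuilds a name from its indices (output: gen_helper_sme_ld1h_be_h_mte):

    #include <stdio.h>

    int main(void)
    {
        /* Index tuple [esz][be][v][mte][st], mirroring the fns[] table:
           here esz=MO_16, big-endian, horizontal, MTE active, load. */
        const char *esz[] = {"b", "h", "s", "d", "q"};
        int e = 1, be = 1, v = 0, mte = 1, st = 0;

        printf("gen_helper_sme_%s1%s%s_%s%s\n",
               st ? "st" : "ld",
               esz[e],
               e == 0 ? "" : (be ? "_be" : "_le"),
               v ? "v" : "h",
               mte ? "_mte" : "");
        return 0;
    }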
45 | -- | 817 | -- |
46 | 2.20.1 | 818 | 2.25.1 |
47 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Used by FMLA and DOT, but will shortly be used more. | 3 | Add a TCGv_ptr base argument, which will be cpu_env for SVE. |
4 | Split FMLA from FMLS to avoid an extra sub field; | 4 | We will reuse this for SME save and restore array insns. |
5 | similarly for SDOT from UDOT. | ||
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-53-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-22-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/sve.decode | 29 +++++++++++++++++++---------- | 11 | target/arm/translate-a64.h | 3 +++ |
13 | target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++---------- | 12 | target/arm/translate-sve.c | 48 ++++++++++++++++++++++++++++---------- |
14 | 2 files changed, 47 insertions(+), 20 deletions(-) | 13 | 2 files changed, 39 insertions(+), 12 deletions(-) |
15 | 14 | ||
16 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 15 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h |
17 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/sve.decode | 17 | --- a/target/arm/translate-a64.h |
19 | +++ b/target/arm/sve.decode | 18 | +++ b/target/arm/translate-a64.h |
20 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, |
21 | &rprr_s rd pg rn rm s | 20 | uint32_t rm_ofs, int64_t shift, |
22 | &rprr_esz rd pg rn rm esz | 21 | uint32_t opr_sz, uint32_t max_sz); |
23 | &rrrr_esz rd ra rn rm esz | 22 | |
24 | +&rrxr_esz rd rn rm ra index esz | 23 | +void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); |
25 | &rprrr_esz rd pg rn rm ra esz | 24 | +void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); |
26 | &rpri_esz rd pg rn imm esz | ||
27 | &ptrue rd esz pat s | ||
28 | @@ -XXX,XX +XXX,XX @@ | ||
29 | @rrx_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 &rrx_esz | ||
30 | @rrx_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 &rrx_esz | ||
31 | |||
32 | +# Three registers and a scalar by N-bit index | ||
33 | +@rrxr_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \ | ||
34 | + &rrxr_esz ra=%reg_movprfx index=%index3_22_19 | ||
35 | +@rrxr_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 \ | ||
36 | + &rrxr_esz ra=%reg_movprfx | ||
37 | +@rrxr_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 \ | ||
38 | + &rrxr_esz ra=%reg_movprfx | ||
39 | + | 25 | + |
40 | ########################################################################### | 26 | #endif /* TARGET_ARM_TRANSLATE_A64_H */ |
41 | # Instruction patterns. Grouped according to the SVE encodingindex.xhtml. | ||
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \ | ||
44 | ra=%reg_movprfx | ||
45 | |||
46 | # SVE integer dot product (indexed) | ||
47 | -DOT_zzxw 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \ | ||
48 | - sz=0 ra=%reg_movprfx | ||
49 | -DOT_zzxw 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \ | ||
50 | - sz=1 ra=%reg_movprfx | ||
51 | +SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 | ||
52 | +SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 | ||
53 | +UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 | ||
54 | +UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 | ||
55 | |||
56 | # SVE floating-point complex add (predicated) | ||
57 | FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \ | ||
58 | @@ -XXX,XX +XXX,XX @@ FCMLA_zzxz 01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \ | ||
59 | ### SVE FP Multiply-Add Indexed Group | ||
60 | |||
61 | # SVE floating-point multiply-add (indexed) | ||
62 | -FMLA_zzxz 01100100 0.1 .. rm:3 00000 sub:1 rn:5 rd:5 \ | ||
63 | - ra=%reg_movprfx index=%index3_22_19 esz=1 | ||
64 | -FMLA_zzxz 01100100 101 index:2 rm:3 00000 sub:1 rn:5 rd:5 \ | ||
65 | - ra=%reg_movprfx esz=2 | ||
66 | -FMLA_zzxz 01100100 111 index:1 rm:4 00000 sub:1 rn:5 rd:5 \ | ||
67 | - ra=%reg_movprfx esz=3 | ||
68 | +FMLA_zzxz 01100100 0. 1 ..... 000000 ..... ..... @rrxr_3 esz=1 | ||
69 | +FMLA_zzxz 01100100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 | ||
70 | +FMLA_zzxz 01100100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 | ||
71 | +FMLS_zzxz 01100100 0. 1 ..... 000001 ..... ..... @rrxr_3 esz=1 | ||
72 | +FMLS_zzxz 01100100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 | ||
73 | +FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 | ||
74 | |||
75 | ### SVE FP Multiply Indexed Group | ||
76 | |||
77 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 27 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
78 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
79 | --- a/target/arm/translate-sve.c | 29 | --- a/target/arm/translate-sve.c |
80 | +++ b/target/arm/translate-sve.c | 30 | +++ b/target/arm/translate-sve.c |
81 | @@ -XXX,XX +XXX,XX @@ static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a) | 31 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, |
82 | return true; | 32 | * The load should begin at the address Rn + IMM. |
33 | */ | ||
34 | |||
35 | -static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
36 | +void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, | ||
37 | + int len, int rn, int imm) | ||
38 | { | ||
39 | int len_align = QEMU_ALIGN_DOWN(len, 8); | ||
40 | int len_remain = len % 8; | ||
41 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
42 | t0 = tcg_temp_new_i64(); | ||
43 | for (i = 0; i < len_align; i += 8) { | ||
44 | tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); | ||
45 | - tcg_gen_st_i64(t0, cpu_env, vofs + i); | ||
46 | + tcg_gen_st_i64(t0, base, vofs + i); | ||
47 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
48 | } | ||
49 | tcg_temp_free_i64(t0); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
51 | clean_addr = new_tmp_a64_local(s); | ||
52 | tcg_gen_mov_i64(clean_addr, t0); | ||
53 | |||
54 | + if (base != cpu_env) { | ||
55 | + TCGv_ptr b = tcg_temp_local_new_ptr(); | ||
56 | + tcg_gen_mov_ptr(b, base); | ||
57 | + base = b; | ||
58 | + } | ||
59 | + | ||
60 | gen_set_label(loop); | ||
61 | |||
62 | t0 = tcg_temp_new_i64(); | ||
63 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
64 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
65 | |||
66 | tp = tcg_temp_new_ptr(); | ||
67 | - tcg_gen_add_ptr(tp, cpu_env, i); | ||
68 | + tcg_gen_add_ptr(tp, base, i); | ||
69 | tcg_gen_addi_ptr(i, i, 8); | ||
70 | tcg_gen_st_i64(t0, tp, vofs); | ||
71 | tcg_temp_free_ptr(tp); | ||
72 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
73 | |||
74 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); | ||
75 | tcg_temp_free_ptr(i); | ||
76 | + | ||
77 | + if (base != cpu_env) { | ||
78 | + tcg_temp_free_ptr(base); | ||
79 | + assert(len_remain == 0); | ||
80 | + } | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | @@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
85 | default: | ||
86 | g_assert_not_reached(); | ||
87 | } | ||
88 | - tcg_gen_st_i64(t0, cpu_env, vofs + len_align); | ||
89 | + tcg_gen_st_i64(t0, base, vofs + len_align); | ||
90 | tcg_temp_free_i64(t0); | ||
91 | } | ||
83 | } | 92 | } |
84 | 93 | ||
85 | -static bool trans_DOT_zzxw(DisasContext *s, arg_DOT_zzxw *a) | 94 | /* Similarly for stores. */ |
86 | +static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a, | 95 | -static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) |
87 | + gen_helper_gvec_4 *fn) | 96 | +void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, |
97 | + int len, int rn, int imm) | ||
88 | { | 98 | { |
89 | - static gen_helper_gvec_4 * const fns[2][2] = { | 99 | int len_align = QEMU_ALIGN_DOWN(len, 8); |
90 | - { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h }, | 100 | int len_remain = len % 8; |
91 | - { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h } | 101 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) |
92 | - }; | 102 | |
93 | - | 103 | t0 = tcg_temp_new_i64(); |
94 | + if (fn == NULL) { | 104 | for (i = 0; i < len_align; i += 8) { |
95 | + return false; | 105 | - tcg_gen_ld_i64(t0, cpu_env, vofs + i); |
96 | + } | 106 | + tcg_gen_ld_i64(t0, base, vofs + i); |
107 | tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); | ||
108 | tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
109 | } | ||
110 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
111 | clean_addr = new_tmp_a64_local(s); | ||
112 | tcg_gen_mov_i64(clean_addr, t0); | ||
113 | |||
114 | + if (base != cpu_env) { | ||
115 | + TCGv_ptr b = tcg_temp_local_new_ptr(); | ||
116 | + tcg_gen_mov_ptr(b, base); | ||
117 | + base = b; | ||
118 | + } | ||
119 | + | ||
120 | gen_set_label(loop); | ||
121 | |||
122 | t0 = tcg_temp_new_i64(); | ||
123 | tp = tcg_temp_new_ptr(); | ||
124 | - tcg_gen_add_ptr(tp, cpu_env, i); | ||
125 | + tcg_gen_add_ptr(tp, base, i); | ||
126 | tcg_gen_ld_i64(t0, tp, vofs); | ||
127 | tcg_gen_addi_ptr(i, i, 8); | ||
128 | tcg_temp_free_ptr(tp); | ||
129 | @@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) | ||
130 | |||
131 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); | ||
132 | tcg_temp_free_ptr(i); | ||
133 | + | ||
134 | + if (base != cpu_env) { | ||
135 | + tcg_temp_free_ptr(base); | ||
136 | + assert(len_remain == 0); | ||
137 | + } | ||
138 | } | ||
139 | |||
140 | /* Predicate register stores can be any multiple of 2. */ | ||
141 | if (len_remain) { | ||
142 | t0 = tcg_temp_new_i64(); | ||
143 | - tcg_gen_ld_i64(t0, cpu_env, vofs + len_align); | ||
144 | + tcg_gen_ld_i64(t0, base, vofs + len_align); | ||
145 | |||
146 | switch (len_remain) { | ||
147 | case 2: | ||
148 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a) | ||
97 | if (sve_access_check(s)) { | 149 | if (sve_access_check(s)) { |
98 | - gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, | 150 | int size = vec_full_reg_size(s); |
99 | - a->ra, a->index); | 151 | int off = vec_full_reg_offset(s, a->rd); |
100 | + gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); | 152 | - do_ldr(s, off, size, a->rn, a->imm * size); |
153 | + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); | ||
101 | } | 154 | } |
102 | return true; | 155 | return true; |
103 | } | 156 | } |
104 | 157 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a) | |
105 | +#define DO_RRXR(NAME, FUNC) \ | 158 | if (sve_access_check(s)) { |
106 | + static bool NAME(DisasContext *s, arg_rrxr_esz *a) \ | 159 | int size = pred_full_reg_size(s); |
107 | + { return do_zzxz_ool(s, a, FUNC); } | 160 | int off = pred_full_reg_offset(s, a->rd); |
108 | + | 161 | - do_ldr(s, off, size, a->rn, a->imm * size); |
109 | +DO_RRXR(trans_SDOT_zzxw_s, gen_helper_gvec_sdot_idx_b) | 162 | + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); |
110 | +DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h) | ||
111 | +DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b) | ||
112 | +DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h) | ||
113 | + | ||
114 | +#undef DO_RRXR | ||
115 | |||
116 | /* | ||
117 | *** SVE Floating Point Multiply-Add Indexed Group | ||
118 | */ | ||
119 | |||
120 | -static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) | ||
121 | +static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) | ||
122 | { | ||
123 | static gen_helper_gvec_4_ptr * const fns[3] = { | ||
124 | gen_helper_gvec_fmla_idx_h, | ||
125 | @@ -XXX,XX +XXX,XX @@ static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) | ||
126 | vec_full_reg_offset(s, a->rn), | ||
127 | vec_full_reg_offset(s, a->rm), | ||
128 | vec_full_reg_offset(s, a->ra), | ||
129 | - status, vsz, vsz, (a->index << 1) | a->sub, | ||
130 | + status, vsz, vsz, (a->index << 1) | sub, | ||
131 | fns[a->esz - 1]); | ||
132 | tcg_temp_free_ptr(status); | ||
133 | } | 163 | } |
134 | return true; | 164 | return true; |
135 | } | 165 | } |
136 | 166 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a) | |
137 | +static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) | 167 | if (sve_access_check(s)) { |
138 | +{ | 168 | int size = vec_full_reg_size(s); |
139 | + return do_FMLA_zzxz(s, a, false); | 169 | int off = vec_full_reg_offset(s, a->rd); |
140 | +} | 170 | - do_str(s, off, size, a->rn, a->imm * size); |
141 | + | 171 | + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); |
142 | +static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a) | 172 | } |
143 | +{ | 173 | return true; |
144 | + return do_FMLA_zzxz(s, a, true); | 174 | } |
145 | +} | 175 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) |
146 | + | 176 | if (sve_access_check(s)) { |
147 | /* | 177 | int size = pred_full_reg_size(s); |
148 | *** SVE Floating Point Multiply Indexed Group | 178 | int off = pred_full_reg_offset(s, a->rd); |
149 | */ | 179 | - do_str(s, off, size, a->rn, a->imm * size); |
180 | + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); | ||
181 | } | ||
182 | return true; | ||
183 | } | ||
150 | -- | 184 | -- |
151 | 2.20.1 | 185 | 2.25.1 |
152 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | We can reuse the SVE functions for LDR and STR, passing in the | ||
4 | base of the ZA vector and a zero offset. | ||
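Roughly, the reuse looks like this (identifiers as in the diff below; the access checks and temporary management are elided):

```c
/* SVE LDR (trans_LDR_zri): the vector lives in env, at the register's fixed offset. */
gen_sve_ldr(s, cpu_env, vec_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);

/*
 * SME LDR (do_ldst_r): the same generator, but "base" points at the
 * ZA tile row/column and the vector offset within it is simply 0.
 */
base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
gen_sve_ldr(s, base, 0, svl, a->rn, imm * svl);
```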
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-70-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-23-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/cpu.h | 5 +++++ | 11 | target/arm/sme.decode | 7 +++++++ |
9 | target/arm/sve.decode | 7 +++++++ | 12 | target/arm/translate-sme.c | 24 ++++++++++++++++++++++++ |
10 | target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++ | 13 | 2 files changed, 31 insertions(+) |
11 | 3 files changed, 50 insertions(+) | ||
12 | 14 | ||
13 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
14 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/sme.decode |
16 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/sme.decode |
17 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) | 19 | @@ -XXX,XX +XXX,XX @@ LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
18 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; | 20 | &ldst rs=%mova_rs |
19 | } | 21 | LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
20 | 22 | &ldst esz=4 rs=%mova_rs | |
21 | +static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) | ||
22 | +{ | ||
23 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; | ||
24 | +} | ||
25 | + | 23 | + |
26 | static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) | 24 | +&ldstr rv rn imm |
27 | { | 25 | +@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \ |
28 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; | 26 | + &ldstr rv=%mova_rs |
29 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 27 | + |
28 | +LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr | ||
29 | +STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr | ||
30 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/target/arm/sve.decode | 32 | --- a/target/arm/translate-sme.c |
32 | +++ b/target/arm/sve.decode | 33 | +++ b/target/arm/translate-sme.c |
33 | @@ -XXX,XX +XXX,XX @@ | 34 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) |
34 | @pd_pn_pm ........ esz:2 .. rm:4 ....... rn:4 . rd:4 &rrr_esz | 35 | tcg_temp_free_i64(addr); |
35 | @rdn_rm ........ esz:2 ...... ...... rm:5 rd:5 \ | ||
36 | &rrr_esz rn=%reg_movprfx | ||
37 | +@rdn_rm_e0 ........ .. ...... ...... rm:5 rd:5 \ | ||
38 | + &rrr_esz rn=%reg_movprfx esz=0 | ||
39 | @rdn_sh_i8u ........ esz:2 ...... ...... ..... rd:5 \ | ||
40 | &rri_esz rn=%reg_movprfx imm=%sh8_i8u | ||
41 | @rdn_i8u ........ esz:2 ...... ... imm:8 rd:5 \ | ||
42 | @@ -XXX,XX +XXX,XX @@ STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \ | ||
43 | # SVE2 crypto unary operations | ||
44 | # AESMC and AESIMC | ||
45 | AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5 | ||
46 | + | ||
47 | +# SVE2 crypto destructive binary operations | ||
48 | +AESE 01000101 00 10001 0 11100 0 ..... ..... @rdn_rm_e0 | ||
49 | +AESD 01000101 00 10001 0 11100 1 ..... ..... @rdn_rm_e0 | ||
50 | +SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0 | ||
51 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/target/arm/translate-sve.c | ||
54 | +++ b/target/arm/translate-sve.c | ||
55 | @@ -XXX,XX +XXX,XX @@ static bool trans_AESMC(DisasContext *s, arg_AESMC *a) | ||
56 | } | ||
57 | return true; | 36 | return true; |
58 | } | 37 | } |
59 | + | 38 | + |
60 | +static bool do_aese(DisasContext *s, arg_rrr_esz *a, bool decrypt) | 39 | +typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int); |
40 | + | ||
41 | +static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) | ||
61 | +{ | 42 | +{ |
62 | + if (!dc_isar_feature(aa64_sve2_aes, s)) { | 43 | + int svl = streaming_vec_reg_size(s); |
63 | + return false; | 44 | + int imm = a->imm; |
45 | + TCGv_ptr base; | ||
46 | + | ||
47 | + if (!sme_za_enabled_check(s)) { | ||
48 | + return true; | ||
64 | + } | 49 | + } |
65 | + if (sve_access_check(s)) { | 50 | + |
66 | + gen_gvec_ool_zzz(s, gen_helper_crypto_aese, | 51 | + /* ZA[n] equates to ZA0H.B[n]. */ |
67 | + a->rd, a->rn, a->rm, decrypt); | 52 | + base = get_tile_rowcol(s, MO_8, a->rv, imm, false); |
68 | + } | 53 | + |
54 | + fn(s, base, 0, svl, a->rn, imm * svl); | ||
55 | + | ||
56 | + tcg_temp_free_ptr(base); | ||
69 | + return true; | 57 | + return true; |
70 | +} | 58 | +} |
71 | + | 59 | + |
72 | +static bool trans_AESE(DisasContext *s, arg_rrr_esz *a) | 60 | +TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) |
73 | +{ | 61 | +TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) |
74 | + return do_aese(s, a, false); | ||
75 | +} | ||
76 | + | ||
77 | +static bool trans_AESD(DisasContext *s, arg_rrr_esz *a) | ||
78 | +{ | ||
79 | + return do_aese(s, a, true); | ||
80 | +} | ||
81 | + | ||
82 | +static bool do_sm4(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn) | ||
83 | +{ | ||
84 | + if (!dc_isar_feature(aa64_sve2_sm4, s)) { | ||
85 | + return false; | ||
86 | + } | ||
87 | + if (sve_access_check(s)) { | ||
88 | + gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0); | ||
89 | + } | ||
90 | + return true; | ||
91 | +} | ||
92 | + | ||
93 | +static bool trans_SM4E(DisasContext *s, arg_rrr_esz *a) | ||
94 | +{ | ||
95 | + return do_sm4(s, a, gen_helper_crypto_sm4e); | ||
96 | +} | ||
97 | -- | 62 | -- |
98 | 2.20.1 | 63 | 2.25.1 |
99 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-33-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-24-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 6 ++ | 8 | target/arm/helper-sme.h | 5 +++ |
9 | target/arm/sve.decode | 12 +++ | 9 | target/arm/sme.decode | 11 +++++ |
10 | target/arm/sve_helper.c | 50 +++++++++ | 10 | target/arm/sme_helper.c | 90 ++++++++++++++++++++++++++++++++++++++ |
11 | target/arm/translate-sve.c | 213 +++++++++++++++++++++++++++++++++++++ | 11 | target/arm/translate-sme.c | 31 +++++++++++++ |
12 | 4 files changed, 281 insertions(+) | 12 | 4 files changed, 137 insertions(+) |
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/target/arm/helper-sme.h |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/target/arm/helper-sme.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG, | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i |
19 | void, ptr, ptr, ptr, ptr, ptr, i32) | 19 | DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
20 | DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG, | 20 | DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
21 | void, ptr, ptr, ptr, ptr, ptr, i32) | 21 | DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) |
22 | + | 22 | + |
23 | +DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 23 | +DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
24 | +DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 24 | +DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
25 | +DEF_HELPER_FLAGS_5(sve2_bsl1n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 25 | +DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
26 | +DEF_HELPER_FLAGS_5(sve2_bsl2n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 26 | +DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
27 | +DEF_HELPER_FLAGS_5(sve2_nbsl, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
28 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
29 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/sve.decode | 29 | --- a/target/arm/sme.decode |
31 | +++ b/target/arm/sve.decode | 30 | +++ b/target/arm/sme.decode |
32 | @@ -XXX,XX +XXX,XX @@ | 31 | @@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ |
33 | @rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \ | 32 | |
34 | &rrrr_esz ra=%reg_movprfx | 33 | LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr |
35 | 34 | STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr | |
36 | +# Four operand with unused vector element size | ||
37 | +@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \ | ||
38 | + &rrrr_esz esz=0 rn=%reg_movprfx | ||
39 | + | 35 | + |
40 | # Three operand with "memory" size, aka immediate left shift | 36 | +### SME Add Vector to Array |
41 | @rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri | ||
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ ORR_zzz 00000100 01 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
44 | EOR_zzz 00000100 10 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
45 | BIC_zzz 00000100 11 1 ..... 001 100 ..... ..... @rd_rn_rm_e0 | ||
46 | |||
47 | +# SVE2 bitwise ternary operations | ||
48 | +EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0 | ||
49 | +BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 | ||
50 | +BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0 | ||
51 | +BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 | ||
52 | +BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 | ||
53 | +NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 | ||
54 | + | 37 | + |
55 | ### SVE Index Generation Group | 38 | +&adda zad zn pm pn |
56 | 39 | +@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda | |
57 | # SVE index generation (immediate start, immediate increment) | 40 | +@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda |
58 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 41 | + |
42 | +ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 | ||
43 | +ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 | ||
44 | +ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 | ||
45 | +ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 | ||
46 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | 47 | index XXXXXXX..XXXXXXX 100644 |
60 | --- a/target/arm/sve_helper.c | 48 | --- a/target/arm/sme_helper.c |
61 | +++ b/target/arm/sve_helper.c | 49 | +++ b/target/arm/sme_helper.c |
62 | @@ -XXX,XX +XXX,XX @@ DO_ST1_ZPZ_D(dd_be, zd, MO_64) | 50 | @@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128) |
63 | 51 | DO_ST(q, _le, MO_128) | |
64 | #undef DO_ST1_ZPZ_S | 52 | |
65 | #undef DO_ST1_ZPZ_D | 53 | #undef DO_ST |
66 | + | 54 | + |
67 | +void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | 55 | +void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn, |
56 | + void *vpm, uint32_t desc) | ||
68 | +{ | 57 | +{ |
69 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 58 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; |
70 | + uint64_t *d = vd, *n = vn, *m = vm, *k = vk; | 59 | + uint64_t *pn = vpn, *pm = vpm; |
60 | + uint32_t *zda = vzda, *zn = vzn; | ||
71 | + | 61 | + |
72 | + for (i = 0; i < opr_sz; ++i) { | 62 | + for (row = 0; row < oprsz; ) { |
73 | + d[i] = n[i] ^ m[i] ^ k[i]; | 63 | + uint64_t pa = pn[row >> 4]; |
64 | + do { | ||
65 | + if (pa & 1) { | ||
66 | + for (col = 0; col < oprsz; ) { | ||
67 | + uint64_t pb = pm[col >> 4]; | ||
68 | + do { | ||
69 | + if (pb & 1) { | ||
70 | + zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)]; | ||
71 | + } | ||
72 | + pb >>= 4; | ||
73 | + } while (++col & 15); | ||
74 | + } | ||
75 | + } | ||
76 | + pa >>= 4; | ||
77 | + } while (++row & 15); | ||
74 | + } | 78 | + } |
75 | +} | 79 | +} |
76 | + | 80 | + |
77 | +void HELPER(sve2_bcax)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | 81 | +void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn, |
82 | + void *vpm, uint32_t desc) | ||
78 | +{ | 83 | +{ |
79 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 84 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
80 | + uint64_t *d = vd, *n = vn, *m = vm, *k = vk; | 85 | + uint8_t *pn = vpn, *pm = vpm; |
86 | + uint64_t *zda = vzda, *zn = vzn; | ||
81 | + | 87 | + |
82 | + for (i = 0; i < opr_sz; ++i) { | 88 | + for (row = 0; row < oprsz; ++row) { |
83 | + d[i] = n[i] ^ (m[i] & ~k[i]); | 89 | + if (pn[H1(row)] & 1) { |
90 | + for (col = 0; col < oprsz; ++col) { | ||
91 | + if (pm[H1(col)] & 1) { | ||
92 | + zda[tile_vslice_index(row) + col] += zn[col]; | ||
93 | + } | ||
94 | + } | ||
95 | + } | ||
84 | + } | 96 | + } |
85 | +} | 97 | +} |
86 | + | 98 | + |
87 | +void HELPER(sve2_bsl1n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | 99 | +void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn, |
100 | + void *vpm, uint32_t desc) | ||
88 | +{ | 101 | +{ |
89 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 102 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; |
90 | + uint64_t *d = vd, *n = vn, *m = vm, *k = vk; | 103 | + uint64_t *pn = vpn, *pm = vpm; |
104 | + uint32_t *zda = vzda, *zn = vzn; | ||
91 | + | 105 | + |
92 | + for (i = 0; i < opr_sz; ++i) { | 106 | + for (row = 0; row < oprsz; ) { |
93 | + d[i] = (~n[i] & k[i]) | (m[i] & ~k[i]); | 107 | + uint64_t pa = pn[row >> 4]; |
108 | + do { | ||
109 | + if (pa & 1) { | ||
110 | + uint32_t zn_row = zn[H4(row)]; | ||
111 | + for (col = 0; col < oprsz; ) { | ||
112 | + uint64_t pb = pm[col >> 4]; | ||
113 | + do { | ||
114 | + if (pb & 1) { | ||
115 | + zda[tile_vslice_index(row) + H4(col)] += zn_row; | ||
116 | + } | ||
117 | + pb >>= 4; | ||
118 | + } while (++col & 15); | ||
119 | + } | ||
120 | + } | ||
121 | + pa >>= 4; | ||
122 | + } while (++row & 15); | ||
94 | + } | 123 | + } |
95 | +} | 124 | +} |
96 | + | 125 | + |
97 | +void HELPER(sve2_bsl2n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | 126 | +void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, |
127 | + void *vpm, uint32_t desc) | ||
98 | +{ | 128 | +{ |
99 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 129 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
100 | + uint64_t *d = vd, *n = vn, *m = vm, *k = vk; | 130 | + uint8_t *pn = vpn, *pm = vpm; |
131 | + uint64_t *zda = vzda, *zn = vzn; | ||
101 | + | 132 | + |
102 | + for (i = 0; i < opr_sz; ++i) { | 133 | + for (row = 0; row < oprsz; ++row) { |
103 | + d[i] = (n[i] & k[i]) | (~m[i] & ~k[i]); | 134 | + if (pn[H1(row)] & 1) { |
135 | + uint64_t zn_row = zn[row]; | ||
136 | + for (col = 0; col < oprsz; ++col) { | ||
137 | + if (pm[H1(col)] & 1) { | ||
138 | + zda[tile_vslice_index(row) + col] += zn_row; | ||
139 | + } | ||
140 | + } | ||
141 | + } | ||
104 | + } | 142 | + } |
105 | +} | 143 | +} |
144 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
145 | index XXXXXXX..XXXXXXX 100644 | ||
146 | --- a/target/arm/translate-sme.c | ||
147 | +++ b/target/arm/translate-sme.c | ||
148 | @@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) | ||
149 | |||
150 | TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) | ||
151 | TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) | ||
106 | + | 152 | + |
107 | +void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | 153 | +static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, |
154 | + gen_helper_gvec_4 *fn) | ||
108 | +{ | 155 | +{ |
109 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; | 156 | + int svl = streaming_vec_reg_size(s); |
110 | + uint64_t *d = vd, *n = vn, *m = vm, *k = vk; | 157 | + uint32_t desc = simd_desc(svl, svl, 0); |
158 | + TCGv_ptr za, zn, pn, pm; | ||
111 | + | 159 | + |
112 | + for (i = 0; i < opr_sz; ++i) { | 160 | + if (!sme_smza_enabled_check(s)) { |
113 | + d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i])); | 161 | + return true; |
114 | + } | 162 | + } |
115 | +} | ||
116 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/target/arm/translate-sve.c | ||
119 | +++ b/target/arm/translate-sve.c | ||
120 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn, | ||
121 | vec_full_reg_offset(s, rm), vsz, vsz); | ||
122 | } | ||
123 | |||
124 | +/* Invoke a vector expander on four Zregs. */ | ||
125 | +static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn, | ||
126 | + int esz, int rd, int rn, int rm, int ra) | ||
127 | +{ | ||
128 | + unsigned vsz = vec_full_reg_size(s); | ||
129 | + gvec_fn(esz, vec_full_reg_offset(s, rd), | ||
130 | + vec_full_reg_offset(s, rn), | ||
131 | + vec_full_reg_offset(s, rm), | ||
132 | + vec_full_reg_offset(s, ra), vsz, vsz); | ||
133 | +} | ||
134 | + | 163 | + |
135 | /* Invoke a vector move on two Zregs. */ | 164 | + /* Sum XZR+zad to find ZAd. */ |
136 | static bool do_mov_z(DisasContext *s, int rd, int rn) | 165 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); |
137 | { | 166 | + zn = vec_full_reg_ptr(s, a->zn); |
138 | @@ -XXX,XX +XXX,XX @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a) | 167 | + pn = pred_full_reg_ptr(s, a->pn); |
139 | return do_zzz_fn(s, a, tcg_gen_gvec_andc); | 168 | + pm = pred_full_reg_ptr(s, a->pm); |
140 | } | 169 | + |
141 | 170 | + fn(za, zn, pn, pm, tcg_constant_i32(desc)); | |
142 | +static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn) | 171 | + |
143 | +{ | 172 | + tcg_temp_free_ptr(za); |
144 | + if (!dc_isar_feature(aa64_sve2, s)) { | 173 | + tcg_temp_free_ptr(zn); |
145 | + return false; | 174 | + tcg_temp_free_ptr(pn); |
146 | + } | 175 | + tcg_temp_free_ptr(pm); |
147 | + if (sve_access_check(s)) { | ||
148 | + gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra); | ||
149 | + } | ||
150 | + return true; | 176 | + return true; |
151 | +} | 177 | +} |
152 | + | 178 | + |
153 | +static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) | 179 | +TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) |
154 | +{ | 180 | +TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) |
155 | + tcg_gen_xor_i64(d, n, m); | 181 | +TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) |
156 | + tcg_gen_xor_i64(d, d, k); | 182 | +TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) |
157 | +} | ||
158 | + | ||
159 | +static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
160 | + TCGv_vec m, TCGv_vec k) | ||
161 | +{ | ||
162 | + tcg_gen_xor_vec(vece, d, n, m); | ||
163 | + tcg_gen_xor_vec(vece, d, d, k); | ||
164 | +} | ||
165 | + | ||
166 | +static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
167 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
168 | +{ | ||
169 | + static const GVecGen4 op = { | ||
170 | + .fni8 = gen_eor3_i64, | ||
171 | + .fniv = gen_eor3_vec, | ||
172 | + .fno = gen_helper_sve2_eor3, | ||
173 | + .vece = MO_64, | ||
174 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
175 | + }; | ||
176 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); | ||
177 | +} | ||
178 | + | ||
179 | +static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a) | ||
180 | +{ | ||
181 | + return do_sve2_zzzz_fn(s, a, gen_eor3); | ||
182 | +} | ||
183 | + | ||
184 | +static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) | ||
185 | +{ | ||
186 | + tcg_gen_andc_i64(d, m, k); | ||
187 | + tcg_gen_xor_i64(d, d, n); | ||
188 | +} | ||
189 | + | ||
190 | +static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
191 | + TCGv_vec m, TCGv_vec k) | ||
192 | +{ | ||
193 | + tcg_gen_andc_vec(vece, d, m, k); | ||
194 | + tcg_gen_xor_vec(vece, d, d, n); | ||
195 | +} | ||
196 | + | ||
197 | +static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
198 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
199 | +{ | ||
200 | + static const GVecGen4 op = { | ||
201 | + .fni8 = gen_bcax_i64, | ||
202 | + .fniv = gen_bcax_vec, | ||
203 | + .fno = gen_helper_sve2_bcax, | ||
204 | + .vece = MO_64, | ||
205 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
206 | + }; | ||
207 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); | ||
208 | +} | ||
209 | + | ||
210 | +static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a) | ||
211 | +{ | ||
212 | + return do_sve2_zzzz_fn(s, a, gen_bcax); | ||
213 | +} | ||
214 | + | ||
215 | +static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
216 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
217 | +{ | ||
218 | + /* BSL differs from the generic bitsel in argument ordering. */ | ||
219 | + tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz); | ||
220 | +} | ||
221 | + | ||
222 | +static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a) | ||
223 | +{ | ||
224 | + return do_sve2_zzzz_fn(s, a, gen_bsl); | ||
225 | +} | ||
226 | + | ||
227 | +static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) | ||
228 | +{ | ||
229 | + tcg_gen_andc_i64(n, k, n); | ||
230 | + tcg_gen_andc_i64(m, m, k); | ||
231 | + tcg_gen_or_i64(d, n, m); | ||
232 | +} | ||
233 | + | ||
234 | +static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
235 | + TCGv_vec m, TCGv_vec k) | ||
236 | +{ | ||
237 | + if (TCG_TARGET_HAS_bitsel_vec) { | ||
238 | + tcg_gen_not_vec(vece, n, n); | ||
239 | + tcg_gen_bitsel_vec(vece, d, k, n, m); | ||
240 | + } else { | ||
241 | + tcg_gen_andc_vec(vece, n, k, n); | ||
242 | + tcg_gen_andc_vec(vece, m, m, k); | ||
243 | + tcg_gen_or_vec(vece, d, n, m); | ||
244 | + } | ||
245 | +} | ||
246 | + | ||
247 | +static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
248 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
249 | +{ | ||
250 | + static const GVecGen4 op = { | ||
251 | + .fni8 = gen_bsl1n_i64, | ||
252 | + .fniv = gen_bsl1n_vec, | ||
253 | + .fno = gen_helper_sve2_bsl1n, | ||
254 | + .vece = MO_64, | ||
255 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
256 | + }; | ||
257 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); | ||
258 | +} | ||
259 | + | ||
260 | +static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a) | ||
261 | +{ | ||
262 | + return do_sve2_zzzz_fn(s, a, gen_bsl1n); | ||
263 | +} | ||
264 | + | ||
265 | +static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) | ||
266 | +{ | ||
267 | + /* | ||
268 | + * Z[dn] = (n & k) | (~m & ~k) | ||
269 | + *       =         | ~(m | k) |
270 | + */ | ||
271 | + tcg_gen_and_i64(n, n, k); | ||
272 | + if (TCG_TARGET_HAS_orc_i64) { | ||
273 | + tcg_gen_or_i64(m, m, k); | ||
274 | + tcg_gen_orc_i64(d, n, m); | ||
275 | + } else { | ||
276 | + tcg_gen_nor_i64(m, m, k); | ||
277 | + tcg_gen_or_i64(d, n, m); | ||
278 | + } | ||
279 | +} | ||
280 | + | ||
281 | +static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
282 | + TCGv_vec m, TCGv_vec k) | ||
283 | +{ | ||
284 | + if (TCG_TARGET_HAS_bitsel_vec) { | ||
285 | + tcg_gen_not_vec(vece, m, m); | ||
286 | + tcg_gen_bitsel_vec(vece, d, k, n, m); | ||
287 | + } else { | ||
288 | + tcg_gen_and_vec(vece, n, n, k); | ||
289 | + tcg_gen_or_vec(vece, m, m, k); | ||
290 | + tcg_gen_orc_vec(vece, d, n, m); | ||
291 | + } | ||
292 | +} | ||
293 | + | ||
294 | +static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
295 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
296 | +{ | ||
297 | + static const GVecGen4 op = { | ||
298 | + .fni8 = gen_bsl2n_i64, | ||
299 | + .fniv = gen_bsl2n_vec, | ||
300 | + .fno = gen_helper_sve2_bsl2n, | ||
301 | + .vece = MO_64, | ||
302 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
303 | + }; | ||
304 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); | ||
305 | +} | ||
306 | + | ||
307 | +static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a) | ||
308 | +{ | ||
309 | + return do_sve2_zzzz_fn(s, a, gen_bsl2n); | ||
310 | +} | ||
311 | + | ||
312 | +static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) | ||
313 | +{ | ||
314 | + tcg_gen_and_i64(n, n, k); | ||
315 | + tcg_gen_andc_i64(m, m, k); | ||
316 | + tcg_gen_nor_i64(d, n, m); | ||
317 | +} | ||
318 | + | ||
319 | +static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
320 | + TCGv_vec m, TCGv_vec k) | ||
321 | +{ | ||
322 | + tcg_gen_bitsel_vec(vece, d, k, n, m); | ||
323 | + tcg_gen_not_vec(vece, d, d); | ||
324 | +} | ||
325 | + | ||
326 | +static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
327 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
328 | +{ | ||
329 | + static const GVecGen4 op = { | ||
330 | + .fni8 = gen_nbsl_i64, | ||
331 | + .fniv = gen_nbsl_vec, | ||
332 | + .fno = gen_helper_sve2_nbsl, | ||
333 | + .vece = MO_64, | ||
334 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
335 | + }; | ||
336 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); | ||
337 | +} | ||
338 | + | ||
339 | +static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a) | ||
340 | +{ | ||
341 | + return do_sve2_zzzz_fn(s, a, gen_nbsl); | ||
342 | +} | ||
343 | + | ||
344 | /* | ||
345 | *** SVE Integer Arithmetic - Unpredicated Group | ||
346 | */ | ||
347 | -- | 183 | -- |
348 | 2.20.1 | 184 | 2.25.1 |
349 | |||
1 | From: Stephen Long <steplong@quicinc.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20220708151540.18136-25-richard.henderson@linaro.org | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-47-richard.henderson@linaro.org | ||
7 | Message-Id: <20200422165503.13511-1-steplong@quicinc.com> | ||
8 | [rth: Fix indexing in helpers, expand macro to straight functions.] | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | --- | 7 | --- |
12 | target/arm/cpu.h | 10 ++++++ | 8 | target/arm/helper-sme.h | 5 +++ |
13 | target/arm/helper-sve.h | 3 ++ | 9 | target/arm/sme.decode | 9 +++++ |
14 | target/arm/sve.decode | 4 +++ | 10 | target/arm/sme_helper.c | 69 ++++++++++++++++++++++++++++++++++++++ |
15 | target/arm/sve_helper.c | 74 ++++++++++++++++++++++++++++++++++++++ | 11 | target/arm/translate-sme.c | 32 ++++++++++++++++++ |
16 | target/arm/translate-sve.c | 34 ++++++++++++++++++ | 12 | 4 files changed, 115 insertions(+) |
17 | 5 files changed, 125 insertions(+) | ||
18 | 13 | ||
19 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
20 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/cpu.h | 16 | --- a/target/arm/helper-sme.h |
22 | +++ b/target/arm/cpu.h | 17 | +++ b/target/arm/helper-sme.h |
23 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
24 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; | 19 | DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
25 | } | 20 | DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
26 | 21 | DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | |
27 | +static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) | ||
28 | +{ | ||
29 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; | ||
30 | +} | ||
31 | + | 22 | + |
32 | +static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) | 23 | +DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, |
33 | +{ | 24 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
34 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; | 25 | +DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, |
35 | +} | 26 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/sme.decode | ||
30 | +++ b/target/arm/sme.decode | ||
31 | @@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 | ||
32 | ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 | ||
33 | ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 | ||
34 | ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 | ||
36 | + | 35 | + |
37 | /* | 36 | +### SME Outer Product |
38 | * Feature tests for "does this exist in either 32-bit or 64-bit?" | 37 | + |
39 | */ | 38 | +&op zad zn zm pm pn sub:bool |
40 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 39 | +@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op |
40 | +@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op | ||
41 | + | ||
42 | +FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 | ||
43 | +FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
44 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | 45 | index XXXXXXX..XXXXXXX 100644 |
42 | --- a/target/arm/helper-sve.h | 46 | --- a/target/arm/sme_helper.c |
43 | +++ b/target/arm/helper-sve.h | 47 | +++ b/target/arm/sme_helper.c |
44 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG, | 48 | @@ -XXX,XX +XXX,XX @@ |
45 | void, ptr, ptr, ptr, ptr, i32) | 49 | #include "exec/cpu_ldst.h" |
46 | DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG, | 50 | #include "exec/exec-all.h" |
47 | void, ptr, ptr, ptr, ptr, i32) | 51 | #include "qemu/int128.h" |
48 | + | 52 | +#include "fpu/softfloat.h" |
49 | +DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) | 53 | #include "vec_internal.h" |
50 | +DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) | 54 | #include "sve_ldst_internal.h" |
51 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 55 | |
52 | index XXXXXXX..XXXXXXX 100644 | 56 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, |
53 | --- a/target/arm/sve.decode | 57 | } |
54 | +++ b/target/arm/sve.decode | ||
55 | @@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | ||
56 | CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
57 | SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
58 | |||
59 | +### SVE2 floating point matrix multiply accumulate | ||
60 | + | ||
61 | +FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm | ||
62 | + | ||
63 | ### SVE2 Memory Gather Load Group | ||
64 | |||
65 | # SVE2 64-bit gather non-temporal load | ||
66 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/target/arm/sve_helper.c | ||
69 | +++ b/target/arm/sve_helper.c | ||
70 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc) | ||
71 | d[i] = ror32(n[i] ^ m[i], shr); | ||
72 | } | 58 | } |
73 | } | 59 | } |
74 | + | 60 | + |
75 | +void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va, | 61 | +void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, |
76 | + void *status, uint32_t desc) | 62 | + void *vpm, void *vst, uint32_t desc) |
77 | +{ | 63 | +{ |
78 | + intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4); | 64 | + intptr_t row, col, oprsz = simd_maxsz(desc); |
65 | + uint32_t neg = simd_data(desc) << 31; | ||
66 | + uint16_t *pn = vpn, *pm = vpm; | ||
67 | + float_status fpst; | ||
79 | + | 68 | + |
80 | + for (s = 0; s < opr_sz; ++s) { | 69 | + /* |
81 | + float32 *n = vn + s * sizeof(float32) * 4; | 70 | + * Make a copy of float_status because this operation does not |
82 | + float32 *m = vm + s * sizeof(float32) * 4; | 71 | + * update the cumulative fp exception status. It also produces |
83 | + float32 *a = va + s * sizeof(float32) * 4; | 72 | + * default nans. |
84 | + float32 *d = vd + s * sizeof(float32) * 4; | 73 | + */ |
85 | + float32 n00 = n[H4(0)], n01 = n[H4(1)]; | 74 | + fpst = *(float_status *)vst; |
86 | + float32 n10 = n[H4(2)], n11 = n[H4(3)]; | 75 | + set_default_nan_mode(true, &fpst); |
87 | + float32 m00 = m[H4(0)], m01 = m[H4(1)]; | ||
88 | + float32 m10 = m[H4(2)], m11 = m[H4(3)]; | ||
89 | + float32 p0, p1; | ||
90 | + | 76 | + |
91 | + /* i = 0, j = 0 */ | 77 | + for (row = 0; row < oprsz; ) { |
92 | + p0 = float32_mul(n00, m00, status); | 78 | + uint16_t pa = pn[H2(row >> 4)]; |
93 | + p1 = float32_mul(n01, m01, status); | 79 | + do { |
94 | + d[H4(0)] = float32_add(a[H4(0)], float32_add(p0, p1, status), status); | 80 | + if (pa & 1) { |
81 | + void *vza_row = vza + tile_vslice_offset(row); | ||
82 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg; | ||
95 | + | 83 | + |
96 | + /* i = 0, j = 1 */ | 84 | + for (col = 0; col < oprsz; ) { |
97 | + p0 = float32_mul(n00, m10, status); | 85 | + uint16_t pb = pm[H2(col >> 4)]; |
98 | + p1 = float32_mul(n01, m11, status); | 86 | + do { |
99 | + d[H4(1)] = float32_add(a[H4(1)], float32_add(p0, p1, status), status); | 87 | + if (pb & 1) { |
100 | + | 88 | + uint32_t *a = vza_row + H1_4(col); |
101 | + /* i = 1, j = 0 */ | 89 | + uint32_t *m = vzm + H1_4(col); |
102 | + p0 = float32_mul(n10, m00, status); | 90 | + *a = float32_muladd(n, *m, *a, 0, vst); |
103 | + p1 = float32_mul(n11, m01, status); | 91 | + } |
104 | + d[H4(2)] = float32_add(a[H4(2)], float32_add(p0, p1, status), status); | 92 | + col += 4; |
105 | + | 93 | + pb >>= 4; |
106 | + /* i = 1, j = 1 */ | 94 | + } while (col & 15); |
107 | + p0 = float32_mul(n10, m10, status); | 95 | + } |
108 | + p1 = float32_mul(n11, m11, status); | 96 | + } |
109 | + d[H4(3)] = float32_add(a[H4(3)], float32_add(p0, p1, status), status); | 97 | + row += 4; |
98 | + pa >>= 4; | ||
99 | + } while (row & 15); | ||
110 | + } | 100 | + } |
111 | +} | 101 | +} |
112 | + | 102 | + |
113 | +void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va, | 103 | +void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, |
114 | + void *status, uint32_t desc) | 104 | + void *vpm, void *vst, uint32_t desc) |
115 | +{ | 105 | +{ |
116 | + intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4); | 106 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
107 | + uint64_t neg = (uint64_t)simd_data(desc) << 63; | ||
108 | + uint64_t *za = vza, *zn = vzn, *zm = vzm; | ||
109 | + uint8_t *pn = vpn, *pm = vpm; | ||
110 | + float_status fpst = *(float_status *)vst; | ||
117 | + | 111 | + |
118 | + for (s = 0; s < opr_sz; ++s) { | 112 | + set_default_nan_mode(true, &fpst); |
119 | + float64 *n = vn + s * sizeof(float64) * 4; | ||
120 | + float64 *m = vm + s * sizeof(float64) * 4; | ||
121 | + float64 *a = va + s * sizeof(float64) * 4; | ||
122 | + float64 *d = vd + s * sizeof(float64) * 4; | ||
123 | + float64 n00 = n[0], n01 = n[1], n10 = n[2], n11 = n[3]; | ||
124 | + float64 m00 = m[0], m01 = m[1], m10 = m[2], m11 = m[3]; | ||
125 | + float64 p0, p1; | ||
126 | + | 113 | + |
127 | + /* i = 0, j = 0 */ | 114 | + for (row = 0; row < oprsz; ++row) { |
128 | + p0 = float64_mul(n00, m00, status); | 115 | + if (pn[H1(row)] & 1) { |
129 | + p1 = float64_mul(n01, m01, status); | 116 | + uint64_t *za_row = &za[tile_vslice_index(row)]; |
130 | + d[0] = float64_add(a[0], float64_add(p0, p1, status), status); | 117 | + uint64_t n = zn[row] ^ neg; |
131 | + | 118 | + |
132 | + /* i = 0, j = 1 */ | 119 | + for (col = 0; col < oprsz; ++col) { |
133 | + p0 = float64_mul(n00, m10, status); | 120 | + if (pm[H1(col)] & 1) { |
134 | + p1 = float64_mul(n01, m11, status); | 121 | + uint64_t *a = &za_row[col]; |
135 | + d[1] = float64_add(a[1], float64_add(p0, p1, status), status); | 122 | + *a = float64_muladd(n, zm[col], *a, 0, &fpst); |
136 | + | 123 | + } |
137 | + /* i = 1, j = 0 */ | 124 | + } |
138 | + p0 = float64_mul(n10, m00, status); | 125 | + } |
139 | + p1 = float64_mul(n11, m01, status); | ||
140 | + d[2] = float64_add(a[2], float64_add(p0, p1, status), status); | ||
141 | + | ||
142 | + /* i = 1, j = 1 */ | ||
143 | + p0 = float64_mul(n10, m10, status); | ||
144 | + p1 = float64_mul(n11, m11, status); | ||
145 | + d[3] = float64_add(a[3], float64_add(p0, p1, status), status); | ||
146 | + } | 126 | + } |
147 | +} | 127 | +} |
148 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 128 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c |
149 | index XXXXXXX..XXXXXXX 100644 | 129 | index XXXXXXX..XXXXXXX 100644 |
150 | --- a/target/arm/translate-sve.c | 130 | --- a/target/arm/translate-sme.c |
151 | +++ b/target/arm/translate-sve.c | 131 | +++ b/target/arm/translate-sme.c |
152 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ_FP(FMINP, fminp) | 132 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) |
153 | * SVE Integer Multiply-Add (unpredicated) | 133 | TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) |
154 | */ | 134 | TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) |
155 | 135 | TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) | |
156 | +static bool trans_FMMLA(DisasContext *s, arg_rrrr_esz *a) | 136 | + |
137 | +static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, | ||
138 | + gen_helper_gvec_5_ptr *fn) | ||
157 | +{ | 139 | +{ |
158 | + gen_helper_gvec_4_ptr *fn; | 140 | + int svl = streaming_vec_reg_size(s); |
141 | + uint32_t desc = simd_desc(svl, svl, a->sub); | ||
142 | + TCGv_ptr za, zn, zm, pn, pm, fpst; | ||
159 | + | 143 | + |
160 | + switch (a->esz) { | 144 | + if (!sme_smza_enabled_check(s)) { |
161 | + case MO_32: | 145 | + return true; |
162 | + if (!dc_isar_feature(aa64_sve_f32mm, s)) { | ||
163 | + return false; | ||
164 | + } | ||
165 | + fn = gen_helper_fmmla_s; | ||
166 | + break; | ||
167 | + case MO_64: | ||
168 | + if (!dc_isar_feature(aa64_sve_f64mm, s)) { | ||
169 | + return false; | ||
170 | + } | ||
171 | + fn = gen_helper_fmmla_d; | ||
172 | + break; | ||
173 | + default: | ||
174 | + return false; | ||
175 | + } | 146 | + } |
176 | + | 147 | + |
177 | + if (sve_access_check(s)) { | 148 | + /* Sum XZR+zad to find ZAd. */ |
178 | + unsigned vsz = vec_full_reg_size(s); | 149 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); |
179 | + TCGv_ptr status = fpstatus_ptr(FPST_FPCR); | 150 | + zn = vec_full_reg_ptr(s, a->zn); |
180 | + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd), | 151 | + zm = vec_full_reg_ptr(s, a->zm); |
181 | + vec_full_reg_offset(s, a->rn), | 152 | + pn = pred_full_reg_ptr(s, a->pn); |
182 | + vec_full_reg_offset(s, a->rm), | 153 | + pm = pred_full_reg_ptr(s, a->pm); |
183 | + vec_full_reg_offset(s, a->ra), | 154 | + fpst = fpstatus_ptr(FPST_FPCR); |
184 | + status, vsz, vsz, 0, fn); | 155 | + |
185 | + tcg_temp_free_ptr(status); | 156 | + fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); |
186 | + } | 157 | + |
158 | + tcg_temp_free_ptr(za); | ||
159 | + tcg_temp_free_ptr(zn); | ||
160 | + tcg_temp_free_ptr(pn); | ||
161 | + tcg_temp_free_ptr(pm); | ||
162 | + tcg_temp_free_ptr(fpst); | ||
187 | + return true; | 163 | + return true; |
188 | +} | 164 | +} |
189 | + | 165 | + |
190 | static bool do_sqdmlal_zzzw(DisasContext *s, arg_rrrr_esz *a, | 166 | +TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
191 | bool sel1, bool sel2) | 167 | +TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) |
192 | { | ||
193 | -- | 168 | -- |
194 | 2.20.1 | 169 | 2.25.1 |
195 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20220708151540.18136-26-richard.henderson@linaro.org | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-5-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 13 +++++++++++ | 8 | target/arm/helper-sme.h | 2 ++ |
9 | target/arm/sve.decode | 7 ++++++ | 9 | target/arm/sme.decode | 2 ++ |
10 | target/arm/sve_helper.c | 21 +++++++++++++++++ | 10 | target/arm/sme_helper.c | 56 ++++++++++++++++++++++++++++++++++++++ |
11 | target/arm/translate-sve.c | 47 ++++++++++++++++++++++++++++++++++++++ | 11 | target/arm/translate-sme.c | 30 ++++++++++++++++++++ |
12 | 4 files changed, 88 insertions(+) | 12 | 4 files changed, 90 insertions(+) |
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/target/arm/helper-sme.h |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/target/arm/helper-sme.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, |
19 | DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 19 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
20 | DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 20 | DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, |
21 | 21 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) | |
22 | +DEF_HELPER_FLAGS_4(sve2_sqabs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 22 | +DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, |
23 | +DEF_HELPER_FLAGS_4(sve2_sqabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 23 | + void, ptr, ptr, ptr, ptr, ptr, i32) |
24 | +DEF_HELPER_FLAGS_4(sve2_sqabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 24 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode |
25 | +DEF_HELPER_FLAGS_4(sve2_sqabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 25 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/target/arm/sme.decode | ||
27 | +++ b/target/arm/sme.decode | ||
28 | @@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 | ||
29 | |||
30 | FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 | ||
31 | FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
26 | + | 32 | + |
27 | +DEF_HELPER_FLAGS_4(sve2_sqneg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 33 | +BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 |
28 | +DEF_HELPER_FLAGS_4(sve2_sqneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 34 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c |
29 | +DEF_HELPER_FLAGS_4(sve2_sqneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_sqneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | + | ||
32 | +DEF_HELPER_FLAGS_4(sve2_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_4(sve2_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
34 | + | ||
35 | DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
36 | |||
37 | DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG, | ||
38 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
39 | index XXXXXXX..XXXXXXX 100644 | 35 | index XXXXXXX..XXXXXXX 100644 |
40 | --- a/target/arm/sve.decode | 36 | --- a/target/arm/sme_helper.c |
41 | +++ b/target/arm/sve.decode | 37 | +++ b/target/arm/sme_helper.c |
42 | @@ -XXX,XX +XXX,XX @@ PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0 | 38 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, |
43 | 39 | } | |
44 | SADALP_zpzz 01000100 .. 000 100 101 ... ..... ..... @rdm_pg_rn | ||
45 | UADALP_zpzz 01000100 .. 000 101 101 ... ..... ..... @rdm_pg_rn | ||
46 | + | ||
47 | +### SVE2 integer unary operations (predicated) | ||
48 | + | ||
49 | +URECPE 01000100 .. 000 000 101 ... ..... ..... @rd_pg_rn | ||
50 | +URSQRTE 01000100 .. 000 001 101 ... ..... ..... @rd_pg_rn | ||
51 | +SQABS 01000100 .. 001 000 101 ... ..... ..... @rd_pg_rn | ||
52 | +SQNEG 01000100 .. 001 001 101 ... ..... ..... @rd_pg_rn | ||
53 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/target/arm/sve_helper.c | ||
56 | +++ b/target/arm/sve_helper.c | ||
57 | @@ -XXX,XX +XXX,XX @@ DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) | ||
58 | DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) | ||
59 | DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64) | ||
60 | |||
61 | +#define DO_SQABS(X) \ | ||
62 | + ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \ | ||
63 | + x_ >= 0 ? x_ : x_ == min_ ? -min_ - 1 : -x_; }) | ||
64 | + | ||
65 | +DO_ZPZ(sve2_sqabs_b, int8_t, H1, DO_SQABS) | ||
66 | +DO_ZPZ(sve2_sqabs_h, int16_t, H1_2, DO_SQABS) | ||
67 | +DO_ZPZ(sve2_sqabs_s, int32_t, H1_4, DO_SQABS) | ||
68 | +DO_ZPZ_D(sve2_sqabs_d, int64_t, DO_SQABS) | ||
69 | + | ||
70 | +#define DO_SQNEG(X) \ | ||
71 | + ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \ | ||
72 | + x_ == min_ ? -min_ - 1 : -x_; }) | ||
73 | + | ||
74 | +DO_ZPZ(sve2_sqneg_b, uint8_t, H1, DO_SQNEG) | ||
75 | +DO_ZPZ(sve2_sqneg_h, uint16_t, H1_2, DO_SQNEG) | ||
76 | +DO_ZPZ(sve2_sqneg_s, uint32_t, H1_4, DO_SQNEG) | ||
77 | +DO_ZPZ_D(sve2_sqneg_d, uint64_t, DO_SQNEG) | ||
78 | + | ||
79 | +DO_ZPZ(sve2_urecpe_s, uint32_t, H1_4, helper_recpe_u32) | ||
80 | +DO_ZPZ(sve2_ursqrte_s, uint32_t, H1_4, helper_rsqrte_u32) | ||
81 | + | ||
82 | /* Three-operand expander, unpredicated, in which the third operand is "wide". | ||
83 | */ | ||
84 | #define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \ | ||
85 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/target/arm/translate-sve.c | ||
88 | +++ b/target/arm/translate-sve.c | ||
89 | @@ -XXX,XX +XXX,XX @@ static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a) | ||
90 | } | 40 | } |
91 | return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]); | ||
92 | } | 41 | } |
93 | + | 42 | + |
94 | +/* | 43 | +/* |
95 | + * SVE2 integer unary operations (predicated) | 44 | + * Alter PAIR as needed for controlling predicates being false, |
45 | + * and for NEG on an enabled row element. | ||
96 | + */ | 46 | + */ |
97 | + | 47 | +static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) |
98 | +static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a, | ||
99 | + gen_helper_gvec_3 *fn) | ||
100 | +{ | 48 | +{ |
101 | + if (!dc_isar_feature(aa64_sve2, s)) { | 49 | + /* |
102 | + return false; | 50 | + * The pseudocode uses a conditional negate after the conditional zero. |
51 | + * It is simpler here to unconditionally negate before conditional zero. | ||
52 | + */ | ||
53 | + pair ^= neg; | ||
54 | + if (!(pg & 1)) { | ||
55 | + pair &= 0xffff0000u; | ||
103 | + } | 56 | + } |
104 | + return do_zpz_ool(s, a, fn); | 57 | + if (!(pg & 4)) { |
58 | + pair &= 0x0000ffffu; | ||
59 | + } | ||
60 | + return pair; | ||
105 | +} | 61 | +} |
106 | + | 62 | + |
107 | +static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a) | 63 | +void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, |
64 | + void *vpm, uint32_t desc) | ||
108 | +{ | 65 | +{ |
109 | + if (a->esz != 2) { | 66 | + intptr_t row, col, oprsz = simd_maxsz(desc); |
110 | + return false; | 67 | + uint32_t neg = simd_data(desc) * 0x80008000u; |
68 | + uint16_t *pn = vpn, *pm = vpm; | ||
69 | + | ||
70 | + for (row = 0; row < oprsz; ) { | ||
71 | + uint16_t prow = pn[H2(row >> 4)]; | ||
72 | + do { | ||
73 | + void *vza_row = vza + tile_vslice_offset(row); | ||
74 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); | ||
75 | + | ||
76 | + n = f16mop_adj_pair(n, prow, neg); | ||
77 | + | ||
78 | + for (col = 0; col < oprsz; ) { | ||
79 | + uint16_t pcol = pm[H2(col >> 4)]; | ||
80 | + do { | ||
81 | + if (prow & pcol & 0b0101) { | ||
82 | + uint32_t *a = vza_row + H1_4(col); | ||
83 | + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); | ||
84 | + | ||
85 | + m = f16mop_adj_pair(m, pcol, 0); | ||
86 | + *a = bfdotadd(*a, n, m); | ||
87 | + | ||
88 | + col += 4; | ||
89 | + pcol >>= 4; | ||
90 | + } | ||
91 | + } while (col & 15); | ||
92 | + } | ||
93 | + row += 4; | ||
94 | + prow >>= 4; | ||
95 | + } while (row & 15); | ||
111 | + } | 96 | + } |
112 | + return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s); | 97 | +} |
98 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/translate-sme.c | ||
101 | +++ b/target/arm/translate-sme.c | ||
102 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) | ||
103 | TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) | ||
104 | TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) | ||
105 | |||
106 | +static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, | ||
107 | + gen_helper_gvec_5 *fn) | ||
108 | +{ | ||
109 | + int svl = streaming_vec_reg_size(s); | ||
110 | + uint32_t desc = simd_desc(svl, svl, a->sub); | ||
111 | + TCGv_ptr za, zn, zm, pn, pm; | ||
112 | + | ||
113 | + if (!sme_smza_enabled_check(s)) { | ||
114 | + return true; | ||
115 | + } | ||
116 | + | ||
117 | + /* Sum XZR+zad to find ZAd. */ | ||
118 | + za = get_tile_rowcol(s, esz, 31, a->zad, false); | ||
119 | + zn = vec_full_reg_ptr(s, a->zn); | ||
120 | + zm = vec_full_reg_ptr(s, a->zm); | ||
121 | + pn = pred_full_reg_ptr(s, a->pn); | ||
122 | + pm = pred_full_reg_ptr(s, a->pm); | ||
123 | + | ||
124 | + fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); | ||
125 | + | ||
126 | + tcg_temp_free_ptr(za); | ||
127 | + tcg_temp_free_ptr(zn); | ||
127 | + tcg_temp_free_ptr(zm); /* also release the zm pointer temp */ | ||
128 | + tcg_temp_free_ptr(pn); | ||
129 | + tcg_temp_free_ptr(pm); | ||
130 | + return true; | ||
113 | +} | 131 | +} |
114 | + | 132 | + |
115 | +static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a) | 133 | static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
116 | +{ | 134 | gen_helper_gvec_5_ptr *fn) |
117 | + if (a->esz != 2) { | 135 | { |
118 | + return false; | 136 | @@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
119 | + } | 137 | |
120 | + return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s); | 138 | TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
121 | +} | 139 | TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) |
122 | + | 140 | + |
123 | +static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a) | 141 | +/* TODO: FEAT_EBF16 */ |
124 | +{ | 142 | +TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) |
125 | + static gen_helper_gvec_3 * const fns[4] = { | ||
126 | + gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h, | ||
127 | + gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d, | ||
128 | + }; | ||
129 | + return do_sve2_zpz_ool(s, a, fns[a->esz]); | ||
130 | +} | ||
131 | + | ||
132 | +static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a) | ||
133 | +{ | ||
134 | + static gen_helper_gvec_3 * const fns[4] = { | ||
135 | + gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h, | ||
136 | + gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d, | ||
137 | + }; | ||
138 | + return do_sve2_zpz_ool(s, a, fns[a->esz]); | ||
139 | +} | ||
140 | -- | 143 | -- |
141 | 2.20.1 | 144 | 2.25.1 |
142 | |||
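For reference, the pair-adjust trick in f16mop_adj_pair can be checked in isolation: flipping a lane's sign bit commutes with zeroing that lane, so the helper may negate unconditionally first and only then apply the predicate zeroing. A minimal standalone sketch, with hypothetical names and no QEMU dependencies:

#include <stdint.h>
#include <stdio.h>

/* Same operation as f16mop_adj_pair above: PAIR holds two fp16
 * values, NEG is 0 or 0x80008000, and PG carries the two predicate
 * bits (bit 0 and bit 2) controlling the low and high lanes. */
static uint32_t adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
{
    pair ^= neg;                 /* unconditional sign flip */
    if (!(pg & 1)) {
        pair &= 0xffff0000u;     /* low lane inactive: zero it */
    }
    if (!(pg & 4)) {
        pair &= 0x0000ffffu;     /* high lane inactive: zero it */
    }
    return pair;
}

int main(void)
{
    uint32_t p = 0x3c00bc00;     /* fp16 pair {low: -1.0, high: +1.0} */
    /* low lane active and negated, high lane zeroed: 00003c00 */
    printf("%08x\n", (unsigned)adj_pair(p, 1, 0x80008000u));
    return 0;
}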
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20210525010358.152808-64-richard.henderson@linaro.org | 4 | Message-id: 20220708151540.18136-27-richard.henderson@linaro.org |
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 10 ++++ | 8 | target/arm/helper-sme.h | 2 ++ |
9 | target/arm/sve.decode | 9 ++++ | 9 | target/arm/sme.decode | 1 + |
10 | target/arm/sve_helper.c | 99 ++++++++++++++++++++++++++++++++++++++ | 10 | target/arm/sme_helper.c | 74 ++++++++++++++++++++++++++++++++++++++ |
11 | target/arm/translate-sve.c | 17 +++++++ | 11 | target/arm/translate-sme.c | 1 + |
12 | 4 files changed, 135 insertions(+) | 12 | 4 files changed, 78 insertions(+) |
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/target/arm/helper-sme.h |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/target/arm/helper-sme.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG, | 18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
19 | void, ptr, ptr, ptr, ptr, i32) | 19 | DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
20 | DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG, | 20 | DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) |
21 | void, ptr, ptr, ptr, ptr, i32) | 21 | |
22 | +DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, | ||
23 | + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) | ||
24 | DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, | ||
25 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) | ||
26 | DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, | ||
27 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/sme.decode | ||
30 | +++ b/target/arm/sme.decode | ||
31 | @@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 | ||
32 | FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
33 | |||
34 | BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 | ||
35 | +FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 | ||
36 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sme_helper.c | ||
39 | +++ b/target/arm/sme_helper.c | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) | ||
41 | return pair; | ||
42 | } | ||
43 | |||
44 | +static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, | ||
45 | + float_status *s_std, float_status *s_odd) | ||
46 | +{ | ||
47 | + float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std); | ||
48 | + float64 e1c = float16_to_float64(e1 >> 16, true, s_std); | ||
49 | + float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std); | ||
50 | + float64 e2c = float16_to_float64(e2 >> 16, true, s_std); | ||
51 | + float64 t64; | ||
52 | + float32 t32; | ||
22 | + | 53 | + |
23 | +DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_s, TCG_CALL_NO_RWG, | 54 | + /* |
24 | + void, ptr, ptr, ptr, ptr, i32) | 55 | + * The ARM pseudocode function FPDot performs both multiplies |
25 | +DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_d, TCG_CALL_NO_RWG, | 56 | + * and the add with a single rounding operation. Emulate this |
26 | + void, ptr, ptr, ptr, ptr, i32) | 57 | + * by performing the first multiply in round-to-odd, then doing |
58 | + * the second multiply as fused multiply-add, and rounding to | ||
59 | + * float32 all in one step. | ||
60 | + */ | ||
61 | + t64 = float64_mul(e1r, e2r, s_odd); | ||
62 | + t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); | ||
27 | + | 63 | + |
28 | +DEF_HELPER_FLAGS_5(sve2_cdot_idx_s, TCG_CALL_NO_RWG, | 64 | + /* This conversion is exact, because we've already rounded. */ |
29 | + void, ptr, ptr, ptr, ptr, i32) | 65 | + t32 = float64_to_float32(t64, s_std); |
30 | +DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, i32) | ||
32 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/target/arm/sve.decode | ||
35 | +++ b/target/arm/sve.decode | ||
36 | @@ -XXX,XX +XXX,XX @@ MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s | ||
37 | DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \ | ||
38 | ra=%reg_movprfx | ||
39 | |||
40 | +# SVE2 complex dot product (vectors) | ||
41 | +CDOT_zzzz 01000100 esz:2 0 rm:5 0001 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
42 | + | 66 | + |
43 | #### SVE Multiply - Indexed | 67 | + /* The final accumulation step is not fused. */ |
44 | 68 | + return float32_add(sum, t32, s_std); | |
45 | # SVE integer dot product (indexed) | ||
46 | @@ -XXX,XX +XXX,XX @@ SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3 | ||
47 | SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2 | ||
48 | SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3 | ||
49 | |||
50 | +# SVE2 complex integer dot product (indexed) | ||
51 | +CDOT_zzxw_s 01000100 10 1 index:2 rm:3 0100 rot:2 rn:5 rd:5 \ | ||
52 | + ra=%reg_movprfx | ||
53 | +CDOT_zzxw_d 01000100 11 1 index:1 rm:4 0100 rot:2 rn:5 rd:5 \ | ||
54 | + ra=%reg_movprfx | ||
55 | + | ||
56 | # SVE2 complex integer multiply-add (indexed) | ||
57 | CMLA_zzxz_h 01000100 10 1 index:2 rm:3 0110 rot:2 rn:5 rd:5 \ | ||
58 | ra=%reg_movprfx | ||
59 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/target/arm/sve_helper.c | ||
62 | +++ b/target/arm/sve_helper.c | ||
63 | @@ -XXX,XX +XXX,XX @@ DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S) | ||
64 | #undef DO_SQRDMLAH_S | ||
65 | #undef DO_SQRDMLAH_D | ||
66 | |||
67 | +/* Note N and M are 4 elements bundled into one unit. */ | ||
68 | +static int32_t do_cdot_s(uint32_t n, uint32_t m, int32_t a, | ||
69 | + int sel_a, int sel_b, int sub_i) | ||
70 | +{ | ||
71 | + for (int i = 0; i <= 1; i++) { | ||
72 | + int32_t elt1_r = (int8_t)(n >> (16 * i)); | ||
73 | + int32_t elt1_i = (int8_t)(n >> (16 * i + 8)); | ||
74 | + int32_t elt2_a = (int8_t)(m >> (16 * i + 8 * sel_a)); | ||
75 | + int32_t elt2_b = (int8_t)(m >> (16 * i + 8 * sel_b)); | ||
76 | + | ||
77 | + a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i; | ||
78 | + } | ||
79 | + return a; | ||
80 | +} | 69 | +} |
81 | + | 70 | + |
82 | +static int64_t do_cdot_d(uint64_t n, uint64_t m, int64_t a, | 71 | +void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, |
83 | + int sel_a, int sel_b, int sub_i) | 72 | + void *vpm, void *vst, uint32_t desc) |
84 | +{ | 73 | +{ |
85 | + for (int i = 0; i <= 1; i++) { | 74 | + intptr_t row, col, oprsz = simd_maxsz(desc); |
86 | + int64_t elt1_r = (int16_t)(n >> (32 * i + 0)); | 75 | + uint32_t neg = simd_data(desc) * 0x80008000u; |
87 | + int64_t elt1_i = (int16_t)(n >> (32 * i + 16)); | 76 | + uint16_t *pn = vpn, *pm = vpm; |
88 | + int64_t elt2_a = (int16_t)(m >> (32 * i + 16 * sel_a)); | 77 | + float_status fpst_odd, fpst_std; |
89 | + int64_t elt2_b = (int16_t)(m >> (32 * i + 16 * sel_b)); | ||
90 | + | 78 | + |
91 | + a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i; | 79 | + /* |
92 | + } | 80 | + * Make a copy of float_status because this operation does not |
93 | + return a; | 81 | + * update the cumulative fp exception status. It also produces |
94 | +} | 82 | + * default nans. Make a second copy with round-to-odd -- see above. |
83 | + */ | ||
84 | + fpst_std = *(float_status *)vst; | ||
85 | + set_default_nan_mode(true, &fpst_std); | ||
86 | + fpst_odd = fpst_std; | ||
87 | + set_float_rounding_mode(float_round_to_odd, &fpst_odd); | ||
95 | + | 88 | + |
96 | +void HELPER(sve2_cdot_zzzz_s)(void *vd, void *vn, void *vm, | 89 | + for (row = 0; row < oprsz; ) { |
97 | + void *va, uint32_t desc) | 90 | + uint16_t prow = pn[H2(row >> 4)]; |
98 | +{ | 91 | + do { |
99 | + int opr_sz = simd_oprsz(desc); | 92 | + void *vza_row = vza + tile_vslice_offset(row); |
100 | + int rot = simd_data(desc); | 93 | + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); |
101 | + int sel_a = rot & 1; | ||
102 | + int sel_b = sel_a ^ 1; | ||
103 | + int sub_i = (rot == 0 || rot == 3 ? -1 : 1); | ||
104 | + uint32_t *d = vd, *n = vn, *m = vm, *a = va; | ||
105 | + | 94 | + |
106 | + for (int e = 0; e < opr_sz / 4; e++) { | 95 | + n = f16mop_adj_pair(n, prow, neg); |
107 | + d[e] = do_cdot_s(n[e], m[e], a[e], sel_a, sel_b, sub_i); | 96 | + |
97 | + for (col = 0; col < oprsz; ) { | ||
98 | + uint16_t pcol = pm[H2(col >> 4)]; | ||
99 | + do { | ||
100 | + if (prow & pcol & 0b0101) { | ||
101 | + uint32_t *a = vza_row + H1_4(col); | ||
102 | + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); | ||
103 | + | ||
104 | + m = f16mop_adj_pair(m, pcol, 0); | ||
105 | + *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd); | ||
106 | + | ||
107 | + col += 4; | ||
108 | + pcol >>= 4; | ||
109 | + } | ||
110 | + } while (col & 15); | ||
111 | + } | ||
112 | + row += 4; | ||
113 | + prow >>= 4; | ||
114 | + } while (row & 15); | ||
108 | + } | 115 | + } |
109 | +} | 116 | +} |
110 | + | 117 | + |
111 | +void HELPER(sve2_cdot_zzzz_d)(void *vd, void *vn, void *vm, | 118 | void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, |
112 | + void *va, uint32_t desc) | 119 | void *vpm, uint32_t desc) |
113 | +{ | 120 | { |
114 | + int opr_sz = simd_oprsz(desc); | 121 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c |
115 | + int rot = simd_data(desc); | ||
116 | + int sel_a = rot & 1; | ||
117 | + int sel_b = sel_a ^ 1; | ||
118 | + int sub_i = (rot == 0 || rot == 3 ? -1 : 1); | ||
119 | + uint64_t *d = vd, *n = vn, *m = vm, *a = va; | ||
120 | + | ||
121 | + for (int e = 0; e < opr_sz / 8; e++) { | ||
122 | + d[e] = do_cdot_d(n[e], m[e], a[e], sel_a, sel_b, sub_i); | ||
123 | + } | ||
124 | +} | ||
125 | + | ||
126 | +void HELPER(sve2_cdot_idx_s)(void *vd, void *vn, void *vm, | ||
127 | + void *va, uint32_t desc) | ||
128 | +{ | ||
129 | + int opr_sz = simd_oprsz(desc); | ||
130 | + int rot = extract32(desc, SIMD_DATA_SHIFT, 2); | ||
131 | + int idx = H4(extract32(desc, SIMD_DATA_SHIFT + 2, 2)); | ||
132 | + int sel_a = rot & 1; | ||
133 | + int sel_b = sel_a ^ 1; | ||
134 | + int sub_i = (rot == 0 || rot == 3 ? -1 : 1); | ||
135 | + uint32_t *d = vd, *n = vn, *m = vm, *a = va; | ||
136 | + | ||
137 | + for (int seg = 0; seg < opr_sz / 4; seg += 4) { | ||
138 | + uint32_t seg_m = m[seg + idx]; | ||
139 | + for (int e = 0; e < 4; e++) { | ||
140 | + d[seg + e] = do_cdot_s(n[seg + e], seg_m, a[seg + e], | ||
141 | + sel_a, sel_b, sub_i); | ||
142 | + } | ||
143 | + } | ||
144 | +} | ||
145 | + | ||
146 | +void HELPER(sve2_cdot_idx_d)(void *vd, void *vn, void *vm, | ||
147 | + void *va, uint32_t desc) | ||
148 | +{ | ||
149 | + int seg, opr_sz = simd_oprsz(desc); | ||
150 | + int rot = extract32(desc, SIMD_DATA_SHIFT, 2); | ||
151 | + int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2); | ||
152 | + int sel_a = rot & 1; | ||
153 | + int sel_b = sel_a ^ 1; | ||
154 | + int sub_i = (rot == 0 || rot == 3 ? -1 : 1); | ||
155 | + uint64_t *d = vd, *n = vn, *m = vm, *a = va; | ||
156 | + | ||
157 | + for (seg = 0; seg < opr_sz / 8; seg += 2) { | ||
158 | + uint64_t seg_m = m[seg + idx]; | ||
159 | + for (int e = 0; e < 2; e++) { | ||
160 | + d[seg + e] = do_cdot_d(n[seg + e], seg_m, a[seg + e], | ||
161 | + sel_a, sel_b, sub_i); | ||
162 | + } | ||
163 | + } | ||
164 | +} | ||
165 | + | ||
166 | #define DO_ZZXZ(NAME, TYPE, H, OP) \ | ||
167 | void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
168 | { \ | ||
169 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
170 | index XXXXXXX..XXXXXXX 100644 | 122 | index XXXXXXX..XXXXXXX 100644 |
171 | --- a/target/arm/translate-sve.c | 123 | --- a/target/arm/translate-sme.c |
172 | +++ b/target/arm/translate-sve.c | 124 | +++ b/target/arm/translate-sme.c |
173 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) | 125 | @@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, |
174 | DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h) | ||
175 | DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s) | ||
176 | |||
177 | +DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s) | ||
178 | +DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) | ||
179 | + | ||
180 | #undef DO_SVE2_RRXR_ROT | ||
181 | |||
182 | /* | ||
183 | @@ -XXX,XX +XXX,XX @@ static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a) | ||
184 | return true; | 126 | return true; |
185 | } | 127 | } |
186 | 128 | ||
187 | +static bool trans_CDOT_zzzz(DisasContext *s, arg_CMLA_zzzz *a) | 129 | +TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h) |
188 | +{ | 130 | TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) |
189 | + if (!dc_isar_feature(aa64_sve2, s) || a->esz < MO_32) { | 131 | TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) |
190 | + return false; | 132 | |
191 | + } | ||
192 | + if (sve_access_check(s)) { | ||
193 | + gen_helper_gvec_4 *fn = (a->esz == MO_32 | ||
194 | + ? gen_helper_sve2_cdot_zzzz_s | ||
195 | + : gen_helper_sve2_cdot_zzzz_d); | ||
196 | + gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->rot); | ||
197 | + } | ||
198 | + return true; | ||
199 | +} | ||
200 | + | ||
201 | static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a) | ||
202 | { | ||
203 | static gen_helper_gvec_4 * const fns[] = { | ||
204 | -- | 133 | -- |
205 | 2.20.1 | 134 | 2.25.1 |
206 | |||
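The round-to-odd sequence in f16_dotadd exists to reproduce the single rounding that the FPDot pseudocode applies to the two products and their sum. As a rough plain-C picture of the intent (a sketch only: host doubles instead of softfloat, a hypothetical name, NaN and exception-flag behaviour ignored, not bit-exact):

#include <stdio.h>

/* Approximate FPDot for two fp16 pairs already widened to float:
 * each fp16 product is exact in double; the pair sum is narrowed
 * to float in one step, then accumulated non-fused, mirroring the
 * final float32_add in the helper. */
static float fp16_dot_approx(float e1r, float e1c,
                             float e2r, float e2c, float sum)
{
    double t = (double)e1r * e2r + (double)e1c * e2c;
    return sum + (float)t;
}

int main(void)
{
    /* 1.5*2.0 + 2.0*0.25 = 3.5 */
    printf("%f\n", fp16_dot_approx(1.5f, 2.0f, 2.0f, 0.25f, 0.0f));
    return 0;
}

Each fp16 product is exact in double, but the sum of the two products is not always representable there, so a plain double add as in this sketch can round twice. That is exactly the case the helper avoids by computing the first product in round-to-odd and then folding it into a fused multiply-add rounded directly to float32.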
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | This is SMOPA, SUMOPA, USMOPA, UMOPA, for both Int8 and Int16. | ||
2 | 4 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-60-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-28-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 9 | --- |
8 | target/arm/helper.h | 14 ++++++ | 10 | target/arm/helper-sme.h | 16 ++++++++ |
9 | target/arm/sve.decode | 8 ++++ | 11 | target/arm/sme.decode | 10 +++++ |
10 | target/arm/translate-sve.c | 8 ++++ | 12 | target/arm/sme_helper.c | 82 ++++++++++++++++++++++++++++++++++++++ |
11 | target/arm/vec_helper.c | 88 ++++++++++++++++++++++++++++++++++++++ | 13 | target/arm/translate-sme.c | 10 +++++ |
12 | 4 files changed, 118 insertions(+) | 14 | 4 files changed, 118 insertions(+) |
13 | 15 | ||
14 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 16 | diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h |
15 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper.h | 18 | --- a/target/arm/helper-sme.h |
17 | +++ b/target/arm/helper.h | 19 | +++ b/target/arm/helper-sme.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, |
19 | DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 21 | void, ptr, ptr, ptr, ptr, ptr, ptr, i32) |
20 | DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 22 | DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, |
21 | 23 | void, ptr, ptr, ptr, ptr, ptr, i32) | |
22 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG, | 24 | +DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG, |
23 | + void, ptr, ptr, ptr, i32) | 25 | + void, ptr, ptr, ptr, ptr, ptr, i32) |
24 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG, | 26 | +DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG, |
25 | + void, ptr, ptr, ptr, i32) | 27 | + void, ptr, ptr, ptr, ptr, ptr, i32) |
26 | +DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG, | 28 | +DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG, |
27 | + void, ptr, ptr, ptr, i32) | 29 | + void, ptr, ptr, ptr, ptr, ptr, i32) |
30 | +DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG, | ||
37 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG, | ||
39 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
40 | diff --git a/target/arm/sme.decode b/target/arm/sme.decode | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/target/arm/sme.decode | ||
43 | +++ b/target/arm/sme.decode | ||
44 | @@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 | ||
45 | |||
46 | BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 | ||
47 | FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 | ||
28 | + | 48 | + |
29 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG, | 49 | +SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32 |
30 | + void, ptr, ptr, ptr, i32) | 50 | +SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32 |
31 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG, | 51 | +USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32 |
32 | + void, ptr, ptr, ptr, i32) | 52 | +UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32 |
33 | +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, i32) | ||
35 | + | 53 | + |
36 | DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 54 | +SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64 |
37 | 55 | +SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64 | |
38 | #ifdef TARGET_AARCH64 | 56 | +USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64 |
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 57 | +UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64 |
58 | diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | 59 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/target/arm/sve.decode | 60 | --- a/target/arm/sme_helper.c |
42 | +++ b/target/arm/sve.decode | 61 | +++ b/target/arm/sme_helper.c |
43 | @@ -XXX,XX +XXX,XX @@ SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3 | 62 | @@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, |
44 | SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2 | 63 | } while (row & 15); |
45 | SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3 | ||
46 | |||
47 | +# SVE2 saturating multiply high (indexed) | ||
48 | +SQDMULH_zzx_h 01000100 0. 1 ..... 111100 ..... ..... @rrx_3 esz=1 | ||
49 | +SQDMULH_zzx_s 01000100 10 1 ..... 111100 ..... ..... @rrx_2 esz=2 | ||
50 | +SQDMULH_zzx_d 01000100 11 1 ..... 111100 ..... ..... @rrx_1 esz=3 | ||
51 | +SQRDMULH_zzx_h 01000100 0. 1 ..... 111101 ..... ..... @rrx_3 esz=1 | ||
52 | +SQRDMULH_zzx_s 01000100 10 1 ..... 111101 ..... ..... @rrx_2 esz=2 | ||
53 | +SQRDMULH_zzx_d 01000100 11 1 ..... 111101 ..... ..... @rrx_1 esz=3 | ||
54 | + | ||
55 | # SVE2 integer multiply (indexed) | ||
56 | MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1 | ||
57 | MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2 | ||
58 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/target/arm/translate-sve.c | ||
61 | +++ b/target/arm/translate-sve.c | ||
62 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h) | ||
63 | DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s) | ||
64 | DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d) | ||
65 | |||
66 | +DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) | ||
67 | +DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) | ||
68 | +DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) | ||
69 | + | ||
70 | +DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) | ||
71 | +DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) | ||
72 | +DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) | ||
73 | + | ||
74 | #undef DO_SVE2_RRX | ||
75 | |||
76 | #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ | ||
77 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/target/arm/vec_helper.c | ||
80 | +++ b/target/arm/vec_helper.c | ||
81 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
82 | } | 64 | } |
83 | } | 65 | } |
84 | 66 | + | |
85 | +void HELPER(sve2_sqdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | 67 | +typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool); |
68 | + | ||
69 | +static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, | ||
70 | + uint8_t *pn, uint8_t *pm, | ||
71 | + uint32_t desc, IMOPFn *fn) | ||
86 | +{ | 72 | +{ |
87 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | 73 | + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; |
88 | + int idx = simd_data(desc); | 74 | + bool neg = simd_data(desc); |
89 | + int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx); | ||
90 | + uint32_t discard; | ||
91 | + | 75 | + |
92 | + for (i = 0; i < opr_sz / 2; i += 16 / 2) { | 76 | + for (row = 0; row < oprsz; ++row) { |
93 | + int16_t mm = m[i]; | 77 | + uint8_t pa = pn[H1(row)]; |
94 | + for (j = 0; j < 16 / 2; ++j) { | 78 | + uint64_t *za_row = &za[tile_vslice_index(row)]; |
95 | + d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, &discard); | 79 | + uint64_t n = zn[row]; |
80 | + | ||
81 | + for (col = 0; col < oprsz; ++col) { | ||
82 | + uint8_t pb = pm[H1(col)]; | ||
83 | + uint64_t *a = &za_row[col]; | ||
84 | + | ||
85 | + *a = fn(n, zm[col], *a, pa & pb, neg); | ||
96 | + } | 86 | + } |
97 | + } | 87 | + } |
98 | +} | 88 | +} |
99 | + | 89 | + |
100 | +void HELPER(sve2_sqrdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | 90 | +#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ |
101 | +{ | 91 | +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ |
102 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | 92 | +{ \ |
103 | + int idx = simd_data(desc); | 93 | + uint32_t sum0 = 0, sum1 = 0; \ |
104 | + int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx); | 94 | + /* Apply P to N as a mask, making the inactive elements 0. */ \ |
105 | + uint32_t discard; | 95 | + n &= expand_pred_b(p); \ |
106 | + | 96 | + sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ |
107 | + for (i = 0; i < opr_sz / 2; i += 16 / 2) { | 97 | + sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ |
108 | + int16_t mm = m[i]; | 98 | + sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ |
109 | + for (j = 0; j < 16 / 2; ++j) { | 99 | + sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ |
110 | + d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, &discard); | 100 | + sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ |
111 | + } | 101 | + sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \ |
112 | + } | 102 | + sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ |
103 | + sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \ | ||
104 | + if (neg) { \ | ||
105 | + sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \ | ||
106 | + } else { \ | ||
107 | + sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \ | ||
108 | + } \ | ||
109 | + return ((uint64_t)sum1 << 32) | sum0; \ | ||
113 | +} | 110 | +} |
114 | + | 111 | + |
115 | /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ | 112 | +#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ |
116 | int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, | 113 | +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ |
117 | bool neg, bool round, uint32_t *sat) | 114 | +{ \ |
118 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc) | 115 | + uint64_t sum = 0; \ |
119 | } | 116 | + /* Apply P to N as a mask, making the inactive elements 0. */ \ |
120 | } | 117 | + n &= expand_pred_h(p); \ |
121 | 118 | + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ | |
122 | +void HELPER(sve2_sqdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc) | 119 | + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ |
123 | +{ | 120 | + sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ |
124 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | 121 | + sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ |
125 | + int idx = simd_data(desc); | 122 | + return neg ? a - sum : a + sum; \ |
126 | + int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx); | ||
127 | + uint32_t discard; | ||
128 | + | ||
129 | + for (i = 0; i < opr_sz / 4; i += 16 / 4) { | ||
130 | + int32_t mm = m[i]; | ||
131 | + for (j = 0; j < 16 / 4; ++j) { | ||
132 | + d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, &discard); | ||
133 | + } | ||
134 | + } | ||
135 | +} | 123 | +} |
136 | + | 124 | + |
137 | +void HELPER(sve2_sqrdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc) | 125 | +DEF_IMOP_32(smopa_s, int8_t, int8_t) |
138 | +{ | 126 | +DEF_IMOP_32(umopa_s, uint8_t, uint8_t) |
139 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | 127 | +DEF_IMOP_32(sumopa_s, int8_t, uint8_t) |
140 | + int idx = simd_data(desc); | 128 | +DEF_IMOP_32(usmopa_s, uint8_t, int8_t) |
141 | + int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx); | ||
142 | + uint32_t discard; | ||
143 | + | 129 | + |
144 | + for (i = 0; i < opr_sz / 4; i += 16 / 4) { | 130 | +DEF_IMOP_64(smopa_d, int16_t, int16_t) |
145 | + int32_t mm = m[i]; | 131 | +DEF_IMOP_64(umopa_d, uint16_t, uint16_t) |
146 | + for (j = 0; j < 16 / 4; ++j) { | 132 | +DEF_IMOP_64(sumopa_d, int16_t, uint16_t) |
147 | + d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, &discard); | 133 | +DEF_IMOP_64(usmopa_d, uint16_t, int16_t) |
148 | + } | ||
149 | + } | ||
150 | +} | ||
151 | + | 134 | + |
152 | /* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */ | 135 | +#define DEF_IMOPH(NAME) \ |
153 | static int64_t do_sat128_d(Int128 r) | 136 | + void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \ |
154 | { | 137 | + void *vpm, uint32_t desc) \ |
155 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | 138 | + { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); } |
156 | } | ||
157 | } | ||
158 | |||
159 | +void HELPER(sve2_sqdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
160 | +{ | ||
161 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | ||
162 | + int idx = simd_data(desc); | ||
163 | + int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx; | ||
164 | + | 139 | + |
165 | + for (i = 0; i < opr_sz / 8; i += 16 / 8) { | 140 | +DEF_IMOPH(smopa_s) |
166 | + int64_t mm = m[i]; | 141 | +DEF_IMOPH(umopa_s) |
167 | + for (j = 0; j < 16 / 8; ++j) { | 142 | +DEF_IMOPH(sumopa_s) |
168 | + d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, false); | 143 | +DEF_IMOPH(usmopa_s) |
169 | + } | 144 | +DEF_IMOPH(smopa_d) |
170 | + } | 145 | +DEF_IMOPH(umopa_d) |
171 | +} | 146 | +DEF_IMOPH(sumopa_d) |
147 | +DEF_IMOPH(usmopa_d) | ||
148 | diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c | ||
149 | index XXXXXXX..XXXXXXX 100644 | ||
150 | --- a/target/arm/translate-sme.c | ||
151 | +++ b/target/arm/translate-sme.c | ||
152 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f | ||
153 | |||
154 | /* TODO: FEAT_EBF16 */ | ||
155 | TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) | ||
172 | + | 156 | + |
173 | +void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc) | 157 | +TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s) |
174 | +{ | 158 | +TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s) |
175 | + intptr_t i, j, opr_sz = simd_oprsz(desc); | 159 | +TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s) |
176 | + int idx = simd_data(desc); | 160 | +TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s) |
177 | + int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx; | ||
178 | + | 161 | + |
179 | + for (i = 0; i < opr_sz / 8; i += 16 / 8) { | 162 | +TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d) |
180 | + int64_t mm = m[i]; | 163 | +TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d) |
181 | + for (j = 0; j < 16 / 8; ++j) { | 164 | +TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d) |
182 | + d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, true); | 165 | +TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d) |
183 | + } | ||
184 | + } | ||
185 | +} | ||
186 | + | ||
187 | /* Integer 8 and 16-bit dot-product. | ||
188 | * | ||
189 | * Note that for the loops herein, host endianness does not matter | ||
190 | -- | 166 | -- |
191 | 2.20.1 | 167 | 2.25.1 |
192 | |||
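The DEF_IMOP_32 expansion can be sanity-checked with a standalone per-lane function. This mirrors the smopa_s arithmetic for one 64-bit lane, with predication dropped for clarity (hypothetical names, plain C):

#include <stdint.h>
#include <stdio.h>

/* Each 32-bit half of the accumulator gains four s8*s8 products
 * taken from the corresponding half of N and M. */
static uint64_t smopa_s_lane(uint64_t n, uint64_t m, uint64_t a)
{
    uint32_t sum0 = (uint32_t)a, sum1 = (uint32_t)(a >> 32);

    for (int i = 0; i < 4; i++) {
        sum0 += (int8_t)(n >> (8 * i)) * (int8_t)(m >> (8 * i));
        sum1 += (int8_t)(n >> (8 * i + 32)) * (int8_t)(m >> (8 * i + 32));
    }
    return ((uint64_t)sum1 << 32) | sum0;
}

int main(void)
{
    /* 1*2 + 2*2 + 3*2 + 4*2 = 20 = 0x14 in the low half */
    uint64_t n = 0x0000000004030201ull;
    uint64_t m = 0x0000000002020202ull;
    printf("%#llx\n", (unsigned long long)smopa_s_lane(n, m, 0));
    return 0;
}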
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | This is an SVE instruction that operates using the SVE vector | ||
4 | length but that is present only if SME is implemented. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-32-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-29-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/sve.decode | 3 ++ | 11 | target/arm/sve.decode | 20 +++++++++++++ |
9 | target/arm/translate-sve.c | 67 ++++++++++++++++++++++++++++++++++++++ | 12 | target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++ |
10 | 2 files changed, 70 insertions(+) | 13 | 2 files changed, 77 insertions(+) |
11 | 14 | ||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 15 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
13 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/sve.decode | 17 | --- a/target/arm/sve.decode |
15 | +++ b/target/arm/sve.decode | 18 | +++ b/target/arm/sve.decode |
16 | @@ -XXX,XX +XXX,XX @@ CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000 | 19 | @@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 |
17 | # SVE integer compare scalar count and limit | 20 | |
18 | WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4 | 21 | ### SVE2 floating-point bfloat16 dot-product (indexed) |
19 | 22 | BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2 | |
20 | +# SVE2 pointer conflict compare | ||
21 | +WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4 | ||
22 | + | 23 | + |
23 | ### SVE Integer Wide Immediate - Unpredicated Group | 24 | +### SVE broadcast predicate element |
24 | 25 | + | |
25 | # SVE broadcast floating-point immediate (unpredicated) | 26 | +&psel esz pd pn pm rv imm |
27 | +%psel_rv 16:2 !function=plus_12 | ||
28 | +%psel_imm_b 22:2 19:2 | ||
29 | +%psel_imm_h 22:2 20:1 | ||
30 | +%psel_imm_s 22:2 | ||
31 | +%psel_imm_d 23:1 | ||
32 | +@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \ | ||
33 | + &psel rv=%psel_rv | ||
34 | + | ||
35 | +PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \ | ||
36 | + @psel esz=0 imm=%psel_imm_b | ||
37 | +PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \ | ||
38 | + @psel esz=1 imm=%psel_imm_h | ||
39 | +PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \ | ||
40 | + @psel esz=2 imm=%psel_imm_s | ||
41 | +PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ | ||
42 | + @psel esz=3 imm=%psel_imm_d | ||
26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 43 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
27 | index XXXXXXX..XXXXXXX 100644 | 44 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/target/arm/translate-sve.c | 45 | --- a/target/arm/translate-sve.c |
29 | +++ b/target/arm/translate-sve.c | 46 | +++ b/target/arm/translate-sve.c |
30 | @@ -XXX,XX +XXX,XX @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) | 47 | @@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) |
31 | return true; | 48 | |
32 | } | 49 | TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) |
33 | 50 | TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) | |
34 | +static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) | 51 | + |
52 | +static bool trans_PSEL(DisasContext *s, arg_psel *a) | ||
35 | +{ | 53 | +{ |
36 | + TCGv_i64 op0, op1, diff, t1, tmax; | 54 | + int vl = vec_full_reg_size(s); |
37 | + TCGv_i32 t2, t3; | 55 | + int pl = pred_gvec_reg_size(s); |
56 | + int elements = vl >> a->esz; | ||
57 | + TCGv_i64 tmp, didx, dbit; | ||
38 | + TCGv_ptr ptr; | 58 | + TCGv_ptr ptr; |
39 | + unsigned vsz = vec_full_reg_size(s); | ||
40 | + unsigned desc = 0; | ||
41 | + | 59 | + |
42 | + if (!dc_isar_feature(aa64_sve2, s)) { | 60 | + if (!dc_isar_feature(aa64_sme, s)) { |
43 | + return false; | 61 | + return false; |
44 | + } | 62 | + } |
45 | + if (!sve_access_check(s)) { | 63 | + if (!sve_access_check(s)) { |
46 | + return true; | 64 | + return true; |
47 | + } | 65 | + } |
48 | + | 66 | + |
49 | + op0 = read_cpu_reg(s, a->rn, 1); | 67 | + tmp = tcg_temp_new_i64(); |
50 | + op1 = read_cpu_reg(s, a->rm, 1); | 68 | + dbit = tcg_temp_new_i64(); |
69 | + didx = tcg_temp_new_i64(); | ||
70 | + ptr = tcg_temp_new_ptr(); | ||
51 | + | 71 | + |
52 | + tmax = tcg_const_i64(vsz); | 72 | + /* Compute the predicate element. */ |
53 | + diff = tcg_temp_new_i64(); | 73 | + tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); |
54 | + | 74 | + if (is_power_of_2(elements)) { |
55 | + if (a->rw) { | 75 | + tcg_gen_andi_i64(tmp, tmp, elements - 1); |
56 | + /* WHILERW */ | ||
57 | + /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */ | ||
58 | + t1 = tcg_temp_new_i64(); | ||
59 | + tcg_gen_sub_i64(diff, op0, op1); | ||
60 | + tcg_gen_sub_i64(t1, op1, op0); | ||
61 | + tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); | ||
62 | + tcg_temp_free_i64(t1); | ||
63 | + /* Round down to a multiple of ESIZE. */ | ||
64 | + tcg_gen_andi_i64(diff, diff, -1 << a->esz); | ||
65 | + /* If op1 == op0, diff == 0, and the condition is always true. */ | ||
66 | + tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff); | ||
67 | + } else { | 76 | + } else { |
68 | + /* WHILEWR */ | 77 | + tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); |
69 | + tcg_gen_sub_i64(diff, op1, op0); | ||
70 | + /* Round down to a multiple of ESIZE. */ | ||
71 | + tcg_gen_andi_i64(diff, diff, -1 << a->esz); | ||
72 | + /* If op0 >= op1, diff <= 0, the condition is always true. */ | ||
73 | + tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff); | ||
74 | + } | 78 | + } |
75 | + | 79 | + |
76 | + /* Bound to the maximum. */ | 80 | + /* Extract the predicate byte and bit indices. */ |
77 | + tcg_gen_umin_i64(diff, diff, tmax); | 81 | + tcg_gen_shli_i64(tmp, tmp, a->esz); |
78 | + tcg_temp_free_i64(tmax); | 82 | + tcg_gen_andi_i64(dbit, tmp, 7); |
83 | + tcg_gen_shri_i64(didx, tmp, 3); | ||
84 | + if (HOST_BIG_ENDIAN) { | ||
85 | + tcg_gen_xori_i64(didx, didx, 7); | ||
86 | + } | ||
79 | + | 87 | + |
80 | + /* Since we're bounded, pass as a 32-bit type. */ | 88 | + /* Load the predicate word. */ |
81 | + t2 = tcg_temp_new_i32(); | 89 | + tcg_gen_trunc_i64_ptr(ptr, didx); |
82 | + tcg_gen_extrl_i64_i32(t2, diff); | 90 | + tcg_gen_add_ptr(ptr, ptr, cpu_env); |
83 | + tcg_temp_free_i64(diff); | 91 | + tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm)); |
84 | + | 92 | + |
85 | + desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); | 93 | + /* Extract the predicate bit and replicate to MO_64. */ |
86 | + desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); | 94 | + tcg_gen_shr_i64(tmp, tmp, dbit); |
87 | + t3 = tcg_const_i32(desc); | 95 | + tcg_gen_andi_i64(tmp, tmp, 1); |
96 | + tcg_gen_neg_i64(tmp, tmp); | ||
88 | + | 97 | + |
89 | + ptr = tcg_temp_new_ptr(); | 98 | + /* Apply to either copy the source, or write zeros. */ |
90 | + tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); | 99 | + tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), |
100 | + pred_full_reg_offset(s, a->pn), tmp, pl, pl); | ||
91 | + | 101 | + |
92 | + gen_helper_sve_whilel(t2, ptr, t2, t3); | 102 | + tcg_temp_free_i64(tmp); |
93 | + do_pred_flags(t2); | 103 | + tcg_temp_free_i64(dbit); |
94 | + | 104 | + tcg_temp_free_i64(didx); |
95 | + tcg_temp_free_ptr(ptr); | 105 | + tcg_temp_free_ptr(ptr); |
96 | + tcg_temp_free_i32(t2); | ||
97 | + tcg_temp_free_i32(t3); | ||
98 | + return true; | 106 | + return true; |
99 | +} | 107 | +} |
100 | + | ||
101 | /* | ||
102 | *** SVE Integer Wide Immediate - Unpredicated Group | ||
103 | */ | ||
104 | -- | 108 | -- |
105 | 2.20.1 | 109 | 2.25.1 |
106 | |||
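The index arithmetic that trans_PSEL emits as TCG ops corresponds to this scalar sketch (a hypothetical helper): SVE allocates one predicate bit per vector byte, so the wrapped element index, scaled by the element size in bytes, is directly the bit position within the predicate register.

#include <stdio.h>

static void psel_locate(unsigned rv, unsigned imm, unsigned elements,
                        unsigned esz, unsigned *byte, unsigned *bit)
{
    /* Wrap the element index; the translator uses andi when
     * elements is a power of two and remu otherwise. */
    unsigned i = ((rv + imm) % elements) << esz;

    *byte = i >> 3;
    *bit = i & 7;
}

int main(void)
{
    unsigned byte, bit;

    /* VL = 32 bytes: 16 halfword elements (esz = 1); element 5
     * lives at predicate bit 10, i.e. byte 1, bit 2. */
    psel_locate(5, 0, 16, 1, &byte, &bit);
    printf("byte %u bit %u\n", byte, bit);
    return 0;
}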
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | This is an SVE instruction that operates using the SVE vector | ||
4 | length but that is present only if SME is implemented. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-21-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-30-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 3 +++ | 11 | target/arm/helper-sve.h | 2 ++ |
9 | target/arm/sve.decode | 6 ++++++ | 12 | target/arm/sve.decode | 1 + |
10 | target/arm/sve_helper.c | 34 ++++++++++++++++++++++++++++++++++ | 13 | target/arm/sve_helper.c | 16 ++++++++++++++++ |
11 | target/arm/translate-sve.c | 23 +++++++++++++++++++++++ | 14 | target/arm/translate-sve.c | 2 ++ |
12 | 4 files changed, 66 insertions(+) | 15 | 4 files changed, 21 insertions(+) |
13 | 16 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 19 | --- a/target/arm/helper-sve.h |
17 | +++ b/target/arm/helper-sve.h | 20 | +++ b/target/arm/helper-sve.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG, | 21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
19 | void, ptr, ptr, ptr, ptr, i32) | 22 | |
20 | DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG, | 23 | DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
21 | void, ptr, ptr, ptr, ptr, i32) | 24 | |
25 | +DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
22 | + | 26 | + |
23 | +DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 27 | DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
24 | +DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 28 | DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) |
29 | DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 30 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
26 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/target/arm/sve.decode | 32 | --- a/target/arm/sve.decode |
28 | +++ b/target/arm/sve.decode | 33 | +++ b/target/arm/sve.decode |
29 | @@ -XXX,XX +XXX,XX @@ SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm | 34 | @@ -XXX,XX +XXX,XX @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn |
30 | SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm | 35 | REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn |
31 | UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm | 36 | REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn |
32 | UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm | 37 | RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn |
33 | + | 38 | +REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0 |
34 | +## SVE2 integer add/subtract long with carry | 39 | |
35 | + | 40 | # SVE vector splice (predicated, destructive) |
36 | +# ADC and SBC decoded via size in helper dispatch. | 41 | SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm |
37 | +ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm | ||
38 | +ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm | ||
39 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | 42 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c |
40 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/target/arm/sve_helper.c | 44 | --- a/target/arm/sve_helper.c |
42 | +++ b/target/arm/sve_helper.c | 45 | +++ b/target/arm/sve_helper.c |
43 | @@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD) | 46 | @@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) |
44 | 47 | ||
45 | #undef DO_ZZZW_ACC | 48 | DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) |
46 | 49 | ||
47 | +void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | 50 | +void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc) |
48 | +{ | 51 | +{ |
49 | + intptr_t i, opr_sz = simd_oprsz(desc); | 52 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; |
50 | + int sel = H4(extract32(desc, SIMD_DATA_SHIFT, 1)); | 53 | + uint64_t *d = vd, *n = vn; |
51 | + uint32_t inv = -extract32(desc, SIMD_DATA_SHIFT + 1, 1); | 54 | + uint8_t *pg = vg; |
52 | + uint32_t *a = va, *n = vn; | ||
53 | + uint64_t *d = vd, *m = vm; | ||
54 | + | 55 | + |
55 | + for (i = 0; i < opr_sz / 8; ++i) { | 56 | + for (i = 0; i < opr_sz; i += 2) { |
56 | + uint32_t e1 = a[2 * i + H4(0)]; | 57 | + if (pg[H1(i)] & 1) { |
57 | + uint32_t e2 = n[2 * i + sel] ^ inv; | 58 | + uint64_t n0 = n[i + 0]; |
58 | + uint64_t c = extract64(m[i], 32, 1); | 59 | + uint64_t n1 = n[i + 1]; |
59 | + /* Compute and store the entire 33-bit result at once. */ | 60 | + d[i + 0] = n1; |
60 | + d[i] = c + e1 + e2; | 61 | + d[i + 1] = n0; |
62 | + } | ||
61 | + } | 63 | + } |
62 | +} | 64 | +} |
63 | + | 65 | + |
64 | +void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | 66 | DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) |
65 | +{ | 67 | DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) |
66 | + intptr_t i, opr_sz = simd_oprsz(desc); | 68 | DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) |
67 | + int sel = extract32(desc, SIMD_DATA_SHIFT, 1); | ||
68 | + uint64_t inv = -(uint64_t)extract32(desc, SIMD_DATA_SHIFT + 1, 1); | ||
69 | + uint64_t *d = vd, *a = va, *n = vn, *m = vm; | ||
70 | + | ||
71 | + for (i = 0; i < opr_sz / 8; i += 2) { | ||
72 | + Int128 e1 = int128_make64(a[i]); | ||
73 | + Int128 e2 = int128_make64(n[i + sel] ^ inv); | ||
74 | + Int128 c = int128_make64(m[i + 1] & 1); | ||
75 | + Int128 r = int128_add(int128_add(e1, e2), c); | ||
76 | + d[i + 0] = int128_getlo(r); | ||
77 | + d[i + 1] = int128_gethi(r); | ||
78 | + } | ||
79 | +} | ||
80 | + | ||
81 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
82 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
83 | { \ | ||
84 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 69 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
85 | index XXXXXXX..XXXXXXX 100644 | 70 | index XXXXXXX..XXXXXXX 100644 |
86 | --- a/target/arm/translate-sve.c | 71 | --- a/target/arm/translate-sve.c |
87 | +++ b/target/arm/translate-sve.c | 72 | +++ b/target/arm/translate-sve.c |
88 | @@ -XXX,XX +XXX,XX @@ static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a) | 73 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) |
89 | { | 74 | TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, |
90 | return do_abal(s, a, true, true); | 75 | a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0) |
91 | } | 76 | |
77 | +TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) | ||
92 | + | 78 | + |
93 | +static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel) | 79 | TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, |
94 | +{ | 80 | gen_helper_sve_splice, a, a->esz) |
95 | + static gen_helper_gvec_4 * const fns[2] = { | 81 | |
96 | + gen_helper_sve2_adcl_s, | ||
97 | + gen_helper_sve2_adcl_d, | ||
98 | + }; | ||
99 | + /* | ||
100 | + * Note that in this case the ESZ field encodes both size and sign. | ||
101 | + * Split out 'subtract' into bit 1 of the data field for the helper. | ||
102 | + */ | ||
103 | + return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel); | ||
104 | +} | ||
105 | + | ||
106 | +static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a) | ||
107 | +{ | ||
108 | + return do_adcl(s, a, false); | ||
109 | +} | ||
110 | + | ||
111 | +static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a) | ||
112 | +{ | ||
113 | + return do_adcl(s, a, true); | ||
114 | +} | ||
115 | -- | 82 | -- |
116 | 2.20.1 | 83 | 2.25.1 |
117 | |||
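In plain C, sme_revd_q amounts to the following model: swap the two 64-bit halves of every active 128-bit element, leaving inactive elements untouched. A hypothetical standalone version (the H1() host-endian adjustment is dropped, so a little-endian host is assumed):

#include <stddef.h>
#include <stdint.h>

static void revd_q(uint64_t *d, const uint64_t *n,
                   const uint8_t *pg, size_t dwords)
{
    for (size_t i = 0; i < dwords; i += 2) {
        if (pg[i] & 1) {          /* predicate bit of the low dword */
            uint64_t n0 = n[i], n1 = n[i + 1];
            d[i] = n1;
            d[i + 1] = n0;
        }
        /* inactive elements: d keeps its previous contents */
    }
}

int main(void)
{
    uint64_t n[2] = { 1, 2 }, d[2] = { 0, 0 };
    uint8_t pg[2] = { 1, 0 };     /* element 0 active */

    revd_q(d, n, pg, 2);          /* d becomes { 2, 1 } */
    return 0;
}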
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | This is an SVE instruction that operates using the SVE vector | ||
4 | length but that is present only if SME is implemented. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-68-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-31-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper.h | 1 + | 11 | target/arm/helper.h | 18 +++++++ |
9 | target/arm/sve.decode | 4 ++++ | 12 | target/arm/sve.decode | 5 ++ |
10 | target/arm/translate-sve.c | 16 ++++++++++++++++ | 13 | target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++ |
11 | target/arm/vec_helper.c | 1 + | 14 | target/arm/vec_helper.c | 24 +++++++++ |
12 | 4 files changed, 22 insertions(+) | 15 | 4 files changed, 149 insertions(+) |
13 | 16 | ||
14 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 17 | diff --git a/target/arm/helper.h b/target/arm/helper.h |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper.h | 19 | --- a/target/arm/helper.h |
17 | +++ b/target/arm/helper.h | 20 | +++ b/target/arm/helper.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, |
19 | DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 22 | DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, |
20 | DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 23 | void, ptr, ptr, ptr, ptr, ptr, i32) |
21 | DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 24 | |
22 | +DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 25 | +DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, |
23 | 26 | + void, ptr, ptr, ptr, ptr, i32) | |
24 | DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG, | 27 | +DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, |
25 | void, ptr, ptr, ptr, ptr, i32) | 28 | + void, ptr, ptr, ptr, ptr, i32) |
29 | +DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, | ||
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, | ||
37 | + void, ptr, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, | ||
39 | + void, ptr, ptr, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, | ||
41 | + void, ptr, ptr, ptr, ptr, i32) | ||
42 | + | ||
43 | #ifdef TARGET_AARCH64 | ||
44 | #include "helper-a64.h" | ||
45 | #include "helper-sve.h" | ||
26 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode |
27 | index XXXXXXX..XXXXXXX 100644 | 47 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/target/arm/sve.decode | 48 | --- a/target/arm/sve.decode |
29 | +++ b/target/arm/sve.decode | 49 | +++ b/target/arm/sve.decode |
30 | @@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | 50 | @@ -XXX,XX +XXX,XX @@ PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \ |
31 | CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx | 51 | @psel esz=2 imm=%psel_imm_s |
32 | SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx | 52 | PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ |
33 | 53 | @psel esz=3 imm=%psel_imm_d | |
34 | +## SVE mixed sign dot product | 54 | + |
35 | + | 55 | +### SVE clamp |
36 | +USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm | 56 | + |
37 | + | 57 | +SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm |
38 | ### SVE2 floating point matrix multiply accumulate | 58 | +UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm |
39 | |||
40 | FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm | ||
41 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | 59 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c |
42 | index XXXXXXX..XXXXXXX 100644 | 60 | index XXXXXXX..XXXXXXX 100644 |
43 | --- a/target/arm/translate-sve.c | 61 | --- a/target/arm/translate-sve.c |
44 | +++ b/target/arm/translate-sve.c | 62 | +++ b/target/arm/translate-sve.c |
45 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a) | 63 | @@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) |
46 | } | 64 | tcg_temp_free_ptr(ptr); |
47 | return true; | 65 | return true; |
48 | } | 66 | } |
49 | + | 67 | + |
50 | +static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a) | 68 | +static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) |
51 | +{ | 69 | +{ |
52 | + if (a->esz != 2 || !dc_isar_feature(aa64_sve_i8mm, s)) { | 70 | + tcg_gen_smax_i32(d, a, n); |
53 | + return false; | 71 | + tcg_gen_smin_i32(d, d, m); |
54 | + } | 72 | +} |
55 | + if (sve_access_check(s)) { | 73 | + |
56 | + unsigned vsz = vec_full_reg_size(s); | 74 | +static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) |
57 | + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd), | 75 | +{ |
58 | + vec_full_reg_offset(s, a->rn), | 76 | + tcg_gen_smax_i64(d, a, n); |
59 | + vec_full_reg_offset(s, a->rm), | 77 | + tcg_gen_smin_i64(d, d, m); |
60 | + vec_full_reg_offset(s, a->ra), | 78 | +} |
61 | + vsz, vsz, 0, gen_helper_gvec_usdot_b); | 79 | + |
62 | + } | 80 | +static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, |
63 | + return true; | 81 | + TCGv_vec m, TCGv_vec a) |
64 | +} | 82 | +{ |
83 | + tcg_gen_smax_vec(vece, d, a, n); | ||
84 | + tcg_gen_smin_vec(vece, d, d, m); | ||
85 | +} | ||
86 | + | ||
87 | +static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
88 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
89 | +{ | ||
90 | + static const TCGOpcode vecop[] = { | ||
91 | + INDEX_op_smin_vec, INDEX_op_smax_vec, 0 | ||
92 | + }; | ||
93 | + static const GVecGen4 ops[4] = { | ||
94 | + { .fniv = gen_sclamp_vec, | ||
95 | + .fno = gen_helper_gvec_sclamp_b, | ||
96 | + .opt_opc = vecop, | ||
97 | + .vece = MO_8 }, | ||
98 | + { .fniv = gen_sclamp_vec, | ||
99 | + .fno = gen_helper_gvec_sclamp_h, | ||
100 | + .opt_opc = vecop, | ||
101 | + .vece = MO_16 }, | ||
102 | + { .fni4 = gen_sclamp_i32, | ||
103 | + .fniv = gen_sclamp_vec, | ||
104 | + .fno = gen_helper_gvec_sclamp_s, | ||
105 | + .opt_opc = vecop, | ||
106 | + .vece = MO_32 }, | ||
107 | + { .fni8 = gen_sclamp_i64, | ||
108 | + .fniv = gen_sclamp_vec, | ||
109 | + .fno = gen_helper_gvec_sclamp_d, | ||
110 | + .opt_opc = vecop, | ||
111 | + .vece = MO_64, | ||
112 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } | ||
113 | + }; | ||
114 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); | ||
115 | +} | ||
116 | + | ||
117 | +TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) | ||
118 | + | ||
119 | +static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) | ||
120 | +{ | ||
121 | + tcg_gen_umax_i32(d, a, n); | ||
122 | + tcg_gen_umin_i32(d, d, m); | ||
123 | +} | ||
124 | + | ||
125 | +static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) | ||
126 | +{ | ||
127 | + tcg_gen_umax_i64(d, a, n); | ||
128 | + tcg_gen_umin_i64(d, d, m); | ||
129 | +} | ||
130 | + | ||
131 | +static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, | ||
132 | + TCGv_vec m, TCGv_vec a) | ||
133 | +{ | ||
134 | + tcg_gen_umax_vec(vece, d, a, n); | ||
135 | + tcg_gen_umin_vec(vece, d, d, m); | ||
136 | +} | ||
137 | + | ||
138 | +static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, | ||
139 | + uint32_t a, uint32_t oprsz, uint32_t maxsz) | ||
140 | +{ | ||
141 | + static const TCGOpcode vecop[] = { | ||
142 | + INDEX_op_umin_vec, INDEX_op_umax_vec, 0 | ||
143 | + }; | ||
144 | + static const GVecGen4 ops[4] = { | ||
145 | + { .fniv = gen_uclamp_vec, | ||
146 | + .fno = gen_helper_gvec_uclamp_b, | ||
147 | + .opt_opc = vecop, | ||
148 | + .vece = MO_8 }, | ||
149 | + { .fniv = gen_uclamp_vec, | ||
150 | + .fno = gen_helper_gvec_uclamp_h, | ||
151 | + .opt_opc = vecop, | ||
152 | + .vece = MO_16 }, | ||
153 | + { .fni4 = gen_uclamp_i32, | ||
154 | + .fniv = gen_uclamp_vec, | ||
155 | + .fno = gen_helper_gvec_uclamp_s, | ||
156 | + .opt_opc = vecop, | ||
157 | + .vece = MO_32 }, | ||
158 | + { .fni8 = gen_uclamp_i64, | ||
159 | + .fniv = gen_uclamp_vec, | ||
160 | + .fno = gen_helper_gvec_uclamp_d, | ||
161 | + .opt_opc = vecop, | ||
162 | + .vece = MO_64, | ||
163 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } | ||
164 | + }; | ||
165 | + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); | ||
166 | +} | ||
167 | + | ||
168 | +TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) | ||
65 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | 169 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c |
66 | index XXXXXXX..XXXXXXX 100644 | 170 | index XXXXXXX..XXXXXXX 100644 |
67 | --- a/target/arm/vec_helper.c | 171 | --- a/target/arm/vec_helper.c |
68 | +++ b/target/arm/vec_helper.c | 172 | +++ b/target/arm/vec_helper.c |
69 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | 173 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, |
70 | 174 | } | |
71 | DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t) | 175 | clear_tail(d, opr_sz, simd_maxsz(desc)); |
72 | DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t) | 176 | } |
73 | +DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t) | 177 | + |
74 | DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t) | 178 | +#define DO_CLAMP(NAME, TYPE) \ |
75 | DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t) | 179 | +void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \ |
76 | 180 | +{ \ | |
181 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
182 | + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ | ||
183 | + TYPE aa = *(TYPE *)(a + i); \ | ||
184 | + TYPE nn = *(TYPE *)(n + i); \ | ||
185 | + TYPE mm = *(TYPE *)(m + i); \ | ||
186 | + TYPE dd = MIN(MAX(aa, nn), mm); \ | ||
187 | + *(TYPE *)(d + i) = dd; \ | ||
188 | + } \ | ||
189 | + clear_tail(d, opr_sz, simd_maxsz(desc)); \ | ||
190 | +} | ||
191 | + | ||
192 | +DO_CLAMP(gvec_sclamp_b, int8_t) | ||
193 | +DO_CLAMP(gvec_sclamp_h, int16_t) | ||
194 | +DO_CLAMP(gvec_sclamp_s, int32_t) | ||
195 | +DO_CLAMP(gvec_sclamp_d, int64_t) | ||
196 | + | ||
197 | +DO_CLAMP(gvec_uclamp_b, uint8_t) | ||
198 | +DO_CLAMP(gvec_uclamp_h, uint16_t) | ||
199 | +DO_CLAMP(gvec_uclamp_s, uint32_t) | ||
200 | +DO_CLAMP(gvec_uclamp_d, uint64_t) | ||
77 | -- | 201 | -- |
78 | 2.20.1 | 202 | 2.25.1 |
79 | |||
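A note on the clamp helpers in the patch above: per element, SCLAMP and
UCLAMP compute min(max(a, n), m) with signed or unsigned comparisons,
exactly as the DO_CLAMP macro spells out. A minimal standalone C sketch
of the signed 32-bit case (illustrative only; the function name and test
values below are made up, not the in-tree helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* One SCLAMP.S element: d = smin(smax(a, n), m), per DO_CLAMP. */
    static int32_t sclamp_s(int32_t n, int32_t m, int32_t a)
    {
        int32_t lo = a > n ? a : n;     /* smax(a, n) */
        return lo < m ? lo : m;         /* smin(lo, m) */
    }

    int main(void)
    {
        /* n is the lower bound, m the upper bound, a the value clamped. */
        printf("%d\n", sclamp_s(-10, 10, 42));   /* -> 10 */
        printf("%d\n", sclamp_s(-10, 10, -42));  /* -> -10 */
        printf("%d\n", sclamp_s(-10, 10, 3));    /* -> 3 */
        return 0;
    }

The gvec expansion picks the smax/smin vector opcodes where the TCG
backend has them and falls back to the out-of-line gvec_sclamp_* and
gvec_uclamp_* helpers otherwise.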
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | This will be used for SVE2 ISA subset enablement. | 3 | We can handle both exception entry and exception return by |
4 | hooking into aarch64_sve_change_el. | ||
4 | 5 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20210525010358.152808-2-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-32-richard.henderson@linaro.org |
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 10 | --- |
11 | target/arm/cpu.h | 16 ++++++++++++++++ | 11 | target/arm/helper.c | 15 +++++++++++++-- |
12 | target/arm/helper.c | 3 +-- | 12 | 1 file changed, 13 insertions(+), 2 deletions(-) |
13 | target/arm/kvm64.c | 21 +++++++++++++++------ | ||
14 | 3 files changed, 32 insertions(+), 8 deletions(-) | ||
15 | 13 | ||
16 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/target/arm/cpu.h | ||
19 | +++ b/target/arm/cpu.h | ||
20 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | ||
21 | uint64_t id_aa64mmfr2; | ||
22 | uint64_t id_aa64dfr0; | ||
23 | uint64_t id_aa64dfr1; | ||
24 | + uint64_t id_aa64zfr0; | ||
25 | } isar; | ||
26 | uint64_t midr; | ||
27 | uint32_t revidr; | ||
28 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) | ||
29 | FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) | ||
30 | FIELD(ID_AA64DFR0, MTPMU, 48, 4) | ||
31 | |||
32 | +FIELD(ID_AA64ZFR0, SVEVER, 0, 4) | ||
33 | +FIELD(ID_AA64ZFR0, AES, 4, 4) | ||
34 | +FIELD(ID_AA64ZFR0, BITPERM, 16, 4) | ||
35 | +FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4) | ||
36 | +FIELD(ID_AA64ZFR0, SHA3, 32, 4) | ||
37 | +FIELD(ID_AA64ZFR0, SM4, 40, 4) | ||
38 | +FIELD(ID_AA64ZFR0, I8MM, 44, 4) | ||
39 | +FIELD(ID_AA64ZFR0, F32MM, 52, 4) | ||
40 | +FIELD(ID_AA64ZFR0, F64MM, 56, 4) | ||
41 | + | ||
42 | FIELD(ID_DFR0, COPDBG, 0, 4) | ||
43 | FIELD(ID_DFR0, COPSDBG, 4, 4) | ||
44 | FIELD(ID_DFR0, MMAPDBG, 8, 4) | ||
45 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) | ||
46 | return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; | ||
47 | } | ||
48 | |||
49 | +static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) | ||
50 | +{ | ||
51 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; | ||
52 | +} | ||
53 | + | ||
54 | /* | ||
55 | * Feature tests for "does this exist in either 32-bit or 64-bit?" | ||
56 | */ | ||
57 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 14 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
58 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
59 | --- a/target/arm/helper.c | 16 | --- a/target/arm/helper.c |
60 | +++ b/target/arm/helper.c | 17 | +++ b/target/arm/helper.c |
61 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 18 | @@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, |
62 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, | 19 | return; |
63 | .access = PL1_R, .type = ARM_CP_CONST, | 20 | } |
64 | .accessfn = access_aa64_tid3, | 21 | |
65 | - /* At present, only SVEver == 0 is defined anyway. */ | 22 | + old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; |
66 | - .resetvalue = 0 }, | 23 | + new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; |
67 | + .resetvalue = cpu->isar.id_aa64zfr0 }, | ||
68 | { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | ||
69 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, | ||
70 | .access = PL1_R, .type = ARM_CP_CONST, | ||
71 | diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c | ||
72 | index XXXXXXX..XXXXXXX 100644 | ||
73 | --- a/target/arm/kvm64.c | ||
74 | +++ b/target/arm/kvm64.c | ||
75 | @@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) | ||
76 | |||
77 | sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0; | ||
78 | |||
79 | - kvm_arm_destroy_scratch_host_vcpu(fdarray); | ||
80 | - | ||
81 | - if (err < 0) { | ||
82 | - return false; | ||
83 | - } | ||
84 | - | ||
85 | /* Add feature bits that can't appear until after VCPU init. */ | ||
86 | if (sve_supported) { | ||
87 | t = ahcf->isar.id_aa64pfr0; | ||
88 | t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); | ||
89 | ahcf->isar.id_aa64pfr0 = t; | ||
90 | + | 24 | + |
91 | + /* | 25 | + /* |
92 | + * Before v5.1, KVM did not support SVE and did not expose | 26 | + * Both AArch64.TakeException and AArch64.ExceptionReturn |
93 | + * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does | 27 | + * invoke ResetSVEState when taking an exception from, or |
94 | + * not expose the register to "user" requests like this | 28 | + * returning to, AArch32 state when PSTATE.SM is enabled. |
95 | + * unless the host supports SVE. | 29 | + */ |
96 | + */ | 30 | + if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) { |
97 | + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, | 31 | + arm_reset_sve_state(env); |
98 | + ARM64_SYS_REG(3, 0, 0, 4, 4)); | 32 | + return; |
99 | + } | 33 | + } |
100 | + | 34 | + |
101 | + kvm_arm_destroy_scratch_host_vcpu(fdarray); | ||
102 | + | ||
103 | + if (err < 0) { | ||
104 | + return false; | ||
105 | } | ||
106 | |||
107 | /* | 35 | /* |
36 | * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped | ||
37 | * at ELx, or not available because the EL is in AArch32 state, then | ||
38 | @@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, | ||
39 | * we already have the correct register contents when encountering the | ||
40 | * vq0->vq0 transition between EL0->EL1. | ||
41 | */ | ||
42 | - old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; | ||
43 | old_len = (old_a64 && !sve_exception_el(env, old_el) | ||
44 | ? sve_vqm1_for_el(env, old_el) : 0); | ||
45 | - new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; | ||
46 | new_len = (new_a64 && !sve_exception_el(env, new_el) | ||
47 | ? sve_vqm1_for_el(env, new_el) : 0); | ||
48 | |||
108 | -- | 49 | -- |
109 | 2.20.1 | 50 | 2.25.1 |
110 | |||
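A note on the ID-register plumbing above: FIELD_EX64() and FIELD_DP64()
extract and deposit a named bitfield, given the shift and length recorded
by the corresponding FIELD() definition (e.g. ID_AA64ZFR0.SVEver at bits
[3:0]). A simplified stand-in showing the intended semantics (the two
functions below are assumptions for illustration, not QEMU's
registerfields.h macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract a bitfield of 'len' bits starting at 'shift'. */
    static uint64_t ex64(uint64_t reg, unsigned shift, unsigned len)
    {
        return (reg >> shift) & ((1ull << len) - 1);
    }

    /* Deposit 'val' into the same bitfield, leaving other bits intact. */
    static uint64_t dp64(uint64_t reg, unsigned shift, unsigned len,
                         uint64_t val)
    {
        uint64_t mask = ((1ull << len) - 1) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint64_t zfr0 = dp64(0, 0, 4, 1);            /* SVEver = 1 */
        printf("sve2: %d\n", ex64(zfr0, 0, 4) != 0); /* -> 1 */
        return 0;
    }

This is why exposing cpu->isar.id_aa64zfr0 as the ID_AA64ZFR0_EL1 reset
value is enough for isar_feature_aa64_sve2() to key off SVEver != 0.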
1 | From: Rebecca Cran <rebecca@nuviainc.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Indicate support for FEAT_TLBIOS and FEAT_TLBIRANGE by setting | 3 | Note that SME remains effectively disabled for user-only, |
4 | ID_AA64ISAR0.TLB to 2 for the max AArch64 CPU type. | 4 | because we do not yet set CPACR_EL1.SMEN. This needs to |
5 | wait until the kernel ABI is implemented. | ||
5 | 6 | ||
6 | Signed-off-by: Rebecca Cran <rebecca@nuviainc.com> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20210512182337.18563-4-rebecca@nuviainc.com | 9 | Message-id: 20220708151540.18136-33-richard.henderson@linaro.org |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 11 | --- |
11 | target/arm/cpu64.c | 1 + | 12 | docs/system/arm/emulation.rst | 4 ++++ |
12 | 1 file changed, 1 insertion(+) | 13 | target/arm/cpu64.c | 11 +++++++++++ |
14 | 2 files changed, 15 insertions(+) | ||
13 | 15 | ||
16 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/docs/system/arm/emulation.rst | ||
19 | +++ b/docs/system/arm/emulation.rst | ||
20 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: | ||
21 | - FEAT_SHA512 (Advanced SIMD SHA512 instructions) | ||
22 | - FEAT_SM3 (Advanced SIMD SM3 instructions) | ||
23 | - FEAT_SM4 (Advanced SIMD SM4 instructions) | ||
24 | +- FEAT_SME (Scalable Matrix Extension) | ||
25 | +- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode) | ||
26 | +- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions) | ||
27 | +- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions) | ||
28 | - FEAT_SPECRES (Speculation restriction instructions) | ||
29 | - FEAT_SSBS (Speculative Store Bypass Safe) | ||
30 | - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain) | ||
14 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | 31 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c |
15 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/cpu64.c | 33 | --- a/target/arm/cpu64.c |
17 | +++ b/target/arm/cpu64.c | 34 | +++ b/target/arm/cpu64.c |
18 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | 35 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) |
19 | t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); | 36 | */ |
20 | t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); | 37 | t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ |
21 | t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */ | 38 | t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ |
22 | + t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ | 39 | + t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ |
23 | t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); | 40 | t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ |
24 | cpu->isar.id_aa64isar0 = t; | 41 | cpu->isar.id_aa64pfr1 = t; |
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
44 | t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */ | ||
45 | cpu->isar.id_aa64dfr0 = t; | ||
46 | |||
47 | + t = cpu->isar.id_aa64smfr0; | ||
48 | + t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ | ||
49 | + t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ | ||
50 | + t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ | ||
51 | + t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ | ||
52 | + t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ | ||
53 | + t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ | ||
54 | + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ | ||
55 | + cpu->isar.id_aa64smfr0 = t; | ||
56 | + | ||
57 | /* Replicate the same data to the 32-bit id registers. */ | ||
58 | aa32_max_features(cpu); | ||
25 | 59 | ||
26 | -- | 60 | -- |
27 | 2.20.1 | 61 | 2.25.1 |
28 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
2 | 1 | ||
3 | When selecting an ARM target on Debian unstable, we get: | ||
4 | |||
5 | Compiling C++ object libcommon.fa.p/disas_libvixl_vixl_utils.cc.o | ||
6 | FAILED: libcommon.fa.p/disas_libvixl_vixl_utils.cc.o | ||
7 | c++ -Ilibcommon.fa.p -I. -I.. [...] -o libcommon.fa.p/disas_libvixl_vixl_utils.cc.o -c ../disas/libvixl/vixl/utils.cc | ||
8 | In file included from /home/philmd/qemu/disas/libvixl/vixl/utils.h:30, | ||
9 | from ../disas/libvixl/vixl/utils.cc:27: | ||
10 | /usr/include/string.h:36:43: error: missing binary operator before token "(" | ||
11 | 36 | #if defined __cplusplus && (__GNUC_PREREQ (4, 4) \ | ||
12 | | ^ | ||
13 | /usr/include/string.h:53:62: error: missing binary operator before token "(" | ||
14 | 53 | #if defined __USE_MISC || defined __USE_XOPEN || __GLIBC_USE (ISOC2X) | ||
15 | | ^ | ||
16 | /usr/include/string.h:165:21: error: missing binary operator before token "(" | ||
17 | 165 | || __GLIBC_USE (LIB_EXT2) || __GLIBC_USE (ISOC2X)) | ||
18 | | ^ | ||
19 | /usr/include/string.h:174:43: error: missing binary operator before token "(" | ||
20 | 174 | #if defined __USE_XOPEN2K8 || __GLIBC_USE (LIB_EXT2) || __GLIBC_USE (ISOC2X) | ||
21 | | ^ | ||
22 | /usr/include/string.h:492:19: error: missing binary operator before token "(" | ||
23 | 492 | #if __GNUC_PREREQ (3,4) | ||
24 | | ^ | ||
25 | |||
26 | Relevant information from the host: | ||
27 | |||
28 | $ lsb_release -d | ||
29 | Description: Debian GNU/Linux 11 (bullseye) | ||
30 | $ gcc --version | ||
31 | gcc (Debian 10.2.1-6) 10.2.1 20210110 | ||
32 | $ dpkg -S /usr/include/string.h | ||
33 | libc6-dev: /usr/include/string.h | ||
34 | $ apt-cache show libc6-dev | ||
35 | Package: libc6-dev | ||
36 | Version: 2.31-11 | ||
37 | |||
38 | Partially cherry-pick vixl commit 78973f258039f6e96 [*]: | ||
39 | |||
40 | Refactor VIXL to use `extern` block when including C header | ||
41 | that do not have a C++ counterpart. | ||
42 | |||
43 | which is similar to commit 875df03b221 ('osdep: protect qemu/osdep.h | ||
44 | with extern "C"'). | ||
45 | |||
46 | [*] https://git.linaro.org/arm/vixl.git/commit/?id=78973f258039f6e96 | ||
47 | |||
48 | Buglink: https://bugs.launchpad.net/qemu/+bug/1914870 | ||
49 | Suggested-by: Thomas Huth <thuth@redhat.com> | ||
50 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
51 | Reviewed-by: Thomas Huth <thuth@redhat.com> | ||
52 | Message-id: 20210516171023.510778-1-f4bug@amsat.org | ||
53 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
54 | --- | ||
55 | disas/libvixl/vixl/code-buffer.h | 2 +- | ||
56 | disas/libvixl/vixl/globals.h | 16 +++++++++------- | ||
57 | disas/libvixl/vixl/invalset.h | 2 +- | ||
58 | disas/libvixl/vixl/platform.h | 2 ++ | ||
59 | disas/libvixl/vixl/utils.h | 2 +- | ||
60 | disas/libvixl/vixl/utils.cc | 2 +- | ||
61 | 6 files changed, 15 insertions(+), 11 deletions(-) | ||
62 | |||
63 | diff --git a/disas/libvixl/vixl/code-buffer.h b/disas/libvixl/vixl/code-buffer.h | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/disas/libvixl/vixl/code-buffer.h | ||
66 | +++ b/disas/libvixl/vixl/code-buffer.h | ||
67 | @@ -XXX,XX +XXX,XX @@ | ||
68 | #ifndef VIXL_CODE_BUFFER_H | ||
69 | #define VIXL_CODE_BUFFER_H | ||
70 | |||
71 | -#include <string.h> | ||
72 | +#include <cstring> | ||
73 | #include "vixl/globals.h" | ||
74 | |||
75 | namespace vixl { | ||
76 | diff --git a/disas/libvixl/vixl/globals.h b/disas/libvixl/vixl/globals.h | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/disas/libvixl/vixl/globals.h | ||
79 | +++ b/disas/libvixl/vixl/globals.h | ||
80 | @@ -XXX,XX +XXX,XX @@ | ||
81 | #define __STDC_FORMAT_MACROS | ||
82 | #endif | ||
83 | |||
84 | -#include <stdint.h> | ||
85 | +extern "C" { | ||
86 | #include <inttypes.h> | ||
87 | - | ||
88 | -#include <assert.h> | ||
89 | -#include <stdarg.h> | ||
90 | -#include <stdio.h> | ||
91 | #include <stdint.h> | ||
92 | -#include <stdlib.h> | ||
93 | -#include <stddef.h> | ||
94 | +} | ||
95 | + | ||
96 | +#include <cassert> | ||
97 | +#include <cstdarg> | ||
98 | +#include <cstddef> | ||
99 | +#include <cstdio> | ||
100 | +#include <cstdlib> | ||
101 | + | ||
102 | #include "vixl/platform.h" | ||
103 | |||
104 | |||
105 | diff --git a/disas/libvixl/vixl/invalset.h b/disas/libvixl/vixl/invalset.h | ||
106 | index XXXXXXX..XXXXXXX 100644 | ||
107 | --- a/disas/libvixl/vixl/invalset.h | ||
108 | +++ b/disas/libvixl/vixl/invalset.h | ||
109 | @@ -XXX,XX +XXX,XX @@ | ||
110 | #ifndef VIXL_INVALSET_H_ | ||
111 | #define VIXL_INVALSET_H_ | ||
112 | |||
113 | -#include <string.h> | ||
114 | +#include <cstring> | ||
115 | |||
116 | #include <algorithm> | ||
117 | #include <vector> | ||
118 | diff --git a/disas/libvixl/vixl/platform.h b/disas/libvixl/vixl/platform.h | ||
119 | index XXXXXXX..XXXXXXX 100644 | ||
120 | --- a/disas/libvixl/vixl/platform.h | ||
121 | +++ b/disas/libvixl/vixl/platform.h | ||
122 | @@ -XXX,XX +XXX,XX @@ | ||
123 | #define PLATFORM_H | ||
124 | |||
125 | // Define platform specific functionalities. | ||
126 | +extern "C" { | ||
127 | #include <signal.h> | ||
128 | +} | ||
129 | |||
130 | namespace vixl { | ||
131 | inline void HostBreakpoint() { raise(SIGINT); } | ||
132 | diff --git a/disas/libvixl/vixl/utils.h b/disas/libvixl/vixl/utils.h | ||
133 | index XXXXXXX..XXXXXXX 100644 | ||
134 | --- a/disas/libvixl/vixl/utils.h | ||
135 | +++ b/disas/libvixl/vixl/utils.h | ||
136 | @@ -XXX,XX +XXX,XX @@ | ||
137 | #ifndef VIXL_UTILS_H | ||
138 | #define VIXL_UTILS_H | ||
139 | |||
140 | -#include <string.h> | ||
141 | #include <cmath> | ||
142 | +#include <cstring> | ||
143 | #include "vixl/globals.h" | ||
144 | #include "vixl/compiler-intrinsics.h" | ||
145 | |||
146 | diff --git a/disas/libvixl/vixl/utils.cc b/disas/libvixl/vixl/utils.cc | ||
147 | index XXXXXXX..XXXXXXX 100644 | ||
148 | --- a/disas/libvixl/vixl/utils.cc | ||
149 | +++ b/disas/libvixl/vixl/utils.cc | ||
150 | @@ -XXX,XX +XXX,XX @@ | ||
151 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
152 | |||
153 | #include "vixl/utils.h" | ||
154 | -#include <stdio.h> | ||
155 | +#include <cstdio> | ||
156 | |||
157 | namespace vixl { | ||
158 | |||
159 | -- | ||
160 | 2.20.1 | ||
161 | |||
162 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-29-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-34-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 16 +++++++ | 8 | linux-user/aarch64/target_cpu.h | 5 ++++- |
9 | target/arm/sve.decode | 4 ++ | 9 | 1 file changed, 4 insertions(+), 1 deletion(-) |
10 | target/arm/sve_helper.c | 24 ++++++++++ | ||
11 | target/arm/translate-sve.c | 93 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 137 insertions(+) | ||
13 | 10 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 11 | diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h |
15 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 13 | --- a/linux-user/aarch64/target_cpu.h |
17 | +++ b/target/arm/helper-sve.h | 14 | +++ b/linux-user/aarch64/target_cpu.h |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 15 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags) |
19 | DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 16 | |
20 | DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 17 | static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) |
21 | 18 | { | |
22 | +DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 19 | - /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is |
23 | +DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 20 | + /* |
24 | +DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 21 | + * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is |
25 | + | 22 | * different from AArch32 Linux, which uses TPIDRRO. |
26 | +DEF_HELPER_FLAGS_3(sve2_uqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 23 | */ |
27 | +DEF_HELPER_FLAGS_3(sve2_uqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 24 | env->cp15.tpidr_el[0] = newtls; |
28 | +DEF_HELPER_FLAGS_3(sve2_uqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 25 | + /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */ |
29 | + | 26 | + env->cp15.tpidr2_el0 = 0; |
30 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
37 | + | ||
38 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
39 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
40 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
41 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/target/arm/sve.decode | ||
44 | +++ b/target/arm/sve.decode | ||
45 | @@ -XXX,XX +XXX,XX @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr | ||
46 | SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr | ||
47 | RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr | ||
48 | RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr | ||
49 | +UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr | ||
50 | +UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr | ||
51 | +UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr | ||
52 | +UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr | ||
53 | |||
54 | ## SVE2 floating-point pairwise operations | ||
55 | |||
56 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/target/arm/sve_helper.c | ||
59 | +++ b/target/arm/sve_helper.c | ||
60 | @@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H) | ||
61 | DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S) | ||
62 | DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D) | ||
63 | |||
64 | +#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX) | ||
65 | +#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX) | ||
66 | +#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX) | ||
67 | + | ||
68 | +DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H) | ||
69 | +DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S) | ||
70 | +DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D) | ||
71 | + | ||
72 | +DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H) | ||
73 | +DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S) | ||
74 | +DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQSHRN_D) | ||
75 | + | ||
76 | +#define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX) | ||
77 | +#define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX) | ||
78 | +#define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX) | ||
79 | + | ||
80 | +DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H) | ||
81 | +DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S) | ||
82 | +DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D) | ||
83 | + | ||
84 | +DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H) | ||
85 | +DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S) | ||
86 | +DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D) | ||
87 | + | ||
88 | #undef DO_SHRNB | ||
89 | #undef DO_SHRNT | ||
90 | |||
91 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
92 | index XXXXXXX..XXXXXXX 100644 | ||
93 | --- a/target/arm/translate-sve.c | ||
94 | +++ b/target/arm/translate-sve.c | ||
95 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a) | ||
96 | return do_sve2_shr_narrow(s, a, ops); | ||
97 | } | 27 | } |
98 | 28 | ||
99 | +static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, | 29 | static inline abi_ulong get_sp_from_cpustate(CPUARMState *state) |
100 | + TCGv_vec n, int64_t shr) | ||
101 | +{ | ||
102 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
103 | + int halfbits = 4 << vece; | ||
104 | + | ||
105 | + tcg_gen_shri_vec(vece, n, n, shr); | ||
106 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
107 | + tcg_gen_umin_vec(vece, d, n, t); | ||
108 | + tcg_temp_free_vec(t); | ||
109 | +} | ||
110 | + | ||
111 | +static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a) | ||
112 | +{ | ||
113 | + static const TCGOpcode vec_list[] = { | ||
114 | + INDEX_op_shri_vec, INDEX_op_umin_vec, 0 | ||
115 | + }; | ||
116 | + static const GVecGen2i ops[3] = { | ||
117 | + { .fniv = gen_uqshrnb_vec, | ||
118 | + .opt_opc = vec_list, | ||
119 | + .fno = gen_helper_sve2_uqshrnb_h, | ||
120 | + .vece = MO_16 }, | ||
121 | + { .fniv = gen_uqshrnb_vec, | ||
122 | + .opt_opc = vec_list, | ||
123 | + .fno = gen_helper_sve2_uqshrnb_s, | ||
124 | + .vece = MO_32 }, | ||
125 | + { .fniv = gen_uqshrnb_vec, | ||
126 | + .opt_opc = vec_list, | ||
127 | + .fno = gen_helper_sve2_uqshrnb_d, | ||
128 | + .vece = MO_64 }, | ||
129 | + }; | ||
130 | + return do_sve2_shr_narrow(s, a, ops); | ||
131 | +} | ||
132 | + | ||
133 | +static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, | ||
134 | + TCGv_vec n, int64_t shr) | ||
135 | +{ | ||
136 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
137 | + int halfbits = 4 << vece; | ||
138 | + | ||
139 | + tcg_gen_shri_vec(vece, n, n, shr); | ||
140 | + tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); | ||
141 | + tcg_gen_umin_vec(vece, n, n, t); | ||
142 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
143 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
144 | + tcg_temp_free_vec(t); | ||
145 | +} | ||
146 | + | ||
147 | +static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a) | ||
148 | +{ | ||
149 | + static const TCGOpcode vec_list[] = { | ||
150 | + INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0 | ||
151 | + }; | ||
152 | + static const GVecGen2i ops[3] = { | ||
153 | + { .fniv = gen_uqshrnt_vec, | ||
154 | + .opt_opc = vec_list, | ||
155 | + .load_dest = true, | ||
156 | + .fno = gen_helper_sve2_uqshrnt_h, | ||
157 | + .vece = MO_16 }, | ||
158 | + { .fniv = gen_uqshrnt_vec, | ||
159 | + .opt_opc = vec_list, | ||
160 | + .load_dest = true, | ||
161 | + .fno = gen_helper_sve2_uqshrnt_s, | ||
162 | + .vece = MO_32 }, | ||
163 | + { .fniv = gen_uqshrnt_vec, | ||
164 | + .opt_opc = vec_list, | ||
165 | + .load_dest = true, | ||
166 | + .fno = gen_helper_sve2_uqshrnt_d, | ||
167 | + .vece = MO_64 }, | ||
168 | + }; | ||
169 | + return do_sve2_shr_narrow(s, a, ops); | ||
170 | +} | ||
171 | + | ||
172 | +static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a) | ||
173 | +{ | ||
174 | + static const GVecGen2i ops[3] = { | ||
175 | + { .fno = gen_helper_sve2_uqrshrnb_h }, | ||
176 | + { .fno = gen_helper_sve2_uqrshrnb_s }, | ||
177 | + { .fno = gen_helper_sve2_uqrshrnb_d }, | ||
178 | + }; | ||
179 | + return do_sve2_shr_narrow(s, a, ops); | ||
180 | +} | ||
181 | + | ||
182 | +static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a) | ||
183 | +{ | ||
184 | + static const GVecGen2i ops[3] = { | ||
185 | + { .fno = gen_helper_sve2_uqrshrnt_h }, | ||
186 | + { .fno = gen_helper_sve2_uqrshrnt_s }, | ||
187 | + { .fno = gen_helper_sve2_uqrshrnt_d }, | ||
188 | + }; | ||
189 | + return do_sve2_shr_narrow(s, a, ops); | ||
190 | +} | ||
191 | + | ||
192 | static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
193 | gen_helper_gvec_4_ptr *fn) | ||
194 | { | ||
195 | -- | 30 | -- |
196 | 2.20.1 | 31 | 2.25.1 |
197 | |||
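Per element, the UQSHRN*/UQRSHRN* helpers above shift right (with
optional rounding) and then saturate to the maximum of the narrow type.
A standalone scalar sketch of the 16-to-8-bit case (illustrative only;
urshr16 mirrors what do_urshr is doing for 0 < sh < 16, and the test
values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Rounding: add back the last bit shifted out (valid for 0 < sh < 16). */
    static uint16_t urshr16(uint16_t x, unsigned sh)
    {
        return (uint16_t)((x >> sh) + ((x >> (sh - 1)) & 1));
    }

    /* UQSHRNB element op at H size: narrow u16 -> u8 with saturation. */
    static uint8_t uqshrn_h(uint16_t x, unsigned sh)
    {
        uint16_t v = x >> sh;
        return v > UINT8_MAX ? UINT8_MAX : (uint8_t)v;
    }

    /* UQRSHRNB: the same, but rounding before the saturate. */
    static uint8_t uqrshrn_h(uint16_t x, unsigned sh)
    {
        uint16_t v = urshr16(x, sh);
        return v > UINT8_MAX ? UINT8_MAX : (uint8_t)v;
    }

    int main(void)
    {
        printf("%d %d\n", uqshrn_h(0x1ff0, 4), uqrshrn_h(0x1ff8, 4)); /* 255 255 */
        printf("%d %d\n", uqshrn_h(0x0123, 4), uqrshrn_h(0x0128, 4)); /* 18 19 */
        return 0;
    }

The B forms write the narrowed result to the even (bottom) byte of each
halfword and the T forms to the odd (top) byte, which is what the extra
shli/bitsel step in gen_uqshrnt_vec implements.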
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-37-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-35-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 28 ++++++++++++++ | 8 | linux-user/aarch64/cpu_loop.c | 9 +++++++++ |
9 | target/arm/sve.decode | 11 ++++++ | 9 | 1 file changed, 9 insertions(+) |
10 | target/arm/sve_helper.c | 18 +++++++++ | ||
11 | target/arm/translate-sve.c | 76 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 133 insertions(+) | ||
13 | 10 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 11 | diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c |
15 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 13 | --- a/linux-user/aarch64/cpu_loop.c |
17 | +++ b/target/arm/helper-sve.h | 14 | +++ b/linux-user/aarch64/cpu_loop.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_s, TCG_CALL_NO_RWG, | 15 | @@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env) |
19 | void, ptr, ptr, ptr, ptr, i32) | 16 | |
20 | DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_d, TCG_CALL_NO_RWG, | 17 | switch (trapnr) { |
21 | void, ptr, ptr, ptr, ptr, i32) | 18 | case EXCP_SWI: |
22 | + | 19 | + /* |
23 | +DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_h, TCG_CALL_NO_RWG, | 20 | + * On syscall, PSTATE.ZA is preserved, along with the ZA matrix. |
24 | + void, ptr, ptr, ptr, ptr, i32) | 21 | + * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState. |
25 | +DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_s, TCG_CALL_NO_RWG, | 22 | + */ |
26 | + void, ptr, ptr, ptr, ptr, i32) | 23 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
27 | +DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_d, TCG_CALL_NO_RWG, | 24 | + env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0); |
28 | + void, ptr, ptr, ptr, ptr, i32) | 25 | + arm_rebuild_hflags(env); |
29 | + | 26 | + arm_reset_sve_state(env); |
30 | +DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_h, TCG_CALL_NO_RWG, | 27 | + } |
31 | + void, ptr, ptr, ptr, ptr, i32) | 28 | ret = do_syscall(env, |
32 | +DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_s, TCG_CALL_NO_RWG, | 29 | env->xregs[8], |
33 | + void, ptr, ptr, ptr, ptr, i32) | 30 | env->xregs[0], |
34 | +DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | + | ||
37 | +DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_h, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_s, TCG_CALL_NO_RWG, | ||
40 | + void, ptr, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_d, TCG_CALL_NO_RWG, | ||
42 | + void, ptr, ptr, ptr, ptr, i32) | ||
43 | + | ||
44 | +DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_h, TCG_CALL_NO_RWG, | ||
45 | + void, ptr, ptr, ptr, ptr, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG, | ||
47 | + void, ptr, ptr, ptr, ptr, i32) | ||
48 | +DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG, | ||
49 | + void, ptr, ptr, ptr, ptr, i32) | ||
50 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
51 | index XXXXXXX..XXXXXXX 100644 | ||
52 | --- a/target/arm/sve.decode | ||
53 | +++ b/target/arm/sve.decode | ||
54 | @@ -XXX,XX +XXX,XX @@ SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm | ||
55 | |||
56 | SQRDMLAH_zzzz 01000100 .. 0 ..... 01110 0 ..... ..... @rda_rn_rm | ||
57 | SQRDMLSH_zzzz 01000100 .. 0 ..... 01110 1 ..... ..... @rda_rn_rm | ||
58 | + | ||
59 | +## SVE2 integer multiply-add long | ||
60 | + | ||
61 | +SMLALB_zzzw 01000100 .. 0 ..... 010 000 ..... ..... @rda_rn_rm | ||
62 | +SMLALT_zzzw 01000100 .. 0 ..... 010 001 ..... ..... @rda_rn_rm | ||
63 | +UMLALB_zzzw 01000100 .. 0 ..... 010 010 ..... ..... @rda_rn_rm | ||
64 | +UMLALT_zzzw 01000100 .. 0 ..... 010 011 ..... ..... @rda_rn_rm | ||
65 | +SMLSLB_zzzw 01000100 .. 0 ..... 010 100 ..... ..... @rda_rn_rm | ||
66 | +SMLSLT_zzzw 01000100 .. 0 ..... 010 101 ..... ..... @rda_rn_rm | ||
67 | +UMLSLB_zzzw 01000100 .. 0 ..... 010 110 ..... ..... @rda_rn_rm | ||
68 | +UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | ||
69 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/target/arm/sve_helper.c | ||
72 | +++ b/target/arm/sve_helper.c | ||
73 | @@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD) | ||
74 | DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD) | ||
75 | DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
76 | |||
77 | +DO_ZZZW_ACC(sve2_smlal_zzzw_h, int16_t, int8_t, H1_2, H1, DO_MUL) | ||
78 | +DO_ZZZW_ACC(sve2_smlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_MUL) | ||
79 | +DO_ZZZW_ACC(sve2_smlal_zzzw_d, int64_t, int32_t, , H1_4, DO_MUL) | ||
80 | + | ||
81 | +DO_ZZZW_ACC(sve2_umlal_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_MUL) | ||
82 | +DO_ZZZW_ACC(sve2_umlal_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL) | ||
83 | +DO_ZZZW_ACC(sve2_umlal_zzzw_d, uint64_t, uint32_t, , H1_4, DO_MUL) | ||
84 | + | ||
85 | +#define DO_NMUL(N, M) -(N * M) | ||
86 | + | ||
87 | +DO_ZZZW_ACC(sve2_smlsl_zzzw_h, int16_t, int8_t, H1_2, H1, DO_NMUL) | ||
88 | +DO_ZZZW_ACC(sve2_smlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_NMUL) | ||
89 | +DO_ZZZW_ACC(sve2_smlsl_zzzw_d, int64_t, int32_t, , H1_4, DO_NMUL) | ||
90 | + | ||
91 | +DO_ZZZW_ACC(sve2_umlsl_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_NMUL) | ||
92 | +DO_ZZZW_ACC(sve2_umlsl_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_NMUL) | ||
93 | +DO_ZZZW_ACC(sve2_umlsl_zzzw_d, uint64_t, uint32_t, , H1_4, DO_NMUL) | ||
94 | + | ||
95 | #undef DO_ZZZW_ACC | ||
96 | |||
97 | #define DO_XTNB(NAME, TYPE, OP) \ | ||
98 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/translate-sve.c | ||
101 | +++ b/target/arm/translate-sve.c | ||
102 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a) | ||
103 | }; | ||
104 | return do_sve2_zzzz_ool(s, a, fns[a->esz], 0); | ||
105 | } | ||
106 | + | ||
107 | +static bool do_smlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) | ||
108 | +{ | ||
109 | + static gen_helper_gvec_4 * const fns[] = { | ||
110 | + NULL, gen_helper_sve2_smlal_zzzw_h, | ||
111 | + gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d, | ||
112 | + }; | ||
113 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); | ||
114 | +} | ||
115 | + | ||
116 | +static bool trans_SMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
117 | +{ | ||
118 | + return do_smlal_zzzw(s, a, false); | ||
119 | +} | ||
120 | + | ||
121 | +static bool trans_SMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
122 | +{ | ||
123 | + return do_smlal_zzzw(s, a, true); | ||
124 | +} | ||
125 | + | ||
126 | +static bool do_umlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) | ||
127 | +{ | ||
128 | + static gen_helper_gvec_4 * const fns[] = { | ||
129 | + NULL, gen_helper_sve2_umlal_zzzw_h, | ||
130 | + gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d, | ||
131 | + }; | ||
132 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); | ||
133 | +} | ||
134 | + | ||
135 | +static bool trans_UMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
136 | +{ | ||
137 | + return do_umlal_zzzw(s, a, false); | ||
138 | +} | ||
139 | + | ||
140 | +static bool trans_UMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
141 | +{ | ||
142 | + return do_umlal_zzzw(s, a, true); | ||
143 | +} | ||
144 | + | ||
145 | +static bool do_smlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) | ||
146 | +{ | ||
147 | + static gen_helper_gvec_4 * const fns[] = { | ||
148 | + NULL, gen_helper_sve2_smlsl_zzzw_h, | ||
149 | + gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d, | ||
150 | + }; | ||
151 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); | ||
152 | +} | ||
153 | + | ||
154 | +static bool trans_SMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
155 | +{ | ||
156 | + return do_smlsl_zzzw(s, a, false); | ||
157 | +} | ||
158 | + | ||
159 | +static bool trans_SMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
160 | +{ | ||
161 | + return do_smlsl_zzzw(s, a, true); | ||
162 | +} | ||
163 | + | ||
164 | +static bool do_umlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) | ||
165 | +{ | ||
166 | + static gen_helper_gvec_4 * const fns[] = { | ||
167 | + NULL, gen_helper_sve2_umlsl_zzzw_h, | ||
168 | + gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d, | ||
169 | + }; | ||
170 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], sel); | ||
171 | +} | ||
172 | + | ||
173 | +static bool trans_UMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
174 | +{ | ||
175 | + return do_umlsl_zzzw(s, a, false); | ||
176 | +} | ||
177 | + | ||
178 | +static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
179 | +{ | ||
180 | + return do_umlsl_zzzw(s, a, true); | ||
181 | +} | ||
182 | -- | 31 | -- |
183 | 2.20.1 | 32 | 2.25.1 |
184 | |||
185 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Currently only used by FMUL, but will shortly be used more. | 3 | Make sure to zero the currently reserved fields. |
4 | 4 | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20210525010358.152808-52-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-36-richard.henderson@linaro.org |
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 9 | --- |
10 | target/arm/sve.decode | 14 ++++++++++---- | 10 | linux-user/aarch64/signal.c | 9 ++++++++- |
11 | 1 file changed, 10 insertions(+), 4 deletions(-) | 11 | 1 file changed, 8 insertions(+), 1 deletion(-) |
12 | 12 | ||
13 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 13 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
14 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/sve.decode | 15 | --- a/linux-user/aarch64/signal.c |
16 | +++ b/target/arm/sve.decode | 16 | +++ b/linux-user/aarch64/signal.c |
17 | @@ -XXX,XX +XXX,XX @@ | 17 | @@ -XXX,XX +XXX,XX @@ struct target_extra_context { |
18 | &rri_esz rd rn imm esz | 18 | struct target_sve_context { |
19 | &rrri_esz rd rn rm imm esz | 19 | struct target_aarch64_ctx head; |
20 | &rrr_esz rd rn rm esz | 20 | uint16_t vl; |
21 | +&rrx_esz rd rn rm index esz | 21 | - uint16_t reserved[3]; |
22 | &rpr_esz rd pg rn esz | 22 | + uint16_t flags; |
23 | &rpr_s rd pg rn s | 23 | + uint16_t reserved[2]; |
24 | &rprr_s rd pg rn rm s | 24 | /* The actual SVE data immediately follows. It is laid out |
25 | @@ -XXX,XX +XXX,XX @@ | 25 | * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of |
26 | @rpri_scatter_store ....... msz:2 .. imm:5 ... pg:3 rn:5 rd:5 \ | 26 | * the original struct pointer. |
27 | &rpri_scatter_store | 27 | @@ -XXX,XX +XXX,XX @@ struct target_sve_context { |
28 | 28 | #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \ | |
29 | +# Two registers and a scalar by N-bit index | 29 | (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17)) |
30 | +@rrx_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \ | 30 | |
31 | + &rrx_esz index=%index3_22_19 | 31 | +#define TARGET_SVE_SIG_FLAG_SM 1 |
32 | +@rrx_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 &rrx_esz | ||
33 | +@rrx_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 &rrx_esz | ||
34 | + | 32 | + |
35 | ########################################################################### | 33 | struct target_rt_sigframe { |
36 | # Instruction patterns. Grouped according to the SVE encodingindex.xhtml. | 34 | struct target_siginfo info; |
37 | 35 | struct target_ucontext uc; | |
38 | @@ -XXX,XX +XXX,XX @@ FMLA_zzxz 01100100 111 index:1 rm:4 00000 sub:1 rn:5 rd:5 \ | 36 | @@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve, |
39 | ### SVE FP Multiply Indexed Group | 37 | { |
40 | 38 | int i, j; | |
41 | # SVE floating-point multiply (indexed) | 39 | |
42 | -FMUL_zzx 01100100 0.1 .. rm:3 001000 rn:5 rd:5 \ | 40 | + memset(sve, 0, sizeof(*sve)); |
43 | - index=%index3_22_19 esz=1 | 41 | __put_user(TARGET_SVE_MAGIC, &sve->head.magic); |
44 | -FMUL_zzx 01100100 101 index:2 rm:3 001000 rn:5 rd:5 esz=2 | 42 | __put_user(size, &sve->head.size); |
45 | -FMUL_zzx 01100100 111 index:1 rm:4 001000 rn:5 rd:5 esz=3 | 43 | __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl); |
46 | +FMUL_zzx 01100100 0. 1 ..... 001000 ..... ..... @rrx_3 esz=1 | 44 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
47 | +FMUL_zzx 01100100 10 1 ..... 001000 ..... ..... @rrx_2 esz=2 | 45 | + __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags); |
48 | +FMUL_zzx 01100100 11 1 ..... 001000 ..... ..... @rrx_1 esz=3 | 46 | + } |
49 | 47 | ||
50 | ### SVE FP Fast Reduction Group | 48 | /* Note that SVE regs are stored as a byte stream, with each byte element |
51 | 49 | * at a subsequent address. This corresponds to a little-endian store | |
52 | -- | 50 | -- |
53 | 2.20.1 | 51 | 2.25.1 |
54 | |||
55 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Fold the return value setting into the goto, so each | ||
4 | point of failure need not do both. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-20-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-37-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 14 ++++++++++ | 11 | linux-user/aarch64/signal.c | 26 +++++++++++--------------- |
9 | target/arm/sve.decode | 12 +++++++++ | 12 | 1 file changed, 11 insertions(+), 15 deletions(-) |
10 | target/arm/sve_helper.c | 23 ++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 55 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 104 insertions(+) | ||
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/linux-user/aarch64/signal.c |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/linux-user/aarch64/signal.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 18 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
19 | DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 19 | struct target_sve_context *sve = NULL; |
20 | DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 20 | uint64_t extra_datap = 0; |
21 | DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 21 | bool used_extra = false; |
22 | - bool err = false; | ||
23 | int vq = 0, sve_size = 0; | ||
24 | |||
25 | target_restore_general_frame(env, sf); | ||
26 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
27 | switch (magic) { | ||
28 | case 0: | ||
29 | if (size != 0) { | ||
30 | - err = true; | ||
31 | - goto exit; | ||
32 | + goto err; | ||
33 | } | ||
34 | if (used_extra) { | ||
35 | ctx = NULL; | ||
36 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
37 | |||
38 | case TARGET_FPSIMD_MAGIC: | ||
39 | if (fpsimd || size != sizeof(struct target_fpsimd_context)) { | ||
40 | - err = true; | ||
41 | - goto exit; | ||
42 | + goto err; | ||
43 | } | ||
44 | fpsimd = (struct target_fpsimd_context *)ctx; | ||
45 | break; | ||
46 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
47 | break; | ||
48 | } | ||
49 | } | ||
50 | - err = true; | ||
51 | - goto exit; | ||
52 | + goto err; | ||
53 | |||
54 | case TARGET_EXTRA_MAGIC: | ||
55 | if (extra || size != sizeof(struct target_extra_context)) { | ||
56 | - err = true; | ||
57 | - goto exit; | ||
58 | + goto err; | ||
59 | } | ||
60 | __get_user(extra_datap, | ||
61 | &((struct target_extra_context *)ctx)->datap); | ||
62 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
63 | /* Unknown record -- we certainly didn't generate it. | ||
64 | * Did we in fact get out of sync? | ||
65 | */ | ||
66 | - err = true; | ||
67 | - goto exit; | ||
68 | + goto err; | ||
69 | } | ||
70 | ctx = (void *)ctx + size; | ||
71 | } | ||
72 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | ||
73 | if (fpsimd) { | ||
74 | target_restore_fpsimd_record(env, fpsimd); | ||
75 | } else { | ||
76 | - err = true; | ||
77 | + goto err; | ||
78 | } | ||
79 | |||
80 | /* SVE data, if present, overwrites FPSIMD data. */ | ||
81 | if (sve) { | ||
82 | target_restore_sve_record(env, sve, vq); | ||
83 | } | ||
84 | - | ||
85 | - exit: | ||
86 | unlock_user(extra, extra_datap, 0); | ||
87 | - return err; | ||
88 | + return 0; | ||
22 | + | 89 | + |
23 | +DEF_HELPER_FLAGS_5(sve2_sabal_h, TCG_CALL_NO_RWG, | 90 | + err: |
24 | + void, ptr, ptr, ptr, ptr, i32) | 91 | + unlock_user(extra, extra_datap, 0); |
25 | +DEF_HELPER_FLAGS_5(sve2_sabal_s, TCG_CALL_NO_RWG, | 92 | + return 1; |
26 | + void, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_5(sve2_sabal_d, TCG_CALL_NO_RWG, | ||
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_5(sve2_uabal_h, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ | ||
41 | &rpr_s rd pg rn s | ||
42 | &rprr_s rd pg rn rm s | ||
43 | &rprr_esz rd pg rn rm esz | ||
44 | +&rrrr_esz rd ra rn rm esz | ||
45 | &rprrr_esz rd pg rn rm ra esz | ||
46 | &rpri_esz rd pg rn imm esz | ||
47 | &ptrue rd esz pat s | ||
48 | @@ -XXX,XX +XXX,XX @@ | ||
49 | @rdn_i8s ........ esz:2 ...... ... imm:s8 rd:5 \ | ||
50 | &rri_esz rn=%reg_movprfx | ||
51 | |||
52 | +# Four operand, vector element size | ||
53 | +@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \ | ||
54 | + &rrrr_esz ra=%reg_movprfx | ||
55 | + | ||
56 | # Three operand with "memory" size, aka immediate left shift | ||
57 | @rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri | ||
58 | |||
59 | @@ -XXX,XX +XXX,XX @@ CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm | ||
60 | CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm | ||
61 | SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm | ||
62 | SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm | ||
63 | + | ||
64 | +## SVE2 integer absolute difference and accumulate long | ||
65 | + | ||
66 | +SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm | ||
67 | +SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm | ||
68 | +UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm | ||
69 | +UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm | ||
70 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
71 | index XXXXXXX..XXXXXXX 100644 | ||
72 | --- a/target/arm/sve_helper.c | ||
73 | +++ b/target/arm/sve_helper.c | ||
74 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR) | ||
75 | |||
76 | #undef DO_ZZZ_NTB | ||
77 | |||
78 | +#define DO_ZZZW_ACC(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
79 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
80 | +{ \ | ||
81 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
82 | + intptr_t sel1 = simd_data(desc) * sizeof(TYPEN); \ | ||
83 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
84 | + TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \ | ||
85 | + TYPEW mm = *(TYPEN *)(vm + HN(i + sel1)); \ | ||
86 | + TYPEW aa = *(TYPEW *)(va + HW(i)); \ | ||
87 | + *(TYPEW *)(vd + HW(i)) = OP(nn, mm) + aa; \ | ||
88 | + } \ | ||
89 | +} | ||
90 | + | ||
91 | +DO_ZZZW_ACC(sve2_sabal_h, int16_t, int8_t, H1_2, H1, DO_ABD) | ||
92 | +DO_ZZZW_ACC(sve2_sabal_s, int32_t, int16_t, H1_4, H1_2, DO_ABD) | ||
93 | +DO_ZZZW_ACC(sve2_sabal_d, int64_t, int32_t, , H1_4, DO_ABD) | ||
94 | + | ||
95 | +DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD) | ||
96 | +DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD) | ||
97 | +DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
98 | + | ||
99 | +#undef DO_ZZZW_ACC | ||
100 | + | ||
101 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
102 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
103 | { \ | ||
104 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
105 | index XXXXXXX..XXXXXXX 100644 | ||
106 | --- a/target/arm/translate-sve.c | ||
107 | +++ b/target/arm/translate-sve.c | ||
108 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn, | ||
109 | vsz, vsz, data, fn); | ||
110 | } | 93 | } |
111 | 94 | ||
112 | +/* Invoke an out-of-line helper on 4 Zregs. */ | 95 | static abi_ulong get_sigframe(struct target_sigaction *ka, |
113 | +static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, | ||
114 | + int rd, int rn, int rm, int ra, int data) | ||
115 | +{ | ||
116 | + unsigned vsz = vec_full_reg_size(s); | ||
117 | + tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), | ||
118 | + vec_full_reg_offset(s, rn), | ||
119 | + vec_full_reg_offset(s, rm), | ||
120 | + vec_full_reg_offset(s, ra), | ||
121 | + vsz, vsz, data, fn); | ||
122 | +} | ||
123 | + | ||
124 | /* Invoke an out-of-line helper on 2 Zregs and a predicate. */ | ||
125 | static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn, | ||
126 | int rd, int rn, int pg, int data) | ||
127 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a) | ||
128 | { | ||
129 | return do_cadd(s, a, true, true); | ||
130 | } | ||
131 | + | ||
132 | +static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a, | ||
133 | + gen_helper_gvec_4 *fn, int data) | ||
134 | +{ | ||
135 | + if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { | ||
136 | + return false; | ||
137 | + } | ||
138 | + if (sve_access_check(s)) { | ||
139 | + gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); | ||
140 | + } | ||
141 | + return true; | ||
142 | +} | ||
143 | + | ||
144 | +static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel) | ||
145 | +{ | ||
146 | + static gen_helper_gvec_4 * const fns[2][4] = { | ||
147 | + { NULL, gen_helper_sve2_sabal_h, | ||
148 | + gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d }, | ||
149 | + { NULL, gen_helper_sve2_uabal_h, | ||
150 | + gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d }, | ||
151 | + }; | ||
152 | + return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel); | ||
153 | +} | ||
154 | + | ||
155 | +static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a) | ||
156 | +{ | ||
157 | + return do_abal(s, a, false, false); | ||
158 | +} | ||
159 | + | ||
160 | +static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a) | ||
161 | +{ | ||
162 | + return do_abal(s, a, false, true); | ||
163 | +} | ||
164 | + | ||
165 | +static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a) | ||
166 | +{ | ||
167 | + return do_abal(s, a, true, false); | ||
168 | +} | ||
169 | + | ||
170 | +static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a) | ||
171 | +{ | ||
172 | + return do_abal(s, a, true, true); | ||
173 | +} | ||
174 | -- | 96 | -- |
175 | 2.20.1 | 97 | 2.25.1 |
176 | |||
177 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | In parse_user_sigframe, the kernel rejects duplicate sve records, | ||
4 | or records that are smaller than the header. We were silently | ||
5 | allowing these cases to pass, dropping the record. | ||
2 | 6 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-8-richard.henderson@linaro.org | 9 | Message-id: 20220708151540.18136-38-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 11 | --- |
8 | target/arm/helper-sve.h | 54 ++++++++++++++++++++++++++++++++++++++ | 12 | linux-user/aarch64/signal.c | 5 ++++- |
9 | target/arm/sve.decode | 11 ++++++++ | 13 | 1 file changed, 4 insertions(+), 1 deletion(-) |
10 | target/arm/sve_helper.c | 39 +++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 8 ++++++ | ||
12 | 4 files changed, 112 insertions(+) | ||
13 | 14 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 17 | --- a/linux-user/aarch64/signal.c |
17 | +++ b/target/arm/helper-sve.h | 18 | +++ b/linux-user/aarch64/signal.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
19 | DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG, | 20 | break; |
20 | void, ptr, ptr, ptr, ptr, i32) | 21 | |
21 | 22 | case TARGET_SVE_MAGIC: | |
22 | +DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_b, TCG_CALL_NO_RWG, | 23 | + if (sve || size < sizeof(struct target_sve_context)) { |
23 | + void, ptr, ptr, ptr, ptr, i32) | 24 | + goto err; |
24 | +DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_h, TCG_CALL_NO_RWG, | 25 | + } |
25 | + void, ptr, ptr, ptr, ptr, i32) | 26 | if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
26 | +DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_s, TCG_CALL_NO_RWG, | 27 | vq = sve_vq(env); |
27 | + void, ptr, ptr, ptr, ptr, i32) | 28 | sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); |
28 | +DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_d, TCG_CALL_NO_RWG, | 29 | - if (!sve && size == sve_size) { |
29 | + void, ptr, ptr, ptr, ptr, i32) | 30 | + if (size == sve_size) { |
30 | + | 31 | sve = (struct target_sve_context *)ctx; |
31 | +DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_b, TCG_CALL_NO_RWG, | 32 | break; |
32 | + void, ptr, ptr, ptr, ptr, i32) | 33 | } |
33 | +DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_h, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_s, TCG_CALL_NO_RWG, | ||
36 | + void, ptr, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_d, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, i32) | ||
39 | + | ||
40 | +DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_b, TCG_CALL_NO_RWG, | ||
41 | + void, ptr, ptr, ptr, ptr, i32) | ||
42 | +DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_h, TCG_CALL_NO_RWG, | ||
43 | + void, ptr, ptr, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_s, TCG_CALL_NO_RWG, | ||
45 | + void, ptr, ptr, ptr, ptr, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_d, TCG_CALL_NO_RWG, | ||
47 | + void, ptr, ptr, ptr, ptr, i32) | ||
48 | + | ||
49 | +DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_b, TCG_CALL_NO_RWG, | ||
50 | + void, ptr, ptr, ptr, ptr, i32) | ||
51 | +DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_h, TCG_CALL_NO_RWG, | ||
52 | + void, ptr, ptr, ptr, ptr, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_s, TCG_CALL_NO_RWG, | ||
54 | + void, ptr, ptr, ptr, ptr, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_d, TCG_CALL_NO_RWG, | ||
56 | + void, ptr, ptr, ptr, ptr, i32) | ||
57 | + | ||
58 | +DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_b, TCG_CALL_NO_RWG, | ||
59 | + void, ptr, ptr, ptr, ptr, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_h, TCG_CALL_NO_RWG, | ||
61 | + void, ptr, ptr, ptr, ptr, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_s, TCG_CALL_NO_RWG, | ||
63 | + void, ptr, ptr, ptr, ptr, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_d, TCG_CALL_NO_RWG, | ||
65 | + void, ptr, ptr, ptr, ptr, i32) | ||
66 | + | ||
67 | +DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_b, TCG_CALL_NO_RWG, | ||
68 | + void, ptr, ptr, ptr, ptr, i32) | ||
69 | +DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_h, TCG_CALL_NO_RWG, | ||
70 | + void, ptr, ptr, ptr, ptr, i32) | ||
71 | +DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_s, TCG_CALL_NO_RWG, | ||
72 | + void, ptr, ptr, ptr, ptr, i32) | ||
73 | +DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_d, TCG_CALL_NO_RWG, | ||
74 | + void, ptr, ptr, ptr, ptr, i32) | ||
75 | + | ||
76 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG, | ||
77 | void, ptr, ptr, ptr, ptr, i32) | ||
78 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG, | ||
79 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/sve.decode | ||
82 | +++ b/target/arm/sve.decode | ||
83 | @@ -XXX,XX +XXX,XX @@ SQRSHL 01000100 .. 001 010 100 ... ..... ..... @rdn_pg_rm | ||
84 | UQRSHL 01000100 .. 001 011 100 ... ..... ..... @rdn_pg_rm | ||
85 | SQRSHL 01000100 .. 001 110 100 ... ..... ..... @rdm_pg_rn # SQRSHLR | ||
86 | UQRSHL 01000100 .. 001 111 100 ... ..... ..... @rdm_pg_rn # UQRSHLR | ||
87 | + | ||
88 | +### SVE2 integer halving add/subtract (predicated) | ||
89 | + | ||
90 | +SHADD 01000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm | ||
91 | +UHADD 01000100 .. 010 001 100 ... ..... ..... @rdn_pg_rm | ||
92 | +SHSUB 01000100 .. 010 010 100 ... ..... ..... @rdn_pg_rm | ||
93 | +UHSUB 01000100 .. 010 011 100 ... ..... ..... @rdn_pg_rm | ||
94 | +SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm | ||
95 | +URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm | ||
96 | +SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR | ||
97 | +UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR | ||
98 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/sve_helper.c | ||
101 | +++ b/target/arm/sve_helper.c | ||
102 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d) | ||
103 | |||
104 | #undef do_uqrshl_d | ||
105 | |||
106 | +#define DO_HADD_BHS(n, m) (((int64_t)n + m) >> 1) | ||
107 | +#define DO_HADD_D(n, m) ((n >> 1) + (m >> 1) + (n & m & 1)) | ||
108 | + | ||
109 | +DO_ZPZZ(sve2_shadd_zpzz_b, int8_t, H1, DO_HADD_BHS) | ||
110 | +DO_ZPZZ(sve2_shadd_zpzz_h, int16_t, H1_2, DO_HADD_BHS) | ||
111 | +DO_ZPZZ(sve2_shadd_zpzz_s, int32_t, H1_4, DO_HADD_BHS) | ||
112 | +DO_ZPZZ_D(sve2_shadd_zpzz_d, int64_t, DO_HADD_D) | ||
113 | + | ||
114 | +DO_ZPZZ(sve2_uhadd_zpzz_b, uint8_t, H1, DO_HADD_BHS) | ||
115 | +DO_ZPZZ(sve2_uhadd_zpzz_h, uint16_t, H1_2, DO_HADD_BHS) | ||
116 | +DO_ZPZZ(sve2_uhadd_zpzz_s, uint32_t, H1_4, DO_HADD_BHS) | ||
117 | +DO_ZPZZ_D(sve2_uhadd_zpzz_d, uint64_t, DO_HADD_D) | ||
118 | + | ||
119 | +#define DO_RHADD_BHS(n, m) (((int64_t)n + m + 1) >> 1) | ||
120 | +#define DO_RHADD_D(n, m) ((n >> 1) + (m >> 1) + ((n | m) & 1)) | ||
121 | + | ||
122 | +DO_ZPZZ(sve2_srhadd_zpzz_b, int8_t, H1, DO_RHADD_BHS) | ||
123 | +DO_ZPZZ(sve2_srhadd_zpzz_h, int16_t, H1_2, DO_RHADD_BHS) | ||
124 | +DO_ZPZZ(sve2_srhadd_zpzz_s, int32_t, H1_4, DO_RHADD_BHS) | ||
125 | +DO_ZPZZ_D(sve2_srhadd_zpzz_d, int64_t, DO_RHADD_D) | ||
126 | + | ||
127 | +DO_ZPZZ(sve2_urhadd_zpzz_b, uint8_t, H1, DO_RHADD_BHS) | ||
128 | +DO_ZPZZ(sve2_urhadd_zpzz_h, uint16_t, H1_2, DO_RHADD_BHS) | ||
129 | +DO_ZPZZ(sve2_urhadd_zpzz_s, uint32_t, H1_4, DO_RHADD_BHS) | ||
130 | +DO_ZPZZ_D(sve2_urhadd_zpzz_d, uint64_t, DO_RHADD_D) | ||
131 | + | ||
132 | +#define DO_HSUB_BHS(n, m) (((int64_t)n - m) >> 1) | ||
133 | +#define DO_HSUB_D(n, m) ((n >> 1) - (m >> 1) - (~n & m & 1)) | ||
134 | + | ||
135 | +DO_ZPZZ(sve2_shsub_zpzz_b, int8_t, H1, DO_HSUB_BHS) | ||
136 | +DO_ZPZZ(sve2_shsub_zpzz_h, int16_t, H1_2, DO_HSUB_BHS) | ||
137 | +DO_ZPZZ(sve2_shsub_zpzz_s, int32_t, H1_4, DO_HSUB_BHS) | ||
138 | +DO_ZPZZ_D(sve2_shsub_zpzz_d, int64_t, DO_HSUB_D) | ||
139 | + | ||
140 | +DO_ZPZZ(sve2_uhsub_zpzz_b, uint8_t, H1, DO_HSUB_BHS) | ||
141 | +DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS) | ||
142 | +DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS) | ||
143 | +DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D) | ||
144 | + | ||
145 | #undef DO_ZPZZ | ||
146 | #undef DO_ZPZZ_D | ||
147 | |||
148 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
149 | index XXXXXXX..XXXXXXX 100644 | ||
150 | --- a/target/arm/translate-sve.c | ||
151 | +++ b/target/arm/translate-sve.c | ||
152 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SRSHL, srshl) | ||
153 | DO_SVE2_ZPZZ(UQSHL, uqshl) | ||
154 | DO_SVE2_ZPZZ(UQRSHL, uqrshl) | ||
155 | DO_SVE2_ZPZZ(URSHL, urshl) | ||
156 | + | ||
157 | +DO_SVE2_ZPZZ(SHADD, shadd) | ||
158 | +DO_SVE2_ZPZZ(SRHADD, srhadd) | ||
159 | +DO_SVE2_ZPZZ(SHSUB, shsub) | ||
160 | + | ||
161 | +DO_SVE2_ZPZZ(UHADD, uhadd) | ||
162 | +DO_SVE2_ZPZZ(URHADD, urhadd) | ||
163 | +DO_SVE2_ZPZZ(UHSUB, uhsub) | ||
164 | -- | 34 | -- |
165 | 2.20.1 | 35 | 2.25.1 |
166 | |||
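The byte, half, and word forms above simply widen to int64_t before adding, but the doubleword forms have no wider type to borrow, so DO_HADD_D and DO_RHADD_D halve each operand first and repair the lost carry bit. A self-check of that identity (a harness written for this note, not part of the patch; it assumes arithmetic right shift of negative values and the __int128 extension, as QEMU itself does elsewhere):

    #include <assert.h>
    #include <stdint.h>

    static void check_hadd64(int64_t n, int64_t m)
    {
        __int128 sum = (__int128)n + m;
        /* DO_HADD_D: floor((n + m) / 2) without a wider type. */
        assert((n >> 1) + (m >> 1) + (n & m & 1) == (int64_t)(sum >> 1));
        /* DO_RHADD_D: the rounding variant adds one before halving. */
        assert((n >> 1) + (m >> 1) + ((n | m) & 1) ==
               (int64_t)((sum + 1) >> 1));
    }

The trick works because n + m == 2*(n >> 1) + 2*(m >> 1) + (n & 1) + (m & 1); after halving, the two low bits contribute a carry exactly when both are set (AND for truncation, OR for rounding).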
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-35-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-39-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 14 ++++++++++ | 8 | linux-user/aarch64/signal.c | 3 +++ |
9 | target/arm/sve.decode | 14 ++++++++++ | 9 | 1 file changed, 3 insertions(+) |
10 | target/arm/sve_helper.c | 30 +++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 54 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 112 insertions(+) | ||
13 | 10 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 11 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
15 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 13 | --- a/linux-user/aarch64/signal.c |
17 | +++ b/target/arm/helper-sve.h | 14 | +++ b/linux-user/aarch64/signal.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 15 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
19 | DEF_HELPER_FLAGS_5(sve2_bsl1n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 16 | __get_user(extra_size, |
20 | DEF_HELPER_FLAGS_5(sve2_bsl2n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 17 | &((struct target_extra_context *)ctx)->size); |
21 | DEF_HELPER_FLAGS_5(sve2_nbsl, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | 18 | extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0); |
22 | + | 19 | + if (!extra) { |
23 | +DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_h, TCG_CALL_NO_RWG, | 20 | + return 1; |
24 | + void, ptr, ptr, ptr, ptr, i32) | 21 | + } |
25 | +DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_s, TCG_CALL_NO_RWG, | 22 | break; |
26 | + void, ptr, ptr, ptr, ptr, i32) | 23 | |
27 | +DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_d, TCG_CALL_NO_RWG, | 24 | default: |
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_h, TCG_CALL_NO_RWG, | ||
31 | + void, ptr, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_s, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_d, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm | ||
41 | FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm | ||
42 | FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm | ||
43 | FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm | ||
44 | + | ||
45 | +#### SVE Integer Multiply-Add (unpredicated) | ||
46 | + | ||
47 | +## SVE2 saturating multiply-add long | ||
48 | + | ||
49 | +SQDMLALB_zzzw 01000100 .. 0 ..... 0110 00 ..... ..... @rda_rn_rm | ||
50 | +SQDMLALT_zzzw 01000100 .. 0 ..... 0110 01 ..... ..... @rda_rn_rm | ||
51 | +SQDMLSLB_zzzw 01000100 .. 0 ..... 0110 10 ..... ..... @rda_rn_rm | ||
52 | +SQDMLSLT_zzzw 01000100 .. 0 ..... 0110 11 ..... ..... @rda_rn_rm | ||
53 | + | ||
54 | +## SVE2 saturating multiply-add interleaved long | ||
55 | + | ||
56 | +SQDMLALBT 01000100 .. 0 ..... 00001 0 ..... ..... @rda_rn_rm | ||
57 | +SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm | ||
58 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/target/arm/sve_helper.c | ||
61 | +++ b/target/arm/sve_helper.c | ||
62 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
63 | } | ||
64 | } | ||
65 | |||
66 | +#define DO_SQDMLAL(NAME, TYPEW, TYPEN, HW, HN, DMUL_OP, SUM_OP) \ | ||
67 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
68 | +{ \ | ||
69 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
70 | + int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \ | ||
71 | + int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \ | ||
72 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
73 | + TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \ | ||
74 | + TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \ | ||
75 | + TYPEW aa = *(TYPEW *)(va + HW(i)); \ | ||
76 | + *(TYPEW *)(vd + HW(i)) = SUM_OP(aa, DMUL_OP(nn, mm)); \ | ||
77 | + } \ | ||
78 | +} | ||
79 | + | ||
80 | +DO_SQDMLAL(sve2_sqdmlal_zzzw_h, int16_t, int8_t, H1_2, H1, | ||
81 | + do_sqdmull_h, DO_SQADD_H) | ||
82 | +DO_SQDMLAL(sve2_sqdmlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, | ||
83 | + do_sqdmull_s, DO_SQADD_S) | ||
84 | +DO_SQDMLAL(sve2_sqdmlal_zzzw_d, int64_t, int32_t, , H1_4, | ||
85 | + do_sqdmull_d, do_sqadd_d) | ||
86 | + | ||
87 | +DO_SQDMLAL(sve2_sqdmlsl_zzzw_h, int16_t, int8_t, H1_2, H1, | ||
88 | + do_sqdmull_h, DO_SQSUB_H) | ||
89 | +DO_SQDMLAL(sve2_sqdmlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, | ||
90 | + do_sqdmull_s, DO_SQSUB_S) | ||
91 | +DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, , H1_4, | ||
92 | + do_sqdmull_d, do_sqsub_d) | ||
93 | + | ||
94 | +#undef DO_SQDMLAL | ||
95 | + | ||
96 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
97 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
98 | { \ | ||
99 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
100 | index XXXXXXX..XXXXXXX 100644 | ||
101 | --- a/target/arm/translate-sve.c | ||
102 | +++ b/target/arm/translate-sve.c | ||
103 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp) | ||
104 | DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp) | ||
105 | DO_SVE2_ZPZZ_FP(FMAXP, fmaxp) | ||
106 | DO_SVE2_ZPZZ_FP(FMINP, fminp) | ||
107 | + | ||
108 | +/* | ||
109 | + * SVE Integer Multiply-Add (unpredicated) | ||
110 | + */ | ||
111 | + | ||
112 | +static bool do_sqdmlal_zzzw(DisasContext *s, arg_rrrr_esz *a, | ||
113 | + bool sel1, bool sel2) | ||
114 | +{ | ||
115 | + static gen_helper_gvec_4 * const fns[] = { | ||
116 | + NULL, gen_helper_sve2_sqdmlal_zzzw_h, | ||
117 | + gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, | ||
118 | + }; | ||
119 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1); | ||
120 | +} | ||
121 | + | ||
122 | +static bool do_sqdmlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, | ||
123 | + bool sel1, bool sel2) | ||
124 | +{ | ||
125 | + static gen_helper_gvec_4 * const fns[] = { | ||
126 | + NULL, gen_helper_sve2_sqdmlsl_zzzw_h, | ||
127 | + gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d, | ||
128 | + }; | ||
129 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1); | ||
130 | +} | ||
131 | + | ||
132 | +static bool trans_SQDMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
133 | +{ | ||
134 | + return do_sqdmlal_zzzw(s, a, false, false); | ||
135 | +} | ||
136 | + | ||
137 | +static bool trans_SQDMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
138 | +{ | ||
139 | + return do_sqdmlal_zzzw(s, a, true, true); | ||
140 | +} | ||
141 | + | ||
142 | +static bool trans_SQDMLALBT(DisasContext *s, arg_rrrr_esz *a) | ||
143 | +{ | ||
144 | + return do_sqdmlal_zzzw(s, a, false, true); | ||
145 | +} | ||
146 | + | ||
147 | +static bool trans_SQDMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
148 | +{ | ||
149 | + return do_sqdmlsl_zzzw(s, a, false, false); | ||
150 | +} | ||
151 | + | ||
152 | +static bool trans_SQDMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
153 | +{ | ||
154 | + return do_sqdmlsl_zzzw(s, a, true, true); | ||
155 | +} | ||
156 | + | ||
157 | +static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a) | ||
158 | +{ | ||
159 | + return do_sqdmlsl_zzzw(s, a, false, true); | ||
160 | +} | ||
161 | -- | 25 | -- |
162 | 2.20.1 | 26 | 2.25.1 |
163 | |||
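All four B/T/BT variants above funnel into one helper, with the two selector bits packed as (sel2 << 1) | sel1 into the simd data field; the helper scales each bit by sizeof(TYPEN) to get a byte offset into the narrow source elements. Per lane, the arithmetic composes a saturating doubling multiply with a saturating accumulate. A single-lane sketch for the _h size, illustrating SQDMLALBT (sel1=0, sel2=1, i.e. data = 2); the function names here are invented, only the composition mirrors the DO_SQDMLAL macro:

    #include <stdint.h>

    static int16_t ssat16(int32_t x)
    {
        return x < INT16_MIN ? INT16_MIN : x > INT16_MAX ? INT16_MAX : x;
    }

    /* One lane of SQDMLALBT at the _h size: bottom int8 element of Zn,
     * top int8 element of Zm, accumulated into an int16 lane of Zda. */
    static int16_t sqdmlalbt_h_lane(int16_t acc, int8_t n_bot, int8_t m_top)
    {
        int32_t prod = 2 * (int32_t)n_bot * m_top;  /* doubling multiply */
        int16_t dprod = ssat16(prod);  /* saturates only for -128 * -128 */
        return ssat16((int32_t)acc + dprod);        /* saturating add */
    }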
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Split these operations out into a header that can be shared | 3 | Move the checks out of the parsing loop and into the |
4 | between neon and sve. The "sat" pointer acts both as a boolean | 4 | restore function. This more closely mirrors the code |
5 | controlling saturating behavior and as a selector for the difference | 5 | structure in the kernel, and is slightly clearer. |
6 | in behavior between neon and sve -- QC bit or no QC bit. | ||
7 | 6 | ||
8 | Widen the shift operand in the new helpers, as the SVE2 insns treat | 8 | bringing our checks into line with those the kernel does. |
9 | the whole input element as significant. For the neon uses, truncate | 8 | bringing our checks in to line with those the kernel does. |
10 | the shift to int8_t while passing the parameter. | ||
11 | |||
12 | Implement right-shift rounding as | ||
13 | |||
14 | tmp = src >> (shift - 1); | ||
15 | dst = (tmp >> 1) + (tmp & 1); | ||
16 | |||
17 | This is the same number of instructions as the current | ||
18 | |||
19 | tmp = 1 << (shift - 1); | ||
20 | dst = (src + tmp) >> shift; | ||
21 | |||
22 | without any possibility of intermediate overflow. | ||
23 | 9 | ||
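To see why, compare both forms against a widened reference (a throwaway check written for this note, not from the patch; it assumes 1 <= shift <= 63 and the GCC/Clang __int128 extension):

    #include <assert.h>
    #include <stdint.h>

    static void check_rounding_shift(int64_t src, int shift)
    {
        int64_t tmp = src >> (shift - 1);
        int64_t rounded = (tmp >> 1) + (tmp & 1);
        /* Naive form, widened so the rounding constant cannot wrap:
         * (src + 2^(shift-1)) >> shift. */
        __int128 ref = ((__int128)src + ((int64_t)1 << (shift - 1))) >> shift;
        assert(rounded == (int64_t)ref);
    }

The new form never needs more than 64 bits: the partial shift exposes the rounding bit as the low bit of tmp, so it can be added back after the final shift instead of before the first one.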
24 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
25 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
26 | Message-id: 20210525010358.152808-6-richard.henderson@linaro.org | 12 | Message-id: 20220708151540.18136-40-richard.henderson@linaro.org |
27 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
28 | --- | 14 | --- |
29 | target/arm/vec_internal.h | 138 +++++++++++ | 15 | linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------ |
30 | target/arm/neon_helper.c | 507 +++++++------------------------------- | 16 | 1 file changed, 35 insertions(+), 16 deletions(-) |
31 | 2 files changed, 221 insertions(+), 424 deletions(-) | ||
32 | 17 | ||
33 | diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h | 18 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
34 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/target/arm/vec_internal.h | 20 | --- a/linux-user/aarch64/signal.c |
36 | +++ b/target/arm/vec_internal.h | 21 | +++ b/linux-user/aarch64/signal.c |
37 | @@ -XXX,XX +XXX,XX @@ static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz) | 22 | @@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env, |
38 | } | 23 | } |
39 | } | 24 | } |
40 | 25 | ||
41 | +static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits, | 26 | -static void target_restore_sve_record(CPUARMState *env, |
42 | + bool round, uint32_t *sat) | 27 | - struct target_sve_context *sve, int vq) |
43 | +{ | 28 | +static bool target_restore_sve_record(CPUARMState *env, |
44 | + if (shift <= -bits) { | 29 | + struct target_sve_context *sve, |
45 | + /* Rounding the sign bit always produces 0. */ | 30 | + int size) |
46 | + if (round) { | 31 | { |
47 | + return 0; | 32 | - int i, j; |
48 | + } | 33 | + int i, j, vl, vq; |
49 | + return src >> 31; | 34 | |
50 | + } else if (shift < 0) { | 35 | - /* Note that SVE regs are stored as a byte stream, with each byte element |
51 | + if (round) { | 36 | + if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
52 | + src >>= -shift - 1; | 37 | + return false; |
53 | + return (src >> 1) + (src & 1); | ||
54 | + } | ||
55 | + return src >> -shift; | ||
56 | + } else if (shift < bits) { | ||
57 | + int32_t val = src << shift; | ||
58 | + if (bits == 32) { | ||
59 | + if (!sat || val >> shift == src) { | ||
60 | + return val; | ||
61 | + } | ||
62 | + } else { | ||
63 | + int32_t extval = sextract32(val, 0, bits); | ||
64 | + if (!sat || val == extval) { | ||
65 | + return extval; | ||
66 | + } | ||
67 | + } | ||
68 | + } else if (!sat || src == 0) { | ||
69 | + return 0; | ||
70 | + } | 38 | + } |
71 | + | 39 | + |
72 | + *sat = 1; | 40 | + __get_user(vl, &sve->vl); |
73 | + return (1u << (bits - 1)) - (src >= 0); | 41 | + vq = sve_vq(env); |
74 | +} | ||
75 | + | 42 | + |
76 | +static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits, | 43 | + /* Reject mismatched VL. */ |
77 | + bool round, uint32_t *sat) | 44 | + if (vl != vq * TARGET_SVE_VQ_BYTES) { |
78 | +{ | 45 | + return false; |
79 | + if (shift <= -(bits + round)) { | ||
80 | + return 0; | ||
81 | + } else if (shift < 0) { | ||
82 | + if (round) { | ||
83 | + src >>= -shift - 1; | ||
84 | + return (src >> 1) + (src & 1); | ||
85 | + } | ||
86 | + return src >> -shift; | ||
87 | + } else if (shift < bits) { | ||
88 | + uint32_t val = src << shift; | ||
89 | + if (bits == 32) { | ||
90 | + if (!sat || val >> shift == src) { | ||
91 | + return val; | ||
92 | + } | ||
93 | + } else { | ||
94 | + uint32_t extval = extract32(val, 0, bits); | ||
95 | + if (!sat || val == extval) { | ||
96 | + return extval; | ||
97 | + } | ||
98 | + } | ||
99 | + } else if (!sat || src == 0) { | ||
100 | + return 0; | ||
101 | + } | 46 | + } |
102 | + | 47 | + |
103 | + *sat = 1; | 48 | + /* Accept empty record -- used to clear PSTATE.SM. */ |
104 | + return MAKE_64BIT_MASK(0, bits); | 49 | + if (size <= sizeof(*sve)) { |
105 | +} | 50 | + return true; |
106 | + | ||
107 | +static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits, | ||
108 | + bool round, uint32_t *sat) | ||
109 | +{ | ||
110 | + if (sat && src < 0) { | ||
111 | + *sat = 1; | ||
112 | + return 0; | ||
113 | + } | ||
114 | + return do_uqrshl_bhs(src, shift, bits, round, sat); | ||
115 | +} | ||
116 | + | ||
117 | +static inline int64_t do_sqrshl_d(int64_t src, int64_t shift, | ||
118 | + bool round, uint32_t *sat) | ||
119 | +{ | ||
120 | + if (shift <= -64) { | ||
121 | + /* Rounding the sign bit always produces 0. */ | ||
122 | + if (round) { | ||
123 | + return 0; | ||
124 | + } | ||
125 | + return src >> 63; | ||
126 | + } else if (shift < 0) { | ||
127 | + if (round) { | ||
128 | + src >>= -shift - 1; | ||
129 | + return (src >> 1) + (src & 1); | ||
130 | + } | ||
131 | + return src >> -shift; | ||
132 | + } else if (shift < 64) { | ||
133 | + int64_t val = src << shift; | ||
134 | + if (!sat || val >> shift == src) { | ||
135 | + return val; | ||
136 | + } | ||
137 | + } else if (!sat || src == 0) { | ||
138 | + return 0; | ||
139 | + } | 51 | + } |
140 | + | 52 | + |
141 | + *sat = 1; | 53 | + /* Reject non-empty but incomplete record. */ |
142 | + return src < 0 ? INT64_MIN : INT64_MAX; | 54 | + if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) { |
143 | +} | 55 | + return false; |
144 | + | ||
145 | +static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift, | ||
146 | + bool round, uint32_t *sat) | ||
147 | +{ | ||
148 | + if (shift <= -(64 + round)) { | ||
149 | + return 0; | ||
150 | + } else if (shift < 0) { | ||
151 | + if (round) { | ||
152 | + src >>= -shift - 1; | ||
153 | + return (src >> 1) + (src & 1); | ||
154 | + } | ||
155 | + return src >> -shift; | ||
156 | + } else if (shift < 64) { | ||
157 | + uint64_t val = src << shift; | ||
158 | + if (!sat || val >> shift == src) { | ||
159 | + return val; | ||
160 | + } | ||
161 | + } else if (!sat || src == 0) { | ||
162 | + return 0; | ||
163 | + } | 56 | + } |
164 | + | 57 | + |
165 | + *sat = 1; | 58 | + /* |
166 | + return UINT64_MAX; | 59 | + * Note that SVE regs are stored as a byte stream, with each byte element |
167 | +} | 60 | * at a subsequent address. This corresponds to a little-endian load |
168 | + | 61 | * of our 64-bit hunks. |
169 | +static inline int64_t do_suqrshl_d(int64_t src, int64_t shift, | 62 | */ |
170 | + bool round, uint32_t *sat) | 63 | @@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env, |
171 | +{ | 64 | } |
172 | + if (sat && src < 0) { | 65 | } |
173 | + *sat = 1; | 66 | } |
174 | + return 0; | 67 | + return true; |
175 | + } | ||
176 | + return do_uqrshl_d(src, shift, round, sat); | ||
177 | +} | ||
178 | + | ||
179 | #endif /* TARGET_ARM_VEC_INTERNALS_H */ | ||
180 | diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c | ||
181 | index XXXXXXX..XXXXXXX 100644 | ||
182 | --- a/target/arm/neon_helper.c | ||
183 | +++ b/target/arm/neon_helper.c | ||
184 | @@ -XXX,XX +XXX,XX @@ | ||
185 | #include "cpu.h" | ||
186 | #include "exec/helper-proto.h" | ||
187 | #include "fpu/softfloat.h" | ||
188 | +#include "vec_internal.h" | ||
189 | |||
190 | #define SIGNBIT (uint32_t)0x80000000 | ||
191 | #define SIGNBIT64 ((uint64_t)1 << 63) | ||
192 | @@ -XXX,XX +XXX,XX @@ NEON_POP(pmax_s16, neon_s16, 2) | ||
193 | NEON_POP(pmax_u16, neon_u16, 2) | ||
194 | #undef NEON_FN | ||
195 | |||
196 | -#define NEON_FN(dest, src1, src2) do { \ | ||
197 | - int8_t tmp; \ | ||
198 | - tmp = (int8_t)src2; \ | ||
199 | - if (tmp >= (ssize_t)sizeof(src1) * 8 || \ | ||
200 | - tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
201 | - dest = 0; \ | ||
202 | - } else if (tmp < 0) { \ | ||
203 | - dest = src1 >> -tmp; \ | ||
204 | - } else { \ | ||
205 | - dest = src1 << tmp; \ | ||
206 | - }} while (0) | ||
207 | +#define NEON_FN(dest, src1, src2) \ | ||
208 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, NULL)) | ||
209 | NEON_VOP(shl_u16, neon_u16, 2) | ||
210 | #undef NEON_FN | ||
211 | |||
212 | -#define NEON_FN(dest, src1, src2) do { \ | ||
213 | - int8_t tmp; \ | ||
214 | - tmp = (int8_t)src2; \ | ||
215 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
216 | - dest = 0; \ | ||
217 | - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
218 | - dest = src1 >> (sizeof(src1) * 8 - 1); \ | ||
219 | - } else if (tmp < 0) { \ | ||
220 | - dest = src1 >> -tmp; \ | ||
221 | - } else { \ | ||
222 | - dest = src1 << tmp; \ | ||
223 | - }} while (0) | ||
224 | +#define NEON_FN(dest, src1, src2) \ | ||
225 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, NULL)) | ||
226 | NEON_VOP(shl_s16, neon_s16, 2) | ||
227 | #undef NEON_FN | ||
228 | |||
229 | -#define NEON_FN(dest, src1, src2) do { \ | ||
230 | - int8_t tmp; \ | ||
231 | - tmp = (int8_t)src2; \ | ||
232 | - if ((tmp >= (ssize_t)sizeof(src1) * 8) \ | ||
233 | - || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \ | ||
234 | - dest = 0; \ | ||
235 | - } else if (tmp < 0) { \ | ||
236 | - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ | ||
237 | - } else { \ | ||
238 | - dest = src1 << tmp; \ | ||
239 | - }} while (0) | ||
240 | +#define NEON_FN(dest, src1, src2) \ | ||
241 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL)) | ||
242 | NEON_VOP(rshl_s8, neon_s8, 4) | ||
243 | +#undef NEON_FN | ||
244 | + | ||
245 | +#define NEON_FN(dest, src1, src2) \ | ||
246 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL)) | ||
247 | NEON_VOP(rshl_s16, neon_s16, 2) | ||
248 | #undef NEON_FN | ||
249 | |||
250 | -/* The addition of the rounding constant may overflow, so we use an | ||
251 | - * intermediate 64 bit accumulator. */ | ||
252 | -uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) | ||
253 | +uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift) | ||
254 | { | ||
255 | - int32_t dest; | ||
256 | - int32_t val = (int32_t)valop; | ||
257 | - int8_t shift = (int8_t)shiftop; | ||
258 | - if ((shift >= 32) || (shift <= -32)) { | ||
259 | - dest = 0; | ||
260 | - } else if (shift < 0) { | ||
261 | - int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); | ||
262 | - dest = big_dest >> -shift; | ||
263 | - } else { | ||
264 | - dest = val << shift; | ||
265 | - } | ||
266 | - return dest; | ||
267 | + return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL); | ||
268 | } | 68 | } |
269 | 69 | ||
270 | -/* Handling addition overflow with 64 bit input values is more | 70 | static int target_restore_sigframe(CPUARMState *env, |
271 | - * tricky than with 32 bit values. */ | 71 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
272 | -uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) | 72 | struct target_sve_context *sve = NULL; |
273 | +uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift) | 73 | uint64_t extra_datap = 0; |
274 | { | 74 | bool used_extra = false; |
275 | - int8_t shift = (int8_t)shiftop; | 75 | - int vq = 0, sve_size = 0; |
276 | - int64_t val = valop; | 76 | + int sve_size = 0; |
277 | - if ((shift >= 64) || (shift <= -64)) { | 77 | |
278 | - val = 0; | 78 | target_restore_general_frame(env, sf); |
279 | - } else if (shift < 0) { | 79 | |
280 | - val >>= (-shift - 1); | 80 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
281 | - if (val == INT64_MAX) { | 81 | if (sve || size < sizeof(struct target_sve_context)) { |
282 | - /* In this case, it means that the rounding constant is 1, | 82 | goto err; |
283 | - * and the addition would overflow. Return the actual | 83 | } |
284 | - * result directly. */ | 84 | - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
285 | - val = 0x4000000000000000LL; | 85 | - vq = sve_vq(env); |
286 | - } else { | 86 | - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); |
287 | - val++; | 87 | - if (size == sve_size) { |
288 | - val >>= 1; | 88 | - sve = (struct target_sve_context *)ctx; |
289 | - } | 89 | - break; |
290 | - } else { | 90 | - } |
291 | - val <<= shift; | 91 | - } |
292 | - } | 92 | - goto err; |
293 | - return val; | 93 | + sve = (struct target_sve_context *)ctx; |
294 | + return do_sqrshl_d(val, (int8_t)shift, true, NULL); | 94 | + sve_size = size; |
295 | } | 95 | + break; |
296 | 96 | ||
297 | -#define NEON_FN(dest, src1, src2) do { \ | 97 | case TARGET_EXTRA_MAGIC: |
298 | - int8_t tmp; \ | 98 | if (extra || size != sizeof(struct target_extra_context)) { |
299 | - tmp = (int8_t)src2; \ | 99 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
300 | - if (tmp >= (ssize_t)sizeof(src1) * 8 || \ | 100 | } |
301 | - tmp < -(ssize_t)sizeof(src1) * 8) { \ | 101 | |
302 | - dest = 0; \ | 102 | /* SVE data, if present, overwrites FPSIMD data. */ |
303 | - } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ | 103 | - if (sve) { |
304 | - dest = src1 >> (-tmp - 1); \ | 104 | - target_restore_sve_record(env, sve, vq); |
305 | - } else if (tmp < 0) { \ | 105 | + if (sve && !target_restore_sve_record(env, sve, sve_size)) { |
306 | - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ | 106 | + goto err; |
307 | - } else { \ | 107 | } |
308 | - dest = src1 << tmp; \ | 108 | unlock_user(extra, extra_datap, 0); |
309 | - }} while (0) | 109 | return 0; |
310 | +#define NEON_FN(dest, src1, src2) \ | ||
311 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL)) | ||
312 | NEON_VOP(rshl_u8, neon_u8, 4) | ||
313 | +#undef NEON_FN | ||
314 | + | ||
315 | +#define NEON_FN(dest, src1, src2) \ | ||
316 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL)) | ||
317 | NEON_VOP(rshl_u16, neon_u16, 2) | ||
318 | #undef NEON_FN | ||
319 | |||
320 | -/* The addition of the rounding constant may overflow, so we use an | ||
321 | - * intermediate 64 bit accumulator. */ | ||
322 | -uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) | ||
323 | +uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift) | ||
324 | { | ||
325 | - uint32_t dest; | ||
326 | - int8_t shift = (int8_t)shiftop; | ||
327 | - if (shift >= 32 || shift < -32) { | ||
328 | - dest = 0; | ||
329 | - } else if (shift == -32) { | ||
330 | - dest = val >> 31; | ||
331 | - } else if (shift < 0) { | ||
332 | - uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); | ||
333 | - dest = big_dest >> -shift; | ||
334 | - } else { | ||
335 | - dest = val << shift; | ||
336 | - } | ||
337 | - return dest; | ||
338 | + return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL); | ||
339 | } | ||
340 | |||
341 | -/* Handling addition overflow with 64 bit input values is more | ||
342 | - * tricky than with 32 bit values. */ | ||
343 | -uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) | ||
344 | +uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift) | ||
345 | { | ||
346 | - int8_t shift = (uint8_t)shiftop; | ||
347 | - if (shift >= 64 || shift < -64) { | ||
348 | - val = 0; | ||
349 | - } else if (shift == -64) { | ||
350 | - /* Rounding a 1-bit result just preserves that bit. */ | ||
351 | - val >>= 63; | ||
352 | - } else if (shift < 0) { | ||
353 | - val >>= (-shift - 1); | ||
354 | - if (val == UINT64_MAX) { | ||
355 | - /* In this case, it means that the rounding constant is 1, | ||
356 | - * and the addition would overflow. Return the actual | ||
357 | - * result directly. */ | ||
358 | - val = 0x8000000000000000ULL; | ||
359 | - } else { | ||
360 | - val++; | ||
361 | - val >>= 1; | ||
362 | - } | ||
363 | - } else { | ||
364 | - val <<= shift; | ||
365 | - } | ||
366 | - return val; | ||
367 | + return do_uqrshl_d(val, (int8_t)shift, true, NULL); | ||
368 | } | ||
369 | |||
370 | -#define NEON_FN(dest, src1, src2) do { \ | ||
371 | - int8_t tmp; \ | ||
372 | - tmp = (int8_t)src2; \ | ||
373 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
374 | - if (src1) { \ | ||
375 | - SET_QC(); \ | ||
376 | - dest = ~0; \ | ||
377 | - } else { \ | ||
378 | - dest = 0; \ | ||
379 | - } \ | ||
380 | - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
381 | - dest = 0; \ | ||
382 | - } else if (tmp < 0) { \ | ||
383 | - dest = src1 >> -tmp; \ | ||
384 | - } else { \ | ||
385 | - dest = src1 << tmp; \ | ||
386 | - if ((dest >> tmp) != src1) { \ | ||
387 | - SET_QC(); \ | ||
388 | - dest = ~0; \ | ||
389 | - } \ | ||
390 | - }} while (0) | ||
391 | +#define NEON_FN(dest, src1, src2) \ | ||
392 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) | ||
393 | NEON_VOP_ENV(qshl_u8, neon_u8, 4) | ||
394 | +#undef NEON_FN | ||
395 | + | ||
396 | +#define NEON_FN(dest, src1, src2) \ | ||
397 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) | ||
398 | NEON_VOP_ENV(qshl_u16, neon_u16, 2) | ||
399 | -NEON_VOP_ENV(qshl_u32, neon_u32, 1) | ||
400 | #undef NEON_FN | ||
401 | |||
402 | -uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) | ||
403 | +uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift) | ||
404 | { | ||
405 | - int8_t shift = (int8_t)shiftop; | ||
406 | - if (shift >= 64) { | ||
407 | - if (val) { | ||
408 | - val = ~(uint64_t)0; | ||
409 | - SET_QC(); | ||
410 | - } | ||
411 | - } else if (shift <= -64) { | ||
412 | - val = 0; | ||
413 | - } else if (shift < 0) { | ||
414 | - val >>= -shift; | ||
415 | - } else { | ||
416 | - uint64_t tmp = val; | ||
417 | - val <<= shift; | ||
418 | - if ((val >> shift) != tmp) { | ||
419 | - SET_QC(); | ||
420 | - val = ~(uint64_t)0; | ||
421 | - } | ||
422 | - } | ||
423 | - return val; | ||
424 | + return do_uqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc); | ||
425 | } | ||
426 | |||
427 | -#define NEON_FN(dest, src1, src2) do { \ | ||
428 | - int8_t tmp; \ | ||
429 | - tmp = (int8_t)src2; \ | ||
430 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
431 | - if (src1) { \ | ||
432 | - SET_QC(); \ | ||
433 | - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ | ||
434 | - if (src1 > 0) { \ | ||
435 | - dest--; \ | ||
436 | - } \ | ||
437 | - } else { \ | ||
438 | - dest = src1; \ | ||
439 | - } \ | ||
440 | - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
441 | - dest = src1 >> 31; \ | ||
442 | - } else if (tmp < 0) { \ | ||
443 | - dest = src1 >> -tmp; \ | ||
444 | - } else { \ | ||
445 | - dest = src1 << tmp; \ | ||
446 | - if ((dest >> tmp) != src1) { \ | ||
447 | - SET_QC(); \ | ||
448 | - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ | ||
449 | - if (src1 > 0) { \ | ||
450 | - dest--; \ | ||
451 | - } \ | ||
452 | - } \ | ||
453 | - }} while (0) | ||
454 | +uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift) | ||
455 | +{ | ||
456 | + return do_uqrshl_d(val, (int8_t)shift, false, env->vfp.qc); | ||
457 | +} | ||
458 | + | ||
459 | +#define NEON_FN(dest, src1, src2) \ | ||
460 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) | ||
461 | NEON_VOP_ENV(qshl_s8, neon_s8, 4) | ||
462 | +#undef NEON_FN | ||
463 | + | ||
464 | +#define NEON_FN(dest, src1, src2) \ | ||
465 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) | ||
466 | NEON_VOP_ENV(qshl_s16, neon_s16, 2) | ||
467 | -NEON_VOP_ENV(qshl_s32, neon_s32, 1) | ||
468 | #undef NEON_FN | ||
469 | |||
470 | -uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) | ||
471 | +uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift) | ||
472 | { | ||
473 | - int8_t shift = (uint8_t)shiftop; | ||
474 | - int64_t val = valop; | ||
475 | - if (shift >= 64) { | ||
476 | - if (val) { | ||
477 | - SET_QC(); | ||
478 | - val = (val >> 63) ^ ~SIGNBIT64; | ||
479 | - } | ||
480 | - } else if (shift <= -64) { | ||
481 | - val >>= 63; | ||
482 | - } else if (shift < 0) { | ||
483 | - val >>= -shift; | ||
484 | - } else { | ||
485 | - int64_t tmp = val; | ||
486 | - val <<= shift; | ||
487 | - if ((val >> shift) != tmp) { | ||
488 | - SET_QC(); | ||
489 | - val = (tmp >> 63) ^ ~SIGNBIT64; | ||
490 | - } | ||
491 | - } | ||
492 | - return val; | ||
493 | + return do_sqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc); | ||
494 | } | ||
495 | |||
496 | -#define NEON_FN(dest, src1, src2) do { \ | ||
497 | - if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ | ||
498 | - SET_QC(); \ | ||
499 | - dest = 0; \ | ||
500 | - } else { \ | ||
501 | - int8_t tmp; \ | ||
502 | - tmp = (int8_t)src2; \ | ||
503 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
504 | - if (src1) { \ | ||
505 | - SET_QC(); \ | ||
506 | - dest = ~0; \ | ||
507 | - } else { \ | ||
508 | - dest = 0; \ | ||
509 | - } \ | ||
510 | - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
511 | - dest = 0; \ | ||
512 | - } else if (tmp < 0) { \ | ||
513 | - dest = src1 >> -tmp; \ | ||
514 | - } else { \ | ||
515 | - dest = src1 << tmp; \ | ||
516 | - if ((dest >> tmp) != src1) { \ | ||
517 | - SET_QC(); \ | ||
518 | - dest = ~0; \ | ||
519 | - } \ | ||
520 | - } \ | ||
521 | - }} while (0) | ||
522 | -NEON_VOP_ENV(qshlu_s8, neon_u8, 4) | ||
523 | -NEON_VOP_ENV(qshlu_s16, neon_u16, 2) | ||
524 | +uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift) | ||
525 | +{ | ||
526 | + return do_sqrshl_d(val, (int8_t)shift, false, env->vfp.qc); | ||
527 | +} | ||
528 | + | ||
529 | +#define NEON_FN(dest, src1, src2) \ | ||
530 | + (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) | ||
531 | +NEON_VOP_ENV(qshlu_s8, neon_s8, 4) | ||
532 | #undef NEON_FN | ||
533 | |||
534 | -uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) | ||
535 | +#define NEON_FN(dest, src1, src2) \ | ||
536 | + (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) | ||
537 | +NEON_VOP_ENV(qshlu_s16, neon_s16, 2) | ||
538 | +#undef NEON_FN | ||
539 | + | ||
540 | +uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift) | ||
541 | { | ||
542 | - if ((int32_t)valop < 0) { | ||
543 | - SET_QC(); | ||
544 | - return 0; | ||
545 | - } | ||
546 | - return helper_neon_qshl_u32(env, valop, shiftop); | ||
547 | + return do_suqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc); | ||
548 | } | ||
549 | |||
550 | -uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) | ||
551 | +uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift) | ||
552 | { | ||
553 | - if ((int64_t)valop < 0) { | ||
554 | - SET_QC(); | ||
555 | - return 0; | ||
556 | - } | ||
557 | - return helper_neon_qshl_u64(env, valop, shiftop); | ||
558 | + return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc); | ||
559 | } | ||
560 | |||
561 | -#define NEON_FN(dest, src1, src2) do { \ | ||
562 | - int8_t tmp; \ | ||
563 | - tmp = (int8_t)src2; \ | ||
564 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
565 | - if (src1) { \ | ||
566 | - SET_QC(); \ | ||
567 | - dest = ~0; \ | ||
568 | - } else { \ | ||
569 | - dest = 0; \ | ||
570 | - } \ | ||
571 | - } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \ | ||
572 | - dest = 0; \ | ||
573 | - } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ | ||
574 | - dest = src1 >> (sizeof(src1) * 8 - 1); \ | ||
575 | - } else if (tmp < 0) { \ | ||
576 | - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ | ||
577 | - } else { \ | ||
578 | - dest = src1 << tmp; \ | ||
579 | - if ((dest >> tmp) != src1) { \ | ||
580 | - SET_QC(); \ | ||
581 | - dest = ~0; \ | ||
582 | - } \ | ||
583 | - }} while (0) | ||
584 | +#define NEON_FN(dest, src1, src2) \ | ||
585 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc)) | ||
586 | NEON_VOP_ENV(qrshl_u8, neon_u8, 4) | ||
587 | +#undef NEON_FN | ||
588 | + | ||
589 | +#define NEON_FN(dest, src1, src2) \ | ||
590 | + (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc)) | ||
591 | NEON_VOP_ENV(qrshl_u16, neon_u16, 2) | ||
592 | #undef NEON_FN | ||
593 | |||
594 | -/* The addition of the rounding constant may overflow, so we use an | ||
595 | - * intermediate 64 bit accumulator. */ | ||
596 | -uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) | ||
597 | +uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift) | ||
598 | { | ||
599 | - uint32_t dest; | ||
600 | - int8_t shift = (int8_t)shiftop; | ||
601 | - if (shift >= 32) { | ||
602 | - if (val) { | ||
603 | - SET_QC(); | ||
604 | - dest = ~0; | ||
605 | - } else { | ||
606 | - dest = 0; | ||
607 | - } | ||
608 | - } else if (shift < -32) { | ||
609 | - dest = 0; | ||
610 | - } else if (shift == -32) { | ||
611 | - dest = val >> 31; | ||
612 | - } else if (shift < 0) { | ||
613 | - uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); | ||
614 | - dest = big_dest >> -shift; | ||
615 | - } else { | ||
616 | - dest = val << shift; | ||
617 | - if ((dest >> shift) != val) { | ||
618 | - SET_QC(); | ||
619 | - dest = ~0; | ||
620 | - } | ||
621 | - } | ||
622 | - return dest; | ||
623 | + return do_uqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc); | ||
624 | } | ||
625 | |||
626 | -/* Handling addition overflow with 64 bit input values is more | ||
627 | - * tricky than with 32 bit values. */ | ||
628 | -uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) | ||
629 | +uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift) | ||
630 | { | ||
631 | - int8_t shift = (int8_t)shiftop; | ||
632 | - if (shift >= 64) { | ||
633 | - if (val) { | ||
634 | - SET_QC(); | ||
635 | - val = ~0; | ||
636 | - } | ||
637 | - } else if (shift < -64) { | ||
638 | - val = 0; | ||
639 | - } else if (shift == -64) { | ||
640 | - val >>= 63; | ||
641 | - } else if (shift < 0) { | ||
642 | - val >>= (-shift - 1); | ||
643 | - if (val == UINT64_MAX) { | ||
644 | - /* In this case, it means that the rounding constant is 1, | ||
645 | - * and the addition would overflow. Return the actual | ||
646 | - * result directly. */ | ||
647 | - val = 0x8000000000000000ULL; | ||
648 | - } else { | ||
649 | - val++; | ||
650 | - val >>= 1; | ||
651 | - } | ||
652 | - } else { \ | ||
653 | - uint64_t tmp = val; | ||
654 | - val <<= shift; | ||
655 | - if ((val >> shift) != tmp) { | ||
656 | - SET_QC(); | ||
657 | - val = ~0; | ||
658 | - } | ||
659 | - } | ||
660 | - return val; | ||
661 | + return do_uqrshl_d(val, (int8_t)shift, true, env->vfp.qc); | ||
662 | } | ||
663 | |||
664 | -#define NEON_FN(dest, src1, src2) do { \ | ||
665 | - int8_t tmp; \ | ||
666 | - tmp = (int8_t)src2; \ | ||
667 | - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ | ||
668 | - if (src1) { \ | ||
669 | - SET_QC(); \ | ||
670 | - dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \ | ||
671 | - if (src1 > 0) { \ | ||
672 | - dest--; \ | ||
673 | - } \ | ||
674 | - } else { \ | ||
675 | - dest = 0; \ | ||
676 | - } \ | ||
677 | - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ | ||
678 | - dest = 0; \ | ||
679 | - } else if (tmp < 0) { \ | ||
680 | - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ | ||
681 | - } else { \ | ||
682 | - dest = src1 << tmp; \ | ||
683 | - if ((dest >> tmp) != src1) { \ | ||
684 | - SET_QC(); \ | ||
685 | - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ | ||
686 | - if (src1 > 0) { \ | ||
687 | - dest--; \ | ||
688 | - } \ | ||
689 | - } \ | ||
690 | - }} while (0) | ||
691 | +#define NEON_FN(dest, src1, src2) \ | ||
692 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc)) | ||
693 | NEON_VOP_ENV(qrshl_s8, neon_s8, 4) | ||
694 | +#undef NEON_FN | ||
695 | + | ||
696 | +#define NEON_FN(dest, src1, src2) \ | ||
697 | + (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc)) | ||
698 | NEON_VOP_ENV(qrshl_s16, neon_s16, 2) | ||
699 | #undef NEON_FN | ||
700 | |||
701 | -/* The addition of the rounding constant may overflow, so we use an | ||
702 | - * intermediate 64 bit accumulator. */ | ||
703 | -uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) | ||
704 | +uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift) | ||
705 | { | ||
706 | - int32_t dest; | ||
707 | - int32_t val = (int32_t)valop; | ||
708 | - int8_t shift = (int8_t)shiftop; | ||
709 | - if (shift >= 32) { | ||
710 | - if (val) { | ||
711 | - SET_QC(); | ||
712 | - dest = (val >> 31) ^ ~SIGNBIT; | ||
713 | - } else { | ||
714 | - dest = 0; | ||
715 | - } | ||
716 | - } else if (shift <= -32) { | ||
717 | - dest = 0; | ||
718 | - } else if (shift < 0) { | ||
719 | - int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); | ||
720 | - dest = big_dest >> -shift; | ||
721 | - } else { | ||
722 | - dest = val << shift; | ||
723 | - if ((dest >> shift) != val) { | ||
724 | - SET_QC(); | ||
725 | - dest = (val >> 31) ^ ~SIGNBIT; | ||
726 | - } | ||
727 | - } | ||
728 | - return dest; | ||
729 | + return do_sqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc); | ||
730 | } | ||
731 | |||
732 | -/* Handling addition overflow with 64 bit input values is more | ||
733 | - * tricky than with 32 bit values. */ | ||
734 | -uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) | ||
735 | +uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift) | ||
736 | { | ||
737 | - int8_t shift = (uint8_t)shiftop; | ||
738 | - int64_t val = valop; | ||
739 | - | ||
740 | - if (shift >= 64) { | ||
741 | - if (val) { | ||
742 | - SET_QC(); | ||
743 | - val = (val >> 63) ^ ~SIGNBIT64; | ||
744 | - } | ||
745 | - } else if (shift <= -64) { | ||
746 | - val = 0; | ||
747 | - } else if (shift < 0) { | ||
748 | - val >>= (-shift - 1); | ||
749 | - if (val == INT64_MAX) { | ||
750 | - /* In this case, it means that the rounding constant is 1, | ||
751 | - * and the addition would overflow. Return the actual | ||
752 | - * result directly. */ | ||
753 | - val = 0x4000000000000000ULL; | ||
754 | - } else { | ||
755 | - val++; | ||
756 | - val >>= 1; | ||
757 | - } | ||
758 | - } else { | ||
759 | - int64_t tmp = val; | ||
760 | - val <<= shift; | ||
761 | - if ((val >> shift) != tmp) { | ||
762 | - SET_QC(); | ||
763 | - val = (tmp >> 63) ^ ~SIGNBIT64; | ||
764 | - } | ||
765 | - } | ||
766 | - return val; | ||
767 | + return do_sqrshl_d(val, (int8_t)shift, true, env->vfp.qc); | ||
768 | } | ||
769 | |||
770 | uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) | ||
771 | -- | 110 | -- |
772 | 2.20.1 | 111 | 2.25.1 |
773 | |||
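On the signal.c side, the validation that target_restore_sve_record() now performs before touching any register state condenses to the following sketch (a restatement of the checks visible in the diff above, with invented parameter names; header stands for sizeof(struct target_sve_context), full for TARGET_SVE_SIG_CONTEXT_SIZE(vq), and vl_ok for vl == vq * TARGET_SVE_VQ_BYTES):

    #include <stdbool.h>

    static bool sve_record_acceptable(int size, int header, int full,
                                      bool vl_ok)
    {
        if (!vl_ok) {
            return false;        /* reject mismatched vector length */
        }
        if (size <= header) {
            return true;         /* accept a bare header */
        }
        return size >= full;     /* reject non-empty but incomplete */
    }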
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Set the SM bit in the SVE record on signal delivery, and create the ZA record. |
4 | Restore SM and ZA state according to the records present on return. | ||
2 | 5 | ||
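The ZA record introduced below is laid out by the TARGET_ZA_SIG_* macros as a 16-byte-aligned header followed by vl row vectors of vl bytes each, where vl = vq * TARGET_SVE_VQ_BYTES. A worked size calculation (a standalone example written for this note; it assumes the header works out to 16 bytes, which the struct definition in the diff -- two u32 plus four u16 -- implies):

    #include <stdio.h>

    int main(void)
    {
        const int vq_bytes = 16;   /* TARGET_SVE_VQ_BYTES */
        const int header = 16;     /* sizeof(struct target_za_context),
                                      already 16-byte aligned */

        for (int vq = 1; vq <= 4; ++vq) {
            int vl = vq * vq_bytes;
            printf("vq=%d: vl=%d, ZA data=%d, record=%d bytes\n",
                   vq, vl, vl * vl, header + vl * vl);
        }
        return 0;
    }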
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-27-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-41-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 16 ++++ | 11 | linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++--- |
9 | target/arm/sve.decode | 8 ++ | 12 | 1 file changed, 154 insertions(+), 13 deletions(-) |
10 | target/arm/sve_helper.c | 54 ++++++++++++- | ||
11 | target/arm/translate-sve.c | 160 +++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 236 insertions(+), 2 deletions(-) | ||
13 | 13 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 14 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 16 | --- a/linux-user/aarch64/signal.c |
17 | +++ b/target/arm/helper-sve.h | 17 | +++ b/linux-user/aarch64/signal.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 18 | @@ -XXX,XX +XXX,XX @@ struct target_sve_context { |
19 | DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 19 | |
20 | DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 20 | #define TARGET_SVE_SIG_FLAG_SM 1 |
21 | 21 | ||
22 | +DEF_HELPER_FLAGS_3(sve2_shrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 22 | +#define TARGET_ZA_MAGIC 0x54366345 |
23 | +DEF_HELPER_FLAGS_3(sve2_shrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 23 | + |
24 | +DEF_HELPER_FLAGS_3(sve2_shrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 24 | +struct target_za_context { |
25 | + | 25 | + struct target_aarch64_ctx head; |
26 | +DEF_HELPER_FLAGS_3(sve2_shrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 26 | + uint16_t vl; |
27 | +DEF_HELPER_FLAGS_3(sve2_shrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 27 | + uint16_t reserved[3]; |
28 | +DEF_HELPER_FLAGS_3(sve2_shrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 28 | + /* The actual ZA data immediately follows. */ |
29 | + | 29 | +}; |
30 | +DEF_HELPER_FLAGS_3(sve2_rshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 30 | + |
31 | +DEF_HELPER_FLAGS_3(sve2_rshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 31 | +#define TARGET_ZA_SIG_REGS_OFFSET \ |
32 | +DEF_HELPER_FLAGS_3(sve2_rshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 32 | + QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES) |
33 | + | 33 | +#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \ |
34 | +DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 34 | + (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N)) |
35 | +DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 35 | +#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ |
36 | +DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 36 | + TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) |
37 | + | 37 | + |
38 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | 38 | struct target_rt_sigframe { |
39 | void, ptr, ptr, ptr, ptr, ptr, i32) | 39 | struct target_siginfo info; |
40 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | 40 | struct target_ucontext uc; |
41 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 41 | @@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end) |
42 | index XXXXXXX..XXXXXXX 100644 | 42 | } |
43 | --- a/target/arm/sve.decode | 43 | |
44 | +++ b/target/arm/sve.decode | 44 | static void target_setup_sve_record(struct target_sve_context *sve, |
45 | @@ -XXX,XX +XXX,XX @@ UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl | 45 | - CPUARMState *env, int vq, int size) |
46 | SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl | 46 | + CPUARMState *env, int size) |
47 | SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl | 47 | { |
48 | 48 | - int i, j; | |
49 | +## SVE2 bitwise shift right narrow | 49 | + int i, j, vq = sve_vq(env); |
50 | + | 50 | |
51 | +# Bit 23 == 0 is handled by esz > 0 in the translator. | 51 | memset(sve, 0, sizeof(*sve)); |
52 | +SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr | 52 | __put_user(TARGET_SVE_MAGIC, &sve->head.magic); |
53 | +SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr | 53 | @@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve, |
54 | +RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr | 54 | } |
55 | +RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr | 55 | } |
56 | + | 56 | |
57 | ## SVE2 floating-point pairwise operations | 57 | +static void target_setup_za_record(struct target_za_context *za, |
58 | 58 | + CPUARMState *env, int size) | |
59 | FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm | ||
60 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
61 | index XXXXXXX..XXXXXXX 100644 | ||
62 | --- a/target/arm/sve_helper.c | ||
63 | +++ b/target/arm/sve_helper.c | ||
64 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ | ||
65 | when N is negative, add 2**M-1. */ | ||
66 | #define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M) | ||
67 | |||
68 | +static inline uint64_t do_urshr(uint64_t x, unsigned sh) | ||
69 | +{ | 59 | +{ |
70 | + if (likely(sh < 64)) { | 60 | + int vq = sme_vq(env); |
71 | + return (x >> sh) + ((x >> (sh - 1)) & 1); | 61 | + int vl = vq * TARGET_SVE_VQ_BYTES; |
72 | + } else if (sh == 64) { | 62 | + int i, j; |
73 | + return x >> 63; | 63 | + |
74 | + } else { | 64 | + memset(za, 0, sizeof(*za)); |
75 | + return 0; | 65 | + __put_user(TARGET_ZA_MAGIC, &za->head.magic); |
66 | + __put_user(size, &za->head.size); | ||
67 | + __put_user(vl, &za->vl); | ||
68 | + | ||
69 | + if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) { | ||
70 | + return; | ||
71 | + } | ||
72 | + assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq)); | ||
73 | + | ||
74 | + /* | ||
75 | + * Note that ZA vectors are stored as a byte stream, | ||
76 | + * with each byte element at a subsequent address. | ||
77 | + */ | ||
78 | + for (i = 0; i < vl; ++i) { | ||
79 | + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); | ||
80 | + for (j = 0; j < vq * 2; ++j) { | ||
81 | + __put_user_e(env->zarray[i].d[j], z + j, le); | ||
82 | + } | ||
76 | + } | 83 | + } |
77 | +} | 84 | +} |
78 | + | 85 | + |
79 | DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR) | 86 | static void target_restore_general_frame(CPUARMState *env, |
80 | DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR) | 87 | struct target_rt_sigframe *sf) |
81 | DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR) | 88 | { |
82 | @@ -XXX,XX +XXX,XX @@ DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD) | 89 | @@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env, |
83 | DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD) | 90 | |
84 | DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD) | 91 | static bool target_restore_sve_record(CPUARMState *env, |
85 | 92 | struct target_sve_context *sve, | |
86 | -#undef DO_SHR | 93 | - int size) |
87 | -#undef DO_SHL | 94 | + int size, int *svcr) |
88 | #undef DO_ASRD | 95 | { |
89 | #undef DO_ZPZI | 96 | - int i, j, vl, vq; |
90 | #undef DO_ZPZI_D | 97 | + int i, j, vl, vq, flags; |
91 | 98 | + bool sm; | |
92 | +#define DO_SHRNB(NAME, TYPEW, TYPEN, OP) \ | 99 | |
93 | +void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | 100 | - if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
94 | +{ \ | 101 | + __get_user(vl, &sve->vl); |
95 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | 102 | + __get_user(flags, &sve->flags); |
96 | + int shift = simd_data(desc); \ | 103 | + |
97 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | 104 | + sm = flags & TARGET_SVE_SIG_FLAG_SM; |
98 | + TYPEW nn = *(TYPEW *)(vn + i); \ | 105 | + |
99 | + *(TYPEW *)(vd + i) = (TYPEN)OP(nn, shift); \ | 106 | + /* The cpu must support Streaming or Non-streaming SVE. */ |
100 | + } \ | 107 | + if (sm |
101 | +} | 108 | + ? !cpu_isar_feature(aa64_sme, env_archcpu(env)) |
102 | + | 109 | + : !cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
103 | +#define DO_SHRNT(NAME, TYPEW, TYPEN, HW, HN, OP) \ | 110 | return false; |
104 | +void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | 111 | } |
105 | +{ \ | 112 | |
106 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | 113 | - __get_user(vl, &sve->vl); |
107 | + int shift = simd_data(desc); \ | 114 | - vq = sve_vq(env); |
108 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | 115 | + /* |
109 | + TYPEW nn = *(TYPEW *)(vn + HW(i)); \ | 116 | + * Note that we cannot use sve_vq() because that depends on the |
110 | + *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, shift); \ | 117 | + * current setting of PSTATE.SM, not the state to be restored. |
111 | + } \ | 118 | + */ |
112 | +} | 119 | + vq = sve_vqm1_for_el_sm(env, 0, sm) + 1; |
113 | + | 120 | |
114 | +DO_SHRNB(sve2_shrnb_h, uint16_t, uint8_t, DO_SHR) | 121 | /* Reject mismatched VL. */ |
115 | +DO_SHRNB(sve2_shrnb_s, uint32_t, uint16_t, DO_SHR) | 122 | if (vl != vq * TARGET_SVE_VQ_BYTES) { |
116 | +DO_SHRNB(sve2_shrnb_d, uint64_t, uint32_t, DO_SHR) | 123 | @@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env, |
117 | + | 124 | return false; |
118 | +DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR) | 125 | } |
119 | +DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR) | 126 | |
120 | +DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, , H1_4, DO_SHR) | 127 | + *svcr = FIELD_DP64(*svcr, SVCR, SM, sm); |
121 | + | 128 | + |
122 | +DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, do_urshr) | 129 | /* |
123 | +DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, do_urshr) | 130 | * Note that SVE regs are stored as a byte stream, with each byte element |
124 | +DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, do_urshr) | 131 | * at a subsequent address. This corresponds to a little-endian load |
125 | + | 132 | @@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env, |
126 | +DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr) | 133 | return true; |
127 | +DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr) | ||
128 | +DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr) | ||
129 | + | ||
130 | +#undef DO_SHRNB | ||
131 | +#undef DO_SHRNT | ||
132 | + | ||
133 | /* Fully general four-operand expander, controlled by a predicate. | ||
134 | */ | ||
135 | #define DO_ZPZZZ(NAME, TYPE, H, OP) \ | ||
136 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
137 | index XXXXXXX..XXXXXXX 100644 | ||
138 | --- a/target/arm/translate-sve.c | ||
139 | +++ b/target/arm/translate-sve.c | ||
140 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a) | ||
141 | return do_sve2_narrow_extract(s, a, ops); | ||
142 | } | 134 | } |
143 | 135 | ||
144 | +static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a, | 136 | +static bool target_restore_za_record(CPUARMState *env, |
145 | + const GVecGen2i ops[3]) | 137 | + struct target_za_context *za, |
138 | + int size, int *svcr) | ||
146 | +{ | 139 | +{ |
147 | + if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) { | 140 | + int i, j, vl, vq; |
141 | + | ||
142 | + if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) { | ||
148 | + return false; | 143 | + return false; |
149 | + } | 144 | + } |
150 | + assert(a->imm > 0 && a->imm <= (8 << a->esz)); | 145 | + |
151 | + if (sve_access_check(s)) { | 146 | + __get_user(vl, &za->vl); |
152 | + unsigned vsz = vec_full_reg_size(s); | 147 | + vq = sme_vq(env); |
153 | + tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), | 148 | + |
154 | + vec_full_reg_offset(s, a->rn), | 149 | + /* Reject mismatched VL. */ |
155 | + vsz, vsz, a->imm, &ops[a->esz]); | 150 | + if (vl != vq * TARGET_SVE_VQ_BYTES) { |
151 | + return false; | ||
152 | + } | ||
153 | + | ||
154 | + /* Accept empty record -- used to clear PSTATE.ZA. */ | ||
155 | + if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) { | ||
156 | + return true; | ||
157 | + } | ||
158 | + | ||
159 | + /* Reject non-empty but incomplete record. */ | ||
160 | + if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) { | ||
161 | + return false; | ||
162 | + } | ||
163 | + | ||
164 | + *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1); | ||
165 | + | ||
166 | + for (i = 0; i < vl; ++i) { | ||
167 | + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); | ||
168 | + for (j = 0; j < vq * 2; ++j) { | ||
169 | + __get_user_e(env->zarray[i].d[j], z + j, le); | ||
170 | + } | ||
156 | + } | 171 | + } |
157 | + return true; | 172 | + return true; |
158 | +} | 173 | +} |
159 | + | 174 | + |
160 | +static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) | 175 | static int target_restore_sigframe(CPUARMState *env, |
161 | +{ | 176 | struct target_rt_sigframe *sf) |
162 | + int halfbits = 4 << vece; | 177 | { |
163 | + uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); | 178 | struct target_aarch64_ctx *ctx, *extra = NULL; |
164 | + | 179 | struct target_fpsimd_context *fpsimd = NULL; |
165 | + tcg_gen_shri_i64(d, n, shr); | 180 | struct target_sve_context *sve = NULL; |
166 | + tcg_gen_andi_i64(d, d, mask); | 181 | + struct target_za_context *za = NULL; |
167 | +} | 182 | uint64_t extra_datap = 0; |
168 | + | 183 | bool used_extra = false; |
169 | +static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 184 | int sve_size = 0; |
170 | +{ | 185 | + int za_size = 0; |
171 | + gen_shrnb_i64(MO_16, d, n, shr); | 186 | + int svcr = 0; |
172 | +} | 187 | |
173 | + | 188 | target_restore_general_frame(env, sf); |
174 | +static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 189 | |
175 | +{ | 190 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
176 | + gen_shrnb_i64(MO_32, d, n, shr); | 191 | sve_size = size; |
177 | +} | 192 | break; |
178 | + | 193 | |
179 | +static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 194 | + case TARGET_ZA_MAGIC: |
180 | +{ | 195 | + if (za || size < sizeof(struct target_za_context)) { |
181 | + gen_shrnb_i64(MO_64, d, n, shr); | 196 | + goto err; |
182 | +} | 197 | + } |
183 | + | 198 | + za = (struct target_za_context *)ctx; |
184 | +static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) | 199 | + za_size = size; |
185 | +{ | 200 | + break; |
186 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 201 | + |
187 | + int halfbits = 4 << vece; | 202 | case TARGET_EXTRA_MAGIC: |
188 | + uint64_t mask = MAKE_64BIT_MASK(0, halfbits); | 203 | if (extra || size != sizeof(struct target_extra_context)) { |
189 | + | 204 | goto err; |
190 | + tcg_gen_shri_vec(vece, n, n, shr); | 205 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, |
191 | + tcg_gen_dupi_vec(vece, t, mask); | 206 | } |
192 | + tcg_gen_and_vec(vece, d, n, t); | 207 | |
193 | + tcg_temp_free_vec(t); | 208 | /* SVE data, if present, overwrites FPSIMD data. */ |
194 | +} | 209 | - if (sve && !target_restore_sve_record(env, sve, sve_size)) { |
195 | + | 210 | + if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) { |
196 | +static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a) | 211 | goto err; |
197 | +{ | 212 | } |
198 | + static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 }; | 213 | + if (za && !target_restore_za_record(env, za, za_size, &svcr)) { |
199 | + static const GVecGen2i ops[3] = { | 214 | + goto err; |
200 | + { .fni8 = gen_shrnb16_i64, | 215 | + } |
201 | + .fniv = gen_shrnb_vec, | 216 | + if (env->svcr != svcr) { |
202 | + .opt_opc = vec_list, | 217 | + env->svcr = svcr; |
203 | + .fno = gen_helper_sve2_shrnb_h, | 218 | + arm_rebuild_hflags(env); |
204 | + .vece = MO_16 }, | 219 | + } |
205 | + { .fni8 = gen_shrnb32_i64, | 220 | unlock_user(extra, extra_datap, 0); |
206 | + .fniv = gen_shrnb_vec, | 221 | return 0; |
207 | + .opt_opc = vec_list, | 222 | |
208 | + .fno = gen_helper_sve2_shrnb_s, | 223 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
209 | + .vece = MO_32 }, | 224 | .total_size = offsetof(struct target_rt_sigframe, |
210 | + { .fni8 = gen_shrnb64_i64, | 225 | uc.tuc_mcontext.__reserved), |
211 | + .fniv = gen_shrnb_vec, | 226 | }; |
212 | + .opt_opc = vec_list, | 227 | - int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0; |
213 | + .fno = gen_helper_sve2_shrnb_d, | 228 | + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; |
214 | + .vece = MO_64 }, | 229 | + int sve_size = 0, za_size = 0; |
215 | + }; | 230 | struct target_rt_sigframe *frame; |
216 | + return do_sve2_shr_narrow(s, a, ops); | 231 | struct target_rt_frame_record *fr; |
217 | +} | 232 | abi_ulong frame_addr, return_addr; |
218 | + | 233 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
219 | +static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) | 234 | &layout); |
220 | +{ | 235 | |
221 | + int halfbits = 4 << vece; | 236 | /* SVE state needs saving only if it exists. */ |
222 | + uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); | 237 | - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { |
223 | + | 238 | - vq = sve_vq(env); |
224 | + tcg_gen_shli_i64(n, n, halfbits - shr); | 239 | - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); |
225 | + tcg_gen_andi_i64(n, n, ~mask); | 240 | + if (cpu_isar_feature(aa64_sve, env_archcpu(env)) || |
226 | + tcg_gen_andi_i64(d, d, mask); | 241 | + cpu_isar_feature(aa64_sme, env_archcpu(env))) { |
227 | + tcg_gen_or_i64(d, d, n); | 242 | + sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16); |
228 | +} | 243 | sve_ofs = alloc_sigframe_space(sve_size, &layout); |
229 | + | 244 | } |
230 | +static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 245 | + if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { |
231 | +{ | 246 | + /* ZA state needs saving only if it is enabled. */ |
232 | + gen_shrnt_i64(MO_16, d, n, shr); | 247 | + if (FIELD_EX64(env->svcr, SVCR, ZA)) { |
233 | +} | 248 | + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); |
234 | + | 249 | + } else { |
235 | +static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 250 | + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0); |
236 | +{ | 251 | + } |
237 | + gen_shrnt_i64(MO_32, d, n, shr); | 252 | + za_ofs = alloc_sigframe_space(za_size, &layout); |
238 | +} | 253 | + } |
239 | + | 254 | |
240 | +static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) | 255 | if (layout.extra_ofs) { |
241 | +{ | 256 | /* Reserve space for the extra end marker. The standard end marker |
242 | + tcg_gen_shri_i64(n, n, shr); | 257 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
243 | + tcg_gen_deposit_i64(d, d, n, 32, 32); | 258 | target_setup_end_record((void *)frame + layout.extra_end_ofs); |
244 | +} | 259 | } |
245 | + | 260 | if (sve_ofs) { |
246 | +static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) | 261 | - target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size); |
247 | +{ | 262 | + target_setup_sve_record((void *)frame + sve_ofs, env, sve_size); |
248 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 263 | + } |
249 | + int halfbits = 4 << vece; | 264 | + if (za_ofs) { |
250 | + uint64_t mask = MAKE_64BIT_MASK(0, halfbits); | 265 | + target_setup_za_record((void *)frame + za_ofs, env, za_size); |
251 | + | 266 | } |
252 | + tcg_gen_shli_vec(vece, n, n, halfbits - shr); | 267 | |
253 | + tcg_gen_dupi_vec(vece, t, mask); | 268 | /* Set up the stack frame for unwinding. */ |
254 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | 269 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, |
255 | + tcg_temp_free_vec(t); | 270 | env->btype = 2; |
256 | +} | 271 | } |
257 | + | 272 | |
258 | +static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a) | 273 | + /* |
259 | +{ | 274 | + * Invoke the signal handler with both SM and ZA disabled. |
260 | + static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 }; | 275 | + * When clearing SM, ResetSVEState, per SMSTOP. |
261 | + static const GVecGen2i ops[3] = { | 276 | + */ |
262 | + { .fni8 = gen_shrnt16_i64, | 277 | + if (FIELD_EX64(env->svcr, SVCR, SM)) { |
263 | + .fniv = gen_shrnt_vec, | 278 | + arm_reset_sve_state(env); |
264 | + .opt_opc = vec_list, | 279 | + } |
265 | + .load_dest = true, | 280 | + if (env->svcr) { |
266 | + .fno = gen_helper_sve2_shrnt_h, | 281 | + env->svcr = 0; |
267 | + .vece = MO_16 }, | 282 | + arm_rebuild_hflags(env); |
268 | + { .fni8 = gen_shrnt32_i64, | 283 | + } |
269 | + .fniv = gen_shrnt_vec, | 284 | + |
270 | + .opt_opc = vec_list, | 285 | if (info) { |
271 | + .load_dest = true, | 286 | tswap_siginfo(&frame->info, info); |
272 | + .fno = gen_helper_sve2_shrnt_s, | 287 | env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); |
273 | + .vece = MO_32 }, | ||
274 | + { .fni8 = gen_shrnt64_i64, | ||
275 | + .fniv = gen_shrnt_vec, | ||
276 | + .opt_opc = vec_list, | ||
277 | + .load_dest = true, | ||
278 | + .fno = gen_helper_sve2_shrnt_d, | ||
279 | + .vece = MO_64 }, | ||
280 | + }; | ||
281 | + return do_sve2_shr_narrow(s, a, ops); | ||
282 | +} | ||
283 | + | ||
284 | +static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a) | ||
285 | +{ | ||
286 | + static const GVecGen2i ops[3] = { | ||
287 | + { .fno = gen_helper_sve2_rshrnb_h }, | ||
288 | + { .fno = gen_helper_sve2_rshrnb_s }, | ||
289 | + { .fno = gen_helper_sve2_rshrnb_d }, | ||
290 | + }; | ||
291 | + return do_sve2_shr_narrow(s, a, ops); | ||
292 | +} | ||
293 | + | ||
294 | +static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a) | ||
295 | +{ | ||
296 | + static const GVecGen2i ops[3] = { | ||
297 | + { .fno = gen_helper_sve2_rshrnt_h }, | ||
298 | + { .fno = gen_helper_sve2_rshrnt_s }, | ||
299 | + { .fno = gen_helper_sve2_rshrnt_d }, | ||
300 | + }; | ||
301 | + return do_sve2_shr_narrow(s, a, ops); | ||
302 | +} | ||
303 | + | ||
304 | static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
305 | gen_helper_gvec_4_ptr *fn) | ||
306 | { | ||
307 | -- | 288 | -- |
308 | 2.20.1 | 289 | 2.25.1 |
309 | |||
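A quick standalone sketch of the rounding trick in do_urshr above: it computes (x + (1 << (sh - 1))) >> sh, rounding to nearest, without the intermediate addition ever being able to overflow. The function name and the test values below are illustrative only, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Mirror of do_urshr from the hunk above; illustrative only. */
static uint64_t urshr(uint64_t x, unsigned sh)
{
    if (sh < 64) {
        /* Add back the most significant dropped bit to round. */
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;   /* only the rounding bit survives */
    }
    return 0;
}

int main(void)
{
    /* 23 >> 3 truncates to 2; the dropped bits 0b111 round up to 3. */
    assert(urshr(23, 3) == 3);
    /* The naive (x + (1 << (sh - 1))) >> sh would wrap to 0 here. */
    assert(urshr(UINT64_MAX, 1) == (uint64_t)1 << 63);
    return 0;
}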
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | |||
3 | Add "sve" to the sve prctl functions, to distinguish | ||
4 | them from the coming "sme" prctls with similar names. | ||
2 | 5 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-38-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-42-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/helper-sve.h | 18 +++++++++++++++ | 11 | linux-user/aarch64/target_prctl.h | 8 ++++---- |
9 | target/arm/vec_internal.h | 5 +++++ | 12 | linux-user/syscall.c | 12 ++++++------ |
10 | target/arm/sve.decode | 5 +++++ | 13 | 2 files changed, 10 insertions(+), 10 deletions(-) |
11 | target/arm/sve_helper.c | 46 ++++++++++++++++++++++++++++++++++++++ | ||
12 | target/arm/translate-sve.c | 32 ++++++++++++++++++++++++++ | ||
13 | target/arm/vec_helper.c | 15 ++++++------- | ||
14 | 6 files changed, 113 insertions(+), 8 deletions(-) | ||
15 | 14 | ||
16 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 15 | diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h |
17 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/helper-sve.h | 17 | --- a/linux-user/aarch64/target_prctl.h |
19 | +++ b/target/arm/helper-sve.h | 18 | +++ b/linux-user/aarch64/target_prctl.h |
20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ |
21 | void, ptr, ptr, ptr, ptr, i32) | 20 | #ifndef AARCH64_TARGET_PRCTL_H |
22 | DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG, | 21 | #define AARCH64_TARGET_PRCTL_H |
23 | void, ptr, ptr, ptr, ptr, i32) | 22 | |
24 | + | 23 | -static abi_long do_prctl_get_vl(CPUArchState *env) |
25 | +DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_b, TCG_CALL_NO_RWG, | 24 | +static abi_long do_prctl_sve_get_vl(CPUArchState *env) |
26 | + void, ptr, ptr, ptr, ptr, i32) | 25 | { |
27 | +DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_h, TCG_CALL_NO_RWG, | 26 | ARMCPU *cpu = env_archcpu(env); |
28 | + void, ptr, ptr, ptr, ptr, i32) | 27 | if (cpu_isar_feature(aa64_sve, cpu)) { |
29 | +DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_s, TCG_CALL_NO_RWG, | 28 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env) |
30 | + void, ptr, ptr, ptr, ptr, i32) | 29 | } |
31 | +DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_d, TCG_CALL_NO_RWG, | 30 | return -TARGET_EINVAL; |
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_b, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_h, TCG_CALL_NO_RWG, | ||
37 | + void, ptr, ptr, ptr, ptr, i32) | ||
38 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG, | ||
39 | + void, ptr, ptr, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG, | ||
41 | + void, ptr, ptr, ptr, ptr, i32) | ||
42 | diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/target/arm/vec_internal.h | ||
45 | +++ b/target/arm/vec_internal.h | ||
46 | @@ -XXX,XX +XXX,XX @@ static inline int64_t do_suqrshl_d(int64_t src, int64_t shift, | ||
47 | return do_uqrshl_d(src, shift, round, sat); | ||
48 | } | 31 | } |
49 | 32 | -#define do_prctl_get_vl do_prctl_get_vl | |
50 | +int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool); | 33 | +#define do_prctl_sve_get_vl do_prctl_sve_get_vl |
51 | +int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *); | 34 | |
52 | +int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *); | 35 | -static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) |
53 | +int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool); | 36 | +static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
54 | + | ||
55 | #endif /* TARGET_ARM_VEC_INTERNALS_H */ | ||
56 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/target/arm/sve.decode | ||
59 | +++ b/target/arm/sve.decode | ||
60 | @@ -XXX,XX +XXX,XX @@ SMLSLB_zzzw 01000100 .. 0 ..... 010 100 ..... ..... @rda_rn_rm | ||
61 | SMLSLT_zzzw 01000100 .. 0 ..... 010 101 ..... ..... @rda_rn_rm | ||
62 | UMLSLB_zzzw 01000100 .. 0 ..... 010 110 ..... ..... @rda_rn_rm | ||
63 | UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | ||
64 | + | ||
65 | +## SVE2 complex integer multiply-add | ||
66 | + | ||
67 | +CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
68 | +SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
69 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/target/arm/sve_helper.c | ||
72 | +++ b/target/arm/sve_helper.c | ||
73 | @@ -XXX,XX +XXX,XX @@ DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, , H1_4, | ||
74 | |||
75 | #undef DO_SQDMLAL | ||
76 | |||
77 | +#define DO_CMLA_FUNC(NAME, TYPE, H, OP) \ | ||
78 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
79 | +{ \ | ||
80 | + intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \ | ||
81 | + int rot = simd_data(desc); \ | ||
82 | + int sel_a = rot & 1, sel_b = sel_a ^ 1; \ | ||
83 | + bool sub_r = rot == 1 || rot == 2; \ | ||
84 | + bool sub_i = rot >= 2; \ | ||
85 | + TYPE *d = vd, *n = vn, *m = vm, *a = va; \ | ||
86 | + for (i = 0; i < opr_sz; i += 2) { \ | ||
87 | + TYPE elt1_a = n[H(i + sel_a)]; \ | ||
88 | + TYPE elt2_a = m[H(i + sel_a)]; \ | ||
89 | + TYPE elt2_b = m[H(i + sel_b)]; \ | ||
90 | + d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r); \ | ||
91 | + d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i); \ | ||
92 | + } \ | ||
93 | +} | ||
94 | + | ||
95 | +#define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1)) | ||
96 | + | ||
97 | +DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA) | ||
98 | +DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA) | ||
99 | +DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA) | ||
100 | +DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t, , DO_CMLA) | ||
101 | + | ||
102 | +#define DO_SQRDMLAH_B(N, M, A, S) \ | ||
103 | + do_sqrdmlah_b(N, M, A, S, true) | ||
104 | +#define DO_SQRDMLAH_H(N, M, A, S) \ | ||
105 | + ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); }) | ||
106 | +#define DO_SQRDMLAH_S(N, M, A, S) \ | ||
107 | + ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); }) | ||
108 | +#define DO_SQRDMLAH_D(N, M, A, S) \ | ||
109 | + do_sqrdmlah_d(N, M, A, S, true) | ||
110 | + | ||
111 | +DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B) | ||
112 | +DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H) | ||
113 | +DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S) | ||
114 | +DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D) | ||
115 | + | ||
116 | +#undef DO_CMLA | ||
117 | +#undef DO_CMLA_FUNC | ||
118 | +#undef DO_SQRDMLAH_B | ||
119 | +#undef DO_SQRDMLAH_H | ||
120 | +#undef DO_SQRDMLAH_S | ||
121 | +#undef DO_SQRDMLAH_D | ||
122 | + | ||
123 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
124 | void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
125 | { \ | ||
126 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
127 | index XXXXXXX..XXXXXXX 100644 | ||
128 | --- a/target/arm/translate-sve.c | ||
129 | +++ b/target/arm/translate-sve.c | ||
130 | @@ -XXX,XX +XXX,XX @@ static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a) | ||
131 | { | ||
132 | return do_umlsl_zzzw(s, a, true); | ||
133 | } | ||
134 | + | ||
135 | +static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a) | ||
136 | +{ | ||
137 | + static gen_helper_gvec_4 * const fns[] = { | ||
138 | + gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h, | ||
139 | + gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d, | ||
140 | + }; | ||
141 | + | ||
142 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
143 | + return false; | ||
144 | + } | ||
145 | + if (sve_access_check(s)) { | ||
146 | + gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot); | ||
147 | + } | ||
148 | + return true; | ||
149 | +} | ||
150 | + | ||
151 | +static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a) | ||
152 | +{ | ||
153 | + static gen_helper_gvec_4 * const fns[] = { | ||
154 | + gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h, | ||
155 | + gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d, | ||
156 | + }; | ||
157 | + | ||
158 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
159 | + return false; | ||
160 | + } | ||
161 | + if (sve_access_check(s)) { | ||
162 | + gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot); | ||
163 | + } | ||
164 | + return true; | ||
165 | +} | ||
166 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
167 | index XXXXXXX..XXXXXXX 100644 | ||
168 | --- a/target/arm/vec_helper.c | ||
169 | +++ b/target/arm/vec_helper.c | ||
170 | @@ -XXX,XX +XXX,XX @@ | ||
171 | #endif | ||
172 | |||
173 | /* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */ | ||
174 | -static int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3, | ||
175 | - bool neg, bool round) | ||
176 | +int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3, | ||
177 | + bool neg, bool round) | ||
178 | { | 37 | { |
179 | /* | 38 | /* |
180 | * Simplify: | 39 | * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT. |
181 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm, | 40 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) |
41 | } | ||
42 | return -TARGET_EINVAL; | ||
182 | } | 43 | } |
183 | 44 | -#define do_prctl_set_vl do_prctl_set_vl | |
184 | /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ | 45 | +#define do_prctl_sve_set_vl do_prctl_sve_set_vl |
185 | -static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3, | 46 | |
186 | - bool neg, bool round, uint32_t *sat) | 47 | static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) |
187 | +int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3, | ||
188 | + bool neg, bool round, uint32_t *sat) | ||
189 | { | 48 | { |
190 | /* Simplify similarly to do_sqrdmlah_b above. */ | 49 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c |
191 | int32_t ret = (int32_t)src1 * src2; | 50 | index XXXXXXX..XXXXXXX 100644 |
192 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm, | 51 | --- a/linux-user/syscall.c |
193 | } | 52 | +++ b/linux-user/syscall.c |
194 | 53 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) | |
195 | /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ | 54 | #ifndef do_prctl_set_fp_mode |
196 | -static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, | 55 | #define do_prctl_set_fp_mode do_prctl_inval1 |
197 | - bool neg, bool round, uint32_t *sat) | 56 | #endif |
198 | +int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, | 57 | -#ifndef do_prctl_get_vl |
199 | + bool neg, bool round, uint32_t *sat) | 58 | -#define do_prctl_get_vl do_prctl_inval0 |
200 | { | 59 | +#ifndef do_prctl_sve_get_vl |
201 | /* Simplify similarly to do_sqrdmlah_b above. */ | 60 | +#define do_prctl_sve_get_vl do_prctl_inval0 |
202 | int64_t ret = (int64_t)src1 * src2; | 61 | #endif |
203 | @@ -XXX,XX +XXX,XX @@ static int64_t do_sat128_d(Int128 r) | 62 | -#ifndef do_prctl_set_vl |
204 | return ls; | 63 | -#define do_prctl_set_vl do_prctl_inval1 |
205 | } | 64 | +#ifndef do_prctl_sve_set_vl |
206 | 65 | +#define do_prctl_sve_set_vl do_prctl_inval1 | |
207 | -static int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, | 66 | #endif |
208 | - bool neg, bool round) | 67 | #ifndef do_prctl_reset_keys |
209 | +int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, bool neg, bool round) | 68 | #define do_prctl_reset_keys do_prctl_inval1 |
210 | { | 69 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, |
211 | uint64_t l, h; | 70 | case PR_SET_FP_MODE: |
212 | Int128 r, t; | 71 | return do_prctl_set_fp_mode(env, arg2); |
72 | case PR_SVE_GET_VL: | ||
73 | - return do_prctl_get_vl(env); | ||
74 | + return do_prctl_sve_get_vl(env); | ||
75 | case PR_SVE_SET_VL: | ||
76 | - return do_prctl_set_vl(env, arg2); | ||
77 | + return do_prctl_sve_set_vl(env, arg2); | ||
78 | case PR_PAC_RESET_KEYS: | ||
79 | if (arg3 || arg4 || arg5) { | ||
80 | return -TARGET_EINVAL; | ||
213 | -- | 81 | -- |
214 | 2.20.1 | 82 | 2.25.1 |
215 | |||
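The rot decoding in DO_CMLA_FUNC above is easiest to see on one complex element pair: two CMLA issues, with rotations of 0 and 90 degrees, compose a full complex multiply-add. A minimal sketch, with the helper name and operand values made up for illustration:

#include <assert.h>
#include <stdint.h>

/* One CMLA step on a single (real, imag) pair, mirroring the
 * sel_a/sel_b/sub_r/sub_i expansion in DO_CMLA_FUNC above.
 * rot takes values 0..3, i.e. 0/90/180/270 degrees. */
static void cmla_step(int rot, const int32_t n[2], const int32_t m[2],
                      int32_t d[2])
{
    int sel_a = rot & 1, sel_b = sel_a ^ 1;
    int sub_r = (rot == 1 || rot == 2) ? -1 : 1;
    int sub_i = (rot >= 2) ? -1 : 1;

    d[0] += n[sel_a] * m[sel_a] * sub_r;   /* real accumulator */
    d[1] += n[sel_a] * m[sel_b] * sub_i;   /* imaginary accumulator */
}

int main(void)
{
    /* (2 + 3i) * (5 + 7i) = (10 - 21) + (14 + 15)i = -11 + 29i */
    int32_t n[2] = { 2, 3 }, m[2] = { 5, 7 }, d[2] = { 0, 0 };
    cmla_step(0, n, m, d);   /* d += (nr*mr, nr*mi) */
    cmla_step(1, n, m, d);   /* d += (-ni*mi, ni*mr) */
    assert(d[0] == -11 && d[1] == 29);
    return 0;
}

Rotations 2 and 3 flip the signs instead, giving the conjugated forms, which is why sub_r and sub_i are decoded from different conditions.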
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | SVE2 has two additional sizes of the operation and unlike NEON, | 3 | These prctl set the Streaming SVE vector length, which may |
4 | there is no saturation flag. Create new entry points for SVE2 | 4 | be completely different from the Normal SVE vector length. |
5 | that do not set QC. | ||
6 | 5 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-36-richard.henderson@linaro.org | 8 | Message-id: 20220708151540.18136-43-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/helper.h | 17 ++++ | 11 | linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++ |
13 | target/arm/sve.decode | 5 ++ | 12 | linux-user/syscall.c | 16 +++++++++ |
14 | target/arm/translate-sve.c | 18 +++++ | 13 | 2 files changed, 70 insertions(+) |
15 | target/arm/vec_helper.c | 161 +++++++++++++++++++++++++++++++++++-- | ||
16 | 4 files changed, 195 insertions(+), 6 deletions(-) | ||
17 | 14 | ||
18 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 15 | diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h |
19 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/arm/helper.h | 17 | --- a/linux-user/aarch64/target_prctl.h |
21 | +++ b/target/arm/helper.h | 18 | +++ b/linux-user/aarch64/target_prctl.h |
22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG, | 19 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env) |
23 | DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG, | 20 | { |
24 | void, ptr, ptr, ptr, ptr, i32) | 21 | ARMCPU *cpu = env_archcpu(env); |
25 | 22 | if (cpu_isar_feature(aa64_sve, cpu)) { | |
26 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG, | 23 | + /* PSTATE.SM is always unset on syscall entry. */ |
27 | + void, ptr, ptr, ptr, ptr, i32) | 24 | return sve_vq(env) * 16; |
28 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG, | 25 | } |
29 | + void, ptr, ptr, ptr, ptr, i32) | 26 | return -TARGET_EINVAL; |
30 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG, | 27 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
31 | + void, ptr, ptr, ptr, ptr, i32) | 28 | && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { |
32 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG, | 29 | uint32_t vq, old_vq; |
33 | + void, ptr, ptr, ptr, ptr, i32) | 30 | |
34 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG, | 31 | + /* PSTATE.SM is always unset on syscall entry. */ |
35 | + void, ptr, ptr, ptr, ptr, i32) | 32 | old_vq = sve_vq(env); |
36 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG, | 33 | |
37 | + void, ptr, ptr, ptr, ptr, i32) | 34 | /* |
38 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG, | 35 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) |
39 | + void, ptr, ptr, ptr, ptr, i32) | 36 | } |
40 | +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG, | 37 | #define do_prctl_sve_set_vl do_prctl_sve_set_vl |
41 | + void, ptr, ptr, ptr, ptr, i32) | 38 | |
39 | +static abi_long do_prctl_sme_get_vl(CPUArchState *env) | ||
40 | +{ | ||
41 | + ARMCPU *cpu = env_archcpu(env); | ||
42 | + if (cpu_isar_feature(aa64_sme, cpu)) { | ||
43 | + return sme_vq(env) * 16; | ||
44 | + } | ||
45 | + return -TARGET_EINVAL; | ||
46 | +} | ||
47 | +#define do_prctl_sme_get_vl do_prctl_sme_get_vl | ||
42 | + | 48 | + |
43 | DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 49 | +static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2) |
44 | DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
45 | DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/target/arm/sve.decode | ||
49 | +++ b/target/arm/sve.decode | ||
50 | @@ -XXX,XX +XXX,XX @@ SQDMLSLT_zzzw 01000100 .. 0 ..... 0110 11 ..... ..... @rda_rn_rm | ||
51 | |||
52 | SQDMLALBT 01000100 .. 0 ..... 00001 0 ..... ..... @rda_rn_rm | ||
53 | SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm | ||
54 | + | ||
55 | +## SVE2 saturating multiply-add high | ||
56 | + | ||
57 | +SQRDMLAH_zzzz 01000100 .. 0 ..... 01110 0 ..... ..... @rda_rn_rm | ||
58 | +SQRDMLSH_zzzz 01000100 .. 0 ..... 01110 1 ..... ..... @rda_rn_rm | ||
59 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/target/arm/translate-sve.c | ||
62 | +++ b/target/arm/translate-sve.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a) | ||
64 | { | ||
65 | return do_sqdmlsl_zzzw(s, a, false, true); | ||
66 | } | ||
67 | + | ||
68 | +static bool trans_SQRDMLAH_zzzz(DisasContext *s, arg_rrrr_esz *a) | ||
69 | +{ | ||
70 | + static gen_helper_gvec_4 * const fns[] = { | ||
71 | + gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h, | ||
72 | + gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d, | ||
73 | + }; | ||
74 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], 0); | ||
75 | +} | ||
76 | + | ||
77 | +static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a) | ||
78 | +{ | ||
79 | + static gen_helper_gvec_4 * const fns[] = { | ||
80 | + gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h, | ||
81 | + gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d, | ||
82 | + }; | ||
83 | + return do_sve2_zzzz_ool(s, a, fns[a->esz], 0); | ||
84 | +} | ||
85 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/target/arm/vec_helper.c | ||
88 | +++ b/target/arm/vec_helper.c | ||
89 | @@ -XXX,XX +XXX,XX @@ | ||
90 | #include "exec/helper-proto.h" | ||
91 | #include "tcg/tcg-gvec-desc.h" | ||
92 | #include "fpu/softfloat.h" | ||
93 | +#include "qemu/int128.h" | ||
94 | #include "vec_internal.h" | ||
95 | |||
96 | /* Note that vector data is stored in host-endian 64-bit chunks, | ||
97 | @@ -XXX,XX +XXX,XX @@ | ||
98 | #define H4(x) (x) | ||
99 | #endif | ||
100 | |||
101 | +/* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */ | ||
102 | +static int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3, | ||
103 | + bool neg, bool round) | ||
104 | +{ | 50 | +{ |
105 | + /* | 51 | + /* |
106 | + * Simplify: | 52 | + * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT. |
107 | + * = ((a3 << 8) + ((e1 * e2) << 1) + (round << 7)) >> 8 | 53 | + * Note the kernel definition of sve_vl_valid allows for VQ=512, |
108 | + * = ((a3 << 7) + (e1 * e2) + (round << 6)) >> 7 | 54 | + * i.e. VL=8192, even though the architectural maximum is VQ=16. |
109 | + */ | 55 | + */ |
110 | + int32_t ret = (int32_t)src1 * src2; | 56 | + if (cpu_isar_feature(aa64_sme, env_archcpu(env)) |
111 | + if (neg) { | 57 | + && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { |
112 | + ret = -ret; | 58 | + int vq, old_vq; |
59 | + | ||
60 | + old_vq = sme_vq(env); | ||
61 | + | ||
62 | + /* | ||
63 | + * Bound the value of vq, so that we know that it fits into | ||
64 | + * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared | ||
65 | + * on syscall entry, we are not modifying the current SVE | ||
66 | + * vector length. | ||
67 | + */ | ||
68 | + vq = MAX(arg2 / 16, 1); | ||
69 | + vq = MIN(vq, 16); | ||
70 | + env->vfp.smcr_el[1] = | ||
71 | + FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1); | ||
72 | + | ||
73 | + /* Delay rebuilding hflags until we know if ZA must change. */ | ||
74 | + vq = sve_vqm1_for_el_sm(env, 0, true) + 1; | ||
75 | + | ||
76 | + if (vq != old_vq) { | ||
77 | + /* | ||
78 | + * PSTATE.ZA state is cleared on any change to SVL. | ||
79 | + * We need not call arm_rebuild_hflags because PSTATE.SM was | ||
80 | + * cleared on syscall entry, so this hasn't changed VL. | ||
81 | + */ | ||
82 | + env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0); | ||
83 | + arm_rebuild_hflags(env); | ||
84 | + } | ||
85 | + return vq * 16; | ||
113 | + } | 86 | + } |
114 | + ret += ((int32_t)src3 << 7) + (round << 6); | 87 | + return -TARGET_EINVAL; |
115 | + ret >>= 7; | 88 | +} |
89 | +#define do_prctl_sme_set_vl do_prctl_sme_set_vl | ||
116 | + | 90 | + |
117 | + if (ret != (int8_t)ret) { | 91 | static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) |
118 | + ret = (ret < 0 ? INT8_MIN : INT8_MAX); | ||
119 | + } | ||
120 | + return ret; | ||
121 | +} | ||
122 | + | ||
123 | +void HELPER(sve2_sqrdmlah_b)(void *vd, void *vn, void *vm, | ||
124 | + void *va, uint32_t desc) | ||
125 | +{ | ||
126 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
127 | + int8_t *d = vd, *n = vn, *m = vm, *a = va; | ||
128 | + | ||
129 | + for (i = 0; i < opr_sz; ++i) { | ||
130 | + d[i] = do_sqrdmlah_b(n[i], m[i], a[i], false, true); | ||
131 | + } | ||
132 | +} | ||
133 | + | ||
134 | +void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm, | ||
135 | + void *va, uint32_t desc) | ||
136 | +{ | ||
137 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
138 | + int8_t *d = vd, *n = vn, *m = vm, *a = va; | ||
139 | + | ||
140 | + for (i = 0; i < opr_sz; ++i) { | ||
141 | + d[i] = do_sqrdmlah_b(n[i], m[i], a[i], true, true); | ||
142 | + } | ||
143 | +} | ||
144 | + | ||
145 | /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ | ||
146 | static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3, | ||
147 | bool neg, bool round, uint32_t *sat) | ||
148 | { | 92 | { |
149 | - /* | 93 | ARMCPU *cpu = env_archcpu(env); |
150 | - * Simplify: | 94 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c |
151 | - * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16 | 95 | index XXXXXXX..XXXXXXX 100644 |
152 | - * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15 | 96 | --- a/linux-user/syscall.c |
153 | - */ | 97 | +++ b/linux-user/syscall.c |
154 | + /* Simplify similarly to do_sqrdmlah_b above. */ | 98 | @@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) |
155 | int32_t ret = (int32_t)src1 * src2; | 99 | #ifndef PR_SET_SYSCALL_USER_DISPATCH |
156 | if (neg) { | 100 | # define PR_SET_SYSCALL_USER_DISPATCH 59 |
157 | ret = -ret; | 101 | #endif |
158 | @@ -XXX,XX +XXX,XX @@ void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm, | 102 | +#ifndef PR_SME_SET_VL |
159 | clear_tail(d, opr_sz, simd_maxsz(desc)); | 103 | +# define PR_SME_SET_VL 63 |
160 | } | 104 | +# define PR_SME_GET_VL 64 |
161 | 105 | +# define PR_SME_VL_LEN_MASK 0xffff | |
162 | +void HELPER(sve2_sqrdmlah_h)(void *vd, void *vn, void *vm, | 106 | +# define PR_SME_VL_INHERIT (1 << 17) |
163 | + void *va, uint32_t desc) | 107 | +#endif |
164 | +{ | 108 | |
165 | + intptr_t i, opr_sz = simd_oprsz(desc); | 109 | #include "target_prctl.h" |
166 | + int16_t *d = vd, *n = vn, *m = vm, *a = va; | 110 | |
167 | + uint32_t discard; | 111 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) |
168 | + | 112 | #ifndef do_prctl_set_unalign |
169 | + for (i = 0; i < opr_sz / 2; ++i) { | 113 | #define do_prctl_set_unalign do_prctl_inval1 |
170 | + d[i] = do_sqrdmlah_h(n[i], m[i], a[i], false, true, &discard); | 114 | #endif |
171 | + } | 115 | +#ifndef do_prctl_sme_get_vl |
172 | +} | 116 | +#define do_prctl_sme_get_vl do_prctl_inval0 |
173 | + | 117 | +#endif |
174 | +void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm, | 118 | +#ifndef do_prctl_sme_set_vl |
175 | + void *va, uint32_t desc) | 119 | +#define do_prctl_sme_set_vl do_prctl_inval1 |
176 | +{ | 120 | +#endif |
177 | + intptr_t i, opr_sz = simd_oprsz(desc); | 121 | |
178 | + int16_t *d = vd, *n = vn, *m = vm, *a = va; | 122 | static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, |
179 | + uint32_t discard; | 123 | abi_long arg3, abi_long arg4, abi_long arg5) |
180 | + | 124 | @@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, |
181 | + for (i = 0; i < opr_sz / 2; ++i) { | 125 | return do_prctl_sve_get_vl(env); |
182 | + d[i] = do_sqrdmlah_h(n[i], m[i], a[i], true, true, &discard); | 126 | case PR_SVE_SET_VL: |
183 | + } | 127 | return do_prctl_sve_set_vl(env, arg2); |
184 | +} | 128 | + case PR_SME_GET_VL: |
185 | + | 129 | + return do_prctl_sme_get_vl(env); |
186 | /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ | 130 | + case PR_SME_SET_VL: |
187 | static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, | 131 | + return do_prctl_sme_set_vl(env, arg2); |
188 | bool neg, bool round, uint32_t *sat) | 132 | case PR_PAC_RESET_KEYS: |
189 | { | 133 | if (arg3 || arg4 || arg5) { |
190 | - /* Simplify similarly to int_qrdmlah_s16 above. */ | 134 | return -TARGET_EINVAL; |
191 | + /* Simplify similarly to do_sqrdmlah_b above. */ | ||
192 | int64_t ret = (int64_t)src1 * src2; | ||
193 | if (neg) { | ||
194 | ret = -ret; | ||
195 | @@ -XXX,XX +XXX,XX @@ void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm, | ||
196 | clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
197 | } | ||
198 | |||
199 | +void HELPER(sve2_sqrdmlah_s)(void *vd, void *vn, void *vm, | ||
200 | + void *va, uint32_t desc) | ||
201 | +{ | ||
202 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
203 | + int32_t *d = vd, *n = vn, *m = vm, *a = va; | ||
204 | + uint32_t discard; | ||
205 | + | ||
206 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
207 | + d[i] = do_sqrdmlah_s(n[i], m[i], a[i], false, true, &discard); | ||
208 | + } | ||
209 | +} | ||
210 | + | ||
211 | +void HELPER(sve2_sqrdmlsh_s)(void *vd, void *vn, void *vm, | ||
212 | + void *va, uint32_t desc) | ||
213 | +{ | ||
214 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
215 | + int32_t *d = vd, *n = vn, *m = vm, *a = va; | ||
216 | + uint32_t discard; | ||
217 | + | ||
218 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
219 | + d[i] = do_sqrdmlah_s(n[i], m[i], a[i], true, true, &discard); | ||
220 | + } | ||
221 | +} | ||
222 | + | ||
223 | +/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */ | ||
224 | +static int64_t do_sat128_d(Int128 r) | ||
225 | +{ | ||
226 | + int64_t ls = int128_getlo(r); | ||
227 | + int64_t hs = int128_gethi(r); | ||
228 | + | ||
229 | + if (unlikely(hs != (ls >> 63))) { | ||
230 | + return hs < 0 ? INT64_MIN : INT64_MAX; | ||
231 | + } | ||
232 | + return ls; | ||
233 | +} | ||
234 | + | ||
235 | +static int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, | ||
236 | + bool neg, bool round) | ||
237 | +{ | ||
238 | + uint64_t l, h; | ||
239 | + Int128 r, t; | ||
240 | + | ||
241 | + /* As in do_sqrdmlah_b, but with 128-bit arithmetic. */ | ||
242 | + muls64(&l, &h, m, n); | ||
243 | + r = int128_make128(l, h); | ||
244 | + if (neg) { | ||
245 | + r = int128_neg(r); | ||
246 | + } | ||
247 | + if (a) { | ||
248 | + t = int128_exts64(a); | ||
249 | + t = int128_lshift(t, 63); | ||
250 | + r = int128_add(r, t); | ||
251 | + } | ||
252 | + if (round) { | ||
253 | + t = int128_exts64(1ll << 62); | ||
254 | + r = int128_add(r, t); | ||
255 | + } | ||
256 | + r = int128_rshift(r, 63); | ||
257 | + | ||
258 | + return do_sat128_d(r); | ||
259 | +} | ||
260 | + | ||
261 | +void HELPER(sve2_sqrdmlah_d)(void *vd, void *vn, void *vm, | ||
262 | + void *va, uint32_t desc) | ||
263 | +{ | ||
264 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
265 | + int64_t *d = vd, *n = vn, *m = vm, *a = va; | ||
266 | + | ||
267 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
268 | + d[i] = do_sqrdmlah_d(n[i], m[i], a[i], false, true); | ||
269 | + } | ||
270 | +} | ||
271 | + | ||
272 | +void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm, | ||
273 | + void *va, uint32_t desc) | ||
274 | +{ | ||
275 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
276 | + int64_t *d = vd, *n = vn, *m = vm, *a = va; | ||
277 | + | ||
278 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
279 | + d[i] = do_sqrdmlah_d(n[i], m[i], a[i], true, true); | ||
280 | + } | ||
281 | +} | ||
282 | + | ||
283 | /* Integer 8 and 16-bit dot-product. | ||
284 | * | ||
285 | * Note that for the loops herein, host endianness does not matter | ||
286 | -- | 135 | -- |
287 | 2.20.1 | 136 | 2.25.1 |
288 | |||
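The "Simplify" comment in do_sqrdmlah_b above folds the architectural form (accumulator shifted left 8, doubled product, rounding bit at position 7, shift right 8) into an equivalent >> 7 form; the identity is exact because numerator and denominator share a factor of two. A brute-force check over the 8-bit case, for neg=false and round=true, with names chosen for this sketch:

#include <assert.h>
#include <stdint.h>

static int8_t sat8(int32_t r)
{
    return r > INT8_MAX ? INT8_MAX : r < INT8_MIN ? INT8_MIN : r;
}

/* Architectural form: ((a << 8) + 2*n*m + (1 << 7)) >> 8, saturated. */
static int8_t sqrdmlah_ref(int8_t n, int8_t m, int8_t a)
{
    return sat8((((int32_t)a << 8) + (((int32_t)n * m) << 1) + (1 << 7)) >> 8);
}

/* Simplified form used by the helper: ((a << 7) + n*m + (1 << 6)) >> 7. */
static int8_t sqrdmlah_simple(int8_t n, int8_t m, int8_t a)
{
    return sat8((((int32_t)a << 7) + (int32_t)n * m + (1 << 6)) >> 7);
}

int main(void)
{
    for (int n = -128; n < 128; ++n) {
        for (int m = -128; m < 128; ++m) {
            /* Sampled accumulators keep the check fast. */
            for (int a = -128; a < 128; a += 17) {
                assert(sqrdmlah_ref(n, m, a) == sqrdmlah_simple(n, m, a));
            }
        }
    }
    return 0;
}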
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Exclude PMULL from this category for the moment. | 3 | There's no reason to set CPACR_EL1.ZEN if SVE is disabled. |
4 | 4 | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20210525010358.152808-14-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-44-richard.henderson@linaro.org |
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 9 | --- |
10 | target/arm/helper-sve.h | 15 +++++++++++++++ | 10 | target/arm/cpu.c | 7 +++---- |
11 | target/arm/sve.decode | 9 +++++++++ | 11 | 1 file changed, 3 insertions(+), 4 deletions(-) |
12 | target/arm/sve_helper.c | 31 +++++++++++++++++++++++++++++++ | ||
13 | target/arm/translate-sve.c | 9 +++++++++ | ||
14 | 4 files changed, 64 insertions(+) | ||
15 | 12 | ||
16 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/helper-sve.h | 15 | --- a/target/arm/cpu.c |
19 | +++ b/target/arm/helper-sve.h | 16 | +++ b/target/arm/cpu.c |
20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG, | 17 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev) |
21 | DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG, | 18 | /* and to the FP/Neon instructions */ |
22 | void, env, ptr, ptr, ptr, tl, i32) | 19 | env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
23 | 20 | CPACR_EL1, FPEN, 3); | |
24 | +DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG, | 21 | - /* and to the SVE instructions */ |
25 | + void, ptr, ptr, ptr, i32) | 22 | - env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
26 | +DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_s, TCG_CALL_NO_RWG, | 23 | - CPACR_EL1, ZEN, 3); |
27 | + void, ptr, ptr, ptr, i32) | 24 | - /* with reasonable vector length */ |
28 | +DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_d, TCG_CALL_NO_RWG, | 25 | + /* and to the SVE instructions, with default vector length */ |
29 | + void, ptr, ptr, ptr, i32) | 26 | if (cpu_isar_feature(aa64_sve, cpu)) { |
30 | + | 27 | + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
31 | +DEF_HELPER_FLAGS_4(sve2_smull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 28 | + CPACR_EL1, ZEN, 3); |
32 | +DEF_HELPER_FLAGS_4(sve2_smull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 29 | env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; |
33 | +DEF_HELPER_FLAGS_4(sve2_smull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 30 | } |
34 | + | 31 | /* |
35 | +DEF_HELPER_FLAGS_4(sve2_umull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
38 | + | ||
39 | DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
40 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/target/arm/sve.decode | ||
43 | +++ b/target/arm/sve.decode | ||
44 | @@ -XXX,XX +XXX,XX @@ SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm | ||
45 | SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm | ||
46 | USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm | ||
47 | USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm | ||
48 | + | ||
49 | +## SVE2 integer multiply long | ||
50 | + | ||
51 | +SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm | ||
52 | +SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm | ||
53 | +SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm | ||
54 | +SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm | ||
55 | +UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm | ||
56 | +UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm | ||
57 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/target/arm/sve_helper.c | ||
60 | +++ b/target/arm/sve_helper.c | ||
61 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD) | ||
62 | DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD) | ||
63 | DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
64 | |||
65 | +DO_ZZZ_TB(sve2_smull_zzz_h, int16_t, int8_t, H1_2, H1, DO_MUL) | ||
66 | +DO_ZZZ_TB(sve2_smull_zzz_s, int32_t, int16_t, H1_4, H1_2, DO_MUL) | ||
67 | +DO_ZZZ_TB(sve2_smull_zzz_d, int64_t, int32_t, , H1_4, DO_MUL) | ||
68 | + | ||
69 | +DO_ZZZ_TB(sve2_umull_zzz_h, uint16_t, uint8_t, H1_2, H1, DO_MUL) | ||
70 | +DO_ZZZ_TB(sve2_umull_zzz_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL) | ||
71 | +DO_ZZZ_TB(sve2_umull_zzz_d, uint64_t, uint32_t, , H1_4, DO_MUL) | ||
72 | + | ||
73 | +/* Note that the multiply cannot overflow, but the doubling can. */ | ||
74 | +static inline int16_t do_sqdmull_h(int16_t n, int16_t m) | ||
75 | +{ | ||
76 | + int16_t val = n * m; | ||
77 | + return DO_SQADD_H(val, val); | ||
78 | +} | ||
79 | + | ||
80 | +static inline int32_t do_sqdmull_s(int32_t n, int32_t m) | ||
81 | +{ | ||
82 | + int32_t val = n * m; | ||
83 | + return DO_SQADD_S(val, val); | ||
84 | +} | ||
85 | + | ||
86 | +static inline int64_t do_sqdmull_d(int64_t n, int64_t m) | ||
87 | +{ | ||
88 | + int64_t val = n * m; | ||
89 | + return do_sqadd_d(val, val); | ||
90 | +} | ||
91 | + | ||
92 | +DO_ZZZ_TB(sve2_sqdmull_zzz_h, int16_t, int8_t, H1_2, H1, do_sqdmull_h) | ||
93 | +DO_ZZZ_TB(sve2_sqdmull_zzz_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s) | ||
94 | +DO_ZZZ_TB(sve2_sqdmull_zzz_d, int64_t, int32_t, , H1_4, do_sqdmull_d) | ||
95 | + | ||
96 | #undef DO_ZZZ_TB | ||
97 | |||
98 | #define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
99 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
100 | index XXXXXXX..XXXXXXX 100644 | ||
101 | --- a/target/arm/translate-sve.c | ||
102 | +++ b/target/arm/translate-sve.c | ||
103 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true) | ||
104 | DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true) | ||
105 | DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false) | ||
106 | |||
107 | +DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false) | ||
108 | +DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true) | ||
109 | + | ||
110 | +DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false) | ||
111 | +DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true) | ||
112 | + | ||
113 | +DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false) | ||
114 | +DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true) | ||
115 | + | ||
116 | #define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \ | ||
117 | static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
118 | { \ | ||
119 | -- | 32 | -- |
120 | 2.20.1 | 33 | 2.25.1 |
121 | |||
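The note above that "the multiply cannot overflow, but the doubling can" is concrete at the 8-to-16-bit size: the widened product peaks at (-128) * (-128) = 16384, which fits in int16_t, but doubling it does not. A sketch with an explicit saturating add standing in for DO_SQADD_H; the names here are mine:

#include <assert.h>
#include <stdint.h>

static int16_t sqadd16(int16_t a, int16_t b)
{
    int32_t r = (int32_t)a + b;
    return r > INT16_MAX ? INT16_MAX : r < INT16_MIN ? INT16_MIN : r;
}

/* Widening saturating doubling multiply, as in do_sqdmull_h above. */
static int16_t sqdmull16(int8_t n, int8_t m)
{
    int16_t val = (int16_t)n * m;   /* the multiply cannot overflow */
    return sqadd16(val, val);       /* ... but the doubling can */
}

int main(void)
{
    assert(sqdmull16(100, 100) == 20000);        /* 2 * 10000, exact */
    assert(sqdmull16(-128, -128) == INT16_MAX);  /* 2 * 16384 saturates */
    return 0;
}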
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For MUL, we can rely on generic support. For SMULH and UMULH, | 3 | Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu. |
4 | create some trivial helpers. For PMUL, back in a21bb78e5817, | ||
5 | we organized helper_gvec_pmul_b in preparation for this use. | ||
6 | 4 | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210525010358.152808-3-richard.henderson@linaro.org | 7 | Message-id: 20220708151540.18136-45-richard.henderson@linaro.org |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 9 | --- |
12 | target/arm/helper.h | 10 ++++ | 10 | target/arm/cpu.c | 11 +++++++++++ |
13 | target/arm/sve.decode | 10 ++++ | 11 | 1 file changed, 11 insertions(+) |
14 | target/arm/translate-sve.c | 50 ++++++++++++++++++++ | ||
15 | target/arm/vec_helper.c | 96 ++++++++++++++++++++++++++++++++++++++ | ||
16 | 4 files changed, 166 insertions(+) | ||
17 | 12 | ||
18 | diff --git a/target/arm/helper.h b/target/arm/helper.h | 13 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
19 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/arm/helper.h | 15 | --- a/target/arm/cpu.c |
21 | +++ b/target/arm/helper.h | 16 | +++ b/target/arm/cpu.c |
22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 17 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev) |
23 | DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 18 | CPACR_EL1, ZEN, 3); |
24 | DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | 19 | env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; |
25 | 20 | } | |
26 | +DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 21 | + /* and for SME instructions, with default vector length, and TPIDR2 */ |
27 | +DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 22 | + if (cpu_isar_feature(aa64_sme, cpu)) { |
28 | +DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 23 | + env->cp15.sctlr_el[1] |= SCTLR_EnTP2; |
29 | +DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 24 | + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, |
30 | + | 25 | + CPACR_EL1, SMEN, 3); |
31 | +DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 26 | + env->vfp.smcr_el[1] = cpu->sme_default_vq - 1; |
32 | +DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 27 | + if (cpu_isar_feature(aa64_sme_fa64, cpu)) { |
33 | +DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 28 | + env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1], |
34 | +DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 29 | + SMCR, FA64, 1); |
35 | + | 30 | + } |
36 | DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 31 | + } |
37 | DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 32 | /* |
38 | DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | 33 | * Enable 48-bit address space (TODO: take reserved_va into account). |
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | 34 | * Enable TBI0 but not TBI1. |
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/sve.decode | ||
42 | +++ b/target/arm/sve.decode | ||
43 | @@ -XXX,XX +XXX,XX @@ ST1_zprz 1110010 .. 00 ..... 100 ... ..... ..... \ | ||
44 | @rprr_scatter_store xs=0 esz=3 scale=0 | ||
45 | ST1_zprz 1110010 .. 00 ..... 110 ... ..... ..... \ | ||
46 | @rprr_scatter_store xs=1 esz=3 scale=0 | ||
47 | + | ||
48 | +#### SVE2 Support | ||
49 | + | ||
50 | +### SVE2 Integer Multiply - Unpredicated | ||
51 | + | ||
52 | +# SVE2 integer multiply vectors (unpredicated) | ||
53 | +MUL_zzz 00000100 .. 1 ..... 0110 00 ..... ..... @rd_rn_rm | ||
54 | +SMULH_zzz 00000100 .. 1 ..... 0110 10 ..... ..... @rd_rn_rm | ||
55 | +UMULH_zzz 00000100 .. 1 ..... 0110 11 ..... ..... @rd_rn_rm | ||
56 | +PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0 | ||
57 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/target/arm/translate-sve.c | ||
60 | +++ b/target/arm/translate-sve.c | ||
61 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a) | ||
62 | { | ||
63 | return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false); | ||
64 | } | ||
65 | + | ||
66 | +/* | ||
67 | + * SVE2 Integer Multiply - Unpredicated | ||
68 | + */ | ||
69 | + | ||
70 | +static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a) | ||
71 | +{ | ||
72 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
73 | + return false; | ||
74 | + } | ||
75 | + if (sve_access_check(s)) { | ||
76 | + gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm); | ||
77 | + } | ||
78 | + return true; | ||
79 | +} | ||
80 | + | ||
81 | +static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a, | ||
82 | + gen_helper_gvec_3 *fn) | ||
83 | +{ | ||
84 | + if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { | ||
85 | + return false; | ||
86 | + } | ||
87 | + if (sve_access_check(s)) { | ||
88 | + gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0); | ||
89 | + } | ||
90 | + return true; | ||
91 | +} | ||
92 | + | ||
93 | +static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a) | ||
94 | +{ | ||
95 | + static gen_helper_gvec_3 * const fns[4] = { | ||
96 | + gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, | ||
97 | + gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, | ||
98 | + }; | ||
99 | + return do_sve2_zzz_ool(s, a, fns[a->esz]); | ||
100 | +} | ||
101 | + | ||
102 | +static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a) | ||
103 | +{ | ||
104 | + static gen_helper_gvec_3 * const fns[4] = { | ||
105 | + gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, | ||
106 | + gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, | ||
107 | + }; | ||
108 | + return do_sve2_zzz_ool(s, a, fns[a->esz]); | ||
109 | +} | ||
110 | + | ||
111 | +static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a) | ||
112 | +{ | ||
113 | + return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b); | ||
114 | +} | ||
115 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
116 | index XXXXXXX..XXXXXXX 100644 | ||
117 | --- a/target/arm/vec_helper.c | ||
118 | +++ b/target/arm/vec_helper.c | ||
119 | @@ -XXX,XX +XXX,XX @@ void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc) | ||
120 | clear_tail(vd, oprsz, simd_maxsz(desc)); | ||
121 | } | ||
122 | #endif | ||
123 | + | ||
124 | +/* | ||
125 | + * NxN -> N highpart multiply | ||
126 | + * | ||
127 | + * TODO: expose this as a generic vector operation. | ||
128 | + */ | ||
129 | + | ||
130 | +void HELPER(gvec_smulh_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
131 | +{ | ||
132 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
133 | + int8_t *d = vd, *n = vn, *m = vm; | ||
134 | + | ||
135 | + for (i = 0; i < opr_sz; ++i) { | ||
136 | + d[i] = ((int32_t)n[i] * m[i]) >> 8; | ||
137 | + } | ||
138 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
139 | +} | ||
140 | + | ||
141 | +void HELPER(gvec_smulh_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
142 | +{ | ||
143 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
144 | + int16_t *d = vd, *n = vn, *m = vm; | ||
145 | + | ||
146 | + for (i = 0; i < opr_sz / 2; ++i) { | ||
147 | + d[i] = ((int32_t)n[i] * m[i]) >> 16; | ||
148 | + } | ||
149 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
150 | +} | ||
151 | + | ||
152 | +void HELPER(gvec_smulh_s)(void *vd, void *vn, void *vm, uint32_t desc) | ||
153 | +{ | ||
154 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
155 | + int32_t *d = vd, *n = vn, *m = vm; | ||
156 | + | ||
157 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
158 | + d[i] = ((int64_t)n[i] * m[i]) >> 32; | ||
159 | + } | ||
160 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
161 | +} | ||
162 | + | ||
163 | +void HELPER(gvec_smulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
164 | +{ | ||
165 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
166 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
167 | + uint64_t discard; | ||
168 | + | ||
169 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
170 | + muls64(&discard, &d[i], n[i], m[i]); | ||
171 | + } | ||
172 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
173 | +} | ||
174 | + | ||
175 | +void HELPER(gvec_umulh_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
176 | +{ | ||
177 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
178 | + uint8_t *d = vd, *n = vn, *m = vm; | ||
179 | + | ||
180 | + for (i = 0; i < opr_sz; ++i) { | ||
181 | + d[i] = ((uint32_t)n[i] * m[i]) >> 8; | ||
182 | + } | ||
183 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
184 | +} | ||
185 | + | ||
186 | +void HELPER(gvec_umulh_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
187 | +{ | ||
188 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
189 | + uint16_t *d = vd, *n = vn, *m = vm; | ||
190 | + | ||
191 | + for (i = 0; i < opr_sz / 2; ++i) { | ||
192 | + d[i] = ((uint32_t)n[i] * m[i]) >> 16; | ||
193 | + } | ||
194 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
195 | +} | ||
196 | + | ||
197 | +void HELPER(gvec_umulh_s)(void *vd, void *vn, void *vm, uint32_t desc) | ||
198 | +{ | ||
199 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
200 | + uint32_t *d = vd, *n = vn, *m = vm; | ||
201 | + | ||
202 | + for (i = 0; i < opr_sz / 4; ++i) { | ||
203 | + d[i] = ((uint64_t)n[i] * m[i]) >> 32; | ||
204 | + } | ||
205 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
206 | +} | ||
207 | + | ||
208 | +void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
209 | +{ | ||
210 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
211 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
212 | + uint64_t discard; | ||
213 | + | ||
214 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
215 | + mulu64(&discard, &d[i], n[i], m[i]); | ||
216 | + } | ||
217 | + clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
218 | +} | ||
219 | -- | 35 | -- |
220 | 2.20.1 | 36 | 2.25.1 |
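
A note on the vec_helper.c hunk above: apart from the 64-bit lanes, a
highpart multiply is nothing more than a widening multiply followed by a
shift; only the _d helpers need muls64()/mulu64(), because C has no
portable 128-bit type. A minimal standalone sketch of the identity (not
QEMU code; the function names here are invented):

#include <assert.h>
#include <stdint.h>

/* High half of a signed 32x32->64 product, as in gvec_smulh_s. */
static int32_t smulh32(int32_t n, int32_t m)
{
    return (int32_t)(((int64_t)n * m) >> 32);
}

/* High half of an unsigned 32x32->64 product, as in gvec_umulh_s. */
static uint32_t umulh32(uint32_t n, uint32_t m)
{
    return (uint32_t)(((uint64_t)n * m) >> 32);
}

int main(void)
{
    assert(smulh32(INT32_MIN, -1) == 0);          /* +2^31: high half is 0 */
    assert(smulh32(INT32_MAX, INT32_MAX) == 0x3fffffff);
    assert(umulh32(UINT32_MAX, UINT32_MAX) == UINT32_MAX - 1);
    return 0;
}

On GCC/Clang the 64-bit helpers could take the same shape via __int128;
going through muls64()/mulu64() keeps them portable.
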
Deleted patch | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20210525010358.152808-7-richard.henderson@linaro.org | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 54 +++++++++++++++++++++++ | ||
9 | target/arm/sve.decode | 17 ++++++++ | ||
10 | target/arm/sve_helper.c | 87 ++++++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 18 ++++++++ | ||
12 | 4 files changed, 176 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG, | ||
19 | DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG, | ||
20 | void, ptr, ptr, ptr, ptr, i32) | ||
21 | |||
22 | +DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_b, TCG_CALL_NO_RWG, | ||
23 | + void, ptr, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_h, TCG_CALL_NO_RWG, | ||
25 | + void, ptr, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_s, TCG_CALL_NO_RWG, | ||
27 | + void, ptr, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_d, TCG_CALL_NO_RWG, | ||
29 | + void, ptr, ptr, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_b, TCG_CALL_NO_RWG, | ||
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_h, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_s, TCG_CALL_NO_RWG, | ||
36 | + void, ptr, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_d, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, i32) | ||
39 | + | ||
40 | +DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_b, TCG_CALL_NO_RWG, | ||
41 | + void, ptr, ptr, ptr, ptr, i32) | ||
42 | +DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_h, TCG_CALL_NO_RWG, | ||
43 | + void, ptr, ptr, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_s, TCG_CALL_NO_RWG, | ||
45 | + void, ptr, ptr, ptr, ptr, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_d, TCG_CALL_NO_RWG, | ||
47 | + void, ptr, ptr, ptr, ptr, i32) | ||
48 | + | ||
49 | +DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_b, TCG_CALL_NO_RWG, | ||
50 | + void, ptr, ptr, ptr, ptr, i32) | ||
51 | +DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_h, TCG_CALL_NO_RWG, | ||
52 | + void, ptr, ptr, ptr, ptr, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_s, TCG_CALL_NO_RWG, | ||
54 | + void, ptr, ptr, ptr, ptr, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_d, TCG_CALL_NO_RWG, | ||
56 | + void, ptr, ptr, ptr, ptr, i32) | ||
57 | + | ||
58 | +DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_b, TCG_CALL_NO_RWG, | ||
59 | + void, ptr, ptr, ptr, ptr, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_h, TCG_CALL_NO_RWG, | ||
61 | + void, ptr, ptr, ptr, ptr, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_s, TCG_CALL_NO_RWG, | ||
63 | + void, ptr, ptr, ptr, ptr, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_d, TCG_CALL_NO_RWG, | ||
65 | + void, ptr, ptr, ptr, ptr, i32) | ||
66 | + | ||
67 | +DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_b, TCG_CALL_NO_RWG, | ||
68 | + void, ptr, ptr, ptr, ptr, i32) | ||
69 | +DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_h, TCG_CALL_NO_RWG, | ||
70 | + void, ptr, ptr, ptr, ptr, i32) | ||
71 | +DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG, | ||
72 | + void, ptr, ptr, ptr, ptr, i32) | ||
73 | +DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG, | ||
74 | + void, ptr, ptr, ptr, ptr, i32) | ||
75 | + | ||
76 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG, | ||
77 | void, ptr, ptr, ptr, ptr, i32) | ||
78 | DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG, | ||
79 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/sve.decode | ||
82 | +++ b/target/arm/sve.decode | ||
83 | @@ -XXX,XX +XXX,XX @@ URECPE 01000100 .. 000 000 101 ... ..... ..... @rd_pg_rn | ||
84 | URSQRTE 01000100 .. 000 001 101 ... ..... ..... @rd_pg_rn | ||
85 | SQABS 01000100 .. 001 000 101 ... ..... ..... @rd_pg_rn | ||
86 | SQNEG 01000100 .. 001 001 101 ... ..... ..... @rd_pg_rn | ||
87 | + | ||
88 | +### SVE2 saturating/rounding bitwise shift left (predicated) | ||
89 | + | ||
90 | +SRSHL 01000100 .. 000 010 100 ... ..... ..... @rdn_pg_rm | ||
91 | +URSHL 01000100 .. 000 011 100 ... ..... ..... @rdn_pg_rm | ||
92 | +SRSHL 01000100 .. 000 110 100 ... ..... ..... @rdm_pg_rn # SRSHLR | ||
93 | +URSHL 01000100 .. 000 111 100 ... ..... ..... @rdm_pg_rn # URSHLR | ||
94 | + | ||
95 | +SQSHL 01000100 .. 001 000 100 ... ..... ..... @rdn_pg_rm | ||
96 | +UQSHL 01000100 .. 001 001 100 ... ..... ..... @rdn_pg_rm | ||
97 | +SQSHL 01000100 .. 001 100 100 ... ..... ..... @rdm_pg_rn # SQSHLR | ||
98 | +UQSHL 01000100 .. 001 101 100 ... ..... ..... @rdm_pg_rn # UQSHLR | ||
99 | + | ||
100 | +SQRSHL 01000100 .. 001 010 100 ... ..... ..... @rdn_pg_rm | ||
101 | +UQRSHL 01000100 .. 001 011 100 ... ..... ..... @rdn_pg_rm | ||
102 | +SQRSHL 01000100 .. 001 110 100 ... ..... ..... @rdm_pg_rn # SQRSHLR | ||
103 | +UQRSHL 01000100 .. 001 111 100 ... ..... ..... @rdm_pg_rn # UQRSHLR | ||
104 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
105 | index XXXXXXX..XXXXXXX 100644 | ||
106 | --- a/target/arm/sve_helper.c | ||
107 | +++ b/target/arm/sve_helper.c | ||
108 | @@ -XXX,XX +XXX,XX @@ | ||
109 | #include "tcg/tcg-gvec-desc.h" | ||
110 | #include "fpu/softfloat.h" | ||
111 | #include "tcg/tcg.h" | ||
112 | +#include "vec_internal.h" | ||
113 | |||
114 | |||
115 | /* Note that vector data is stored in host-endian 64-bit chunks, | ||
116 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h) | ||
117 | DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s) | ||
118 | DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d) | ||
119 | |||
120 | +#define do_srshl_b(n, m) do_sqrshl_bhs(n, m, 8, true, NULL) | ||
121 | +#define do_srshl_h(n, m) do_sqrshl_bhs(n, m, 16, true, NULL) | ||
122 | +#define do_srshl_s(n, m) do_sqrshl_bhs(n, m, 32, true, NULL) | ||
123 | +#define do_srshl_d(n, m) do_sqrshl_d(n, m, true, NULL) | ||
124 | + | ||
125 | +DO_ZPZZ(sve2_srshl_zpzz_b, int8_t, H1, do_srshl_b) | ||
126 | +DO_ZPZZ(sve2_srshl_zpzz_h, int16_t, H1_2, do_srshl_h) | ||
127 | +DO_ZPZZ(sve2_srshl_zpzz_s, int32_t, H1_4, do_srshl_s) | ||
128 | +DO_ZPZZ_D(sve2_srshl_zpzz_d, int64_t, do_srshl_d) | ||
129 | + | ||
130 | +#define do_urshl_b(n, m) do_uqrshl_bhs(n, (int8_t)m, 8, true, NULL) | ||
131 | +#define do_urshl_h(n, m) do_uqrshl_bhs(n, (int16_t)m, 16, true, NULL) | ||
132 | +#define do_urshl_s(n, m) do_uqrshl_bhs(n, m, 32, true, NULL) | ||
133 | +#define do_urshl_d(n, m) do_uqrshl_d(n, m, true, NULL) | ||
134 | + | ||
135 | +DO_ZPZZ(sve2_urshl_zpzz_b, uint8_t, H1, do_urshl_b) | ||
136 | +DO_ZPZZ(sve2_urshl_zpzz_h, uint16_t, H1_2, do_urshl_h) | ||
137 | +DO_ZPZZ(sve2_urshl_zpzz_s, uint32_t, H1_4, do_urshl_s) | ||
138 | +DO_ZPZZ_D(sve2_urshl_zpzz_d, uint64_t, do_urshl_d) | ||
139 | + | ||
140 | +/* | ||
141 | + * Unlike the NEON and AdvSIMD versions, there is no QC bit to set. | ||
142 | + * We pass in a pointer to a dummy saturation field to trigger | ||
143 | + * the saturating arithmetic but discard the information about | ||
144 | + * whether it has occurred. | ||
145 | + */ | ||
146 | +#define do_sqshl_b(n, m) \ | ||
147 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, false, &discard); }) | ||
148 | +#define do_sqshl_h(n, m) \ | ||
149 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, false, &discard); }) | ||
150 | +#define do_sqshl_s(n, m) \ | ||
151 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, false, &discard); }) | ||
152 | +#define do_sqshl_d(n, m) \ | ||
153 | + ({ uint32_t discard; do_sqrshl_d(n, m, false, &discard); }) | ||
154 | + | ||
155 | +DO_ZPZZ(sve2_sqshl_zpzz_b, int8_t, H1_2, do_sqshl_b) | ||
156 | +DO_ZPZZ(sve2_sqshl_zpzz_h, int16_t, H1_2, do_sqshl_h) | ||
157 | +DO_ZPZZ(sve2_sqshl_zpzz_s, int32_t, H1_4, do_sqshl_s) | ||
158 | +DO_ZPZZ_D(sve2_sqshl_zpzz_d, int64_t, do_sqshl_d) | ||
159 | + | ||
160 | +#define do_uqshl_b(n, m) \ | ||
161 | + ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, false, &discard); }) | ||
162 | +#define do_uqshl_h(n, m) \ | ||
163 | + ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, false, &discard); }) | ||
164 | +#define do_uqshl_s(n, m) \ | ||
165 | + ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, false, &discard); }) | ||
166 | +#define do_uqshl_d(n, m) \ | ||
167 | + ({ uint32_t discard; do_uqrshl_d(n, m, false, &discard); }) | ||
168 | + | ||
169 | +DO_ZPZZ(sve2_uqshl_zpzz_b, uint8_t, H1_2, do_uqshl_b) | ||
170 | +DO_ZPZZ(sve2_uqshl_zpzz_h, uint16_t, H1_2, do_uqshl_h) | ||
171 | +DO_ZPZZ(sve2_uqshl_zpzz_s, uint32_t, H1_4, do_uqshl_s) | ||
172 | +DO_ZPZZ_D(sve2_uqshl_zpzz_d, uint64_t, do_uqshl_d) | ||
173 | + | ||
174 | +#define do_sqrshl_b(n, m) \ | ||
175 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, true, &discard); }) | ||
176 | +#define do_sqrshl_h(n, m) \ | ||
177 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, true, &discard); }) | ||
178 | +#define do_sqrshl_s(n, m) \ | ||
179 | + ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, true, &discard); }) | ||
180 | +#define do_sqrshl_d(n, m) \ | ||
181 | + ({ uint32_t discard; do_sqrshl_d(n, m, true, &discard); }) | ||
182 | + | ||
183 | +DO_ZPZZ(sve2_sqrshl_zpzz_b, int8_t, H1_2, do_sqrshl_b) | ||
184 | +DO_ZPZZ(sve2_sqrshl_zpzz_h, int16_t, H1_2, do_sqrshl_h) | ||
185 | +DO_ZPZZ(sve2_sqrshl_zpzz_s, int32_t, H1_4, do_sqrshl_s) | ||
186 | +DO_ZPZZ_D(sve2_sqrshl_zpzz_d, int64_t, do_sqrshl_d) | ||
187 | + | ||
188 | +#undef do_sqrshl_d | ||
189 | + | ||
190 | +#define do_uqrshl_b(n, m) \ | ||
191 | + ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, true, &discard); }) | ||
192 | +#define do_uqrshl_h(n, m) \ | ||
193 | + ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, true, &discard); }) | ||
194 | +#define do_uqrshl_s(n, m) \ | ||
195 | + ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, true, &discard); }) | ||
196 | +#define do_uqrshl_d(n, m) \ | ||
197 | + ({ uint32_t discard; do_uqrshl_d(n, m, true, &discard); }) | ||
198 | + | ||
199 | +DO_ZPZZ(sve2_uqrshl_zpzz_b, uint8_t, H1_2, do_uqrshl_b) | ||
200 | +DO_ZPZZ(sve2_uqrshl_zpzz_h, uint16_t, H1_2, do_uqrshl_h) | ||
201 | +DO_ZPZZ(sve2_uqrshl_zpzz_s, uint32_t, H1_4, do_uqrshl_s) | ||
202 | +DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d) | ||
203 | + | ||
204 | +#undef do_uqrshl_d | ||
205 | + | ||
206 | #undef DO_ZPZZ | ||
207 | #undef DO_ZPZZ_D | ||
208 | |||
209 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
210 | index XXXXXXX..XXXXXXX 100644 | ||
211 | --- a/target/arm/translate-sve.c | ||
212 | +++ b/target/arm/translate-sve.c | ||
213 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a) | ||
214 | }; | ||
215 | return do_sve2_zpz_ool(s, a, fns[a->esz]); | ||
216 | } | ||
217 | + | ||
218 | +#define DO_SVE2_ZPZZ(NAME, name) \ | ||
219 | +static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ | ||
220 | +{ \ | ||
221 | + static gen_helper_gvec_4 * const fns[4] = { \ | ||
222 | + gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \ | ||
223 | + gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \ | ||
224 | + }; \ | ||
225 | + return do_sve2_zpzz_ool(s, a, fns[a->esz]); \ | ||
226 | +} | ||
227 | + | ||
228 | +DO_SVE2_ZPZZ(SQSHL, sqshl) | ||
229 | +DO_SVE2_ZPZZ(SQRSHL, sqrshl) | ||
230 | +DO_SVE2_ZPZZ(SRSHL, srshl) | ||
231 | + | ||
232 | +DO_SVE2_ZPZZ(UQSHL, uqshl) | ||
233 | +DO_SVE2_ZPZZ(UQRSHL, uqrshl) | ||
234 | +DO_SVE2_ZPZZ(URSHL, urshl) | ||
235 | -- | ||
236 | 2.20.1 | ||
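
The dummy-saturation-field comment above is worth unpacking:
do_sqrshl_bhs() and friends report saturation through a pointer so that
the NEON callers can update QC, and the SVE2 wrappers simply aim that
pointer at a local and throw the result away. A standalone sketch of the
pattern (not QEMU code; sat_add8() is an invented stand-in for the real
primitive, and the ({ ... }) statement expression is a GNU C extension
QEMU already relies on):

#include <assert.h>
#include <stdint.h>

/* Invented saturating primitive that reports saturation via *sat. */
static int32_t sat_add8(int32_t n, int32_t m, uint32_t *sat)
{
    int32_t r = n + m;
    if (r != (int8_t)r) {
        *sat = 1;
        r = r < 0 ? INT8_MIN : INT8_MAX;
    }
    return r;
}

/* Evaluate the op, keep the value, drop the saturation status,
 * exactly as the do_sqshl_b/h/s/d wrappers do: SVE2 has no QC bit. */
#define do_sqadd_b(n, m) \
    ({ uint32_t discard; sat_add8(n, m, &discard); })

int main(void)
{
    assert(do_sqadd_b(100, 100) == INT8_MAX);   /* saturated, flag dropped */
    assert(do_sqadd_b(-100, -100) == INT8_MIN);
    assert(do_sqadd_b(3, 4) == 7);
    return 0;
}

The statement expression keeps each wrapper usable as a plain
expression, which is what the DO_ZPZZ expander expects of OP.
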
Deleted patch | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-9-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 45 ++++++++++++++++++++++ | ||
9 | target/arm/sve.decode | 8 ++++ | ||
10 | target/arm/sve_helper.c | 76 ++++++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 6 +++ | ||
12 | 4 files changed, 135 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, | ||
19 | DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, | ||
20 | void, ptr, ptr, ptr, ptr, i32) | ||
21 | |||
22 | +DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG, | ||
23 | + void, ptr, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_5(sve2_addp_zpzz_h, TCG_CALL_NO_RWG, | ||
25 | + void, ptr, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_5(sve2_addp_zpzz_s, TCG_CALL_NO_RWG, | ||
27 | + void, ptr, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_5(sve2_addp_zpzz_d, TCG_CALL_NO_RWG, | ||
29 | + void, ptr, ptr, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_b, TCG_CALL_NO_RWG, | ||
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_h, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_s, TCG_CALL_NO_RWG, | ||
36 | + void, ptr, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_d, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, i32) | ||
39 | + | ||
40 | +DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_b, TCG_CALL_NO_RWG, | ||
41 | + void, ptr, ptr, ptr, ptr, i32) | ||
42 | +DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_h, TCG_CALL_NO_RWG, | ||
43 | + void, ptr, ptr, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_s, TCG_CALL_NO_RWG, | ||
45 | + void, ptr, ptr, ptr, ptr, i32) | ||
46 | +DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_d, TCG_CALL_NO_RWG, | ||
47 | + void, ptr, ptr, ptr, ptr, i32) | ||
48 | + | ||
49 | +DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_b, TCG_CALL_NO_RWG, | ||
50 | + void, ptr, ptr, ptr, ptr, i32) | ||
51 | +DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_h, TCG_CALL_NO_RWG, | ||
52 | + void, ptr, ptr, ptr, ptr, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_s, TCG_CALL_NO_RWG, | ||
54 | + void, ptr, ptr, ptr, ptr, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_d, TCG_CALL_NO_RWG, | ||
56 | + void, ptr, ptr, ptr, ptr, i32) | ||
57 | + | ||
58 | +DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_b, TCG_CALL_NO_RWG, | ||
59 | + void, ptr, ptr, ptr, ptr, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_h, TCG_CALL_NO_RWG, | ||
61 | + void, ptr, ptr, ptr, ptr, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG, | ||
63 | + void, ptr, ptr, ptr, ptr, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG, | ||
65 | + void, ptr, ptr, ptr, ptr, i32) | ||
66 | + | ||
67 | DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG, | ||
68 | void, ptr, ptr, ptr, ptr, i32) | ||
69 | DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG, | ||
70 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
71 | index XXXXXXX..XXXXXXX 100644 | ||
72 | --- a/target/arm/sve.decode | ||
73 | +++ b/target/arm/sve.decode | ||
74 | @@ -XXX,XX +XXX,XX @@ SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm | ||
75 | URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm | ||
76 | SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR | ||
77 | UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR | ||
78 | + | ||
79 | +### SVE2 integer pairwise arithmetic | ||
80 | + | ||
81 | +ADDP 01000100 .. 010 001 101 ... ..... ..... @rdn_pg_rm | ||
82 | +SMAXP 01000100 .. 010 100 101 ... ..... ..... @rdn_pg_rm | ||
83 | +UMAXP 01000100 .. 010 101 101 ... ..... ..... @rdn_pg_rm | ||
84 | +SMINP 01000100 .. 010 110 101 ... ..... ..... @rdn_pg_rm | ||
85 | +UMINP 01000100 .. 010 111 101 ... ..... ..... @rdn_pg_rm | ||
86 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
87 | index XXXXXXX..XXXXXXX 100644 | ||
88 | --- a/target/arm/sve_helper.c | ||
89 | +++ b/target/arm/sve_helper.c | ||
90 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D) | ||
91 | #undef DO_ZPZZ | ||
92 | #undef DO_ZPZZ_D | ||
93 | |||
94 | +/* | ||
95 | + * Three operand expander, operating on element pairs. | ||
96 | + * If the slot I is even, the elements come from VN {I, I+1}. | ||
97 | + * If the slot I is odd, the elements come from VM {I-1, I}. | ||
98 | + * Load all of the input elements in each pair before overwriting output. | ||
99 | + */ | ||
100 | +#define DO_ZPZZ_PAIR(NAME, TYPE, H, OP) \ | ||
101 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ | ||
102 | +{ \ | ||
103 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
104 | + for (i = 0; i < opr_sz; ) { \ | ||
105 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
106 | + do { \ | ||
107 | + TYPE n0 = *(TYPE *)(vn + H(i)); \ | ||
108 | + TYPE m0 = *(TYPE *)(vm + H(i)); \ | ||
109 | + TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \ | ||
110 | + TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \ | ||
111 | + if (pg & 1) { \ | ||
112 | + *(TYPE *)(vd + H(i)) = OP(n0, n1); \ | ||
113 | + } \ | ||
114 | + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ | ||
115 | + if (pg & 1) { \ | ||
116 | + *(TYPE *)(vd + H(i)) = OP(m0, m1); \ | ||
117 | + } \ | ||
118 | + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ | ||
119 | + } while (i & 15); \ | ||
120 | + } \ | ||
121 | +} | ||
122 | + | ||
123 | +/* Similarly, specialized for 64-bit operands. */ | ||
124 | +#define DO_ZPZZ_PAIR_D(NAME, TYPE, OP) \ | ||
125 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ | ||
126 | +{ \ | ||
127 | + intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ | ||
128 | + TYPE *d = vd, *n = vn, *m = vm; \ | ||
129 | + uint8_t *pg = vg; \ | ||
130 | + for (i = 0; i < opr_sz; i += 2) { \ | ||
131 | + TYPE n0 = n[i], n1 = n[i + 1]; \ | ||
132 | + TYPE m0 = m[i], m1 = m[i + 1]; \ | ||
133 | + if (pg[H1(i)] & 1) { \ | ||
134 | + d[i] = OP(n0, n1); \ | ||
135 | + } \ | ||
136 | + if (pg[H1(i + 1)] & 1) { \ | ||
137 | + d[i + 1] = OP(m0, m1); \ | ||
138 | + } \ | ||
139 | + } \ | ||
140 | +} | ||
141 | + | ||
142 | +DO_ZPZZ_PAIR(sve2_addp_zpzz_b, uint8_t, H1, DO_ADD) | ||
143 | +DO_ZPZZ_PAIR(sve2_addp_zpzz_h, uint16_t, H1_2, DO_ADD) | ||
144 | +DO_ZPZZ_PAIR(sve2_addp_zpzz_s, uint32_t, H1_4, DO_ADD) | ||
145 | +DO_ZPZZ_PAIR_D(sve2_addp_zpzz_d, uint64_t, DO_ADD) | ||
146 | + | ||
147 | +DO_ZPZZ_PAIR(sve2_umaxp_zpzz_b, uint8_t, H1, DO_MAX) | ||
148 | +DO_ZPZZ_PAIR(sve2_umaxp_zpzz_h, uint16_t, H1_2, DO_MAX) | ||
149 | +DO_ZPZZ_PAIR(sve2_umaxp_zpzz_s, uint32_t, H1_4, DO_MAX) | ||
150 | +DO_ZPZZ_PAIR_D(sve2_umaxp_zpzz_d, uint64_t, DO_MAX) | ||
151 | + | ||
152 | +DO_ZPZZ_PAIR(sve2_uminp_zpzz_b, uint8_t, H1, DO_MIN) | ||
153 | +DO_ZPZZ_PAIR(sve2_uminp_zpzz_h, uint16_t, H1_2, DO_MIN) | ||
154 | +DO_ZPZZ_PAIR(sve2_uminp_zpzz_s, uint32_t, H1_4, DO_MIN) | ||
155 | +DO_ZPZZ_PAIR_D(sve2_uminp_zpzz_d, uint64_t, DO_MIN) | ||
156 | + | ||
157 | +DO_ZPZZ_PAIR(sve2_smaxp_zpzz_b, int8_t, H1, DO_MAX) | ||
158 | +DO_ZPZZ_PAIR(sve2_smaxp_zpzz_h, int16_t, H1_2, DO_MAX) | ||
159 | +DO_ZPZZ_PAIR(sve2_smaxp_zpzz_s, int32_t, H1_4, DO_MAX) | ||
160 | +DO_ZPZZ_PAIR_D(sve2_smaxp_zpzz_d, int64_t, DO_MAX) | ||
161 | + | ||
162 | +DO_ZPZZ_PAIR(sve2_sminp_zpzz_b, int8_t, H1, DO_MIN) | ||
163 | +DO_ZPZZ_PAIR(sve2_sminp_zpzz_h, int16_t, H1_2, DO_MIN) | ||
164 | +DO_ZPZZ_PAIR(sve2_sminp_zpzz_s, int32_t, H1_4, DO_MIN) | ||
165 | +DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN) | ||
166 | + | ||
167 | +#undef DO_ZPZZ_PAIR | ||
168 | +#undef DO_ZPZZ_PAIR_D | ||
169 | + | ||
170 | /* Three-operand expander, controlled by a predicate, in which the | ||
171 | * third operand is "wide". That is, for D = N op M, the same 64-bit | ||
172 | * value of M is used with all of the narrower values of N. | ||
173 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
174 | index XXXXXXX..XXXXXXX 100644 | ||
175 | --- a/target/arm/translate-sve.c | ||
176 | +++ b/target/arm/translate-sve.c | ||
177 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SHSUB, shsub) | ||
178 | DO_SVE2_ZPZZ(UHADD, uhadd) | ||
179 | DO_SVE2_ZPZZ(URHADD, urhadd) | ||
180 | DO_SVE2_ZPZZ(UHSUB, uhsub) | ||
181 | + | ||
182 | +DO_SVE2_ZPZZ(ADDP, addp) | ||
183 | +DO_SVE2_ZPZZ(SMAXP, smaxp) | ||
184 | +DO_SVE2_ZPZZ(UMAXP, umaxp) | ||
185 | +DO_SVE2_ZPZZ(SMINP, sminp) | ||
186 | +DO_SVE2_ZPZZ(UMINP, uminp) | ||
187 | -- | ||
188 | 2.20.1 | ||
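
To make the pairwise layout in DO_ZPZZ_PAIR concrete: even result slots
reduce a pair of elements from Zn, odd slots the corresponding pair from
Zm, and both elements of a pair are read before anything is written
because the destination may alias either input. A scalar model with
fixed four-element vectors and every lane active (illustrative only, not
QEMU code):

#include <assert.h>
#include <stdint.h>

/* d[2k]   = n[2k] + n[2k+1]   (even slots reduce the Zn pair)
 * d[2k+1] = m[2k] + m[2k+1]   (odd slots reduce the Zm pair)  */
static void addp4(int32_t *d, const int32_t *n, const int32_t *m)
{
    for (int i = 0; i < 4; i += 2) {
        int32_t n0 = n[i], n1 = n[i + 1];   /* load before store: */
        int32_t m0 = m[i], m1 = m[i + 1];   /* d may alias n or m */
        d[i] = n0 + n1;
        d[i + 1] = m0 + m1;
    }
}

int main(void)
{
    int32_t n[4] = {1, 2, 3, 4};
    int32_t m[4] = {10, 20, 30, 40};
    addp4(n, n, m);               /* destructive, Zd == Zn, as @rdn_pg_rm */
    assert(n[0] == 3 && n[1] == 30 && n[2] == 7 && n[3] == 70);
    return 0;
}
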
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
2 | 2 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210525010358.152808-10-richard.henderson@linaro.org | 5 | Message-id: 20220708151540.18136-46-richard.henderson@linaro.org |
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 7 | --- |
8 | target/arm/helper-sve.h | 54 +++++++++++ | 8 | linux-user/elfload.c | 20 ++++++++++++++++++++ |
9 | target/arm/sve.decode | 11 +++ | 9 | 1 file changed, 20 insertions(+) |
10 | target/arm/sve_helper.c | 194 ++++++++++++++++++++++++++----------- | ||
11 | target/arm/translate-sve.c | 7 ++ | ||
12 | 4 files changed, 210 insertions(+), 56 deletions(-) | ||
13 | 10 | ||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | 11 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c |
15 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/helper-sve.h | 13 | --- a/linux-user/elfload.c |
17 | +++ b/target/arm/helper-sve.h | 14 | +++ b/linux-user/elfload.c |
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG, | 15 | @@ -XXX,XX +XXX,XX @@ enum { |
19 | DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG, | 16 | ARM_HWCAP2_A64_RNG = 1 << 16, |
20 | void, ptr, ptr, ptr, ptr, i32) | 17 | ARM_HWCAP2_A64_BTI = 1 << 17, |
21 | 18 | ARM_HWCAP2_A64_MTE = 1 << 18, | |
22 | +DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_b, TCG_CALL_NO_RWG, | 19 | + ARM_HWCAP2_A64_ECV = 1 << 19, |
23 | + void, ptr, ptr, ptr, ptr, i32) | 20 | + ARM_HWCAP2_A64_AFP = 1 << 20, |
24 | +DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_h, TCG_CALL_NO_RWG, | 21 | + ARM_HWCAP2_A64_RPRES = 1 << 21, |
25 | + void, ptr, ptr, ptr, ptr, i32) | 22 | + ARM_HWCAP2_A64_MTE3 = 1 << 22, |
26 | +DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_s, TCG_CALL_NO_RWG, | 23 | + ARM_HWCAP2_A64_SME = 1 << 23, |
27 | + void, ptr, ptr, ptr, ptr, i32) | 24 | + ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, |
28 | +DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_d, TCG_CALL_NO_RWG, | 25 | + ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, |
29 | + void, ptr, ptr, ptr, ptr, i32) | 26 | + ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, |
30 | + | 27 | + ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, |
31 | +DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_b, TCG_CALL_NO_RWG, | 28 | + ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, |
32 | + void, ptr, ptr, ptr, ptr, i32) | 29 | + ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, |
33 | +DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_h, TCG_CALL_NO_RWG, | 30 | + ARM_HWCAP2_A64_SME_FA64 = 1 << 30, |
34 | + void, ptr, ptr, ptr, ptr, i32) | 31 | }; |
35 | +DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_s, TCG_CALL_NO_RWG, | 32 | |
36 | + void, ptr, ptr, ptr, ptr, i32) | 33 | #define ELF_HWCAP get_elf_hwcap() |
37 | +DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_d, TCG_CALL_NO_RWG, | 34 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void) |
38 | + void, ptr, ptr, ptr, ptr, i32) | 35 | GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); |
39 | + | 36 | GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); |
40 | +DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_b, TCG_CALL_NO_RWG, | 37 | GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); |
41 | + void, ptr, ptr, ptr, ptr, i32) | 38 | + GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | |
42 | +DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_h, TCG_CALL_NO_RWG, | 39 | + ARM_HWCAP2_A64_SME_F32F32 | |
43 | + void, ptr, ptr, ptr, ptr, i32) | 40 | + ARM_HWCAP2_A64_SME_B16F32 | |
44 | +DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_s, TCG_CALL_NO_RWG, | 41 | + ARM_HWCAP2_A64_SME_F16F32 | |
45 | + void, ptr, ptr, ptr, ptr, i32) | 42 | + ARM_HWCAP2_A64_SME_I8I32)); |
46 | +DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_d, TCG_CALL_NO_RWG, | 43 | + GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); |
47 | + void, ptr, ptr, ptr, ptr, i32) | 44 | + GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); |
48 | + | 45 | + GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); |
49 | +DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_b, TCG_CALL_NO_RWG, | 46 | |
50 | + void, ptr, ptr, ptr, ptr, i32) | 47 | return hwcaps; |
51 | +DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_h, TCG_CALL_NO_RWG, | ||
52 | + void, ptr, ptr, ptr, ptr, i32) | ||
53 | +DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_s, TCG_CALL_NO_RWG, | ||
54 | + void, ptr, ptr, ptr, ptr, i32) | ||
55 | +DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_d, TCG_CALL_NO_RWG, | ||
56 | + void, ptr, ptr, ptr, ptr, i32) | ||
57 | + | ||
58 | +DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_b, TCG_CALL_NO_RWG, | ||
59 | + void, ptr, ptr, ptr, ptr, i32) | ||
60 | +DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_h, TCG_CALL_NO_RWG, | ||
61 | + void, ptr, ptr, ptr, ptr, i32) | ||
62 | +DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_s, TCG_CALL_NO_RWG, | ||
63 | + void, ptr, ptr, ptr, ptr, i32) | ||
64 | +DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_d, TCG_CALL_NO_RWG, | ||
65 | + void, ptr, ptr, ptr, ptr, i32) | ||
66 | + | ||
67 | +DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_b, TCG_CALL_NO_RWG, | ||
68 | + void, ptr, ptr, ptr, ptr, i32) | ||
69 | +DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_h, TCG_CALL_NO_RWG, | ||
70 | + void, ptr, ptr, ptr, ptr, i32) | ||
71 | +DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_s, TCG_CALL_NO_RWG, | ||
72 | + void, ptr, ptr, ptr, ptr, i32) | ||
73 | +DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_d, TCG_CALL_NO_RWG, | ||
74 | + void, ptr, ptr, ptr, ptr, i32) | ||
75 | + | ||
76 | DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG, | ||
77 | void, ptr, ptr, ptr, ptr, i32) | ||
78 | DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG, | ||
79 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/sve.decode | ||
82 | +++ b/target/arm/sve.decode | ||
83 | @@ -XXX,XX +XXX,XX @@ SMAXP 01000100 .. 010 100 101 ... ..... ..... @rdn_pg_rm | ||
84 | UMAXP 01000100 .. 010 101 101 ... ..... ..... @rdn_pg_rm | ||
85 | SMINP 01000100 .. 010 110 101 ... ..... ..... @rdn_pg_rm | ||
86 | UMINP 01000100 .. 010 111 101 ... ..... ..... @rdn_pg_rm | ||
87 | + | ||
88 | +### SVE2 saturating add/subtract (predicated) | ||
89 | + | ||
90 | +SQADD_zpzz 01000100 .. 011 000 100 ... ..... ..... @rdn_pg_rm | ||
91 | +UQADD_zpzz 01000100 .. 011 001 100 ... ..... ..... @rdn_pg_rm | ||
92 | +SQSUB_zpzz 01000100 .. 011 010 100 ... ..... ..... @rdn_pg_rm | ||
93 | +UQSUB_zpzz 01000100 .. 011 011 100 ... ..... ..... @rdn_pg_rm | ||
94 | +SUQADD 01000100 .. 011 100 100 ... ..... ..... @rdn_pg_rm | ||
95 | +USQADD 01000100 .. 011 101 100 ... ..... ..... @rdn_pg_rm | ||
96 | +SQSUB_zpzz 01000100 .. 011 110 100 ... ..... ..... @rdm_pg_rn # SQSUBR | ||
97 | +UQSUB_zpzz 01000100 .. 011 111 100 ... ..... ..... @rdm_pg_rn # UQSUBR | ||
98 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/target/arm/sve_helper.c | ||
101 | +++ b/target/arm/sve_helper.c | ||
102 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS) | ||
103 | DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS) | ||
104 | DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D) | ||
105 | |||
106 | +static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max) | ||
107 | +{ | ||
108 | + return val >= max ? max : val <= min ? min : val; | ||
109 | +} | ||
110 | + | ||
111 | +#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX) | ||
112 | +#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX) | ||
113 | +#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX) | ||
114 | + | ||
115 | +static inline int64_t do_sqadd_d(int64_t n, int64_t m) | ||
116 | +{ | ||
117 | + int64_t r = n + m; | ||
118 | + if (((r ^ n) & ~(n ^ m)) < 0) { | ||
119 | + /* Signed overflow. */ | ||
120 | + return r < 0 ? INT64_MAX : INT64_MIN; | ||
121 | + } | ||
122 | + return r; | ||
123 | +} | ||
124 | + | ||
125 | +DO_ZPZZ(sve2_sqadd_zpzz_b, int8_t, H1, DO_SQADD_B) | ||
126 | +DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H) | ||
127 | +DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S) | ||
128 | +DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d) | ||
129 | + | ||
130 | +#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX) | ||
131 | +#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX) | ||
132 | +#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX) | ||
133 | + | ||
134 | +static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m) | ||
135 | +{ | ||
136 | + uint64_t r = n + m; | ||
137 | + return r < n ? UINT64_MAX : r; | ||
138 | +} | ||
139 | + | ||
140 | +DO_ZPZZ(sve2_uqadd_zpzz_b, uint8_t, H1, DO_UQADD_B) | ||
141 | +DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H) | ||
142 | +DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S) | ||
143 | +DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d) | ||
144 | + | ||
145 | +#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX) | ||
146 | +#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX) | ||
147 | +#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX) | ||
148 | + | ||
149 | +static inline int64_t do_sqsub_d(int64_t n, int64_t m) | ||
150 | +{ | ||
151 | + int64_t r = n - m; | ||
152 | + if (((r ^ n) & (n ^ m)) < 0) { | ||
153 | + /* Signed overflow. */ | ||
154 | + return r < 0 ? INT64_MAX : INT64_MIN; | ||
155 | + } | ||
156 | + return r; | ||
157 | +} | ||
158 | + | ||
159 | +DO_ZPZZ(sve2_sqsub_zpzz_b, int8_t, H1, DO_SQSUB_B) | ||
160 | +DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H) | ||
161 | +DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S) | ||
162 | +DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d) | ||
163 | + | ||
164 | +#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX) | ||
165 | +#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX) | ||
166 | +#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX) | ||
167 | + | ||
168 | +static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m) | ||
169 | +{ | ||
170 | + return n > m ? n - m : 0; | ||
171 | +} | ||
172 | + | ||
173 | +DO_ZPZZ(sve2_uqsub_zpzz_b, uint8_t, H1, DO_UQSUB_B) | ||
174 | +DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H) | ||
175 | +DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S) | ||
176 | +DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d) | ||
177 | + | ||
178 | +#define DO_SUQADD_B(n, m) \ | ||
179 | + do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX) | ||
180 | +#define DO_SUQADD_H(n, m) \ | ||
181 | + do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX) | ||
182 | +#define DO_SUQADD_S(n, m) \ | ||
183 | + do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX) | ||
184 | + | ||
185 | +static inline int64_t do_suqadd_d(int64_t n, uint64_t m) | ||
186 | +{ | ||
187 | + uint64_t r = n + m; | ||
188 | + | ||
189 | + if (n < 0) { | ||
190 | + /* Note that m - abs(n) cannot underflow. */ | ||
191 | + if (r > INT64_MAX) { | ||
192 | + /* Result is either very large positive or negative. */ | ||
193 | + if (m > -n) { | ||
194 | + /* m > abs(n), so r is a very large positive value. */ | ||
195 | + return INT64_MAX; | ||
196 | + } | ||
197 | + /* Result is negative. */ | ||
198 | + } | ||
199 | + } else { | ||
200 | + /* Both inputs are positive: check for overflow. */ | ||
201 | + if (r < m || r > INT64_MAX) { | ||
202 | + return INT64_MAX; | ||
203 | + } | ||
204 | + } | ||
205 | + return r; | ||
206 | +} | ||
207 | + | ||
208 | +DO_ZPZZ(sve2_suqadd_zpzz_b, uint8_t, H1, DO_SUQADD_B) | ||
209 | +DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H) | ||
210 | +DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S) | ||
211 | +DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d) | ||
212 | + | ||
213 | +#define DO_USQADD_B(n, m) \ | ||
214 | + do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX) | ||
215 | +#define DO_USQADD_H(n, m) \ | ||
216 | + do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX) | ||
217 | +#define DO_USQADD_S(n, m) \ | ||
218 | + do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX) | ||
219 | + | ||
220 | +static inline uint64_t do_usqadd_d(uint64_t n, int64_t m) | ||
221 | +{ | ||
222 | + uint64_t r = n + m; | ||
223 | + | ||
224 | + if (m < 0) { | ||
225 | + return n < -m ? 0 : r; | ||
226 | + } | ||
227 | + return r < n ? UINT64_MAX : r; | ||
228 | +} | ||
229 | + | ||
230 | +DO_ZPZZ(sve2_usqadd_zpzz_b, uint8_t, H1, DO_USQADD_B) | ||
231 | +DO_ZPZZ(sve2_usqadd_zpzz_h, uint16_t, H1_2, DO_USQADD_H) | ||
232 | +DO_ZPZZ(sve2_usqadd_zpzz_s, uint32_t, H1_4, DO_USQADD_S) | ||
233 | +DO_ZPZZ_D(sve2_usqadd_zpzz_d, uint64_t, do_usqadd_d) | ||
234 | + | ||
235 | #undef DO_ZPZZ | ||
236 | #undef DO_ZPZZ_D | ||
237 | |||
238 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) | ||
239 | intptr_t i, oprsz = simd_oprsz(desc); | ||
240 | |||
241 | for (i = 0; i < oprsz; i += sizeof(int8_t)) { | ||
242 | - int r = *(int8_t *)(a + i) + b; | ||
243 | - if (r > INT8_MAX) { | ||
244 | - r = INT8_MAX; | ||
245 | - } else if (r < INT8_MIN) { | ||
246 | - r = INT8_MIN; | ||
247 | - } | ||
248 | - *(int8_t *)(d + i) = r; | ||
249 | + *(int8_t *)(d + i) = DO_SQADD_B(b, *(int8_t *)(a + i)); | ||
250 | } | ||
251 | } | 48 | } |
252 | |||
253 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) | ||
254 | intptr_t i, oprsz = simd_oprsz(desc); | ||
255 | |||
256 | for (i = 0; i < oprsz; i += sizeof(int16_t)) { | ||
257 | - int r = *(int16_t *)(a + i) + b; | ||
258 | - if (r > INT16_MAX) { | ||
259 | - r = INT16_MAX; | ||
260 | - } else if (r < INT16_MIN) { | ||
261 | - r = INT16_MIN; | ||
262 | - } | ||
263 | - *(int16_t *)(d + i) = r; | ||
264 | + *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i)); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) | ||
269 | intptr_t i, oprsz = simd_oprsz(desc); | ||
270 | |||
271 | for (i = 0; i < oprsz; i += sizeof(int32_t)) { | ||
272 | - int64_t r = *(int32_t *)(a + i) + b; | ||
273 | - if (r > INT32_MAX) { | ||
274 | - r = INT32_MAX; | ||
275 | - } else if (r < INT32_MIN) { | ||
276 | - r = INT32_MIN; | ||
277 | - } | ||
278 | - *(int32_t *)(d + i) = r; | ||
279 | + *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i)); | ||
280 | } | ||
281 | } | ||
282 | |||
283 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc) | ||
284 | intptr_t i, oprsz = simd_oprsz(desc); | ||
285 | |||
286 | for (i = 0; i < oprsz; i += sizeof(int64_t)) { | ||
287 | - int64_t ai = *(int64_t *)(a + i); | ||
288 | - int64_t r = ai + b; | ||
289 | - if (((r ^ ai) & ~(ai ^ b)) < 0) { | ||
290 | - /* Signed overflow. */ | ||
291 | - r = (r < 0 ? INT64_MAX : INT64_MIN); | ||
292 | - } | ||
293 | - *(int64_t *)(d + i) = r; | ||
294 | + *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i)); | ||
295 | } | ||
296 | } | ||
297 | |||
298 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) | ||
299 | intptr_t i, oprsz = simd_oprsz(desc); | ||
300 | |||
301 | for (i = 0; i < oprsz; i += sizeof(uint8_t)) { | ||
302 | - int r = *(uint8_t *)(a + i) + b; | ||
303 | - if (r > UINT8_MAX) { | ||
304 | - r = UINT8_MAX; | ||
305 | - } else if (r < 0) { | ||
306 | - r = 0; | ||
307 | - } | ||
308 | - *(uint8_t *)(d + i) = r; | ||
309 | + *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i)); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) | ||
314 | intptr_t i, oprsz = simd_oprsz(desc); | ||
315 | |||
316 | for (i = 0; i < oprsz; i += sizeof(uint16_t)) { | ||
317 | - int r = *(uint16_t *)(a + i) + b; | ||
318 | - if (r > UINT16_MAX) { | ||
319 | - r = UINT16_MAX; | ||
320 | - } else if (r < 0) { | ||
321 | - r = 0; | ||
322 | - } | ||
323 | - *(uint16_t *)(d + i) = r; | ||
324 | + *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i)); | ||
325 | } | ||
326 | } | ||
327 | |||
328 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) | ||
329 | intptr_t i, oprsz = simd_oprsz(desc); | ||
330 | |||
331 | for (i = 0; i < oprsz; i += sizeof(uint32_t)) { | ||
332 | - int64_t r = *(uint32_t *)(a + i) + b; | ||
333 | - if (r > UINT32_MAX) { | ||
334 | - r = UINT32_MAX; | ||
335 | - } else if (r < 0) { | ||
336 | - r = 0; | ||
337 | - } | ||
338 | - *(uint32_t *)(d + i) = r; | ||
339 | + *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i)); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc) | ||
344 | intptr_t i, oprsz = simd_oprsz(desc); | ||
345 | |||
346 | for (i = 0; i < oprsz; i += sizeof(uint64_t)) { | ||
347 | - uint64_t r = *(uint64_t *)(a + i) + b; | ||
348 | - if (r < b) { | ||
349 | - r = UINT64_MAX; | ||
350 | - } | ||
351 | - *(uint64_t *)(d + i) = r; | ||
352 | + *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i)); | ||
353 | } | ||
354 | } | ||
355 | |||
356 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc) | ||
357 | intptr_t i, oprsz = simd_oprsz(desc); | ||
358 | |||
359 | for (i = 0; i < oprsz; i += sizeof(uint64_t)) { | ||
360 | - uint64_t ai = *(uint64_t *)(a + i); | ||
361 | - *(uint64_t *)(d + i) = (ai < b ? 0 : ai - b); | ||
362 | + *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b); | ||
363 | } | ||
364 | } | ||
365 | |||
366 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
367 | index XXXXXXX..XXXXXXX 100644 | ||
368 | --- a/target/arm/translate-sve.c | ||
369 | +++ b/target/arm/translate-sve.c | ||
370 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SMAXP, smaxp) | ||
371 | DO_SVE2_ZPZZ(UMAXP, umaxp) | ||
372 | DO_SVE2_ZPZZ(SMINP, sminp) | ||
373 | DO_SVE2_ZPZZ(UMINP, uminp) | ||
374 | + | ||
375 | +DO_SVE2_ZPZZ(SQADD_zpzz, sqadd) | ||
376 | +DO_SVE2_ZPZZ(UQADD_zpzz, uqadd) | ||
377 | +DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub) | ||
378 | +DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub) | ||
379 | +DO_SVE2_ZPZZ(SUQADD, suqadd) | ||
380 | +DO_SVE2_ZPZZ(USQADD, usqadd) | ||
381 | -- | 49 | -- |
382 | 2.20.1 | 50 | 2.25.1 |
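
Two asides here. First, the branch-free overflow test in do_sqadd_d()
deserves a note: (r ^ n) has the sign bit set when the result's sign
differs from n, and ~(n ^ m) when the operands agree in sign; signed
overflow is exactly the conjunction of the two. A standalone sketch,
checked at the boundaries (not QEMU code; the wrapping add goes through
uint64_t so it is well defined even without -fwrapv):

#include <assert.h>
#include <stdint.h>

static int64_t sqadd64(int64_t n, int64_t m)
{
    int64_t r = (int64_t)((uint64_t)n + (uint64_t)m);   /* wrapping add */
    if (((r ^ n) & ~(n ^ m)) < 0) {
        /* Same-sign inputs, different-sign result: overflow. */
        return r < 0 ? INT64_MAX : INT64_MIN;
    }
    return r;
}

int main(void)
{
    assert(sqadd64(INT64_MAX, 1) == INT64_MAX);
    assert(sqadd64(INT64_MIN, -1) == INT64_MIN);
    assert(sqadd64(INT64_MAX, INT64_MIN) == -1);    /* no overflow */
    return 0;
}

Second, on the right-hand side: the new HWCAP2 bits are what a guest
binary actually queries, and the values mirror the kernel's uapi
definitions. From userspace the check is the usual getauxval(3) dance;
a sketch (HWCAP2_SME is defined locally in case the toolchain headers
predate SME):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SME
#define HWCAP2_SME (1 << 23)    /* same value as ARM_HWCAP2_A64_SME */
#endif

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);
    printf("SME %ssupported\n", (hwcap2 & HWCAP2_SME) ? "" : "not ");
    return 0;
}
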
Deleted patch | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-11-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 24 ++++++++++++++++++++ | ||
9 | target/arm/sve.decode | 19 ++++++++++++++++ | ||
10 | target/arm/sve_helper.c | 43 +++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 46 ++++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 132 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
19 | DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
21 | |||
22 | +DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
23 | +DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_4(sve2_saddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | + | ||
26 | +DEF_HELPER_FLAGS_4(sve2_ssubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_4(sve2_ssubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_4(sve2_ssubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_4(sve2_sabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_sabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_4(sve2_sabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_4(sve2_uaddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_4(sve2_uaddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_4(sve2_uaddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
37 | + | ||
38 | +DEF_HELPER_FLAGS_4(sve2_usubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
39 | +DEF_HELPER_FLAGS_4(sve2_usubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_4(sve2_usubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
41 | + | ||
42 | +DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
43 | +DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
45 | + | ||
46 | DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
47 | DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
48 | DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
49 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/sve.decode | ||
52 | +++ b/target/arm/sve.decode | ||
53 | @@ -XXX,XX +XXX,XX @@ SUQADD 01000100 .. 011 100 100 ... ..... ..... @rdn_pg_rm | ||
54 | USQADD 01000100 .. 011 101 100 ... ..... ..... @rdn_pg_rm | ||
55 | SQSUB_zpzz 01000100 .. 011 110 100 ... ..... ..... @rdm_pg_rn # SQSUBR | ||
56 | UQSUB_zpzz 01000100 .. 011 111 100 ... ..... ..... @rdm_pg_rn # UQSUBR | ||
57 | + | ||
58 | +#### SVE2 Widening Integer Arithmetic | ||
59 | + | ||
60 | +## SVE2 integer add/subtract long | ||
61 | + | ||
62 | +SADDLB 01000101 .. 0 ..... 00 0000 ..... ..... @rd_rn_rm | ||
63 | +SADDLT 01000101 .. 0 ..... 00 0001 ..... ..... @rd_rn_rm | ||
64 | +UADDLB 01000101 .. 0 ..... 00 0010 ..... ..... @rd_rn_rm | ||
65 | +UADDLT 01000101 .. 0 ..... 00 0011 ..... ..... @rd_rn_rm | ||
66 | + | ||
67 | +SSUBLB 01000101 .. 0 ..... 00 0100 ..... ..... @rd_rn_rm | ||
68 | +SSUBLT 01000101 .. 0 ..... 00 0101 ..... ..... @rd_rn_rm | ||
69 | +USUBLB 01000101 .. 0 ..... 00 0110 ..... ..... @rd_rn_rm | ||
70 | +USUBLT 01000101 .. 0 ..... 00 0111 ..... ..... @rd_rn_rm | ||
71 | + | ||
72 | +SABDLB 01000101 .. 0 ..... 00 1100 ..... ..... @rd_rn_rm | ||
73 | +SABDLT 01000101 .. 0 ..... 00 1101 ..... ..... @rd_rn_rm | ||
74 | +UABDLB 01000101 .. 0 ..... 00 1110 ..... ..... @rd_rn_rm | ||
75 | +UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm | ||
76 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/target/arm/sve_helper.c | ||
79 | +++ b/target/arm/sve_helper.c | ||
80 | @@ -XXX,XX +XXX,XX @@ DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL) | ||
81 | #undef DO_ZPZ | ||
82 | #undef DO_ZPZ_D | ||
83 | |||
84 | +/* | ||
85 | + * Three-operand expander, unpredicated, in which the two inputs are | ||
86 | + * selected from the top or bottom half of the wide column. | ||
87 | + */ | ||
88 | +#define DO_ZZZ_TB(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
89 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
90 | +{ \ | ||
91 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
92 | + int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \ | ||
93 | + int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \ | ||
94 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
95 | + TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \ | ||
96 | + TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \ | ||
97 | + *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \ | ||
98 | + } \ | ||
99 | +} | ||
100 | + | ||
101 | +DO_ZZZ_TB(sve2_saddl_h, int16_t, int8_t, H1_2, H1, DO_ADD) | ||
102 | +DO_ZZZ_TB(sve2_saddl_s, int32_t, int16_t, H1_4, H1_2, DO_ADD) | ||
103 | +DO_ZZZ_TB(sve2_saddl_d, int64_t, int32_t, , H1_4, DO_ADD) | ||
104 | + | ||
105 | +DO_ZZZ_TB(sve2_ssubl_h, int16_t, int8_t, H1_2, H1, DO_SUB) | ||
106 | +DO_ZZZ_TB(sve2_ssubl_s, int32_t, int16_t, H1_4, H1_2, DO_SUB) | ||
107 | +DO_ZZZ_TB(sve2_ssubl_d, int64_t, int32_t, , H1_4, DO_SUB) | ||
108 | + | ||
109 | +DO_ZZZ_TB(sve2_sabdl_h, int16_t, int8_t, H1_2, H1, DO_ABD) | ||
110 | +DO_ZZZ_TB(sve2_sabdl_s, int32_t, int16_t, H1_4, H1_2, DO_ABD) | ||
111 | +DO_ZZZ_TB(sve2_sabdl_d, int64_t, int32_t, , H1_4, DO_ABD) | ||
112 | + | ||
113 | +DO_ZZZ_TB(sve2_uaddl_h, uint16_t, uint8_t, H1_2, H1, DO_ADD) | ||
114 | +DO_ZZZ_TB(sve2_uaddl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD) | ||
115 | +DO_ZZZ_TB(sve2_uaddl_d, uint64_t, uint32_t, , H1_4, DO_ADD) | ||
116 | + | ||
117 | +DO_ZZZ_TB(sve2_usubl_h, uint16_t, uint8_t, H1_2, H1, DO_SUB) | ||
118 | +DO_ZZZ_TB(sve2_usubl_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB) | ||
119 | +DO_ZZZ_TB(sve2_usubl_d, uint64_t, uint32_t, , H1_4, DO_SUB) | ||
120 | + | ||
121 | +DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD) | ||
122 | +DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD) | ||
123 | +DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
124 | + | ||
125 | +#undef DO_ZZZ_TB | ||
126 | + | ||
127 | /* Two-operand reduction expander, controlled by a predicate. | ||
128 | * The difference between TYPERED and TYPERET has to do with | ||
129 | * sign-extension. E.g. for SMAX, TYPERED must be signed, | ||
130 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/target/arm/translate-sve.c | ||
133 | +++ b/target/arm/translate-sve.c | ||
134 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub) | ||
135 | DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub) | ||
136 | DO_SVE2_ZPZZ(SUQADD, suqadd) | ||
137 | DO_SVE2_ZPZZ(USQADD, usqadd) | ||
138 | + | ||
139 | +/* | ||
140 | + * SVE2 Widening Integer Arithmetic | ||
141 | + */ | ||
142 | + | ||
143 | +static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a, | ||
144 | + gen_helper_gvec_3 *fn, int data) | ||
145 | +{ | ||
146 | + if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) { | ||
147 | + return false; | ||
148 | + } | ||
149 | + if (sve_access_check(s)) { | ||
150 | + unsigned vsz = vec_full_reg_size(s); | ||
151 | + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd), | ||
152 | + vec_full_reg_offset(s, a->rn), | ||
153 | + vec_full_reg_offset(s, a->rm), | ||
154 | + vsz, vsz, data, fn); | ||
155 | + } | ||
156 | + return true; | ||
157 | +} | ||
158 | + | ||
159 | +#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \ | ||
160 | +static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
161 | +{ \ | ||
162 | + static gen_helper_gvec_3 * const fns[4] = { \ | ||
163 | + NULL, gen_helper_sve2_##name##_h, \ | ||
164 | + gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ | ||
165 | + }; \ | ||
166 | + return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \ | ||
167 | +} | ||
168 | + | ||
169 | +DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false) | ||
170 | +DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false) | ||
171 | +DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false) | ||
172 | + | ||
173 | +DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false) | ||
174 | +DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false) | ||
175 | +DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false) | ||
176 | + | ||
177 | +DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true) | ||
178 | +DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true) | ||
179 | +DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true) | ||
180 | + | ||
181 | +DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true) | ||
182 | +DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true) | ||
183 | +DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true) | ||
184 | -- | ||
185 | 2.20.1 | ||
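
The SEL1/SEL2 encoding in DO_SVE2_ZZZ_TB is easy to miss: each selector
is multiplied by sizeof(TYPEN) in the helper and used as a byte offset,
so "bottom" means the even narrow element of each wide column and "top"
the odd one. A scalar model of SADDLB/SADDLT on 8-bit inputs
(illustrative only, not QEMU code; little-endian layout, with the
host-endian H macros omitted):

#include <assert.h>
#include <stdint.h>

/* sel = 0 picks the even (bottom) byte of each 16-bit column,
 * sel = 1 the odd (top) byte: the selector is just an offset. */
static void saddl_h(int16_t *d, const int8_t *n, const int8_t *m,
                    int seln, int selm)
{
    for (int i = 0; i < 4; i++) {               /* 4 wide elements */
        d[i] = (int16_t)n[2 * i + seln] + m[2 * i + selm];
    }
}

int main(void)
{
    int8_t n[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int8_t m[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    int16_t d[4];

    saddl_h(d, n, m, 0, 0);                     /* SADDLB: bottom, bottom */
    assert(d[0] == 11 && d[3] == 77);
    saddl_h(d, n, m, 1, 1);                     /* SADDLT: top, top */
    assert(d[0] == 22 && d[3] == 88);
    return 0;
}
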
Deleted patch | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-12-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/sve.decode | 6 ++++++ | ||
9 | target/arm/translate-sve.c | 4 ++++ | ||
10 | 2 files changed, 10 insertions(+) | ||
11 | |||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/arm/sve.decode | ||
15 | +++ b/target/arm/sve.decode | ||
16 | @@ -XXX,XX +XXX,XX @@ SABDLB 01000101 .. 0 ..... 00 1100 ..... ..... @rd_rn_rm | ||
17 | SABDLT 01000101 .. 0 ..... 00 1101 ..... ..... @rd_rn_rm | ||
18 | UABDLB 01000101 .. 0 ..... 00 1110 ..... ..... @rd_rn_rm | ||
19 | UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm | ||
20 | + | ||
21 | +## SVE2 integer add/subtract interleaved long | ||
22 | + | ||
23 | +SADDLBT 01000101 .. 0 ..... 1000 00 ..... ..... @rd_rn_rm | ||
24 | +SSUBLBT 01000101 .. 0 ..... 1000 10 ..... ..... @rd_rn_rm | ||
25 | +SSUBLTB 01000101 .. 0 ..... 1000 11 ..... ..... @rd_rn_rm | ||
26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/target/arm/translate-sve.c | ||
29 | +++ b/target/arm/translate-sve.c | ||
30 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true) | ||
31 | DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true) | ||
32 | DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true) | ||
33 | DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true) | ||
34 | + | ||
35 | +DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true) | ||
36 | +DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true) | ||
37 | +DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false) | ||
38 | -- | ||
39 | 2.20.1 | ||
40 | |||
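The interleaved-long forms reuse the same expander with SEL1 != SEL2, so the two sources are read from opposite halves of each pair. A hypothetical scalar sketch of SADDLBT (bottom elements of Zn, top elements of Zm; saddlbt_model is an invented name):

    #include <stdint.h>
    #include <stddef.h>

    /* SADDLBT model: bottom (even) half of n plus top (odd) half of m. */
    static void saddlbt_model(int32_t *d, const int16_t *n, const int16_t *m,
                              size_t pairs)
    {
        for (size_t i = 0; i < pairs; i++) {
            d[i] = (int32_t)n[2 * i] + (int32_t)m[2 * i + 1];
        }
    }
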
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-13-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 16 ++++++++++++++++ | ||
9 | target/arm/sve.decode | 12 ++++++++++++ | ||
10 | target/arm/sve_helper.c | 30 ++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 20 ++++++++++++++++++++ | ||
12 | 4 files changed, 78 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
19 | DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
21 | |||
22 | +DEF_HELPER_FLAGS_4(sve2_saddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
23 | +DEF_HELPER_FLAGS_4(sve2_saddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_4(sve2_saddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | + | ||
26 | +DEF_HELPER_FLAGS_4(sve2_ssubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_4(sve2_ssubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_4(sve2_ssubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_4(sve2_uaddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_uaddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_4(sve2_uaddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | + | ||
34 | +DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
37 | + | ||
38 | DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
39 | DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
40 | DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) | ||
41 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/target/arm/sve.decode | ||
44 | +++ b/target/arm/sve.decode | ||
45 | @@ -XXX,XX +XXX,XX @@ UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm | ||
46 | SADDLBT 01000101 .. 0 ..... 1000 00 ..... ..... @rd_rn_rm | ||
47 | SSUBLBT 01000101 .. 0 ..... 1000 10 ..... ..... @rd_rn_rm | ||
48 | SSUBLTB 01000101 .. 0 ..... 1000 11 ..... ..... @rd_rn_rm | ||
49 | + | ||
50 | +## SVE2 integer add/subtract wide | ||
51 | + | ||
52 | +SADDWB 01000101 .. 0 ..... 010 000 ..... ..... @rd_rn_rm | ||
53 | +SADDWT 01000101 .. 0 ..... 010 001 ..... ..... @rd_rn_rm | ||
54 | +UADDWB 01000101 .. 0 ..... 010 010 ..... ..... @rd_rn_rm | ||
55 | +UADDWT 01000101 .. 0 ..... 010 011 ..... ..... @rd_rn_rm | ||
56 | + | ||
57 | +SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm | ||
58 | +SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm | ||
59 | +USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm | ||
60 | +USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm | ||
61 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/target/arm/sve_helper.c | ||
64 | +++ b/target/arm/sve_helper.c | ||
65 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
66 | |||
67 | #undef DO_ZZZ_TB | ||
68 | |||
69 | +#define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
70 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
71 | +{ \ | ||
72 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
73 | + int sel2 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \ | ||
74 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
75 | + TYPEW nn = *(TYPEW *)(vn + HW(i)); \ | ||
76 | + TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \ | ||
77 | + *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \ | ||
78 | + } \ | ||
79 | +} | ||
80 | + | ||
81 | +DO_ZZZ_WTB(sve2_saddw_h, int16_t, int8_t, H1_2, H1, DO_ADD) | ||
82 | +DO_ZZZ_WTB(sve2_saddw_s, int32_t, int16_t, H1_4, H1_2, DO_ADD) | ||
83 | +DO_ZZZ_WTB(sve2_saddw_d, int64_t, int32_t, , H1_4, DO_ADD) | ||
84 | + | ||
85 | +DO_ZZZ_WTB(sve2_ssubw_h, int16_t, int8_t, H1_2, H1, DO_SUB) | ||
86 | +DO_ZZZ_WTB(sve2_ssubw_s, int32_t, int16_t, H1_4, H1_2, DO_SUB) | ||
87 | +DO_ZZZ_WTB(sve2_ssubw_d, int64_t, int32_t, , H1_4, DO_SUB) | ||
88 | + | ||
89 | +DO_ZZZ_WTB(sve2_uaddw_h, uint16_t, uint8_t, H1_2, H1, DO_ADD) | ||
90 | +DO_ZZZ_WTB(sve2_uaddw_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD) | ||
91 | +DO_ZZZ_WTB(sve2_uaddw_d, uint64_t, uint32_t, , H1_4, DO_ADD) | ||
92 | + | ||
93 | +DO_ZZZ_WTB(sve2_usubw_h, uint16_t, uint8_t, H1_2, H1, DO_SUB) | ||
94 | +DO_ZZZ_WTB(sve2_usubw_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB) | ||
95 | +DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB) | ||
96 | + | ||
97 | +#undef DO_ZZZ_WTB | ||
98 | + | ||
99 | /* Two-operand reduction expander, controlled by a predicate. | ||
100 | * The difference between TYPERED and TYPERET has to do with | ||
101 | * sign-extension. E.g. for SMAX, TYPERED must be signed, | ||
102 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/target/arm/translate-sve.c | ||
105 | +++ b/target/arm/translate-sve.c | ||
106 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true) | ||
107 | DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true) | ||
108 | DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true) | ||
109 | DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false) | ||
110 | + | ||
111 | +#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \ | ||
112 | +static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
113 | +{ \ | ||
114 | + static gen_helper_gvec_3 * const fns[4] = { \ | ||
115 | + NULL, gen_helper_sve2_##name##_h, \ | ||
116 | + gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ | ||
117 | + }; \ | ||
118 | + return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \ | ||
119 | +} | ||
120 | + | ||
121 | +DO_SVE2_ZZZ_WTB(SADDWB, saddw, false) | ||
122 | +DO_SVE2_ZZZ_WTB(SADDWT, saddw, true) | ||
123 | +DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false) | ||
124 | +DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true) | ||
125 | + | ||
126 | +DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false) | ||
127 | +DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true) | ||
128 | +DO_SVE2_ZZZ_WTB(USUBWB, usubw, false) | ||
129 | +DO_SVE2_ZZZ_WTB(USUBWT, usubw, true) | ||
130 | -- | ||
131 | 2.20.1 | ||
132 | |||
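In DO_ZZZ_WTB the first operand is already wide, so only one selector bit is needed: sel2 picks which narrow half-element of Zm is widened and combined with each wide element of Zn. A hypothetical scalar sketch of SADDWB/SADDWT on 32-bit plus 16-bit lanes (saddw_model is an invented name):

    #include <stdint.h>
    #include <stddef.h>

    /* sel2 = 0 models SADDWB, sel2 = 1 models SADDWT. */
    static void saddw_model(int32_t *d, const int32_t *n, const int16_t *m,
                            size_t lanes, int sel2)
    {
        for (size_t i = 0; i < lanes; i++) {
            d[i] = n[i] + (int32_t)m[2 * i + sel2];
        }
    }
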
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-15-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/cpu.h | 10 ++++++++++ | ||
9 | target/arm/helper-sve.h | 1 + | ||
10 | target/arm/sve.decode | 2 ++ | ||
11 | target/arm/translate-sve.c | 22 ++++++++++++++++++++++ | ||
12 | target/arm/vec_helper.c | 24 ++++++++++++++++++++++++ | ||
13 | 5 files changed, 59 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/cpu.h | ||
18 | +++ b/target/arm/cpu.h | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) | ||
20 | return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; | ||
21 | } | ||
22 | |||
23 | +static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id) | ||
24 | +{ | ||
25 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0; | ||
26 | +} | ||
27 | + | ||
28 | +static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) | ||
29 | +{ | ||
30 | + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; | ||
31 | +} | ||
32 | + | ||
33 | /* | ||
34 | * Feature tests for "does this exist in either 32-bit or 64-bit?" | ||
35 | */ | ||
36 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/helper-sve.h | ||
39 | +++ b/target/arm/helper-sve.h | ||
40 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
41 | DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
42 | |||
43 | DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
45 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/target/arm/sve.decode | ||
48 | +++ b/target/arm/sve.decode | ||
49 | @@ -XXX,XX +XXX,XX @@ USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm | ||
50 | |||
51 | SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm | ||
52 | SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm | ||
53 | +PMULLB 01000101 .. 0 ..... 011 010 ..... ..... @rd_rn_rm | ||
54 | +PMULLT 01000101 .. 0 ..... 011 011 ..... ..... @rd_rn_rm | ||
55 | SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm | ||
56 | SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm | ||
57 | UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm | ||
58 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/target/arm/translate-sve.c | ||
61 | +++ b/target/arm/translate-sve.c | ||
62 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true) | ||
63 | DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false) | ||
64 | DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true) | ||
65 | |||
66 | +static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) | ||
67 | +{ | ||
68 | + static gen_helper_gvec_3 * const fns[4] = { | ||
69 | + gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, | ||
70 | + NULL, gen_helper_sve2_pmull_d, | ||
71 | + }; | ||
72 | + if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) { | ||
73 | + return false; | ||
74 | + } | ||
75 | + return do_sve2_zzw_ool(s, a, fns[a->esz], sel); | ||
76 | +} | ||
77 | + | ||
78 | +static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a) | ||
79 | +{ | ||
80 | + return do_trans_pmull(s, a, false); | ||
81 | +} | ||
82 | + | ||
83 | +static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a) | ||
84 | +{ | ||
85 | + return do_trans_pmull(s, a, true); | ||
86 | +} | ||
87 | + | ||
88 | #define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \ | ||
89 | static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
90 | { \ | ||
91 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
92 | index XXXXXXX..XXXXXXX 100644 | ||
93 | --- a/target/arm/vec_helper.c | ||
94 | +++ b/target/arm/vec_helper.c | ||
95 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
96 | d[i] = pmull_h(nn, mm); | ||
97 | } | ||
98 | } | ||
99 | + | ||
100 | +static uint64_t pmull_d(uint64_t op1, uint64_t op2) | ||
101 | +{ | ||
102 | + uint64_t result = 0; | ||
103 | + int i; | ||
104 | + | ||
105 | + for (i = 0; i < 32; ++i) { | ||
106 | + uint64_t mask = -((op1 >> i) & 1); | ||
107 | + result ^= (op2 << i) & mask; | ||
108 | + } | ||
109 | + return result; | ||
110 | +} | ||
111 | + | ||
112 | +void HELPER(sve2_pmull_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
113 | +{ | ||
114 | + intptr_t sel = H4(simd_data(desc)); | ||
115 | + intptr_t i, opr_sz = simd_oprsz(desc); | ||
116 | + uint32_t *n = vn, *m = vm; | ||
117 | + uint64_t *d = vd; | ||
118 | + | ||
119 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
120 | + d[i] = pmull_d(n[2 * i + sel], m[2 * i + sel]); | ||
121 | + } | ||
122 | +} | ||
123 | #endif | ||
124 | |||
125 | #define DO_CMP0(NAME, TYPE, OP) \ | ||
126 | -- | ||
127 | 2.20.1 | ||
128 | |||
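pmull_d above is a carry-less (GF(2) polynomial) multiply: each set bit of op1 contributes op2 shifted by that bit position, combined with XOR rather than addition, so a 32-bit by 32-bit product fills a 64-bit lane with no carries between bit positions. A small standalone demonstration of the same shift-and-xor idea (illustrative only; clmul32 is an invented name):

    #include <stdint.h>
    #include <stdio.h>

    /* Carry-less multiply of two 32-bit values, mirroring the
     * mask-and-xor loop in pmull_d above. */
    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;
        for (int i = 0; i < 32; i++) {
            if ((a >> i) & 1) {
                r ^= (uint64_t)b << i;
            }
        }
        return r;
    }

    int main(void)
    {
        /* Over GF(2), (x + 1) squared is x^2 + 1, so 3 clmul 3 = 5. */
        printf("%llx\n", (unsigned long long)clmul32(3, 3)); /* prints 5 */
        return 0;
    }
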
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-17-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 5 +++++ | ||
9 | target/arm/sve.decode | 5 +++++ | ||
10 | target/arm/sve_helper.c | 20 ++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 19 +++++++++++++++++++ | ||
12 | 4 files changed, 49 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
19 | DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
21 | DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/sve.decode | ||
30 | +++ b/target/arm/sve.decode | ||
31 | @@ -XXX,XX +XXX,XX @@ SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl | ||
32 | SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl | ||
33 | USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl | ||
34 | USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl | ||
35 | + | ||
36 | +## SVE2 bitwise exclusive-or interleaved | ||
37 | + | ||
38 | +EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm | ||
39 | +EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm | ||
40 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/target/arm/sve_helper.c | ||
43 | +++ b/target/arm/sve_helper.c | ||
44 | @@ -XXX,XX +XXX,XX @@ DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB) | ||
45 | |||
46 | #undef DO_ZZZ_WTB | ||
47 | |||
48 | +#define DO_ZZZ_NTB(NAME, TYPE, H, OP) \ | ||
49 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
50 | +{ \ | ||
51 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
52 | + intptr_t sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPE); \ | ||
53 | + intptr_t sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPE); \ | ||
54 | + for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \ | ||
55 | + TYPE nn = *(TYPE *)(vn + H(i + sel1)); \ | ||
56 | + TYPE mm = *(TYPE *)(vm + H(i + sel2)); \ | ||
57 | + *(TYPE *)(vd + H(i + sel1)) = OP(nn, mm); \ | ||
58 | + } \ | ||
59 | +} | ||
60 | + | ||
61 | +DO_ZZZ_NTB(sve2_eoril_b, uint8_t, H1, DO_EOR) | ||
62 | +DO_ZZZ_NTB(sve2_eoril_h, uint16_t, H1_2, DO_EOR) | ||
63 | +DO_ZZZ_NTB(sve2_eoril_s, uint32_t, H1_4, DO_EOR) | ||
64 | +DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR) | ||
65 | + | ||
66 | +#undef DO_ZZZ_NTB | ||
67 | + | ||
68 | #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \ | ||
69 | void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
70 | { \ | ||
71 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
72 | index XXXXXXX..XXXXXXX 100644 | ||
73 | --- a/target/arm/translate-sve.c | ||
74 | +++ b/target/arm/translate-sve.c | ||
75 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true) | ||
76 | DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false) | ||
77 | DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true) | ||
78 | |||
79 | +static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1) | ||
80 | +{ | ||
81 | + static gen_helper_gvec_3 * const fns[4] = { | ||
82 | + gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h, | ||
83 | + gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d, | ||
84 | + }; | ||
85 | + return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1); | ||
86 | +} | ||
87 | + | ||
88 | +static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a) | ||
89 | +{ | ||
90 | + return do_eor_tb(s, a, false); | ||
91 | +} | ||
92 | + | ||
93 | +static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a) | ||
94 | +{ | ||
95 | + return do_eor_tb(s, a, true); | ||
96 | +} | ||
97 | + | ||
98 | static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) | ||
99 | { | ||
100 | static gen_helper_gvec_3 * const fns[4] = { | ||
101 | -- | ||
102 | 2.20.1 | ||
103 | |||
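The NTB expander steps over element pairs and writes only the half selected by sel1, so EORBT updates the even (bottom) elements of Zd with n_even ^ m_odd and leaves the odd elements of Zd untouched, while EORTB is the mirror image. A hypothetical scalar sketch on 16-bit lanes (eorbt_model is an invented name):

    #include <stdint.h>
    #include <stddef.h>

    /* EORBT model: even lanes of d are replaced with n_even ^ m_odd;
     * odd lanes of d keep their previous contents. */
    static void eorbt_model(uint16_t *d, const uint16_t *n, const uint16_t *m,
                            size_t pairs)
    {
        for (size_t i = 0; i < pairs; i++) {
            d[2 * i] = n[2 * i] ^ m[2 * i + 1];
        }
    }
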
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-19-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 10 +++++++++ | ||
9 | target/arm/sve.decode | 9 ++++++++ | ||
10 | target/arm/sve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 31 ++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 92 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
19 | DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
21 | DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_4(sve2_cadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_4(sve2_cadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_4(sve2_cadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_cadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | + | ||
28 | +DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/target/arm/sve.decode | ||
35 | +++ b/target/arm/sve.decode | ||
36 | @@ -XXX,XX +XXX,XX @@ EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm | ||
37 | BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm | ||
38 | BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm | ||
39 | BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm | ||
40 | + | ||
41 | +#### SVE2 Accumulate | ||
42 | + | ||
43 | +## SVE2 complex integer add | ||
44 | + | ||
45 | +CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm | ||
46 | +CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm | ||
47 | +SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm | ||
48 | +SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm | ||
49 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/sve_helper.c | ||
52 | +++ b/target/arm/sve_helper.c | ||
53 | @@ -XXX,XX +XXX,XX @@ DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup) | ||
54 | |||
55 | #undef DO_BITPERM | ||
56 | |||
57 | +#define DO_CADD(NAME, TYPE, H, ADD_OP, SUB_OP) \ | ||
58 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
59 | +{ \ | ||
60 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
61 | + int sub_r = simd_data(desc); \ | ||
62 | + if (sub_r) { \ | ||
63 | + for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \ | ||
64 | + TYPE acc_r = *(TYPE *)(vn + H(i)); \ | ||
65 | + TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \ | ||
66 | + TYPE el2_r = *(TYPE *)(vm + H(i)); \ | ||
67 | + TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \ | ||
68 | + acc_r = ADD_OP(acc_r, el2_i); \ | ||
69 | + acc_i = SUB_OP(acc_i, el2_r); \ | ||
70 | + *(TYPE *)(vd + H(i)) = acc_r; \ | ||
71 | + *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \ | ||
72 | + } \ | ||
73 | + } else { \ | ||
74 | + for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \ | ||
75 | + TYPE acc_r = *(TYPE *)(vn + H(i)); \ | ||
76 | + TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \ | ||
77 | + TYPE el2_r = *(TYPE *)(vm + H(i)); \ | ||
78 | + TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \ | ||
79 | + acc_r = SUB_OP(acc_r, el2_i); \ | ||
80 | + acc_i = ADD_OP(acc_i, el2_r); \ | ||
81 | + *(TYPE *)(vd + H(i)) = acc_r; \ | ||
82 | + *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \ | ||
83 | + } \ | ||
84 | + } \ | ||
85 | +} | ||
86 | + | ||
87 | +DO_CADD(sve2_cadd_b, int8_t, H1, DO_ADD, DO_SUB) | ||
88 | +DO_CADD(sve2_cadd_h, int16_t, H1_2, DO_ADD, DO_SUB) | ||
89 | +DO_CADD(sve2_cadd_s, int32_t, H1_4, DO_ADD, DO_SUB) | ||
90 | +DO_CADD(sve2_cadd_d, int64_t, , DO_ADD, DO_SUB) | ||
91 | + | ||
92 | +DO_CADD(sve2_sqcadd_b, int8_t, H1, DO_SQADD_B, DO_SQSUB_B) | ||
93 | +DO_CADD(sve2_sqcadd_h, int16_t, H1_2, DO_SQADD_H, DO_SQSUB_H) | ||
94 | +DO_CADD(sve2_sqcadd_s, int32_t, H1_4, DO_SQADD_S, DO_SQSUB_S) | ||
95 | +DO_CADD(sve2_sqcadd_d, int64_t, , do_sqadd_d, do_sqsub_d) | ||
96 | + | ||
97 | +#undef DO_CADD | ||
98 | + | ||
99 | #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \ | ||
100 | void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
101 | { \ | ||
102 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/target/arm/translate-sve.c | ||
105 | +++ b/target/arm/translate-sve.c | ||
106 | @@ -XXX,XX +XXX,XX @@ static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a) | ||
107 | } | ||
108 | return do_sve2_zzw_ool(s, a, fns[a->esz], 0); | ||
109 | } | ||
110 | + | ||
111 | +static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot) | ||
112 | +{ | ||
113 | + static gen_helper_gvec_3 * const fns[2][4] = { | ||
114 | + { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, | ||
115 | + gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d }, | ||
116 | + { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h, | ||
117 | + gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d }, | ||
118 | + }; | ||
119 | + return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot); | ||
120 | +} | ||
121 | + | ||
122 | +static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a) | ||
123 | +{ | ||
124 | + return do_cadd(s, a, false, false); | ||
125 | +} | ||
126 | + | ||
127 | +static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a) | ||
128 | +{ | ||
129 | + return do_cadd(s, a, false, true); | ||
130 | +} | ||
131 | + | ||
132 | +static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a) | ||
133 | +{ | ||
134 | + return do_cadd(s, a, true, false); | ||
135 | +} | ||
136 | + | ||
137 | +static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a) | ||
138 | +{ | ||
139 | + return do_cadd(s, a, true, true); | ||
140 | +} | ||
141 | -- | ||
142 | 2.20.1 | ||
143 | |||
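Rotating a complex value (r, i) by 90 degrees gives (-i, r), which is why the sub_r branch of DO_CADD swaps which half receives the add and which the subtract. A hypothetical scalar sketch of the wrapping CADD with rotate 90 on int16 real/imaginary pairs (SQCADD is the same dataflow with saturating add and subtract; cadd_rot90_model is an invented name):

    #include <stdint.h>
    #include <stddef.h>

    /* Each (real, imag) pair of m is rotated by +90 degrees,
     * (r, i) -> (-i, r), then added to the matching pair of n. */
    static void cadd_rot90_model(int16_t *d, const int16_t *n, const int16_t *m,
                                 size_t pairs)
    {
        for (size_t i = 0; i < pairs; i++) {
            d[2 * i + 0] = n[2 * i + 0] - m[2 * i + 1]; /* real */
            d[2 * i + 1] = n[2 * i + 1] + m[2 * i + 0]; /* imag */
        }
    }
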
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-22-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/sve.decode | 8 ++++++++ | ||
9 | target/arm/translate-sve.c | 34 ++++++++++++++++++++++++++++++++++ | ||
10 | 2 files changed, 42 insertions(+) | ||
11 | |||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/arm/sve.decode | ||
15 | +++ b/target/arm/sve.decode | ||
16 | @@ -XXX,XX +XXX,XX @@ UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm | ||
17 | # ADC and SBC decoded via size in helper dispatch. | ||
18 | ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm | ||
19 | ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm | ||
20 | + | ||
21 | +## SVE2 bitwise shift right and accumulate | ||
22 | + | ||
23 | +# TODO: Use @rda and %reg_movprfx here. | ||
24 | +SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr | ||
25 | +USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr | ||
26 | +SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr | ||
27 | +URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr | ||
28 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/target/arm/translate-sve.c | ||
31 | +++ b/target/arm/translate-sve.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a) | ||
33 | { | ||
34 | return do_adcl(s, a, true); | ||
35 | } | ||
36 | + | ||
37 | +static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn) | ||
38 | +{ | ||
39 | + if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { | ||
40 | + return false; | ||
41 | + } | ||
42 | + if (sve_access_check(s)) { | ||
43 | + unsigned vsz = vec_full_reg_size(s); | ||
44 | + unsigned rd_ofs = vec_full_reg_offset(s, a->rd); | ||
45 | + unsigned rn_ofs = vec_full_reg_offset(s, a->rn); | ||
46 | + fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz); | ||
47 | + } | ||
48 | + return true; | ||
49 | +} | ||
50 | + | ||
51 | +static bool trans_SSRA(DisasContext *s, arg_rri_esz *a) | ||
52 | +{ | ||
53 | + return do_sve2_fn2i(s, a, gen_gvec_ssra); | ||
54 | +} | ||
55 | + | ||
56 | +static bool trans_USRA(DisasContext *s, arg_rri_esz *a) | ||
57 | +{ | ||
58 | + return do_sve2_fn2i(s, a, gen_gvec_usra); | ||
59 | +} | ||
60 | + | ||
61 | +static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a) | ||
62 | +{ | ||
63 | + return do_sve2_fn2i(s, a, gen_gvec_srsra); | ||
64 | +} | ||
65 | + | ||
66 | +static bool trans_URSRA(DisasContext *s, arg_rri_esz *a) | ||
67 | +{ | ||
68 | + return do_sve2_fn2i(s, a, gen_gvec_ursra); | ||
69 | +} | ||
70 | -- | ||
71 | 2.20.1 | ||
72 | |||
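The shift-right-and-accumulate group computes d[i] += n[i] >> shift, with the rounding ('R') forms adding back the last bit shifted out so the result is rounded to nearest rather than truncated. A hypothetical scalar sketch on 64-bit lanes (assumes the compiler implements >> on signed values as an arithmetic shift, as mainstream compilers do; the model names are invented):

    #include <stdint.h>
    #include <stddef.h>

    /* SSRA model: truncating arithmetic shift, then accumulate.
     * sh is kept in [1, 63] here; the instruction allows 1..esize. */
    static void ssra_model(int64_t *d, const int64_t *n, size_t len, int sh)
    {
        for (size_t i = 0; i < len; i++) {
            d[i] += n[i] >> sh;
        }
    }

    /* SRSRA model: add the last bit shifted out, i.e. round to nearest. */
    static void srsra_model(int64_t *d, const int64_t *n, size_t len, int sh)
    {
        for (size_t i = 0; i < len; i++) {
            d[i] += (n[i] >> sh) + ((n[i] >> (sh - 1)) & 1);
        }
    }
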
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-23-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/sve.decode | 5 +++++ | ||
9 | target/arm/translate-sve.c | 10 ++++++++++ | ||
10 | 2 files changed, 15 insertions(+) | ||
11 | |||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/arm/sve.decode | ||
15 | +++ b/target/arm/sve.decode | ||
16 | @@ -XXX,XX +XXX,XX @@ SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr | ||
17 | USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr | ||
18 | SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr | ||
19 | URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr | ||
20 | + | ||
21 | +## SVE2 bitwise shift and insert | ||
22 | + | ||
23 | +SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr | ||
24 | +SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl | ||
25 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/target/arm/translate-sve.c | ||
28 | +++ b/target/arm/translate-sve.c | ||
29 | @@ -XXX,XX +XXX,XX @@ static bool trans_URSRA(DisasContext *s, arg_rri_esz *a) | ||
30 | { | ||
31 | return do_sve2_fn2i(s, a, gen_gvec_ursra); | ||
32 | } | ||
33 | + | ||
34 | +static bool trans_SRI(DisasContext *s, arg_rri_esz *a) | ||
35 | +{ | ||
36 | + return do_sve2_fn2i(s, a, gen_gvec_sri); | ||
37 | +} | ||
38 | + | ||
39 | +static bool trans_SLI(DisasContext *s, arg_rri_esz *a) | ||
40 | +{ | ||
41 | + return do_sve2_fn2i(s, a, gen_gvec_sli); | ||
42 | +} | ||
43 | -- | ||
44 | 2.20.1 | ||
45 | |||
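SRI and SLI shift Zn and insert the shifted bits into Zd, preserving the destination bits that the shift vacates instead of zeroing them. A hypothetical scalar sketch on 32-bit lanes (shift counts restricted to avoid undefined full-width shifts in C; the model names are invented):

    #include <stdint.h>
    #include <stddef.h>

    /* SRI model: low (32 - sh) bits come from n >> sh, top sh bits of d
     * are preserved. sh in [1, 31] for this sketch. */
    static void sri_model(uint32_t *d, const uint32_t *n, size_t len, int sh)
    {
        uint32_t ins = UINT32_MAX >> sh;
        for (size_t i = 0; i < len; i++) {
            d[i] = (d[i] & ~ins) | ((n[i] >> sh) & ins);
        }
    }

    /* SLI model: high bits come from n << sh, low sh bits of d are
     * preserved. sh in [0, 31] for this sketch. */
    static void sli_model(uint32_t *d, const uint32_t *n, size_t len, int sh)
    {
        uint32_t ins = UINT32_MAX << sh;
        for (size_t i = 0; i < len; i++) {
            d[i] = (d[i] & ~ins) | ((n[i] << sh) & ins);
        }
    }
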
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-24-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/sve.decode | 6 ++++++ | ||
9 | target/arm/translate-sve.c | 21 +++++++++++++++++++++ | ||
10 | 2 files changed, 27 insertions(+) | ||
11 | |||
12 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/arm/sve.decode | ||
15 | +++ b/target/arm/sve.decode | ||
16 | @@ -XXX,XX +XXX,XX @@ URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr | ||
17 | |||
18 | SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr | ||
19 | SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl | ||
20 | + | ||
21 | +## SVE2 integer absolute difference and accumulate | ||
22 | + | ||
23 | +# TODO: Use @rda and %reg_movprfx here. | ||
24 | +SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm | ||
25 | +UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm | ||
26 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/target/arm/translate-sve.c | ||
29 | +++ b/target/arm/translate-sve.c | ||
30 | @@ -XXX,XX +XXX,XX @@ static bool trans_SLI(DisasContext *s, arg_rri_esz *a) | ||
31 | { | ||
32 | return do_sve2_fn2i(s, a, gen_gvec_sli); | ||
33 | } | ||
34 | + | ||
35 | +static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn) | ||
36 | +{ | ||
37 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
38 | + return false; | ||
39 | + } | ||
40 | + if (sve_access_check(s)) { | ||
41 | + gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm); | ||
42 | + } | ||
43 | + return true; | ||
44 | +} | ||
45 | + | ||
46 | +static bool trans_SABA(DisasContext *s, arg_rrr_esz *a) | ||
47 | +{ | ||
48 | + return do_sve2_fn_zzz(s, a, gen_gvec_saba); | ||
49 | +} | ||
50 | + | ||
51 | +static bool trans_UABA(DisasContext *s, arg_rrr_esz *a) | ||
52 | +{ | ||
53 | + return do_sve2_fn_zzz(s, a, gen_gvec_uaba); | ||
54 | +} | ||
55 | -- | ||
56 | 2.20.1 | ||
57 | |||
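SABA and UABA accumulate a per-element absolute difference into the destination, wrapping in modular arithmetic. A hypothetical scalar sketch of UABA on 8-bit lanes (uaba_model is an invented name):

    #include <stdint.h>
    #include <stddef.h>

    /* UABA model: d[i] += |n[i] - m[i]|, wrapping modulo 256. */
    static void uaba_model(uint8_t *d, const uint8_t *n, const uint8_t *m,
                           size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            uint8_t ad = n[i] > m[i] ? n[i] - m[i] : m[i] - n[i];
            d[i] += ad;
        }
    }
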
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-id: 20210525010358.152808-25-richard.henderson@linaro.org | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 24 ++++ | ||
9 | target/arm/sve.decode | 12 ++ | ||
10 | target/arm/sve_helper.c | 56 +++++++++ | ||
11 | target/arm/translate-sve.c | 238 +++++++++++++++++++++++++++++++++++++ | ||
12 | 4 files changed, 330 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG, | ||
19 | |||
20 | DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
21 | DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_3(sve2_sqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_3(sve2_sqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_3(sve2_sqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
26 | + | ||
27 | +DEF_HELPER_FLAGS_3(sve2_uqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_3(sve2_uqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_3(sve2_uqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_3(sve2_sqxtunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_3(sve2_sqxtunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_3(sve2_sqxtunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
34 | + | ||
35 | +DEF_HELPER_FLAGS_3(sve2_sqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_3(sve2_sqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_3(sve2_sqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
38 | + | ||
39 | +DEF_HELPER_FLAGS_3(sve2_uqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
40 | +DEF_HELPER_FLAGS_3(sve2_uqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
42 | + | ||
43 | +DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
44 | +DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
45 | +DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
46 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/target/arm/sve.decode | ||
49 | +++ b/target/arm/sve.decode | ||
50 | @@ -XXX,XX +XXX,XX @@ SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl | ||
51 | # TODO: Use @rda and %reg_movprfx here. | ||
52 | SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm | ||
53 | UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm | ||
54 | + | ||
55 | +#### SVE2 Narrowing | ||
56 | + | ||
57 | +## SVE2 saturating extract narrow | ||
58 | + | ||
59 | +# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0. | ||
60 | +SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl | ||
61 | +SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... @rd_rn_tszimm_shl | ||
62 | +UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl | ||
63 | +UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl | ||
64 | +SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl | ||
65 | +SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl | ||
66 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/target/arm/sve_helper.c | ||
69 | +++ b/target/arm/sve_helper.c | ||
70 | @@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD) | ||
71 | |||
72 | #undef DO_ZZZW_ACC | ||
73 | |||
74 | +#define DO_XTNB(NAME, TYPE, OP) \ | ||
75 | +void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
76 | +{ \ | ||
77 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
78 | + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ | ||
79 | + TYPE nn = *(TYPE *)(vn + i); \ | ||
80 | + nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4); \ | ||
81 | + *(TYPE *)(vd + i) = nn; \ | ||
82 | + } \ | ||
83 | +} | ||
84 | + | ||
85 | +#define DO_XTNT(NAME, TYPE, TYPEN, H, OP) \ | ||
86 | +void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
87 | +{ \ | ||
88 | + intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN)); \ | ||
89 | + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ | ||
90 | + TYPE nn = *(TYPE *)(vn + i); \ | ||
91 | + *(TYPEN *)(vd + i + odd) = OP(nn); \ | ||
92 | + } \ | ||
93 | +} | ||
94 | + | ||
95 | +#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX) | ||
96 | +#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX) | ||
97 | +#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX) | ||
98 | + | ||
99 | +DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H) | ||
100 | +DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S) | ||
101 | +DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D) | ||
102 | + | ||
103 | +DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H) | ||
104 | +DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S) | ||
105 | +DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D) | ||
106 | + | ||
107 | +#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX) | ||
108 | +#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX) | ||
109 | +#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX) | ||
110 | + | ||
111 | +DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H) | ||
112 | +DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S) | ||
113 | +DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D) | ||
114 | + | ||
115 | +DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H) | ||
116 | +DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S) | ||
117 | +DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D) | ||
118 | + | ||
119 | +DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H) | ||
120 | +DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S) | ||
121 | +DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D) | ||
122 | + | ||
123 | +DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H) | ||
124 | +DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S) | ||
125 | +DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D) | ||
126 | + | ||
127 | +#undef DO_XTNB | ||
128 | +#undef DO_XTNT | ||
129 | + | ||
130 | void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
131 | { | ||
132 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
133 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
134 | index XXXXXXX..XXXXXXX 100644 | ||
135 | --- a/target/arm/translate-sve.c | ||
136 | +++ b/target/arm/translate-sve.c | ||
137 | @@ -XXX,XX +XXX,XX @@ static bool trans_UABA(DisasContext *s, arg_rrr_esz *a) | ||
138 | { | ||
139 | return do_sve2_fn_zzz(s, a, gen_gvec_uaba); | ||
140 | } | ||
141 | + | ||
142 | +static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a, | ||
143 | + const GVecGen2 ops[3]) | ||
144 | +{ | ||
145 | + if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 || | ||
146 | + !dc_isar_feature(aa64_sve2, s)) { | ||
147 | + return false; | ||
148 | + } | ||
149 | + if (sve_access_check(s)) { | ||
150 | + unsigned vsz = vec_full_reg_size(s); | ||
151 | + tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd), | ||
152 | + vec_full_reg_offset(s, a->rn), | ||
153 | + vsz, vsz, &ops[a->esz]); | ||
154 | + } | ||
155 | + return true; | ||
156 | +} | ||
157 | + | ||
158 | +static const TCGOpcode sqxtn_list[] = { | ||
159 | + INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0 | ||
160 | +}; | ||
161 | + | ||
162 | +static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
163 | +{ | ||
164 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
165 | + int halfbits = 4 << vece; | ||
166 | + int64_t mask = (1ull << halfbits) - 1; | ||
167 | + int64_t min = -1ull << (halfbits - 1); | ||
168 | + int64_t max = -min - 1; | ||
169 | + | ||
170 | + tcg_gen_dupi_vec(vece, t, min); | ||
171 | + tcg_gen_smax_vec(vece, d, n, t); | ||
172 | + tcg_gen_dupi_vec(vece, t, max); | ||
173 | + tcg_gen_smin_vec(vece, d, d, t); | ||
174 | + tcg_gen_dupi_vec(vece, t, mask); | ||
175 | + tcg_gen_and_vec(vece, d, d, t); | ||
176 | + tcg_temp_free_vec(t); | ||
177 | +} | ||
178 | + | ||
179 | +static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a) | ||
180 | +{ | ||
181 | + static const GVecGen2 ops[3] = { | ||
182 | + { .fniv = gen_sqxtnb_vec, | ||
183 | + .opt_opc = sqxtn_list, | ||
184 | + .fno = gen_helper_sve2_sqxtnb_h, | ||
185 | + .vece = MO_16 }, | ||
186 | + { .fniv = gen_sqxtnb_vec, | ||
187 | + .opt_opc = sqxtn_list, | ||
188 | + .fno = gen_helper_sve2_sqxtnb_s, | ||
189 | + .vece = MO_32 }, | ||
190 | + { .fniv = gen_sqxtnb_vec, | ||
191 | + .opt_opc = sqxtn_list, | ||
192 | + .fno = gen_helper_sve2_sqxtnb_d, | ||
193 | + .vece = MO_64 }, | ||
194 | + }; | ||
195 | + return do_sve2_narrow_extract(s, a, ops); | ||
196 | +} | ||
197 | + | ||
198 | +static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
199 | +{ | ||
200 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
201 | + int halfbits = 4 << vece; | ||
202 | + int64_t mask = (1ull << halfbits) - 1; | ||
203 | + int64_t min = -1ull << (halfbits - 1); | ||
204 | + int64_t max = -min - 1; | ||
205 | + | ||
206 | + tcg_gen_dupi_vec(vece, t, min); | ||
207 | + tcg_gen_smax_vec(vece, n, n, t); | ||
208 | + tcg_gen_dupi_vec(vece, t, max); | ||
209 | + tcg_gen_smin_vec(vece, n, n, t); | ||
210 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
211 | + tcg_gen_dupi_vec(vece, t, mask); | ||
212 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
213 | + tcg_temp_free_vec(t); | ||
214 | +} | ||
215 | + | ||
216 | +static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a) | ||
217 | +{ | ||
218 | + static const GVecGen2 ops[3] = { | ||
219 | + { .fniv = gen_sqxtnt_vec, | ||
220 | + .opt_opc = sqxtn_list, | ||
221 | + .load_dest = true, | ||
222 | + .fno = gen_helper_sve2_sqxtnt_h, | ||
223 | + .vece = MO_16 }, | ||
224 | + { .fniv = gen_sqxtnt_vec, | ||
225 | + .opt_opc = sqxtn_list, | ||
226 | + .load_dest = true, | ||
227 | + .fno = gen_helper_sve2_sqxtnt_s, | ||
228 | + .vece = MO_32 }, | ||
229 | + { .fniv = gen_sqxtnt_vec, | ||
230 | + .opt_opc = sqxtn_list, | ||
231 | + .load_dest = true, | ||
232 | + .fno = gen_helper_sve2_sqxtnt_d, | ||
233 | + .vece = MO_64 }, | ||
234 | + }; | ||
235 | + return do_sve2_narrow_extract(s, a, ops); | ||
236 | +} | ||
237 | + | ||
238 | +static const TCGOpcode uqxtn_list[] = { | ||
239 | + INDEX_op_shli_vec, INDEX_op_umin_vec, 0 | ||
240 | +}; | ||
241 | + | ||
242 | +static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
243 | +{ | ||
244 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
245 | + int halfbits = 4 << vece; | ||
246 | + int64_t max = (1ull << halfbits) - 1; | ||
247 | + | ||
248 | + tcg_gen_dupi_vec(vece, t, max); | ||
249 | + tcg_gen_umin_vec(vece, d, n, t); | ||
250 | + tcg_temp_free_vec(t); | ||
251 | +} | ||
252 | + | ||
253 | +static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a) | ||
254 | +{ | ||
255 | + static const GVecGen2 ops[3] = { | ||
256 | + { .fniv = gen_uqxtnb_vec, | ||
257 | + .opt_opc = uqxtn_list, | ||
258 | + .fno = gen_helper_sve2_uqxtnb_h, | ||
259 | + .vece = MO_16 }, | ||
260 | + { .fniv = gen_uqxtnb_vec, | ||
261 | + .opt_opc = uqxtn_list, | ||
262 | + .fno = gen_helper_sve2_uqxtnb_s, | ||
263 | + .vece = MO_32 }, | ||
264 | + { .fniv = gen_uqxtnb_vec, | ||
265 | + .opt_opc = uqxtn_list, | ||
266 | + .fno = gen_helper_sve2_uqxtnb_d, | ||
267 | + .vece = MO_64 }, | ||
268 | + }; | ||
269 | + return do_sve2_narrow_extract(s, a, ops); | ||
270 | +} | ||
271 | + | ||
272 | +static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
273 | +{ | ||
274 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
275 | + int halfbits = 4 << vece; | ||
276 | + int64_t max = (1ull << halfbits) - 1; | ||
277 | + | ||
278 | + tcg_gen_dupi_vec(vece, t, max); | ||
279 | + tcg_gen_umin_vec(vece, n, n, t); | ||
280 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
281 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
282 | + tcg_temp_free_vec(t); | ||
283 | +} | ||
284 | + | ||
285 | +static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a) | ||
286 | +{ | ||
287 | + static const GVecGen2 ops[3] = { | ||
288 | + { .fniv = gen_uqxtnt_vec, | ||
289 | + .opt_opc = uqxtn_list, | ||
290 | + .load_dest = true, | ||
291 | + .fno = gen_helper_sve2_uqxtnt_h, | ||
292 | + .vece = MO_16 }, | ||
293 | + { .fniv = gen_uqxtnt_vec, | ||
294 | + .opt_opc = uqxtn_list, | ||
295 | + .load_dest = true, | ||
296 | + .fno = gen_helper_sve2_uqxtnt_s, | ||
297 | + .vece = MO_32 }, | ||
298 | + { .fniv = gen_uqxtnt_vec, | ||
299 | + .opt_opc = uqxtn_list, | ||
300 | + .load_dest = true, | ||
301 | + .fno = gen_helper_sve2_uqxtnt_d, | ||
302 | + .vece = MO_64 }, | ||
303 | + }; | ||
304 | + return do_sve2_narrow_extract(s, a, ops); | ||
305 | +} | ||
306 | + | ||
307 | +static const TCGOpcode sqxtun_list[] = { | ||
308 | + INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0 | ||
309 | +}; | ||
310 | + | ||
311 | +static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
312 | +{ | ||
313 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
314 | + int halfbits = 4 << vece; | ||
315 | + int64_t max = (1ull << halfbits) - 1; | ||
316 | + | ||
317 | + tcg_gen_dupi_vec(vece, t, 0); | ||
318 | + tcg_gen_smax_vec(vece, d, n, t); | ||
319 | + tcg_gen_dupi_vec(vece, t, max); | ||
320 | + tcg_gen_umin_vec(vece, d, d, t); | ||
321 | + tcg_temp_free_vec(t); | ||
322 | +} | ||
323 | + | ||
324 | +static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a) | ||
325 | +{ | ||
326 | + static const GVecGen2 ops[3] = { | ||
327 | + { .fniv = gen_sqxtunb_vec, | ||
328 | + .opt_opc = sqxtun_list, | ||
329 | + .fno = gen_helper_sve2_sqxtunb_h, | ||
330 | + .vece = MO_16 }, | ||
331 | + { .fniv = gen_sqxtunb_vec, | ||
332 | + .opt_opc = sqxtun_list, | ||
333 | + .fno = gen_helper_sve2_sqxtunb_s, | ||
334 | + .vece = MO_32 }, | ||
335 | + { .fniv = gen_sqxtunb_vec, | ||
336 | + .opt_opc = sqxtun_list, | ||
337 | + .fno = gen_helper_sve2_sqxtunb_d, | ||
338 | + .vece = MO_64 }, | ||
339 | + }; | ||
340 | + return do_sve2_narrow_extract(s, a, ops); | ||
341 | +} | ||
342 | + | ||
343 | +static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) | ||
344 | +{ | ||
345 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
346 | + int halfbits = 4 << vece; | ||
347 | + int64_t max = (1ull << halfbits) - 1; | ||
348 | + | ||
349 | + tcg_gen_dupi_vec(vece, t, 0); | ||
350 | + tcg_gen_smax_vec(vece, n, n, t); | ||
351 | + tcg_gen_dupi_vec(vece, t, max); | ||
352 | + tcg_gen_umin_vec(vece, n, n, t); | ||
353 | + tcg_gen_shli_vec(vece, n, n, halfbits); | ||
354 | + tcg_gen_bitsel_vec(vece, d, t, d, n); | ||
355 | + tcg_temp_free_vec(t); | ||
356 | +} | ||
357 | + | ||
358 | +static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a) | ||
359 | +{ | ||
360 | + static const GVecGen2 ops[3] = { | ||
361 | + { .fniv = gen_sqxtunt_vec, | ||
362 | + .opt_opc = sqxtun_list, | ||
363 | + .load_dest = true, | ||
364 | + .fno = gen_helper_sve2_sqxtunt_h, | ||
365 | + .vece = MO_16 }, | ||
366 | + { .fniv = gen_sqxtunt_vec, | ||
367 | + .opt_opc = sqxtun_list, | ||
368 | + .load_dest = true, | ||
369 | + .fno = gen_helper_sve2_sqxtunt_s, | ||
370 | + .vece = MO_32 }, | ||
371 | + { .fniv = gen_sqxtunt_vec, | ||
372 | + .opt_opc = sqxtun_list, | ||
373 | + .load_dest = true, | ||
374 | + .fno = gen_helper_sve2_sqxtunt_d, | ||
375 | + .vece = MO_64 }, | ||
376 | + }; | ||
377 | + return do_sve2_narrow_extract(s, a, ops); | ||
378 | +} | ||
379 | -- | ||
380 | 2.20.1 | ||
381 | |||
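The XTNB forms saturate each wide element to the narrow range and deposit it in the bottom half-lane, clearing the top half (the & MAKE_64BIT_MASK in DO_XTNB), while the XTNT forms write only the top half-lanes and preserve the rest of Zd, which is why their GVecGen2 entries set load_dest. A hypothetical scalar sketch of SQXTNB narrowing 32-bit lanes to 16 bits (sat16 and sqxtnb_model are invented names):

    #include <stdint.h>
    #include <stddef.h>

    static int16_t sat16(int32_t v)
    {
        return v < INT16_MIN ? INT16_MIN
             : v > INT16_MAX ? INT16_MAX : (int16_t)v;
    }

    /* SQXTNB model: saturated result lands in the bottom half-lane
     * (little-endian view); the top half-lane is cleared. */
    static void sqxtnb_model(int16_t *d, const int32_t *n, size_t lanes)
    {
        for (size_t i = 0; i < lanes; i++) {
            d[2 * i + 0] = sat16(n[i]);
            d[2 * i + 1] = 0;
        }
    }
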
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 20210525010358.152808-26-richard.henderson@linaro.org | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | --- | ||
10 | target/arm/helper-sve.h | 35 +++++++++++++++++++++++++++++ | ||
11 | target/arm/sve.decode | 8 +++++++ | ||
12 | target/arm/sve_helper.c | 46 ++++++++++++++++++++++++++++++++++++++ | ||
13 | target/arm/translate-sve.c | 25 +++++++++++++++++++++ | ||
14 | 4 files changed, 114 insertions(+) | ||
15 | |||
16 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/target/arm/helper-sve.h | ||
19 | +++ b/target/arm/helper-sve.h | ||
20 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
21 | DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
22 | DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
24 | + | ||
25 | +DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
26 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
28 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
31 | + | ||
32 | +DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG, | ||
33 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG, | ||
35 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
36 | +DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG, | ||
37 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
38 | + | ||
39 | +DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG, | ||
40 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
41 | +DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG, | ||
42 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
43 | +DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG, | ||
44 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
45 | + | ||
46 | +DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG, | ||
47 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
48 | +DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG, | ||
49 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
50 | +DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG, | ||
51 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
52 | + | ||
53 | +DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG, | ||
54 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
55 | +DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG, | ||
56 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
57 | +DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG, | ||
58 | + void, ptr, ptr, ptr, ptr, ptr, i32) | ||
59 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/target/arm/sve.decode | ||
62 | +++ b/target/arm/sve.decode | ||
63 | @@ -XXX,XX +XXX,XX @@ UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl | ||
64 | UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl | ||
65 | SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl | ||
66 | SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl | ||
67 | + | ||
68 | +## SVE2 floating-point pairwise operations | ||
69 | + | ||
70 | +FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm | ||
71 | +FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm | ||
72 | +FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm | ||
73 | +FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm | ||
74 | +FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm | ||
75 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | ||
77 | --- a/target/arm/sve_helper.c | ||
78 | +++ b/target/arm/sve_helper.c | ||
79 | @@ -XXX,XX +XXX,XX @@ DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN) | ||
80 | #undef DO_ZPZZ_PAIR | ||
81 | #undef DO_ZPZZ_PAIR_D | ||
82 | |||
83 | +#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \ | ||
84 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ | ||
85 | + void *status, uint32_t desc) \ | ||
86 | +{ \ | ||
87 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
88 | + for (i = 0; i < opr_sz; ) { \ | ||
89 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ | ||
90 | + do { \ | ||
91 | + TYPE n0 = *(TYPE *)(vn + H(i)); \ | ||
92 | + TYPE m0 = *(TYPE *)(vm + H(i)); \ | ||
93 | + TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \ | ||
94 | + TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \ | ||
95 | + if (pg & 1) { \ | ||
96 | + *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \ | ||
97 | + } \ | ||
98 | + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ | ||
99 | + if (pg & 1) { \ | ||
100 | + *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \ | ||
101 | + } \ | ||
102 | + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ | ||
103 | + } while (i & 15); \ | ||
104 | + } \ | ||
105 | +} | ||
106 | + | ||
107 | +DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add) | ||
108 | +DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add) | ||
109 | +DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, , float64_add) | ||
110 | + | ||
111 | +DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum) | ||
112 | +DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum) | ||
113 | +DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, , float64_maxnum) | ||
114 | + | ||
115 | +DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum) | ||
116 | +DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum) | ||
117 | +DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, , float64_minnum) | ||
118 | + | ||
119 | +DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max) | ||
120 | +DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max) | ||
121 | +DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, , float64_max) | ||
122 | + | ||
123 | +DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min) | ||
124 | +DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min) | ||
125 | +DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, , float64_min) | ||
126 | + | ||
127 | +#undef DO_ZPZZ_PAIR_FP | ||
128 | + | ||
129 | /* Three-operand expander, controlled by a predicate, in which the | ||
130 | * third operand is "wide". That is, for D = N op M, the same 64-bit | ||
131 | * value of M is used with all of the narrower values of N. | ||
132 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
133 | index XXXXXXX..XXXXXXX 100644 | ||
134 | --- a/target/arm/translate-sve.c | ||
135 | +++ b/target/arm/translate-sve.c | ||
136 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a) | ||
137 | }; | ||
138 | return do_sve2_narrow_extract(s, a, ops); | ||
139 | } | ||
140 | + | ||
141 | +static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
142 | + gen_helper_gvec_4_ptr *fn) | ||
143 | +{ | ||
144 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
145 | + return false; | ||
146 | + } | ||
147 | + return do_zpzz_fp(s, a, fn); | ||
148 | +} | ||
149 | + | ||
150 | +#define DO_SVE2_ZPZZ_FP(NAME, name) \ | ||
151 | +static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ | ||
152 | +{ \ | ||
153 | + static gen_helper_gvec_4_ptr * const fns[4] = { \ | ||
154 | + NULL, gen_helper_sve2_##name##_zpzz_h, \ | ||
155 | + gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \ | ||
156 | + }; \ | ||
157 | + return do_sve2_zpzz_fp(s, a, fns[a->esz]); \ | ||
158 | +} | ||
159 | + | ||
160 | +DO_SVE2_ZPZZ_FP(FADDP, faddp) | ||
161 | +DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp) | ||
162 | +DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp) | ||
163 | +DO_SVE2_ZPZZ_FP(FMAXP, fmaxp) | ||
164 | +DO_SVE2_ZPZZ_FP(FMINP, fminp) | ||
165 | -- | ||
166 | 2.20.1 | ||
167 | |||
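A note on the pairwise expander above: each inner-loop step consumes one
adjacent element pair from Zn and one from Zm, writing the Zn-pair result
to the even destination slot and the Zm-pair result to the odd one, each
gated by its own predicate bit; all four source elements are fetched before
either store, so the destination may overlap a source. A minimal standalone
sketch of the same element flow (hypothetical helper name, plain float in
place of the softfloat types and status pointer):

    /* Sketch of SVE2 FADDP element flow: d[i] = n[i] + n[i+1] and
     * d[i+1] = m[i] + m[i+1], with per-element predication. */
    #include <stdio.h>

    static void faddp_sketch(float *d, const float *n, const float *m,
                             const unsigned char *pg, int len)
    {
        for (int i = 0; i < len; i += 2) {
            float a = n[i] + n[i + 1];    /* pair from the first source */
            float b = m[i] + m[i + 1];    /* pair from the second source */
            if (pg[i]) {
                d[i] = a;
            }
            if (pg[i + 1]) {
                d[i + 1] = b;
            }
        }
    }

    int main(void)
    {
        float n[4] = {1, 2, 3, 4}, m[4] = {10, 20, 30, 40}, d[4] = {0};
        unsigned char pg[4] = {1, 1, 1, 0};    /* last lane inactive */
        faddp_sketch(d, n, m, pg, 4);
        printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]);  /* 3 30 7 0 */
        return 0;
    }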
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-34-richard.henderson@linaro.org | ||
7 | Message-Id: <20200415145915.2859-1-steplong@quicinc.com> | ||
8 | [rth: Expanded comment for do_match2] | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | --- | ||
12 | target/arm/helper-sve.h | 10 ++++++ | ||
13 | target/arm/sve.decode | 5 +++ | ||
14 | target/arm/sve_helper.c | 64 ++++++++++++++++++++++++++++++++++++++ | ||
15 | target/arm/translate-sve.c | 22 +++++++++++++ | ||
16 | 4 files changed, 101 insertions(+) | ||
17 | |||
18 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/target/arm/helper-sve.h | ||
21 | +++ b/target/arm/helper-sve.h | ||
22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
24 | DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
25 | |||
26 | +DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG, | ||
27 | + i32, ptr, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG, | ||
29 | + i32, ptr, ptr, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG, | ||
32 | + i32, ptr, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG, | ||
34 | + i32, ptr, ptr, ptr, ptr, i32) | ||
35 | + | ||
36 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
37 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
38 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/sve.decode | ||
42 | +++ b/target/arm/sve.decode | ||
43 | @@ -XXX,XX +XXX,XX @@ UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr | ||
44 | UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr | ||
45 | UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr | ||
46 | |||
47 | +### SVE2 Character Match | ||
48 | + | ||
49 | +MATCH 01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm | ||
50 | +NMATCH 01000101 .. 1 ..... 100 ... ..... 1 .... @pd_pg_rn_rm | ||
51 | + | ||
52 | ## SVE2 floating-point pairwise operations | ||
53 | |||
54 | FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm | ||
55 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/target/arm/sve_helper.c | ||
58 | +++ b/target/arm/sve_helper.c | ||
59 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) | ||
60 | d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i])); | ||
61 | } | ||
62 | } | ||
63 | + | ||
64 | +/* | ||
65 | + * Returns true if m0 or m1 contains the low uint8_t/uint16_t in n. | ||
66 | + * See hasless(v,1) from | ||
67 | + * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord | ||
68 | + */ | ||
69 | +static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz) | ||
70 | +{ | ||
71 | + int bits = 8 << esz; | ||
72 | + uint64_t ones = dup_const(esz, 1); | ||
73 | + uint64_t signs = ones << (bits - 1); | ||
74 | + uint64_t cmp0, cmp1; | ||
75 | + | ||
76 | + cmp1 = dup_const(esz, n); | ||
77 | + cmp0 = cmp1 ^ m0; | ||
78 | + cmp1 = cmp1 ^ m1; | ||
79 | + cmp0 = (cmp0 - ones) & ~cmp0; | ||
80 | + cmp1 = (cmp1 - ones) & ~cmp1; | ||
81 | + return (cmp0 | cmp1) & signs; | ||
82 | +} | ||
83 | + | ||
84 | +static inline uint32_t do_match(void *vd, void *vn, void *vm, void *vg, | ||
85 | + uint32_t desc, int esz, bool nmatch) | ||
86 | +{ | ||
87 | + uint16_t esz_mask = pred_esz_masks[esz]; | ||
88 | + intptr_t opr_sz = simd_oprsz(desc); | ||
89 | + uint32_t flags = PREDTEST_INIT; | ||
90 | + intptr_t i, j, k; | ||
91 | + | ||
92 | + for (i = 0; i < opr_sz; i += 16) { | ||
93 | + uint64_t m0 = *(uint64_t *)(vm + i); | ||
94 | + uint64_t m1 = *(uint64_t *)(vm + i + 8); | ||
95 | + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)) & esz_mask; | ||
96 | + uint16_t out = 0; | ||
97 | + | ||
98 | + for (j = 0; j < 16; j += 8) { | ||
99 | + uint64_t n = *(uint64_t *)(vn + i + j); | ||
100 | + | ||
101 | + for (k = 0; k < 8; k += 1 << esz) { | ||
102 | + if (pg & (1 << (j + k))) { | ||
103 | + bool o = do_match2(n >> (k * 8), m0, m1, esz); | ||
104 | + out |= (o ^ nmatch) << (j + k); | ||
105 | + } | ||
106 | + } | ||
107 | + } | ||
108 | + *(uint16_t *)(vd + H1_2(i >> 3)) = out; | ||
109 | + flags = iter_predtest_fwd(out, pg, flags); | ||
110 | + } | ||
111 | + return flags; | ||
112 | +} | ||
113 | + | ||
114 | +#define DO_PPZZ_MATCH(NAME, ESZ, INV) \ | ||
115 | +uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ | ||
116 | +{ \ | ||
117 | + return do_match(vd, vn, vm, vg, desc, ESZ, INV); \ | ||
118 | +} | ||
119 | + | ||
120 | +DO_PPZZ_MATCH(sve2_match_ppzz_b, MO_8, false) | ||
121 | +DO_PPZZ_MATCH(sve2_match_ppzz_h, MO_16, false) | ||
122 | + | ||
123 | +DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true) | ||
124 | +DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true) | ||
125 | + | ||
126 | +#undef DO_PPZZ_MATCH | ||
127 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
128 | index XXXXXXX..XXXXXXX 100644 | ||
129 | --- a/target/arm/translate-sve.c | ||
130 | +++ b/target/arm/translate-sve.c | ||
131 | @@ -XXX,XX +XXX,XX @@ static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a) | ||
132 | return do_sve2_shr_narrow(s, a, ops); | ||
133 | } | ||
134 | |||
135 | +static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, | ||
136 | + gen_helper_gvec_flags_4 *fn) | ||
137 | +{ | ||
138 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
139 | + return false; | ||
140 | + } | ||
141 | + return do_ppzz_flags(s, a, fn); | ||
142 | +} | ||
143 | + | ||
144 | +#define DO_SVE2_PPZZ_MATCH(NAME, name) \ | ||
145 | +static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ | ||
146 | +{ \ | ||
147 | + static gen_helper_gvec_flags_4 * const fns[4] = { \ | ||
148 | + gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \ | ||
149 | + NULL, NULL \ | ||
150 | + }; \ | ||
151 | + return do_sve2_ppzz_flags(s, a, fns[a->esz]); \ | ||
152 | +} | ||
153 | + | ||
154 | +DO_SVE2_PPZZ_MATCH(MATCH, match) | ||
155 | +DO_SVE2_PPZZ_MATCH(NMATCH, nmatch) | ||
156 | + | ||
157 | static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
158 | gen_helper_gvec_4_ptr *fn) | ||
159 | { | ||
160 | -- | ||
161 | 2.20.1 | ||
162 | |||
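The do_match2 helper above is the classic SWAR "haszero" trick from the
cited bithacks page: the XOR zeroes exactly the lanes of m that equal n,
and (x - 0x01..01) & ~x & 0x80..80 is non-zero iff some lane of x is zero.
A standalone check of the byte-sized case (hypothetical names, illustration
only):

    #include <assert.h>
    #include <stdint.h>

    static int has_zero_byte(uint64_t x)
    {
        const uint64_t ones  = 0x0101010101010101ull;
        const uint64_t signs = 0x8080808080808080ull;
        return ((x - ones) & ~x & signs) != 0;
    }

    static int match_byte(uint8_t n, uint64_t m)
    {
        const uint64_t ones = 0x0101010101010101ull;
        /* ones * n replicates n into every byte, as dup_const does */
        return has_zero_byte(m ^ (ones * n));
    }

    int main(void)
    {
        assert(!has_zero_byte(0x1122334455667788ull));
        assert(has_zero_byte(0x1122334455667700ull));
        assert(match_byte(0x44, 0x0011223344556677ull));   /* 0x44 present */
        assert(!match_byte(0x99, 0x0011223344556677ull));  /* absent */
        return 0;
    }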
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-39-richard.henderson@linaro.org | ||
7 | Message-Id: <20200417162231.10374-2-steplong@quicinc.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/helper-sve.h | 8 ++++++++ | ||
12 | target/arm/sve.decode | 5 +++++ | ||
13 | target/arm/sve_helper.c | 36 ++++++++++++++++++++++++++++++++++++ | ||
14 | target/arm/translate-sve.c | 13 +++++++++++++ | ||
15 | 4 files changed, 62 insertions(+) | ||
16 | |||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/helper-sve.h | ||
20 | +++ b/target/arm/helper-sve.h | ||
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
22 | DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
24 | |||
25 | +DEF_HELPER_FLAGS_4(sve2_addhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_addhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_4(sve2_addhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | + | ||
29 | +DEF_HELPER_FLAGS_4(sve2_addhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_addhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_addhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | + | ||
33 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG, | ||
34 | i32, ptr, ptr, ptr, ptr, i32) | ||
35 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG, | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr | ||
41 | UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr | ||
42 | UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr | ||
43 | |||
44 | +## SVE2 integer add/subtract narrow high part | ||
45 | + | ||
46 | +ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm | ||
47 | +ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm | ||
48 | + | ||
49 | ### SVE2 Character Match | ||
50 | |||
51 | MATCH 01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm | ||
52 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/target/arm/sve_helper.c | ||
55 | +++ b/target/arm/sve_helper.c | ||
56 | @@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D) | ||
57 | #undef DO_SHRNB | ||
58 | #undef DO_SHRNT | ||
59 | |||
60 | +#define DO_BINOPNB(NAME, TYPEW, TYPEN, SHIFT, OP) \ | ||
61 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
62 | +{ \ | ||
63 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
64 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
65 | + TYPEW nn = *(TYPEW *)(vn + i); \ | ||
66 | + TYPEW mm = *(TYPEW *)(vm + i); \ | ||
67 | + *(TYPEW *)(vd + i) = (TYPEN)OP(nn, mm, SHIFT); \ | ||
68 | + } \ | ||
69 | +} | ||
70 | + | ||
71 | +#define DO_BINOPNT(NAME, TYPEW, TYPEN, SHIFT, HW, HN, OP) \ | ||
72 | +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
73 | +{ \ | ||
74 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
75 | + for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \ | ||
76 | + TYPEW nn = *(TYPEW *)(vn + HW(i)); \ | ||
77 | + TYPEW mm = *(TYPEW *)(vm + HW(i)); \ | ||
78 | + *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, mm, SHIFT); \ | ||
79 | + } \ | ||
80 | +} | ||
81 | + | ||
82 | +#define DO_ADDHN(N, M, SH) ((N + M) >> SH) | ||
83 | + | ||
84 | +DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN) | ||
85 | +DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN) | ||
86 | +DO_BINOPNB(sve2_addhnb_d, uint64_t, uint32_t, 32, DO_ADDHN) | ||
87 | + | ||
88 | +DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN) | ||
89 | +DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN) | ||
90 | +DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_ADDHN) | ||
91 | + | ||
92 | +#undef DO_ADDHN | ||
93 | + | ||
94 | +#undef DO_BINOPNB | ||
95 | + | ||
96 | /* Fully general four-operand expander, controlled by a predicate. | ||
97 | */ | ||
98 | #define DO_ZPZZZ(NAME, TYPE, H, OP) \ | ||
99 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
100 | index XXXXXXX..XXXXXXX 100644 | ||
101 | --- a/target/arm/translate-sve.c | ||
102 | +++ b/target/arm/translate-sve.c | ||
103 | @@ -XXX,XX +XXX,XX @@ static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a) | ||
104 | return do_sve2_shr_narrow(s, a, ops); | ||
105 | } | ||
106 | |||
107 | +#define DO_SVE2_ZZZ_NARROW(NAME, name) \ | ||
108 | +static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
109 | +{ \ | ||
110 | + static gen_helper_gvec_3 * const fns[4] = { \ | ||
111 | + NULL, gen_helper_sve2_##name##_h, \ | ||
112 | + gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ | ||
113 | + }; \ | ||
114 | + return do_sve2_zzz_ool(s, a, fns[a->esz]); \ | ||
115 | +} | ||
116 | + | ||
117 | +DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) | ||
118 | +DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) | ||
119 | + | ||
120 | static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, | ||
121 | gen_helper_gvec_flags_4 *fn) | ||
122 | { | ||
123 | -- | ||
124 | 2.20.1 | ||
125 | |||
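For reference, the "narrow high part" in DO_ADDHN is simply the top half of
the same-width sum, so any carry out of the low half shows up in the result;
the B form writes the even (bottom) narrow destination elements and the T
form the odd ones. A scalar illustration of the 32-to-16-bit case (just the
arithmetic, not the QEMU helper):

    #include <assert.h>
    #include <stdint.h>

    /* ADDHN on one 32-bit element pair: add, keep the high 16 bits. */
    static uint16_t addhn_s(uint32_t n, uint32_t m)
    {
        return (uint16_t)((n + m) >> 16);
    }

    int main(void)
    {
        /* 0x12340000 + 0x00010000 = 0x12350000 -> high half 0x1235 */
        assert(addhn_s(0x12340000u, 0x00010000u) == 0x1235);
        /* carries out of the low half propagate into the result */
        assert(addhn_s(0x0000ffffu, 0x00000001u) == 0x0001);
        return 0;
    }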
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-40-richard.henderson@linaro.org | ||
7 | Message-Id: <20200417162231.10374-3-steplong@quicinc.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/helper-sve.h | 8 ++++++++ | ||
12 | target/arm/sve.decode | 2 ++ | ||
13 | target/arm/sve_helper.c | 10 ++++++++++ | ||
14 | target/arm/translate-sve.c | 2 ++ | ||
15 | 4 files changed, 22 insertions(+) | ||
16 | |||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/helper-sve.h | ||
20 | +++ b/target/arm/helper-sve.h | ||
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_addhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
22 | DEF_HELPER_FLAGS_4(sve2_addhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_4(sve2_addhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | |||
25 | +DEF_HELPER_FLAGS_4(sve2_raddhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_raddhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_4(sve2_raddhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | + | ||
29 | +DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | + | ||
33 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG, | ||
34 | i32, ptr, ptr, ptr, ptr, i32) | ||
35 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG, | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr | ||
41 | |||
42 | ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm | ||
43 | ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm | ||
44 | +RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm | ||
45 | +RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm | ||
46 | |||
47 | ### SVE2 Character Match | ||
48 | |||
49 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/sve_helper.c | ||
52 | +++ b/target/arm/sve_helper.c | ||
53 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
54 | } | ||
55 | |||
56 | #define DO_ADDHN(N, M, SH) ((N + M) >> SH) | ||
57 | +#define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH) | ||
58 | |||
59 | DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN) | ||
60 | DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN) | ||
61 | @@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN) | ||
62 | DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN) | ||
63 | DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_ADDHN) | ||
64 | |||
65 | +DO_BINOPNB(sve2_raddhnb_h, uint16_t, uint8_t, 8, DO_RADDHN) | ||
66 | +DO_BINOPNB(sve2_raddhnb_s, uint32_t, uint16_t, 16, DO_RADDHN) | ||
67 | +DO_BINOPNB(sve2_raddhnb_d, uint64_t, uint32_t, 32, DO_RADDHN) | ||
68 | + | ||
69 | +DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN) | ||
70 | +DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN) | ||
71 | +DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN) | ||
72 | + | ||
73 | +#undef DO_RADDHN | ||
74 | #undef DO_ADDHN | ||
75 | |||
76 | #undef DO_BINOPNB | ||
77 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/target/arm/translate-sve.c | ||
80 | +++ b/target/arm/translate-sve.c | ||
81 | @@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ | ||
82 | |||
83 | DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) | ||
84 | DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) | ||
85 | +DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb) | ||
86 | +DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt) | ||
87 | |||
88 | static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, | ||
89 | gen_helper_gvec_flags_4 *fn) | ||
90 | -- | ||
91 | 2.20.1 | ||
92 | |||
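The only difference from ADDHN is the rounding constant 1 << (SH - 1) added
before the shift, i.e. round-to-nearest-up of the discarded low half instead
of truncation. Scalar comparison of the two (sketch with assumed names):

    #include <assert.h>
    #include <stdint.h>

    static uint16_t addhn_s(uint32_t n, uint32_t m)
    {
        return (uint16_t)((n + m) >> 16);               /* truncating */
    }

    static uint16_t raddhn_s(uint32_t n, uint32_t m)
    {
        return (uint16_t)((n + m + (1u << 15)) >> 16);  /* rounding */
    }

    int main(void)
    {
        /* sum 0x00018000: low half exactly at the midpoint, rounds up */
        assert(addhn_s(0x00018000u, 0) == 0x0001);
        assert(raddhn_s(0x00018000u, 0) == 0x0002);
        /* sum 0x00017fff: below the midpoint, rounding changes nothing */
        assert(raddhn_s(0x00017fffu, 0) == 0x0001);
        return 0;
    }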
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-41-richard.henderson@linaro.org | ||
7 | Message-Id: <20200417162231.10374-4-steplong@quicinc.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/helper-sve.h | 8 ++++++++ | ||
12 | target/arm/sve.decode | 2 ++ | ||
13 | target/arm/sve_helper.c | 10 ++++++++++ | ||
14 | target/arm/translate-sve.c | 3 +++ | ||
15 | 4 files changed, 23 insertions(+) | ||
16 | |||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/helper-sve.h | ||
20 | +++ b/target/arm/helper-sve.h | ||
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
22 | DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | |||
25 | +DEF_HELPER_FLAGS_4(sve2_subhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_subhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_4(sve2_subhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | + | ||
29 | +DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | +DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | + | ||
33 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG, | ||
34 | i32, ptr, ptr, ptr, ptr, i32) | ||
35 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG, | ||
36 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/sve.decode | ||
39 | +++ b/target/arm/sve.decode | ||
40 | @@ -XXX,XX +XXX,XX @@ ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm | ||
41 | ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm | ||
42 | RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm | ||
43 | RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm | ||
44 | +SUBHNB 01000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm | ||
45 | +SUBHNT 01000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm | ||
46 | |||
47 | ### SVE2 Character Match | ||
48 | |||
49 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/sve_helper.c | ||
52 | +++ b/target/arm/sve_helper.c | ||
53 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
54 | |||
55 | #define DO_ADDHN(N, M, SH) ((N + M) >> SH) | ||
56 | #define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH) | ||
57 | +#define DO_SUBHN(N, M, SH) ((N - M) >> SH) | ||
58 | |||
59 | DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN) | ||
60 | DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN) | ||
61 | @@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN) | ||
62 | DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN) | ||
63 | DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN) | ||
64 | |||
65 | +DO_BINOPNB(sve2_subhnb_h, uint16_t, uint8_t, 8, DO_SUBHN) | ||
66 | +DO_BINOPNB(sve2_subhnb_s, uint32_t, uint16_t, 16, DO_SUBHN) | ||
67 | +DO_BINOPNB(sve2_subhnb_d, uint64_t, uint32_t, 32, DO_SUBHN) | ||
68 | + | ||
69 | +DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN) | ||
70 | +DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN) | ||
71 | +DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_SUBHN) | ||
72 | + | ||
73 | +#undef DO_SUBHN | ||
74 | #undef DO_RADDHN | ||
75 | #undef DO_ADDHN | ||
76 | |||
77 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/target/arm/translate-sve.c | ||
80 | +++ b/target/arm/translate-sve.c | ||
81 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) | ||
82 | DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb) | ||
83 | DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt) | ||
84 | |||
85 | +DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb) | ||
86 | +DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt) | ||
87 | + | ||
88 | static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, | ||
89 | gen_helper_gvec_flags_4 *fn) | ||
90 | { | ||
91 | -- | ||
92 | 2.20.1 | ||
93 | |||
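One subtlety worth spelling out: DO_SUBHN operates on unsigned types, so
when N < M the subtraction wraps modulo 2^W, and the extracted high half is
exactly the high half of the two's-complement difference, which is what the
instruction is specified to produce. A quick check of the 32-to-16-bit case:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t subhn_s(uint32_t n, uint32_t m)
    {
        return (uint16_t)((n - m) >> 16);   /* modulo-2^32 subtraction */
    }

    int main(void)
    {
        assert(subhn_s(0x00030000u, 0x00010000u) == 0x0002);
        /* n < m: 0 - 0x10000 wraps to 0xffff0000, high half 0xffff (-1) */
        assert(subhn_s(0x00000000u, 0x00010000u) == 0xffff);
        return 0;
    }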
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | This completes the 'SVE2 integer add/subtract narrow high part' section in sve.decode. | ||
4 | |||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20210525010358.152808-42-richard.henderson@linaro.org | ||
9 | Message-Id: <20200417162231.10374-5-steplong@quicinc.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | --- | ||
13 | target/arm/helper-sve.h | 8 ++++++++ | ||
14 | target/arm/sve.decode | 2 ++ | ||
15 | target/arm/sve_helper.c | 10 ++++++++++ | ||
16 | target/arm/translate-sve.c | 2 ++ | ||
17 | 4 files changed, 22 insertions(+) | ||
18 | |||
19 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/target/arm/helper-sve.h | ||
22 | +++ b/target/arm/helper-sve.h | ||
23 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | |||
27 | +DEF_HELPER_FLAGS_4(sve2_rsubhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_4(sve2_rsubhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_4(sve2_rsubhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_4(sve2_rsubhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_4(sve2_rsubhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_4(sve2_rsubhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
34 | + | ||
35 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG, | ||
36 | i32, ptr, ptr, ptr, ptr, i32) | ||
37 | DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG, | ||
38 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/target/arm/sve.decode | ||
41 | +++ b/target/arm/sve.decode | ||
42 | @@ -XXX,XX +XXX,XX @@ RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm | ||
43 | RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm | ||
44 | SUBHNB 01000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm | ||
45 | SUBHNT 01000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm | ||
46 | +RSUBHNB 01000101 .. 1 ..... 011 110 ..... ..... @rd_rn_rm | ||
47 | +RSUBHNT 01000101 .. 1 ..... 011 111 ..... ..... @rd_rn_rm | ||
48 | |||
49 | ### SVE2 Character Match | ||
50 | |||
51 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/target/arm/sve_helper.c | ||
54 | +++ b/target/arm/sve_helper.c | ||
55 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
56 | #define DO_ADDHN(N, M, SH) ((N + M) >> SH) | ||
57 | #define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH) | ||
58 | #define DO_SUBHN(N, M, SH) ((N - M) >> SH) | ||
59 | +#define DO_RSUBHN(N, M, SH) ((N - M + ((__typeof(N))1 << (SH - 1))) >> SH) | ||
60 | |||
61 | DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN) | ||
62 | DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN) | ||
63 | @@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN) | ||
64 | DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN) | ||
65 | DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_SUBHN) | ||
66 | |||
67 | +DO_BINOPNB(sve2_rsubhnb_h, uint16_t, uint8_t, 8, DO_RSUBHN) | ||
68 | +DO_BINOPNB(sve2_rsubhnb_s, uint32_t, uint16_t, 16, DO_RSUBHN) | ||
69 | +DO_BINOPNB(sve2_rsubhnb_d, uint64_t, uint32_t, 32, DO_RSUBHN) | ||
70 | + | ||
71 | +DO_BINOPNT(sve2_rsubhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RSUBHN) | ||
72 | +DO_BINOPNT(sve2_rsubhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RSUBHN) | ||
73 | +DO_BINOPNT(sve2_rsubhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RSUBHN) | ||
74 | + | ||
75 | +#undef DO_RSUBHN | ||
76 | #undef DO_SUBHN | ||
77 | #undef DO_RADDHN | ||
78 | #undef DO_ADDHN | ||
79 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/arm/translate-sve.c | ||
82 | +++ b/target/arm/translate-sve.c | ||
83 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt) | ||
84 | |||
85 | DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb) | ||
86 | DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt) | ||
87 | +DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb) | ||
88 | +DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) | ||
89 | |||
90 | static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a, | ||
91 | gen_helper_gvec_flags_4 *fn) | ||
92 | -- | ||
93 | 2.20.1 | ||
94 | |||
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-43-richard.henderson@linaro.org | ||
7 | Message-Id: <20200416173109.8856-1-steplong@quicinc.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/helper-sve.h | 7 ++ | ||
12 | target/arm/sve.decode | 6 ++ | ||
13 | target/arm/sve_helper.c | 131 +++++++++++++++++++++++++++++++++++++ | ||
14 | target/arm/translate-sve.c | 19 ++++++ | ||
15 | 4 files changed, 163 insertions(+) | ||
16 | |||
17 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/helper-sve.h | ||
20 | +++ b/target/arm/helper-sve.h | ||
21 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG, | ||
22 | DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG, | ||
23 | i32, ptr, ptr, ptr, ptr, i32) | ||
24 | |||
25 | +DEF_HELPER_FLAGS_5(sve2_histcnt_s, TCG_CALL_NO_RWG, | ||
26 | + void, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG, | ||
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | + | ||
30 | +DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
31 | + | ||
32 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, | ||
33 | void, ptr, ptr, ptr, ptr, ptr, i32) | ||
34 | DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, | ||
35 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/target/arm/sve.decode | ||
38 | +++ b/target/arm/sve.decode | ||
39 | @@ -XXX,XX +XXX,XX @@ | ||
40 | &rprrr_esz rn=%reg_movprfx | ||
41 | @rdn_pg_rm_ra ........ esz:2 . ra:5 ... pg:3 rm:5 rd:5 \ | ||
42 | &rprrr_esz rn=%reg_movprfx | ||
43 | +@rd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 &rprr_esz | ||
44 | |||
45 | # One register operand, with governing predicate, vector element size | ||
46 | @rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz | ||
47 | @@ -XXX,XX +XXX,XX @@ RSUBHNT 01000101 .. 1 ..... 011 111 ..... ..... @rd_rn_rm | ||
48 | MATCH 01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm | ||
49 | NMATCH 01000101 .. 1 ..... 100 ... ..... 1 .... @pd_pg_rn_rm | ||
50 | |||
51 | +### SVE2 Histogram Computation | ||
52 | + | ||
53 | +HISTCNT 01000101 .. 1 ..... 110 ... ..... ..... @rd_pg_rn_rm | ||
54 | +HISTSEG 01000101 .. 1 ..... 101 000 ..... ..... @rd_rn_rm | ||
55 | + | ||
56 | ## SVE2 floating-point pairwise operations | ||
57 | |||
58 | FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm | ||
59 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/target/arm/sve_helper.c | ||
62 | +++ b/target/arm/sve_helper.c | ||
63 | @@ -XXX,XX +XXX,XX @@ DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true) | ||
64 | DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true) | ||
65 | |||
66 | #undef DO_PPZZ_MATCH | ||
67 | + | ||
68 | +void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg, | ||
69 | + uint32_t desc) | ||
70 | +{ | ||
71 | + ARMVectorReg scratch; | ||
72 | + intptr_t i, j; | ||
73 | + intptr_t opr_sz = simd_oprsz(desc); | ||
74 | + uint32_t *d = vd, *n = vn, *m = vm; | ||
75 | + uint8_t *pg = vg; | ||
76 | + | ||
77 | + if (d == n) { | ||
78 | + n = memcpy(&scratch, n, opr_sz); | ||
79 | + if (d == m) { | ||
80 | + m = n; | ||
81 | + } | ||
82 | + } else if (d == m) { | ||
83 | + m = memcpy(&scratch, m, opr_sz); | ||
84 | + } | ||
85 | + | ||
86 | + for (i = 0; i < opr_sz; i += 4) { | ||
87 | + uint64_t count = 0; | ||
88 | + uint8_t pred; | ||
89 | + | ||
90 | + pred = pg[H1(i >> 3)] >> (i & 7); | ||
91 | + if (pred & 1) { | ||
92 | + uint32_t nn = n[H4(i >> 2)]; | ||
93 | + | ||
94 | + for (j = 0; j <= i; j += 4) { | ||
95 | + pred = pg[H1(j >> 3)] >> (j & 7); | ||
96 | + if ((pred & 1) && nn == m[H4(j >> 2)]) { | ||
97 | + ++count; | ||
98 | + } | ||
99 | + } | ||
100 | + } | ||
101 | + d[H4(i >> 2)] = count; | ||
102 | + } | ||
103 | +} | ||
104 | + | ||
105 | +void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg, | ||
106 | + uint32_t desc) | ||
107 | +{ | ||
108 | + ARMVectorReg scratch; | ||
109 | + intptr_t i, j; | ||
110 | + intptr_t opr_sz = simd_oprsz(desc); | ||
111 | + uint64_t *d = vd, *n = vn, *m = vm; | ||
112 | + uint8_t *pg = vg; | ||
113 | + | ||
114 | + if (d == n) { | ||
115 | + n = memcpy(&scratch, n, opr_sz); | ||
116 | + if (d == m) { | ||
117 | + m = n; | ||
118 | + } | ||
119 | + } else if (d == m) { | ||
120 | + m = memcpy(&scratch, m, opr_sz); | ||
121 | + } | ||
122 | + | ||
123 | + for (i = 0; i < opr_sz / 8; ++i) { | ||
124 | + uint64_t count = 0; | ||
125 | + if (pg[H1(i)] & 1) { | ||
126 | + uint64_t nn = n[i]; | ||
127 | + for (j = 0; j <= i; ++j) { | ||
128 | + if ((pg[H1(j)] & 1) && nn == m[j]) { | ||
129 | + ++count; | ||
130 | + } | ||
131 | + } | ||
132 | + } | ||
133 | + d[i] = count; | ||
134 | + } | ||
135 | +} | ||
136 | + | ||
137 | +/* | ||
138 | + * Returns the number of bytes in m0 and m1 that match n. | ||
139 | + * Unlike do_match2 we don't just need true/false, we need an exact count. | ||
140 | + * This requires two extra logical operations. | ||
141 | + */ | ||
142 | +static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1) | ||
143 | +{ | ||
144 | + const uint64_t mask = dup_const(MO_8, 0x7f); | ||
145 | + uint64_t cmp0, cmp1; | ||
146 | + | ||
147 | + cmp1 = dup_const(MO_8, n); | ||
148 | + cmp0 = cmp1 ^ m0; | ||
149 | + cmp1 = cmp1 ^ m1; | ||
150 | + | ||
151 | + /* | ||
152 | + * 1: clear msb of each byte to avoid carry to next byte (& mask) | ||
153 | + * 2: carry in to msb if byte != 0 (+ mask) | ||
154 | + * 3: set msb if cmp has msb set (| cmp) | ||
155 | + * 4: set ~msb to ignore them (| mask) | ||
156 | + * We now have 0xff for byte != 0 or 0x7f for byte == 0. | ||
157 | + * 5: invert, resulting in 0x80 if and only if byte == 0. | ||
158 | + */ | ||
159 | + cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask); | ||
160 | + cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask); | ||
161 | + | ||
162 | + /* | ||
163 | + * Combine the two compares in a way that the bits do | ||
164 | + * not overlap, and so preserves the count of set bits. | ||
165 | + * If the host has an efficient instruction for ctpop, | ||
166 | + * then ctpop(x) + ctpop(y) has the same number of | ||
167 | + * operations as ctpop(x | (y >> 1)). If the host does | ||
168 | + * not have an efficient ctpop, then we only want to | ||
169 | + * use it once. | ||
170 | + */ | ||
171 | + return ctpop64(cmp0 | (cmp1 >> 1)); | ||
172 | +} | ||
173 | + | ||
174 | +void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc) | ||
175 | +{ | ||
176 | + intptr_t i, j; | ||
177 | + intptr_t opr_sz = simd_oprsz(desc); | ||
178 | + | ||
179 | + for (i = 0; i < opr_sz; i += 16) { | ||
180 | + uint64_t n0 = *(uint64_t *)(vn + i); | ||
181 | + uint64_t m0 = *(uint64_t *)(vm + i); | ||
182 | + uint64_t n1 = *(uint64_t *)(vn + i + 8); | ||
183 | + uint64_t m1 = *(uint64_t *)(vm + i + 8); | ||
184 | + uint64_t out0 = 0; | ||
185 | + uint64_t out1 = 0; | ||
186 | + | ||
187 | + for (j = 0; j < 64; j += 8) { | ||
188 | + uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1); | ||
189 | + uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1); | ||
190 | + out0 |= cnt0 << j; | ||
191 | + out1 |= cnt1 << j; | ||
192 | + } | ||
193 | + | ||
194 | + *(uint64_t *)(vd + i) = out0; | ||
195 | + *(uint64_t *)(vd + i + 8) = out1; | ||
196 | + } | ||
197 | +} | ||
198 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
199 | index XXXXXXX..XXXXXXX 100644 | ||
200 | --- a/target/arm/translate-sve.c | ||
201 | +++ b/target/arm/translate-sve.c | ||
202 | @@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ | ||
203 | DO_SVE2_PPZZ_MATCH(MATCH, match) | ||
204 | DO_SVE2_PPZZ_MATCH(NMATCH, nmatch) | ||
205 | |||
206 | +static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a) | ||
207 | +{ | ||
208 | + static gen_helper_gvec_4 * const fns[2] = { | ||
209 | + gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d | ||
210 | + }; | ||
211 | + if (a->esz < 2) { | ||
212 | + return false; | ||
213 | + } | ||
214 | + return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]); | ||
215 | +} | ||
216 | + | ||
217 | +static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a) | ||
218 | +{ | ||
219 | + if (a->esz != 0) { | ||
220 | + return false; | ||
221 | + } | ||
222 | + return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg); | ||
223 | +} | ||
224 | + | ||
225 | static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a, | ||
226 | gen_helper_gvec_4_ptr *fn) | ||
227 | { | ||
228 | -- | ||
229 | 2.20.1 | ||
230 | |||
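The counting trick in do_histseg_cnt deserves a standalone illustration:
each SWAR compare leaves 0x80 in exactly the matching byte lanes, and
shifting the second mask right by one before OR-ing moves its marks to bit 6,
disjoint from the first mask's bit 7, so a single population count totals
both masks. A hedged standalone version (GCC/Clang popcount builtin standing
in for ctpop64, names invented for the example):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t dup8(uint8_t b)
    {
        return 0x0101010101010101ull * b;
    }

    /* 0x80 in each byte lane of x that is zero, as in do_histseg_cnt. */
    static uint64_t zero_lanes(uint64_t x)
    {
        const uint64_t mask = dup8(0x7f);
        return ~(((x & mask) + mask) | x | mask);
    }

    static int count_matches(uint8_t n, uint64_t m0, uint64_t m1)
    {
        uint64_t c0 = zero_lanes(m0 ^ dup8(n));
        uint64_t c1 = zero_lanes(m1 ^ dup8(n));
        /* disjoint bit positions, so one popcount counts both masks */
        return __builtin_popcountll(c0 | (c1 >> 1));
    }

    int main(void)
    {
        /* 0x61 appears twice in m0 and once in m1 */
        assert(count_matches(0x61, 0x6161000000000000ull,
                             0x0000006100000000ull) == 3);
        assert(count_matches(0x7f, 0, 0) == 0);
        return 0;
    }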
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Add decoding logic for SVE2 64-bit/32-bit scatter non-temporal | ||
4 | store insns. | ||
5 | |||
6 | 64-bit | ||
7 | * STNT1B (vector plus scalar) | ||
8 | * STNT1H (vector plus scalar) | ||
9 | * STNT1W (vector plus scalar) | ||
10 | * STNT1D (vector plus scalar) | ||
11 | |||
12 | 32-bit | ||
13 | * STNT1B (vector plus scalar) | ||
14 | * STNT1H (vector plus scalar) | ||
15 | * STNT1W (vector plus scalar) | ||
16 | |||
17 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
18 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
19 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
20 | Message-id: 20210525010358.152808-45-richard.henderson@linaro.org | ||
21 | Message-Id: <20200422141553.8037-1-steplong@quicinc.com> | ||
22 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
23 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
24 | --- | ||
25 | target/arm/sve.decode | 10 ++++++++++ | ||
26 | target/arm/translate-sve.c | 8 ++++++++ | ||
27 | 2 files changed, 18 insertions(+) | ||
28 | |||
29 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/arm/sve.decode | ||
32 | +++ b/target/arm/sve.decode | ||
33 | @@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | ||
34 | |||
35 | CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
36 | SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
37 | + | ||
38 | +### SVE2 Memory Store Group | ||
39 | + | ||
40 | +# SVE2 64-bit scatter non-temporal store (vector plus scalar) | ||
41 | +STNT1_zprz 1110010 .. 00 ..... 001 ... ..... ..... \ | ||
42 | + @rprr_scatter_store xs=2 esz=3 scale=0 | ||
43 | + | ||
44 | +# SVE2 32-bit scatter non-temporal store (vector plus scalar) | ||
45 | +STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \ | ||
46 | + @rprr_scatter_store xs=0 esz=2 scale=0 | ||
47 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/target/arm/translate-sve.c | ||
50 | +++ b/target/arm/translate-sve.c | ||
51 | @@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) | ||
52 | return true; | ||
53 | } | ||
54 | |||
55 | +static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) | ||
56 | +{ | ||
57 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
58 | + return false; | ||
59 | + } | ||
60 | + return trans_ST1_zprz(s, a); | ||
61 | +} | ||
62 | + | ||
63 | /* | ||
64 | * Prefetches | ||
65 | */ | ||
66 | -- | ||
67 | 2.20.1 | ||
68 | |||
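No new helpers are needed because only the addressing split changes: in the
"vector plus scalar" form the per-element base comes from the vector register
and the single offset from the scalar register, the reverse of the usual ST1
scatter form (scalar base plus vector offsets), which is why the decode can
delegate to trans_ST1_zprz. A sketch of the per-element address computation
(operand roles taken from the architectural instruction name, not from QEMU
internals):

    #include <stdint.h>
    #include <stdio.h>

    /* STNT1D (vector plus scalar): each active element e stores to
     * Zn[e] + Xm. Contrast ST1D (scalar plus vector): Xn + Zm[e]. */
    static void stnt1_addrs(uint64_t *addr, const uint64_t *zn,
                            uint64_t xm, int nelem)
    {
        for (int e = 0; e < nelem; e++) {
            addr[e] = zn[e] + xm;
        }
    }

    int main(void)
    {
        uint64_t zn[2] = {0x1000, 0x2000}, addr[2];
        stnt1_addrs(addr, zn, 0x10, 2);
        printf("%#llx %#llx\n", (unsigned long long)addr[0],
               (unsigned long long)addr[1]);   /* 0x1010 0x2010 */
        return 0;
    }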
Deleted patch
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Add decoding logic for SVE2 64-bit/32-bit gather non-temporal | ||
4 | load insns. | ||
5 | |||
6 | 64-bit | ||
7 | * LDNT1SB | ||
8 | * LDNT1B (vector plus scalar) | ||
9 | * LDNT1SH | ||
10 | * LDNT1H (vector plus scalar) | ||
11 | * LDNT1SW | ||
12 | * LDNT1W (vector plus scalar) | ||
13 | * LDNT1D (vector plus scalar) | ||
14 | |||
15 | 32-bit | ||
16 | * LDNT1SB | ||
17 | * LDNT1B (vector plus scalar) | ||
18 | * LDNT1SH | ||
19 | * LDNT1H (vector plus scalar) | ||
20 | * LDNT1W (vector plus scalar) | ||
21 | |||
22 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
23 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
24 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
25 | Message-id: 20210525010358.152808-46-richard.henderson@linaro.org | ||
26 | Message-Id: <20200422152343.12493-1-steplong@quicinc.com> | ||
27 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
28 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
29 | --- | ||
30 | target/arm/sve.decode | 11 +++++++++++ | ||
31 | target/arm/translate-sve.c | 8 ++++++++ | ||
32 | 2 files changed, 19 insertions(+) | ||
33 | |||
34 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/target/arm/sve.decode | ||
37 | +++ b/target/arm/sve.decode | ||
38 | @@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm | ||
39 | CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
40 | SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx | ||
41 | |||
42 | +### SVE2 Memory Gather Load Group | ||
43 | + | ||
44 | +# SVE2 64-bit gather non-temporal load | ||
45 | +# (scalar plus unpacked 32-bit unscaled offsets) | ||
46 | +LDNT1_zprz 1100010 msz:2 00 rm:5 1 u:1 0 pg:3 rn:5 rd:5 \ | ||
47 | + &rprr_gather_load xs=0 esz=3 scale=0 ff=0 | ||
48 | + | ||
49 | +# SVE2 32-bit gather non-temporal load (scalar plus 32-bit unscaled offsets) | ||
50 | +LDNT1_zprz 1000010 msz:2 00 rm:5 10 u:1 pg:3 rn:5 rd:5 \ | ||
51 | + &rprr_gather_load xs=0 esz=2 scale=0 ff=0 | ||
52 | + | ||
53 | ### SVE2 Memory Store Group | ||
54 | |||
55 | # SVE2 64-bit scatter non-temporal store (vector plus scalar) | ||
56 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/target/arm/translate-sve.c | ||
59 | +++ b/target/arm/translate-sve.c | ||
60 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) | ||
61 | return true; | ||
62 | } | ||
63 | |||
64 | +static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) | ||
65 | +{ | ||
66 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
67 | + return false; | ||
68 | + } | ||
69 | + return trans_LD1_zprz(s, a); | ||
70 | +} | ||
71 | + | ||
72 | /* Indexed by [mte][be][xs][msz]. */ | ||
73 | static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = { | ||
74 | { /* MTE Inactive */ | ||
75 | -- | ||
76 | 2.20.1 | ||
77 | |||
Deleted patch
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | The signed dot product routines produce a signed result, so the | ||
4 | destination should use signed types. Since we use -fwrapv, there is no functional change. | ||
5 | |||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 20210525010358.152808-49-richard.henderson@linaro.org | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/vec_helper.c | 8 ++++---- | ||
12 | 1 file changed, 4 insertions(+), 4 deletions(-) | ||
13 | |||
14 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/vec_helper.c | ||
17 | +++ b/target/arm/vec_helper.c | ||
18 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm, | ||
19 | void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
20 | { | ||
21 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
22 | - uint32_t *d = vd; | ||
23 | + int32_t *d = vd; | ||
24 | int8_t *n = vn, *m = vm; | ||
25 | |||
26 | for (i = 0; i < opr_sz / 4; ++i) { | ||
27 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
28 | void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
29 | { | ||
30 | intptr_t i, opr_sz = simd_oprsz(desc); | ||
31 | - uint64_t *d = vd; | ||
32 | + int64_t *d = vd; | ||
33 | int16_t *n = vn, *m = vm; | ||
34 | |||
35 | for (i = 0; i < opr_sz / 8; ++i) { | ||
36 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) | ||
37 | { | ||
38 | intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; | ||
39 | intptr_t index = simd_data(desc); | ||
40 | - uint32_t *d = vd; | ||
41 | + int32_t *d = vd; | ||
42 | int8_t *n = vn; | ||
43 | int8_t *m_indexed = (int8_t *)vm + H4(index) * 4; | ||
44 | |||
45 | @@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) | ||
46 | { | ||
47 | intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; | ||
48 | intptr_t index = simd_data(desc); | ||
49 | - uint64_t *d = vd; | ||
50 | + int64_t *d = vd; | ||
51 | int16_t *n = vn; | ||
52 | int16_t *m_indexed = (int16_t *)vm + index * 4; | ||
53 | |||
54 | -- | ||
55 | 2.20.1 | ||
56 | |||
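The reason this is safe: under -fwrapv both signed and unsigned 32-bit
accumulation wrap modulo 2^32, so the stored bit patterns are identical and
only the declared element type changes. The shape of one SDOT lane, as a
sketch (not the helper itself):

    #include <assert.h>
    #include <stdint.h>

    /* One SDOT lane: accumulate four signed 8x8 products into 32 bits. */
    static int32_t sdot_lane(int32_t acc, const int8_t n[4], const int8_t m[4])
    {
        for (int i = 0; i < 4; i++) {
            acc += n[i] * m[i];
        }
        return acc;
    }

    int main(void)
    {
        int8_t n[4] = {-128, 127, -1, 2};
        int8_t m[4] = {2, 2, 2, 2};
        /* (-128 + 127 - 1 + 2) * 2 = 0 */
        assert(sdot_lane(0, n, m) == 0);
        return 0;
    }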
Deleted patch
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20210525010358.152808-61-richard.henderson@linaro.org | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 17 +++++++++++++++++ | ||
9 | target/arm/sve.decode | 18 ++++++++++++++++++ | ||
10 | target/arm/sve_helper.c | 16 ++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 20 ++++++++++++++++++++ | ||
12 | 4 files changed, 71 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_s, TCG_CALL_NO_RWG, | ||
19 | void, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_d, TCG_CALL_NO_RWG, | ||
21 | void, ptr, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_5(sve2_smlal_idx_s, TCG_CALL_NO_RWG, | ||
24 | + void, ptr, ptr, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_5(sve2_smlal_idx_d, TCG_CALL_NO_RWG, | ||
26 | + void, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_5(sve2_smlsl_idx_s, TCG_CALL_NO_RWG, | ||
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_5(sve2_smlsl_idx_d, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, i32) | ||
31 | +DEF_HELPER_FLAGS_5(sve2_umlal_idx_s, TCG_CALL_NO_RWG, | ||
32 | + void, ptr, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_5(sve2_umlal_idx_d, TCG_CALL_NO_RWG, | ||
34 | + void, ptr, ptr, ptr, ptr, i32) | ||
35 | +DEF_HELPER_FLAGS_5(sve2_umlsl_idx_s, TCG_CALL_NO_RWG, | ||
36 | + void, ptr, ptr, ptr, ptr, i32) | ||
37 | +DEF_HELPER_FLAGS_5(sve2_umlsl_idx_d, TCG_CALL_NO_RWG, | ||
38 | + void, ptr, ptr, ptr, ptr, i32) | ||
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/sve.decode | ||
42 | +++ b/target/arm/sve.decode | ||
43 | @@ -XXX,XX +XXX,XX @@ SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3 | ||
44 | SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2 | ||
45 | SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3 | ||
46 | |||
47 | +# SVE2 multiply-add long (indexed) | ||
48 | +SMLALB_zzxw_s 01000100 10 1 ..... 1000.0 ..... ..... @rrxr_3a esz=2 | ||
49 | +SMLALB_zzxw_d 01000100 11 1 ..... 1000.0 ..... ..... @rrxr_2a esz=3 | ||
50 | +SMLALT_zzxw_s 01000100 10 1 ..... 1000.1 ..... ..... @rrxr_3a esz=2 | ||
51 | +SMLALT_zzxw_d 01000100 11 1 ..... 1000.1 ..... ..... @rrxr_2a esz=3 | ||
52 | +UMLALB_zzxw_s 01000100 10 1 ..... 1001.0 ..... ..... @rrxr_3a esz=2 | ||
53 | +UMLALB_zzxw_d 01000100 11 1 ..... 1001.0 ..... ..... @rrxr_2a esz=3 | ||
54 | +UMLALT_zzxw_s 01000100 10 1 ..... 1001.1 ..... ..... @rrxr_3a esz=2 | ||
55 | +UMLALT_zzxw_d 01000100 11 1 ..... 1001.1 ..... ..... @rrxr_2a esz=3 | ||
56 | +SMLSLB_zzxw_s 01000100 10 1 ..... 1010.0 ..... ..... @rrxr_3a esz=2 | ||
57 | +SMLSLB_zzxw_d 01000100 11 1 ..... 1010.0 ..... ..... @rrxr_2a esz=3 | ||
58 | +SMLSLT_zzxw_s 01000100 10 1 ..... 1010.1 ..... ..... @rrxr_3a esz=2 | ||
59 | +SMLSLT_zzxw_d 01000100 11 1 ..... 1010.1 ..... ..... @rrxr_2a esz=3 | ||
60 | +UMLSLB_zzxw_s 01000100 10 1 ..... 1011.0 ..... ..... @rrxr_3a esz=2 | ||
61 | +UMLSLB_zzxw_d 01000100 11 1 ..... 1011.0 ..... ..... @rrxr_2a esz=3 | ||
62 | +UMLSLT_zzxw_s 01000100 10 1 ..... 1011.1 ..... ..... @rrxr_3a esz=2 | ||
63 | +UMLSLT_zzxw_d 01000100 11 1 ..... 1011.1 ..... ..... @rrxr_2a esz=3 | ||
64 | + | ||
65 | # SVE2 saturating multiply (indexed) | ||
66 | SQDMULLB_zzx_s 01000100 10 1 ..... 1110.0 ..... ..... @rrx_3a esz=2 | ||
67 | SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3 | ||
68 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
69 | index XXXXXXX..XXXXXXX 100644 | ||
70 | --- a/target/arm/sve_helper.c | ||
71 | +++ b/target/arm/sve_helper.c | ||
72 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
73 | } \ | ||
74 | } | ||
75 | |||
76 | +#define DO_MLA(N, M, A) (A + N * M) | ||
77 | + | ||
78 | +DO_ZZXW(sve2_smlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLA) | ||
79 | +DO_ZZXW(sve2_smlal_idx_d, int64_t, int32_t, , H1_4, DO_MLA) | ||
80 | +DO_ZZXW(sve2_umlal_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLA) | ||
81 | +DO_ZZXW(sve2_umlal_idx_d, uint64_t, uint32_t, , H1_4, DO_MLA) | ||
82 | + | ||
83 | +#define DO_MLS(N, M, A) (A - N * M) | ||
84 | + | ||
85 | +DO_ZZXW(sve2_smlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLS) | ||
86 | +DO_ZZXW(sve2_smlsl_idx_d, int64_t, int32_t, , H1_4, DO_MLS) | ||
87 | +DO_ZZXW(sve2_umlsl_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLS) | ||
88 | +DO_ZZXW(sve2_umlsl_idx_d, uint64_t, uint32_t, , H1_4, DO_MLS) | ||
89 | + | ||
90 | #define DO_SQDMLAL_S(N, M, A) DO_SQADD_S(A, do_sqdmull_s(N, M)) | ||
91 | #define DO_SQDMLAL_D(N, M, A) do_sqadd_d(A, do_sqdmull_d(N, M)) | ||
92 | |||
93 | @@ -XXX,XX +XXX,XX @@ DO_ZZXW(sve2_sqdmlal_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLAL_D) | ||
94 | DO_ZZXW(sve2_sqdmlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLSL_S) | ||
95 | DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D) | ||
96 | |||
97 | +#undef DO_MLA | ||
98 | +#undef DO_MLS | ||
99 | #undef DO_ZZXW | ||
100 | |||
101 | #define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \ | ||
102 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/target/arm/translate-sve.c | ||
105 | +++ b/target/arm/translate-sve.c | ||
106 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) | ||
107 | DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) | ||
108 | DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) | ||
109 | |||
110 | +DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) | ||
111 | +DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) | ||
112 | +DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) | ||
113 | +DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) | ||
114 | + | ||
115 | +DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) | ||
116 | +DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) | ||
117 | +DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) | ||
118 | +DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) | ||
119 | + | ||
120 | +DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) | ||
121 | +DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) | ||
122 | +DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) | ||
123 | +DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) | ||
124 | + | ||
125 | +DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) | ||
126 | +DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) | ||
127 | +DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) | ||
128 | +DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) | ||
129 | + | ||
130 | #undef DO_SVE2_RRXR_TB | ||
131 | |||
132 | /* | ||
133 | -- | ||
134 | 2.20.1 | ||
135 | |||
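Shape of these indexed widening multiply-accumulates: within each 128-bit
segment, one narrow element of Zm selected by the index is multiplied
against the bottom (B, even-numbered) or top (T, odd-numbered) narrow
elements of Zn, widened, and added to (DO_MLA) or subtracted from (DO_MLS)
the accumulator. A scalar sketch of one segment of SMLALB in the
32-to-64-bit form (hedged; the real loop is the DO_ZZXW macro with the H*
byte swizzles):

    #include <assert.h>
    #include <stdint.h>

    /* One 128-bit segment of SMLALB (.D form): the two even ("bottom")
     * 32-bit elements of Zn are sign-extended, multiplied by the single
     * indexed 32-bit element of Zm, and accumulated into the two 64-bit
     * destination elements. */
    static void smlalb_seg_sketch(int64_t d[2], const int32_t n[4],
                                  const int32_t m[4], int index)
    {
        int64_t mm = m[index];          /* same element for the whole segment */
        d[0] += (int64_t)n[0] * mm;     /* bottom = even-numbered elements */
        d[1] += (int64_t)n[2] * mm;     /* SMLALT would use n[1] and n[3] */
    }

    int main(void)
    {
        int64_t d[2] = {1, 1};
        int32_t n[4] = {2, 99, -3, 99}, m[4] = {0, 10, 0, 0};
        smlalb_seg_sketch(d, n, m, 1);
        assert(d[0] == 1 + 2 * 10 && d[1] == 1 - 3 * 10);
        return 0;
    }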
Deleted patch
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20210525010358.152808-62-richard.henderson@linaro.org | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 5 +++++ | ||
9 | target/arm/sve.decode | 10 ++++++++++ | ||
10 | target/arm/sve_helper.c | 6 ++++++ | ||
11 | target/arm/translate-sve.c | 10 ++++++++++ | ||
12 | 4 files changed, 31 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_umlsl_idx_s, TCG_CALL_NO_RWG, | ||
19 | void, ptr, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_5(sve2_umlsl_idx_d, TCG_CALL_NO_RWG, | ||
21 | void, ptr, ptr, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | +DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
26 | +DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
27 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/target/arm/sve.decode | ||
30 | +++ b/target/arm/sve.decode | ||
31 | @@ -XXX,XX +XXX,XX @@ UMLSLB_zzxw_d 01000100 11 1 ..... 1011.0 ..... ..... @rrxr_2a esz=3 | ||
32 | UMLSLT_zzxw_s 01000100 10 1 ..... 1011.1 ..... ..... @rrxr_3a esz=2 | ||
33 | UMLSLT_zzxw_d 01000100 11 1 ..... 1011.1 ..... ..... @rrxr_2a esz=3 | ||
34 | |||
35 | +# SVE2 integer multiply long (indexed) | ||
36 | +SMULLB_zzx_s 01000100 10 1 ..... 1100.0 ..... ..... @rrx_3a esz=2 | ||
37 | +SMULLB_zzx_d 01000100 11 1 ..... 1100.0 ..... ..... @rrx_2a esz=3 | ||
38 | +SMULLT_zzx_s 01000100 10 1 ..... 1100.1 ..... ..... @rrx_3a esz=2 | ||
39 | +SMULLT_zzx_d 01000100 11 1 ..... 1100.1 ..... ..... @rrx_2a esz=3 | ||
40 | +UMULLB_zzx_s 01000100 10 1 ..... 1101.0 ..... ..... @rrx_3a esz=2 | ||
41 | +UMULLB_zzx_d 01000100 11 1 ..... 1101.0 ..... ..... @rrx_2a esz=3 | ||
42 | +UMULLT_zzx_s 01000100 10 1 ..... 1101.1 ..... ..... @rrx_3a esz=2 | ||
43 | +UMULLT_zzx_d 01000100 11 1 ..... 1101.1 ..... ..... @rrx_2a esz=3 | ||
44 | + | ||
45 | # SVE2 saturating multiply (indexed) | ||
46 | SQDMULLB_zzx_s 01000100 10 1 ..... 1110.0 ..... ..... @rrx_3a esz=2 | ||
47 | SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3 | ||
48 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/target/arm/sve_helper.c | ||
51 | +++ b/target/arm/sve_helper.c | ||
52 | @@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
53 | DO_ZZX(sve2_sqdmull_idx_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s) | ||
54 | DO_ZZX(sve2_sqdmull_idx_d, int64_t, int32_t, , H1_4, do_sqdmull_d) | ||
55 | |||
56 | +DO_ZZX(sve2_smull_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MUL) | ||
57 | +DO_ZZX(sve2_smull_idx_d, int64_t, int32_t, , H1_4, DO_MUL) | ||
58 | + | ||
59 | +DO_ZZX(sve2_umull_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL) | ||
60 | +DO_ZZX(sve2_umull_idx_d, uint64_t, uint32_t, , H1_4, DO_MUL) | ||
61 | + | ||
62 | #undef DO_ZZX | ||
63 | |||
64 | #define DO_BITPERM(NAME, TYPE, OP) \ | ||
65 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
66 | index XXXXXXX..XXXXXXX 100644 | ||
67 | --- a/target/arm/translate-sve.c | ||
68 | +++ b/target/arm/translate-sve.c | ||
69 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) | ||
70 | DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) | ||
71 | DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) | ||
72 | |||
73 | +DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) | ||
74 | +DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) | ||
75 | +DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) | ||
76 | +DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) | ||
77 | + | ||
78 | +DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) | ||
79 | +DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) | ||
80 | +DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) | ||
81 | +DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) | ||
82 | + | ||
83 | #undef DO_SVE2_RRX_TB | ||
84 | |||
85 | static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra, | ||
86 | -- | ||
87 | 2.20.1 | ||
88 | |||
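As context for the decode lines in this patch: SVE2's MULLB/MULLT (indexed) forms widen the bottom (even-numbered) or top (odd-numbered) elements of Zn and multiply each by a single element of Zm, selected by the immediate index within each 128-bit segment. A minimal standalone C sketch of the SMULLB 16-to-32-bit case follows; the function name and the flat little-endian layout are illustrative assumptions, not QEMU code:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Illustrative model of SMULLB (indexed), 16-bit -> 32-bit elements:
     * per 128-bit segment, each even-numbered int16_t element of zn is
     * sign-extended and multiplied by the one int16_t element of zm
     * selected by 'index' within that same segment.
     */
    static void smullb_idx_s_ref(int32_t *zd, const int16_t *zn,
                                 const int16_t *zm, unsigned index,
                                 size_t vl_bytes)
    {
        for (size_t seg = 0; seg < vl_bytes / 16; ++seg) {
            int32_t mul = zm[seg * 8 + index];   /* 8 int16 per segment */
            for (size_t e = 0; e < 4; ++e) {     /* 4 int32 results */
                zd[seg * 4 + e] = (int32_t)zn[seg * 8 + 2 * e] * mul;
            }
        }
    }

The top (SMULLT) form differs only in reading the odd elements, zn[seg * 8 + 2 * e + 1]; that choice is what the trailing false/true argument to DO_SVE2_RRX_TB in translate-sve.c selects.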
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | Message-id: 20210525010358.152808-63-richard.henderson@linaro.org | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | --- | ||
8 | target/arm/helper-sve.h | 9 +++++++++ | ||
9 | target/arm/sve.decode | 12 ++++++++++++ | ||
10 | target/arm/sve_helper.c | 28 ++++++++++++++++++++++++++++ | ||
11 | target/arm/translate-sve.c | 15 +++++++++++++++ | ||
12 | 4 files changed, 64 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper-sve.h | ||
17 | +++ b/target/arm/helper-sve.h | ||
18 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
19 | DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
20 | DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
21 | DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
22 | + | ||
23 | +DEF_HELPER_FLAGS_5(sve2_cmla_idx_h, TCG_CALL_NO_RWG, | ||
24 | + void, ptr, ptr, ptr, ptr, i32) | ||
25 | +DEF_HELPER_FLAGS_5(sve2_cmla_idx_s, TCG_CALL_NO_RWG, | ||
26 | + void, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG, | ||
28 | + void, ptr, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG, | ||
30 | + void, ptr, ptr, ptr, ptr, i32) | ||
31 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/sve.decode | ||
34 | +++ b/target/arm/sve.decode | ||
35 | @@ -XXX,XX +XXX,XX @@ SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3 | ||
36 | SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2 | ||
37 | SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3 | ||
38 | |||
39 | +# SVE2 complex integer multiply-add (indexed) | ||
40 | +CMLA_zzxz_h 01000100 10 1 index:2 rm:3 0110 rot:2 rn:5 rd:5 \ | ||
41 | + ra=%reg_movprfx | ||
42 | +CMLA_zzxz_s 01000100 11 1 index:1 rm:4 0110 rot:2 rn:5 rd:5 \ | ||
43 | + ra=%reg_movprfx | ||
44 | + | ||
45 | +# SVE2 complex saturating integer multiply-add (indexed) | ||
46 | +SQRDCMLAH_zzxz_h 01000100 10 1 index:2 rm:3 0111 rot:2 rn:5 rd:5 \ | ||
47 | + ra=%reg_movprfx | ||
48 | +SQRDCMLAH_zzxz_s 01000100 11 1 index:1 rm:4 0111 rot:2 rn:5 rd:5 \ | ||
49 | + ra=%reg_movprfx | ||
50 | + | ||
51 | # SVE2 multiply-add long (indexed) | ||
52 | SMLALB_zzxw_s 01000100 10 1 ..... 1000.0 ..... ..... @rrxr_3a esz=2 | ||
53 | SMLALB_zzxw_d 01000100 11 1 ..... 1000.0 ..... ..... @rrxr_2a esz=3 | ||
54 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/target/arm/sve_helper.c | ||
57 | +++ b/target/arm/sve_helper.c | ||
58 | @@ -XXX,XX +XXX,XX @@ DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H) | ||
59 | DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S) | ||
60 | DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D) | ||
61 | |||
62 | +#define DO_CMLA_IDX_FUNC(NAME, TYPE, H, OP) \ | ||
63 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
64 | +{ \ | ||
65 | + intptr_t i, j, oprsz = simd_oprsz(desc); \ | ||
66 | + int rot = extract32(desc, SIMD_DATA_SHIFT, 2); \ | ||
67 | + int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2) * 2; \ | ||
68 | + int sel_a = rot & 1, sel_b = sel_a ^ 1; \ | ||
69 | + bool sub_r = rot == 1 || rot == 2; \ | ||
70 | + bool sub_i = rot >= 2; \ | ||
71 | + TYPE *d = vd, *n = vn, *m = vm, *a = va; \ | ||
72 | + for (i = 0; i < oprsz / sizeof(TYPE); i += 16 / sizeof(TYPE)) { \ | ||
73 | + TYPE elt2_a = m[H(i + idx + sel_a)]; \ | ||
74 | + TYPE elt2_b = m[H(i + idx + sel_b)]; \ | ||
75 | + for (j = 0; j < 16 / sizeof(TYPE); j += 2) { \ | ||
76 | + TYPE elt1_a = n[H(i + j + sel_a)]; \ | ||
77 | + d[H(i + j)] = OP(elt1_a, elt2_a, a[H(i + j)], sub_r); \ |
78 | + d[H(i + j + 1)] = OP(elt1_a, elt2_b, a[H(i + j + 1)], sub_i); \ |
79 | + } \ | ||
80 | + } \ | ||
81 | +} | ||
82 | + | ||
83 | +DO_CMLA_IDX_FUNC(sve2_cmla_idx_h, int16_t, H2, DO_CMLA) | ||
84 | +DO_CMLA_IDX_FUNC(sve2_cmla_idx_s, int32_t, H4, DO_CMLA) | ||
85 | + | ||
86 | +DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H) | ||
87 | +DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S) | ||
88 | + | ||
89 | #undef DO_CMLA | ||
90 | #undef DO_CMLA_FUNC | ||
91 | +#undef DO_CMLA_IDX_FUNC | ||
92 | #undef DO_SQRDMLAH_B | ||
93 | #undef DO_SQRDMLAH_H | ||
94 | #undef DO_SQRDMLAH_S | ||
95 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
96 | index XXXXXXX..XXXXXXX 100644 | ||
97 | --- a/target/arm/translate-sve.c | ||
98 | +++ b/target/arm/translate-sve.c | ||
99 | @@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) | ||
100 | |||
101 | #undef DO_SVE2_RRXR_TB | ||
102 | |||
103 | +#define DO_SVE2_RRXR_ROT(NAME, FUNC) \ | ||
104 | + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ | ||
105 | + { \ | ||
106 | + return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, \ | ||
107 | + (a->index << 2) | a->rot, FUNC); \ | ||
108 | + } | ||
109 | + | ||
110 | +DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h) | ||
111 | +DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) | ||
112 | + | ||
113 | +DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h) | ||
114 | +DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s) | ||
115 | + | ||
116 | +#undef DO_SVE2_RRXR_ROT | ||
117 | + | ||
118 | /* | ||
119 | *** SVE Floating Point Multiply-Add Indexed Group | ||
120 | */ | ||
121 | -- | ||
122 | 2.20.1 | ||
123 | |||
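The rot field in these decode patterns follows Arm's usual complex-arithmetic encoding: a rotation of 0, 90, 180 or 270 degrees, which the new helper decomposes into sel_a (which half of each complex pair to read) and sub_r/sub_i (the signs of the products added to the real and imaginary accumulators). A simplified per-pair C model of that decomposition, covering only the non-saturating CMLA case; the names here are illustrative, not QEMU code:

    #include <stdint.h>

    /*
     * One (real, imag) accumulator pair of CMLA with rotation 'rot',
     * mirroring the helper's sel_a/sub_r/sub_i decomposition.  Products
     * wrap to the element width, as in the non-saturating instruction.
     */
    static void cmla_pair(int16_t *d_r, int16_t *d_i,
                          int16_t n_r, int16_t n_i,
                          int16_t m_r, int16_t m_i, int rot)
    {
        int sel_a = rot & 1;                /* 0: real halves, 1: imag */
        int sub_r = (rot == 1 || rot == 2); /* negate real-side product */
        int sub_i = (rot >= 2);             /* negate imag-side product */
        int16_t n_sel = sel_a ? n_i : n_r;  /* n[sel_a] */
        int16_t m_a   = sel_a ? m_i : m_r;  /* m[sel_a] */
        int16_t m_b   = sel_a ? m_r : m_i;  /* m[sel_b] */

        *d_r += (sub_r ? -1 : 1) * n_sel * m_a;
        *d_i += (sub_i ? -1 : 1) * n_sel * m_b;
    }

Two CMLAs with rotations 0 and 90 (or 180 and 270) against the same accumulator therefore compose a full complex multiply-accumulate, one cross-product pair per instruction; each single CMLA contributes only half of the products, which is why the helper reads just one element of n (elt1_a) per pair.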
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | We're about to add more variations on this theme. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-65-richard.henderson@linaro.org | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | --- | ||
10 | target/arm/vec_helper.c | 82 ++++++++++------------------------------- | ||
11 | 1 file changed, 20 insertions(+), 62 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/vec_helper.c | ||
16 | +++ b/target/arm/vec_helper.c | ||
17 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc) | ||
18 | /* Integer 8 and 16-bit dot-product. | ||
19 | * | ||
20 | * Note that for the loops herein, host endianness does not matter | ||
21 | - * with respect to the ordering of data within the 64-bit lanes. | ||
22 | + * with respect to the ordering of data within the quad-width lanes. | ||
23 | * All elements are treated equally, no matter where they are. | ||
24 | */ | ||
25 | |||
26 | -void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
27 | -{ | ||
28 | - intptr_t i, opr_sz = simd_oprsz(desc); | ||
29 | - int32_t *d = vd, *a = va; | ||
30 | - int8_t *n = vn, *m = vm; | ||
31 | - | ||
32 | - for (i = 0; i < opr_sz / 4; ++i) { | ||
33 | - d[i] = (a[i] + | ||
34 | - n[i * 4 + 0] * m[i * 4 + 0] + | ||
35 | - n[i * 4 + 1] * m[i * 4 + 1] + | ||
36 | - n[i * 4 + 2] * m[i * 4 + 2] + | ||
37 | - n[i * 4 + 3] * m[i * 4 + 3]); | ||
38 | - } | ||
39 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
40 | +#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \ | ||
41 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
42 | +{ \ | ||
43 | + intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
44 | + TYPED *d = vd, *a = va; \ | ||
45 | + TYPEN *n = vn; \ | ||
46 | + TYPEM *m = vm; \ | ||
47 | + for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \ | ||
48 | + d[i] = (a[i] + \ | ||
49 | + (TYPED)n[i * 4 + 0] * m[i * 4 + 0] + \ | ||
50 | + (TYPED)n[i * 4 + 1] * m[i * 4 + 1] + \ | ||
51 | + (TYPED)n[i * 4 + 2] * m[i * 4 + 2] + \ | ||
52 | + (TYPED)n[i * 4 + 3] * m[i * 4 + 3]); \ | ||
53 | + } \ | ||
54 | + clear_tail(d, opr_sz, simd_maxsz(desc)); \ | ||
55 | } | ||
56 | |||
57 | -void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
58 | -{ | ||
59 | - intptr_t i, opr_sz = simd_oprsz(desc); | ||
60 | - uint32_t *d = vd, *a = va; | ||
61 | - uint8_t *n = vn, *m = vm; | ||
62 | - | ||
63 | - for (i = 0; i < opr_sz / 4; ++i) { | ||
64 | - d[i] = (a[i] + | ||
65 | - n[i * 4 + 0] * m[i * 4 + 0] + | ||
66 | - n[i * 4 + 1] * m[i * 4 + 1] + | ||
67 | - n[i * 4 + 2] * m[i * 4 + 2] + | ||
68 | - n[i * 4 + 3] * m[i * 4 + 3]); | ||
69 | - } | ||
70 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
71 | -} | ||
72 | - | ||
73 | -void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
74 | -{ | ||
75 | - intptr_t i, opr_sz = simd_oprsz(desc); | ||
76 | - int64_t *d = vd, *a = va; | ||
77 | - int16_t *n = vn, *m = vm; | ||
78 | - | ||
79 | - for (i = 0; i < opr_sz / 8; ++i) { | ||
80 | - d[i] = (a[i] + | ||
81 | - (int64_t)n[i * 4 + 0] * m[i * 4 + 0] + | ||
82 | - (int64_t)n[i * 4 + 1] * m[i * 4 + 1] + | ||
83 | - (int64_t)n[i * 4 + 2] * m[i * 4 + 2] + | ||
84 | - (int64_t)n[i * 4 + 3] * m[i * 4 + 3]); | ||
85 | - } | ||
86 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
87 | -} | ||
88 | - | ||
89 | -void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc) | ||
90 | -{ | ||
91 | - intptr_t i, opr_sz = simd_oprsz(desc); | ||
92 | - uint64_t *d = vd, *a = va; | ||
93 | - uint16_t *n = vn, *m = vm; | ||
94 | - | ||
95 | - for (i = 0; i < opr_sz / 8; ++i) { | ||
96 | - d[i] = (a[i] + | ||
97 | - (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] + | ||
98 | - (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] + | ||
99 | - (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] + | ||
100 | - (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]); | ||
101 | - } | ||
102 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
103 | -} | ||
104 | +DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t) | ||
105 | +DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t) | ||
106 | +DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t) | ||
107 | +DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t) | ||
108 | |||
109 | void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, | ||
110 | void *va, uint32_t desc) | ||
111 | -- | ||
112 | 2.20.1 | ||
113 | |||
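With four hand-written helpers collapsed into one macro, it may help to see one instantiation written out. Approximately, DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t) expands to the following; the desc decoding and clear_tail() plumbing are replaced by an explicit size parameter purely for illustration:

    #include <stddef.h>
    #include <stdint.h>

    /* Expansion sketch of DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t). */
    static void sdot_b_ref(int32_t *d, const int8_t *n, const int8_t *m,
                           const int32_t *a, size_t opr_sz)
    {
        for (size_t i = 0; i < opr_sz / sizeof(int32_t); ++i) {
            d[i] = a[i]
                 + (int32_t)n[i * 4 + 0] * m[i * 4 + 0]
                 + (int32_t)n[i * 4 + 1] * m[i * 4 + 1]
                 + (int32_t)n[i * 4 + 2] * m[i * 4 + 2]
                 + (int32_t)n[i * 4 + 3] * m[i * 4 + 3];
        }
    }

For the byte forms the (TYPED) cast is redundant, since int8_t operands promote to int anyway; it matters for the _h forms, where it forces each 16x16 product to be computed at 64 bits before accumulation, exactly as the hand-written versions' (int64_t)/(uint64_t) casts did.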
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | We're about to add more variations on this theme. | ||
4 | Accept an inner loop for the _h variants rather | ||
5 | than keeping them unrolled. | ||
6 | |||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20210525010358.152808-66-richard.henderson@linaro.org | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | --- | ||
12 | target/arm/vec_helper.c | 160 ++++++++-------------------------------- | ||
13 | 1 file changed, 29 insertions(+), 131 deletions(-) | ||
14 | |||
15 | diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/vec_helper.c | ||
18 | +++ b/target/arm/vec_helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t) | ||
20 | DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t) | ||
21 | DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t) | ||
22 | |||
23 | -void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, | ||
24 | - void *va, uint32_t desc) | ||
25 | -{ | ||
26 | - intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; | ||
27 | - intptr_t index = simd_data(desc); | ||
28 | - int32_t *d = vd, *a = va; | ||
29 | - int8_t *n = vn; | ||
30 | - int8_t *m_indexed = (int8_t *)vm + H4(index) * 4; | ||
31 | - | ||
32 | - /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. | ||
33 | - * Otherwise opr_sz is a multiple of 16. | ||
34 | - */ | ||
35 | - segend = MIN(4, opr_sz_4); | ||
36 | - i = 0; | ||
37 | - do { | ||
38 | - int8_t m0 = m_indexed[i * 4 + 0]; | ||
39 | - int8_t m1 = m_indexed[i * 4 + 1]; | ||
40 | - int8_t m2 = m_indexed[i * 4 + 2]; | ||
41 | - int8_t m3 = m_indexed[i * 4 + 3]; | ||
42 | - | ||
43 | - do { | ||
44 | - d[i] = (a[i] + | ||
45 | - n[i * 4 + 0] * m0 + | ||
46 | - n[i * 4 + 1] * m1 + | ||
47 | - n[i * 4 + 2] * m2 + | ||
48 | - n[i * 4 + 3] * m3); | ||
49 | - } while (++i < segend); | ||
50 | - segend = i + 4; | ||
51 | - } while (i < opr_sz_4); | ||
52 | - | ||
53 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
54 | +#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \ | ||
55 | +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ | ||
56 | +{ \ | ||
57 | + intptr_t i = 0, opr_sz = simd_oprsz(desc); \ | ||
58 | + intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \ | ||
59 | + intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \ | ||
60 | + intptr_t index = simd_data(desc); \ | ||
61 | + TYPED *d = vd, *a = va; \ | ||
62 | + TYPEN *n = vn; \ | ||
63 | + TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 4; \ | ||
64 | + do { \ | ||
65 | + TYPED m0 = m_indexed[i * 4 + 0]; \ | ||
66 | + TYPED m1 = m_indexed[i * 4 + 1]; \ | ||
67 | + TYPED m2 = m_indexed[i * 4 + 2]; \ | ||
68 | + TYPED m3 = m_indexed[i * 4 + 3]; \ | ||
69 | + do { \ | ||
70 | + d[i] = (a[i] + \ | ||
71 | + n[i * 4 + 0] * m0 + \ | ||
72 | + n[i * 4 + 1] * m1 + \ | ||
73 | + n[i * 4 + 2] * m2 + \ | ||
74 | + n[i * 4 + 3] * m3); \ | ||
75 | + } while (++i < segend); \ | ||
76 | + segend = i + 4; \ | ||
77 | + } while (i < opr_sz_n); \ | ||
78 | + clear_tail(d, opr_sz, simd_maxsz(desc)); \ | ||
79 | } | ||
80 | |||
81 | -void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, | ||
82 | - void *va, uint32_t desc) | ||
83 | -{ | ||
84 | - intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; | ||
85 | - intptr_t index = simd_data(desc); | ||
86 | - uint32_t *d = vd, *a = va; | ||
87 | - uint8_t *n = vn; | ||
88 | - uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4; | ||
89 | - | ||
90 | - /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. | ||
91 | - * Otherwise opr_sz is a multiple of 16. | ||
92 | - */ | ||
93 | - segend = MIN(4, opr_sz_4); | ||
94 | - i = 0; | ||
95 | - do { | ||
96 | - uint8_t m0 = m_indexed[i * 4 + 0]; | ||
97 | - uint8_t m1 = m_indexed[i * 4 + 1]; | ||
98 | - uint8_t m2 = m_indexed[i * 4 + 2]; | ||
99 | - uint8_t m3 = m_indexed[i * 4 + 3]; | ||
100 | - | ||
101 | - do { | ||
102 | - d[i] = (a[i] + | ||
103 | - n[i * 4 + 0] * m0 + | ||
104 | - n[i * 4 + 1] * m1 + | ||
105 | - n[i * 4 + 2] * m2 + | ||
106 | - n[i * 4 + 3] * m3); | ||
107 | - } while (++i < segend); | ||
108 | - segend = i + 4; | ||
109 | - } while (i < opr_sz_4); | ||
110 | - | ||
111 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
112 | -} | ||
113 | - | ||
114 | -void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, | ||
115 | - void *va, uint32_t desc) | ||
116 | -{ | ||
117 | - intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; | ||
118 | - intptr_t index = simd_data(desc); | ||
119 | - int64_t *d = vd, *a = va; | ||
120 | - int16_t *n = vn; | ||
121 | - int16_t *m_indexed = (int16_t *)vm + index * 4; | ||
122 | - | ||
123 | - /* This is supported by SVE only, so opr_sz is always a multiple of 16. | ||
124 | - * Process the entire segment all at once, writing back the results | ||
125 | - * only after we've consumed all of the inputs. | ||
126 | - */ | ||
127 | - for (i = 0; i < opr_sz_8; i += 2) { | ||
128 | - int64_t d0, d1; | ||
129 | - | ||
130 | - d0 = a[i + 0]; | ||
131 | - d0 += n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; | ||
132 | - d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1]; | ||
133 | - d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2]; | ||
134 | - d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3]; | ||
135 | - | ||
136 | - d1 = a[i + 1]; | ||
137 | - d1 += n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; | ||
138 | - d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1]; | ||
139 | - d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2]; | ||
140 | - d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3]; | ||
141 | - | ||
142 | - d[i + 0] = d0; | ||
143 | - d[i + 1] = d1; | ||
144 | - } | ||
145 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
146 | -} | ||
147 | - | ||
148 | -void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, | ||
149 | - void *va, uint32_t desc) | ||
150 | -{ | ||
151 | - intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; | ||
152 | - intptr_t index = simd_data(desc); | ||
153 | - uint64_t *d = vd, *a = va; | ||
154 | - uint16_t *n = vn; | ||
155 | - uint16_t *m_indexed = (uint16_t *)vm + index * 4; | ||
156 | - | ||
157 | - /* This is supported by SVE only, so opr_sz is always a multiple of 16. | ||
158 | - * Process the entire segment all at once, writing back the results | ||
159 | - * only after we've consumed all of the inputs. | ||
160 | - */ | ||
161 | - for (i = 0; i < opr_sz_8; i += 2) { | ||
162 | - uint64_t d0, d1; | ||
163 | - | ||
164 | - d0 = a[i + 0]; | ||
165 | - d0 += n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; | ||
166 | - d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1]; | ||
167 | - d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2]; | ||
168 | - d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3]; | ||
169 | - | ||
170 | - d1 = a[i + 1]; | ||
171 | - d1 += n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; | ||
172 | - d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1]; | ||
173 | - d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2]; | ||
174 | - d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3]; | ||
175 | - | ||
176 | - d[i + 0] = d0; | ||
177 | - d[i + 1] = d1; | ||
178 | - } | ||
179 | - clear_tail(d, opr_sz, simd_maxsz(desc)); | ||
180 | -} | ||
181 | +DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4) | ||
182 | +DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4) | ||
183 | +DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, ) | ||
184 | +DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, ) | ||
185 | |||
186 | void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, | ||
187 | void *vfpst, uint32_t desc) | ||
188 | -- | ||
189 | 2.20.1 | ||
190 | |||
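The indexed variant keeps the same arithmetic but changes which Zm elements participate: the index picks one group of four elements within each 128-bit segment, and that group is reused against every group of four Zn elements in the same segment (which is why the macro hoists m0..m3 out of the inner do/while). A byte-form reference model, assuming opr_sz is a multiple of 16 and a flat little-endian layout; the aa64/aa32 AdvSIMD opr_sz == 8 special case the macro also handles is omitted:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative model of SDOT (indexed), byte form. */
    static void sdot_idx_b_ref(int32_t *d, const int8_t *n, const int8_t *m,
                               const int32_t *a, unsigned index,
                               size_t opr_sz)
    {
        for (size_t seg = 0; seg < opr_sz / 16; ++seg) {
            const int8_t *mseg = m + seg * 16 + index * 4;  /* chosen group */
            for (size_t e = 0; e < 4; ++e) {
                size_t i = seg * 4 + e;                     /* int32 index */
                d[i] = a[i]
                     + n[i * 4 + 0] * mseg[0]
                     + n[i * 4 + 1] * mseg[1]
                     + n[i * 4 + 2] * mseg[2]
                     + n[i * 4 + 3] * mseg[3];
            }
        }
    }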
Deleted patch | |||
---|---|---|---|
1 | From: Stephen Long <steplong@quicinc.com> | ||
2 | 1 | ||
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Stephen Long <steplong@quicinc.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20210525010358.152808-72-richard.henderson@linaro.org | ||
7 | Message-Id: <20200428144352.9275-1-steplong@quicinc.com> | ||
8 | [rth: rearrange the macros a little and rebase] | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | --- | ||
12 | target/arm/helper-sve.h | 10 +++++ | ||
13 | target/arm/sve.decode | 5 +++ | ||
14 | target/arm/sve_helper.c | 90 ++++++++++++++++++++++++++++++-------- | ||
15 | target/arm/translate-sve.c | 33 ++++++++++++++ | ||
16 | 4 files changed, 119 insertions(+), 19 deletions(-) | ||
17 | |||
18 | diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/target/arm/helper-sve.h | ||
21 | +++ b/target/arm/helper-sve.h | ||
22 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
23 | DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
24 | DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
25 | |||
26 | +DEF_HELPER_FLAGS_5(sve2_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
27 | +DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
28 | +DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
29 | +DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||
30 | + | ||
31 | +DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
32 | +DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
33 | +DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
34 | +DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||
35 | + | ||
36 | DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
37 | DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
38 | DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) | ||
39 | diff --git a/target/arm/sve.decode b/target/arm/sve.decode | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/target/arm/sve.decode | ||
42 | +++ b/target/arm/sve.decode | ||
43 | @@ -XXX,XX +XXX,XX @@ TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm | ||
44 | # SVE unpack vector elements | ||
45 | UNPK 00000101 esz:2 1100 u:1 h:1 001110 rn:5 rd:5 | ||
46 | |||
47 | +# SVE2 Table Lookup (three sources) | ||
48 | + | ||
49 | +TBL_sve2 00000101 .. 1 ..... 001010 ..... ..... @rd_rn_rm | ||
50 | +TBX 00000101 .. 1 ..... 001011 ..... ..... @rd_rn_rm | ||
51 | + | ||
52 | ### SVE Permute - Predicates Group | ||
53 | |||
54 | # SVE permute predicate elements | ||
55 | diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/target/arm/sve_helper.c | ||
58 | +++ b/target/arm/sve_helper.c | ||
59 | @@ -XXX,XX +XXX,XX @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc) | ||
60 | } | ||
61 | } | ||
62 | |||
63 | -#define DO_TBL(NAME, TYPE, H) \ | ||
64 | -void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
65 | -{ \ | ||
66 | - intptr_t i, opr_sz = simd_oprsz(desc); \ | ||
67 | - uintptr_t elem = opr_sz / sizeof(TYPE); \ | ||
68 | - TYPE *d = vd, *n = vn, *m = vm; \ | ||
69 | - ARMVectorReg tmp; \ | ||
70 | - if (unlikely(vd == vn)) { \ | ||
71 | - n = memcpy(&tmp, vn, opr_sz); \ | ||
72 | - } \ | ||
73 | - for (i = 0; i < elem; i++) { \ | ||
74 | - TYPE j = m[H(i)]; \ | ||
75 | - d[H(i)] = j < elem ? n[H(j)] : 0; \ | ||
76 | - } \ | ||
77 | +typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool); | ||
78 | + | ||
79 | +static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc, | ||
80 | + bool is_tbx, tb_impl_fn *fn) | ||
81 | +{ | ||
82 | + ARMVectorReg scratch; | ||
83 | + uintptr_t oprsz = simd_oprsz(desc); | ||
84 | + | ||
85 | + if (unlikely(vd == vn)) { | ||
86 | + vn = memcpy(&scratch, vn, oprsz); | ||
87 | + } | ||
88 | + | ||
89 | + fn(vd, vn, NULL, vm, oprsz, is_tbx); | ||
90 | } | ||
91 | |||
92 | -DO_TBL(sve_tbl_b, uint8_t, H1) | ||
93 | -DO_TBL(sve_tbl_h, uint16_t, H2) | ||
94 | -DO_TBL(sve_tbl_s, uint32_t, H4) | ||
95 | -DO_TBL(sve_tbl_d, uint64_t, ) | ||
96 | +static inline void do_tbl2(void *vd, void *vn0, void *vn1, void *vm, | ||
97 | + uint32_t desc, bool is_tbx, tb_impl_fn *fn) | ||
98 | +{ | ||
99 | + ARMVectorReg scratch; | ||
100 | + uintptr_t oprsz = simd_oprsz(desc); | ||
101 | |||
102 | -#undef TBL | ||
103 | + if (unlikely(vd == vn0)) { | ||
104 | + vn0 = memcpy(&scratch, vn0, oprsz); | ||
105 | + if (vd == vn1) { | ||
106 | + vn1 = vn0; | ||
107 | + } | ||
108 | + } else if (unlikely(vd == vn1)) { | ||
109 | + vn1 = memcpy(&scratch, vn1, oprsz); | ||
110 | + } | ||
111 | + | ||
112 | + fn(vd, vn0, vn1, vm, oprsz, is_tbx); | ||
113 | +} | ||
114 | + | ||
115 | +#define DO_TB(SUFF, TYPE, H) \ | ||
116 | +static inline void do_tb_##SUFF(void *vd, void *vt0, void *vt1, \ | ||
117 | + void *vm, uintptr_t oprsz, bool is_tbx) \ | ||
118 | +{ \ | ||
119 | + TYPE *d = vd, *tbl0 = vt0, *tbl1 = vt1, *indexes = vm; \ | ||
120 | + uintptr_t i, nelem = oprsz / sizeof(TYPE); \ | ||
121 | + for (i = 0; i < nelem; ++i) { \ | ||
122 | + TYPE index = indexes[H(i)], val = 0; \ |
123 | + if (index < nelem) { \ | ||
124 | + val = tbl0[H(index)]; \ | ||
125 | + } else { \ | ||
126 | + index -= nelem; \ | ||
127 | + if (tbl1 && index < nelem) { \ | ||
128 | + val = tbl1[H(index)]; \ | ||
129 | + } else if (is_tbx) { \ | ||
130 | + continue; \ | ||
131 | + } \ | ||
132 | + } \ | ||
133 | + d[H(i)] = val; \ | ||
134 | + } \ | ||
135 | +} \ | ||
136 | +void HELPER(sve_tbl_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
137 | +{ \ | ||
138 | + do_tbl1(vd, vn, vm, desc, false, do_tb_##SUFF); \ | ||
139 | +} \ | ||
140 | +void HELPER(sve2_tbl_##SUFF)(void *vd, void *vn0, void *vn1, \ | ||
141 | + void *vm, uint32_t desc) \ | ||
142 | +{ \ | ||
143 | + do_tbl2(vd, vn0, vn1, vm, desc, false, do_tb_##SUFF); \ | ||
144 | +} \ | ||
145 | +void HELPER(sve2_tbx_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \ | ||
146 | +{ \ | ||
147 | + do_tbl1(vd, vn, vm, desc, true, do_tb_##SUFF); \ | ||
148 | +} | ||
149 | + | ||
150 | +DO_TB(b, uint8_t, H1) | ||
151 | +DO_TB(h, uint16_t, H2) | ||
152 | +DO_TB(s, uint32_t, H4) | ||
153 | +DO_TB(d, uint64_t, ) | ||
154 | + | ||
155 | +#undef DO_TB | ||
156 | |||
157 | #define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \ | ||
158 | void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ | ||
159 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
160 | index XXXXXXX..XXXXXXX 100644 | ||
161 | --- a/target/arm/translate-sve.c | ||
162 | +++ b/target/arm/translate-sve.c | ||
163 | @@ -XXX,XX +XXX,XX @@ static bool trans_TBL(DisasContext *s, arg_rrr_esz *a) | ||
164 | return true; | ||
165 | } | ||
166 | |||
167 | +static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a) | ||
168 | +{ | ||
169 | + static gen_helper_gvec_4 * const fns[4] = { | ||
170 | + gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, | ||
171 | + gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d | ||
172 | + }; | ||
173 | + | ||
174 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
175 | + return false; | ||
176 | + } | ||
177 | + if (sve_access_check(s)) { | ||
178 | + gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, | ||
179 | + (a->rn + 1) % 32, a->rm, 0); | ||
180 | + } | ||
181 | + return true; | ||
182 | +} | ||
183 | + | ||
184 | +static bool trans_TBX(DisasContext *s, arg_rrr_esz *a) | ||
185 | +{ | ||
186 | + static gen_helper_gvec_3 * const fns[4] = { | ||
187 | + gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, | ||
188 | + gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d | ||
189 | + }; | ||
190 | + | ||
191 | + if (!dc_isar_feature(aa64_sve2, s)) { | ||
192 | + return false; | ||
193 | + } | ||
194 | + if (sve_access_check(s)) { | ||
195 | + gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0); | ||
196 | + } | ||
197 | + return true; | ||
198 | +} | ||
199 | + | ||
200 | static bool trans_UNPK(DisasContext *s, arg_UNPK *a) | ||
201 | { | ||
202 | static gen_helper_gvec_2 * const fns[4][2] = { | ||
203 | -- | ||
204 | 2.20.1 | ||
205 | |||
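The contract encoded in DO_TB is worth spelling out: an index into the first vector's range reads tbl0; an index into the second vector's range reads tbl1 when the two-register TBL form supplies one; anything else yields zero for TBL but leaves the destination element untouched for TBX. A standalone byte-element model of the same logic; the name and flat layout are illustrative, not QEMU code:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Byte-element model of SVE/SVE2 table lookup.  tbl1 is NULL for the
     * single-register forms; nelem is the element count of one vector.
     */
    static void tb_b_ref(uint8_t *d, const uint8_t *tbl0,
                         const uint8_t *tbl1, const uint8_t *idx,
                         size_t nelem, int is_tbx)
    {
        for (size_t i = 0; i < nelem; ++i) {
            uint8_t index = idx[i], val = 0;

            if (index < nelem) {
                val = tbl0[index];
            } else if (tbl1 != NULL && (size_t)index - nelem < nelem) {
                val = tbl1[index - nelem];
            } else if (is_tbx) {
                continue;               /* TBX: keep the old d[i] */
            }
            d[i] = val;                 /* TBL: out of range reads as 0 */
        }
    }

Note also the (a->rn + 1) % 32 in trans_TBL_sve2: the two-register form consumes the architecturally consecutive pair Zn, Zn+1, with Z31 wrapping to Z0.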