Most of this is the Neon decodetree patches, followed by Edgar's versal cleanups.

thanks
-- PMM

The following changes since commit 2ef486e76d64436be90f7359a3071fb2a56ce835:

  Merge remote-tracking branch 'remotes/marcel/tags/rdma-pull-request' into staging (2020-05-03 14:12:56 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200504

for you to fetch changes up to 9aefc6cf9b73f66062d2f914a0136756e7a28211:

  target/arm: Move gen_ function typedefs to translate.h (2020-05-04 12:59:26 +0100)

----------------------------------------------------------------
target-arm queue:
 * Start of conversion of Neon insns to decodetree
 * versal board: support SD and RTC
 * Implement ARMv8.2-TTS2UXN
 * Make VQDMULL undefined when U=1
 * Some minor code cleanups

----------------------------------------------------------------
Edgar E. Iglesias (11):
      hw/arm: versal: Remove inclusion of arm_gicv3_common.h
      hw/arm: versal: Move misplaced comment
      hw/arm: versal-virt: Fix typo xlnx-ve -> xlnx-versal
      hw/arm: versal: Embed the UARTs into the SoC type
      hw/arm: versal: Embed the GEMs into the SoC type
      hw/arm: versal: Embed the ADMAs into the SoC type
      hw/arm: versal: Embed the APUs into the SoC type
      hw/arm: versal: Add support for SD
      hw/arm: versal: Add support for the RTC
      hw/arm: versal-virt: Add support for SD
      hw/arm: versal-virt: Add support for the RTC

Fredrik Strupe (1):
      target/arm: Make VQDMULL undefined when U=1

Peter Maydell (25):
      target/arm: Don't use a TLB for ARMMMUIdx_Stage2
      target/arm: Use enum constant in get_phys_addr_lpae() call
      target/arm: Add new 's1_is_el0' argument to get_phys_addr_lpae()
      target/arm: Implement ARMv8.2-TTS2UXN
      target/arm: Use correct variable for setting 'max' cpu's ID_AA64DFR0
      target/arm/translate-vfp.inc.c: Remove duplicate simd_r32 check
      target/arm: Don't allow Thumb Neon insns without FEATURE_NEON
      target/arm: Add stubs for AArch32 Neon decodetree
      target/arm: Convert VCMLA (vector) to decodetree
      target/arm: Convert VCADD (vector) to decodetree
      target/arm: Convert V[US]DOT (vector) to decodetree
      target/arm: Convert VFM[AS]L (vector) to decodetree
      target/arm: Convert VCMLA (scalar) to decodetree
      target/arm: Convert V[US]DOT (scalar) to decodetree
      target/arm: Convert VFM[AS]L (scalar) to decodetree
      target/arm: Convert Neon load/store multiple structures to decodetree
      target/arm: Convert Neon 'load single structure to all lanes' to decodetree
      target/arm: Convert Neon 'load/store single structure' to decodetree
      target/arm: Convert Neon 3-reg-same VADD/VSUB to decodetree
      target/arm: Convert Neon 3-reg-same logic ops to decodetree
      target/arm: Convert Neon 3-reg-same VMAX/VMIN to decodetree
      target/arm: Convert Neon 3-reg-same comparisons to decodetree
      target/arm: Convert Neon 3-reg-same VQADD/VQSUB to decodetree
      target/arm: Convert Neon 3-reg-same VMUL, VMLA, VMLS, VSHL to decodetree
      target/arm: Move gen_ function typedefs to translate.h

Philippe Mathieu-Daudé (2):
      hw/arm/mps2-tz: Use TYPE_IOTKIT instead of hardcoded string
      target/arm: Use uint64_t for midr field in CPU state struct

 include/hw/arm/xlnx-versal.h    |  31 +-
 target/arm/cpu-param.h          |   2 +-
 target/arm/cpu.h                |  47 ++--
 target/arm/translate-a64.h      |   9 -
 target/arm/translate.h          |  26 ++
 target/arm/neon-dp.decode       |  86 +++++
 target/arm/neon-ls.decode       |  52 +++
 target/arm/neon-shared.decode   |  66 ++++
 hw/arm/mps2-tz.c                |   2 +-
 hw/arm/xlnx-versal-virt.c       |  74 ++++-
 hw/arm/xlnx-versal.c            | 115 +++++--
 target/arm/cpu.c                |   3 +-
 target/arm/cpu64.c              |   8 +-
 target/arm/helper.c             | 183 ++++------
 target/arm/translate-a64.c      |  17 -
 target/arm/translate-neon.inc.c | 714 +++++++++++++++++++++++++++++++++++++++
 target/arm/translate-vfp.inc.c  |   6 -
 target/arm/translate.c          | 716 +++-------------------------------------
 target/arm/Makefile.objs        |  18 +
 19 files changed, 1302 insertions(+), 864 deletions(-)
 create mode 100644 target/arm/neon-dp.decode
 create mode 100644 target/arm/neon-ls.decode
 create mode 100644 target/arm/neon-shared.decode
 create mode 100644 target/arm/translate-neon.inc.c

Hi; here's the latest arm pullreq. This is mostly patches from
RTH, plus a couple of other more minor things. Switching to
PCREL is the big one, hopefully should improve performance.

thanks
-- PMM

The following changes since commit 214a8da23651f2472b296b3293e619fd58d9e212:

  Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging (2022-10-18 11:14:31 -0400)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20221020

for you to fetch changes up to 5db899303799e49209016a93289b8694afa1449e:

  hw/ide/microdrive: Use device_cold_reset() for self-resets (2022-10-20 12:11:53 +0100)

----------------------------------------------------------------
target-arm queue:
 * Switch to TARGET_TB_PCREL
 * More pagetable-walk refactoring preparatory to HAFDBS
 * update the cortex-a15 MIDR to latest rev
 * hw/char/pl011: fix baud rate calculation
 * hw/ide/microdrive: Use device_cold_reset() for self-resets

----------------------------------------------------------------
Alex Bennée (1):
      target/arm: update the cortex-a15 MIDR to latest rev

Baruch Siach (1):
      hw/char/pl011: fix baud rate calculation

Peter Maydell (1):
      hw/ide/microdrive: Use device_cold_reset() for self-resets

Richard Henderson (21):
      target/arm: Enable TARGET_PAGE_ENTRY_EXTRA
      target/arm: Use probe_access_full for MTE
      target/arm: Use probe_access_full for BTI
      target/arm: Add ARMMMUIdx_Phys_{S,NS}
      target/arm: Move ARMMMUIdx_Stage2 to a real tlb mmu_idx
      target/arm: Restrict tlb flush from vttbr_write to vmid change
      target/arm: Split out S1Translate type
      target/arm: Plumb debug into S1Translate
      target/arm: Move be test for regime into S1TranslateResult
      target/arm: Use softmmu tlbs for page table walking
      target/arm: Split out get_phys_addr_twostage
      target/arm: Use bool consistently for get_phys_addr subroutines
      target/arm: Introduce curr_insn_len
      target/arm: Change gen_goto_tb to work on displacements
      target/arm: Change gen_*set_pc_im to gen_*update_pc
      target/arm: Change gen_exception_insn* to work on displacements
      target/arm: Remove gen_exception_internal_insn pc argument
      target/arm: Change gen_jmp* to work on displacements
      target/arm: Introduce gen_pc_plus_diff for aarch64
      target/arm: Introduce gen_pc_plus_diff for aarch32
      target/arm: Enable TARGET_TB_PCREL

 target/arm/cpu-param.h         |  17 +-
 target/arm/cpu.h               |  38 ++-
 target/arm/internals.h         |   1 +
 target/arm/sve_ldst_internal.h |   1 +
 target/arm/translate-a32.h     |   2 +-
 target/arm/translate.h         |  66 ++++-
 hw/char/pl011.c                |   2 +-
 hw/ide/microdrive.c            |   8 +-
 target/arm/cpu.c               |  23 +-
 target/arm/cpu_tcg.c           |   4 +-
 target/arm/helper.c            | 155 +++++++++---
 target/arm/mte_helper.c        |  62 ++---
 target/arm/ptw.c               | 535 +++++++++++++++++++++++++----------------
 target/arm/sve_helper.c        |  54 ++---
 target/arm/tlb_helper.c        |  24 +-
 target/arm/translate-a64.c     | 220 ++++++++++-------
 target/arm/translate-m-nocp.c  |   8 +-
 target/arm/translate-mve.c     |   2 +-
 target/arm/translate-vfp.c     |  10 +-
 target/arm/translate.c         | 284 +++++++++++++---------
 20 files changed, 918 insertions(+), 607 deletions(-)
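As background for the Neon decodetree conversion that dominates the first queue above: decodetree patterns live in .decode files and are turned by scripts/decodetree.py into a generated decoder which calls one trans_<INSN>() function per instruction. A purely illustrative sketch, not code from the series (the pattern line, argument set and function body here are made up):

    /*
     * A .decode pattern names the fixed opcode bits and extracts named
     * fields, e.g. (hypothetical):
     *
     *   VADD_3s  1111 001 0 0 . sz:2 .... .... 1000 . q:1 . 0 ....  &3same
     *
     * The generated decoder fills in an arg struct and calls:
     */
    static bool trans_VADD_3s(DisasContext *s, arg_3same *a)
    {
        if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;   /* not handled: fall back to the legacy decoder */
        }
        /* ... emit the TCG ops using a->vd, a->vn, a->vm, a->q, a->sz ... */
        return true;
    }

Returning false lets the hand-written legacy decoder keep handling the insn, which is what makes the incremental conversion possible.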
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

By using the TYPE_* definitions for devices, we can:
- quickly find where devices are used with 'git-grep'
- easily rename a device (one-line change).

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200428154650.21991-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/mps2-tz.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
         exit(EXIT_FAILURE);
     }
 
-    sysbus_init_child_obj(OBJECT(machine), "iotkit", &mms->iotkit,
+    sysbus_init_child_obj(OBJECT(machine), TYPE_IOTKIT, &mms->iotkit,
                           sizeof(mms->iotkit), mmc->armsse_type);
     iotkitdev = DEVICE(&mms->iotkit);
     object_property_set_link(OBJECT(&mms->iotkit), OBJECT(system_memory),
--
2.20.1

From: Baruch Siach <baruch@tkos.co.il>

The PL011 TRM says that "UARTIBRD = 0 is invalid and UARTFBRD is ignored
when this is the case". But the code looks at FBRD for the invalid case.
Fix this.

Signed-off-by: Baruch Siach <baruch@tkos.co.il>
Message-id: 1408f62a2e45665816527d4845ffde650957d5ab.1665051588.git.baruchs-c@neureality.ai
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/char/pl011.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -XXX,XX +XXX,XX @@ static unsigned int pl011_get_baudrate(const PL011State *s)
 {
     uint64_t clk;
 
-    if (s->fbrd == 0) {
+    if (s->ibrd == 0) {
         return 0;
     }
--
2.25.1
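A note on the baud-rate fix above: in the PL011 the divisor is split between IBRD (the integer part) and FBRD (64ths), so baudrate = UARTCLK / (16 * (IBRD + FBRD/64)), and only IBRD == 0 makes the divisor invalid. A minimal standalone sketch of that formula (illustrative only, not the QEMU function):

    static unsigned int pl011_baud_sketch(uint64_t clk,
                                          uint32_t ibrd, uint32_t fbrd)
    {
        if (ibrd == 0) {
            /* UARTIBRD = 0 is the invalid case; FBRD alone cannot help */
            return 0;
        }
        /* clk / (16 * (ibrd + fbrd/64)) == clk * 4 / (64 * ibrd + fbrd) */
        return (unsigned int)(clk * 4 / (64 * (uint64_t)ibrd + fbrd));
    }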
From: Richard Henderson <richard.henderson@linaro.org>

The CPUTLBEntryFull structure now stores the original pte attributes, as
well as the physical address. Therefore, we no longer need a separate
bit in MemTxAttrs, nor do we need to walk the tree of memory regions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h               |  1 -
 target/arm/sve_ldst_internal.h |  1 +
 target/arm/mte_helper.c        | 62 ++++++++++------------------
 target/arm/sve_helper.c        | 54 ++++++++++-------------
 target/arm/tlb_helper.c        |  4 ---
 5 files changed, 36 insertions(+), 86 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
  * generic target bits directly.
  */
 #define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
 
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
diff --git a/target/arm/sve_ldst_internal.h b/target/arm/sve_ldst_internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_ldst_internal.h
+++ b/target/arm/sve_ldst_internal.h
@@ -XXX,XX +XXX,XX @@ typedef struct {
     void *host;
     int flags;
     MemTxAttrs attrs;
+    bool tagged;
 } SVEHostPage;
 
 bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                               TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
     return tags + index;
 #else
-    uintptr_t index;
     CPUTLBEntryFull *full;
+    MemTxAttrs attrs;
     int in_page, flags;
-    ram_addr_t ptr_ra;
     hwaddr ptr_paddr, tag_paddr, xlat;
     MemoryRegion *mr;
     ARMASIdx tag_asi;
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * valid. Indicate to probe_access_flags no-fault, then assert that
      * we received a valid page.
      */
-    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
-                               ra == 0, &host, ra);
+    flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
+                              ra == 0, &host, &full, ra);
     assert(!(flags & TLB_INVALID_MASK));
 
-    /*
-     * Find the CPUTLBEntryFull for ptr. This *must* be present in the TLB
-     * because we just found the mapping.
-     * TODO: Perhaps there should be a cputlb helper that returns a
-     * matching tlb entry + iotlb entry.
-     */
-    index = tlb_index(env, ptr_mmu_idx, ptr);
-# ifdef CONFIG_DEBUG_TCG
-    {
-        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
-        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, ptr));
-    }
-# endif
-    full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
-
     /* If the virtual page MemAttr != Tagged, access unchecked. */
-    if (!arm_tlb_mte_tagged(&full->attrs)) {
+    if (full->pte_attrs != 0xf0) {
         return NULL;
     }
 
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
         return NULL;
     }
 
+    /*
+     * Remember these values across the second lookup below,
+     * which may invalidate this pointer via tlb resize.
+     */
+    ptr_paddr = full->phys_addr;
+    attrs = full->attrs;
+    full = NULL;
+
     /*
      * The Normal memory access can extend to the next page. E.g. a single
      * 8-byte access to the last byte of a page will check only the last
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      */
     in_page = -(ptr | TARGET_PAGE_MASK);
     if (unlikely(ptr_size > in_page)) {
-        void *ignore;
-        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
-                                    ptr_mmu_idx, ra == 0, &ignore, ra);
+        flags |= probe_access_full(env, ptr + in_page, ptr_access,
+                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
         assert(!(flags & TLB_INVALID_MASK));
     }
 
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     if (unlikely(flags & TLB_WATCHPOINT)) {
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
-        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
-                             full->attrs, wp, ra);
+        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
     }
 
-    /*
-     * Find the physical address within the normal mem space.
-     * The memory region lookup must succeed because TLB_MMIO was
-     * not set in the cputlb lookup above.
-     */
-    mr = memory_region_from_host(host, &ptr_ra);
-    tcg_debug_assert(mr != NULL);
-    tcg_debug_assert(memory_region_is_ram(mr));
-    ptr_paddr = ptr_ra;
-    do {
-        ptr_paddr += mr->addr;
-        mr = mr->container;
-    } while (mr);
-
     /* Convert to the physical address in tag space. */
     tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
 
     /* Look up the address in tag space. */
-    tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
     tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
     mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
-                                 tag_access == MMU_DATA_STORE,
-                                 full->attrs);
+                                 tag_access == MMU_DATA_STORE, attrs);
 
     /*
      * Note that @mr will never be NULL. If there is nothing in the address
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
      */
     addr = useronly_clean_ptr(addr);
 
+#ifdef CONFIG_USER_ONLY
     flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
                                &info->host, retaddr);
+    memset(&info->attrs, 0, sizeof(info->attrs));
+    /* Require both ANON and MTE; see allocation_tag_mem(). */
+    info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
+#else
+    CPUTLBEntryFull *full;
+    flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
+                              &info->host, &full, retaddr);
+    info->attrs = full->attrs;
+    info->tagged = full->pte_attrs == 0xf0;
+#endif
     info->flags = flags;
 
     if (flags & TLB_INVALID_MASK) {
@@ -XXX,XX +XXX,XX @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
 
     /* Ensure that info->host[] is relative to addr, not addr + mem_off. */
     info->host -= mem_off;
-
-#ifdef CONFIG_USER_ONLY
-    memset(&info->attrs, 0, sizeof(info->attrs));
-    /* Require both MAP_ANON and PROT_MTE -- see allocation_tag_mem. */
-    arm_tlb_mte_tagged(&info->attrs) =
-        (flags & PAGE_ANON) && (flags & PAGE_MTE);
-#else
-    /*
-     * Find the iotlbentry for addr and return the transaction attributes.
-     * This *must* be present in the TLB because we just found the mapping.
-     */
-    {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-
-# ifdef CONFIG_DEBUG_TCG
-        CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-        target_ulong comparator = (access_type == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, addr));
-# endif
-
-        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-        info->attrs = full->attrs;
-    }
-#endif
-
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     intptr_t mem_off, reg_off, reg_last;
 
     /* Process the page only if MemAttr == Tagged. */
-    if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
+    if (info->page[0].tagged) {
         mem_off = info->mem_off_first[0];
         reg_off = info->reg_off_first[0];
         reg_last = info->reg_off_split;
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     }
 
     mem_off = info->mem_off_first[1];
-    if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
+    if (mem_off >= 0 && info->page[1].tagged) {
         reg_off = info->reg_off_first[1];
         reg_last = info->reg_off_last[1];
 
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
      * Disable MTE checking if the Tagged bit is not set. Since TBI must
      * be set within MTEDESC for MTE, !mtedesc => !mte_active.
      */
-    if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
+    if (!info.page[0].tagged) {
         mtedesc = 0;
     }
 
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 cpu_check_watchpoint(env_cpu(env), addr, msize,
                                      info.attrs, BP_MEM_READ, retaddr);
             }
-            if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+            if (mtedesc && info.tagged) {
                 mte_check(env, mtedesc, addr, retaddr);
             }
             if (unlikely(info.flags & TLB_MMIO)) {
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                          msize, info.attrs,
                                          BP_MEM_READ, retaddr);
                 }
-                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                if (mtedesc && info.tagged) {
                     mte_check(env, mtedesc, addr, retaddr);
                 }
                 tlb_fn(env, &scratch, reg_off, addr, retaddr);
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 (env_cpu(env), addr, msize) & BP_MEM_READ)) {
                 goto fault;
             }
-            if (mtedesc &&
-                arm_tlb_mte_tagged(&info.attrs) &&
-                !mte_probe(env, mtedesc, addr)) {
+            if (mtedesc && info.tagged && !mte_probe(env, mtedesc, addr)) {
                 goto fault;
             }
 
@@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                      info.attrs, BP_MEM_WRITE, retaddr);
             }
 
-            if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+            if (mtedesc && info.tagged) {
                 mte_check(env, mtedesc, addr, retaddr);
             }
         }
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
             res.f.phys_addr &= TARGET_PAGE_MASK;
             address &= TARGET_PAGE_MASK;
         }
-        /* Notice and record tagged memory. */
-        if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
-            arm_tlb_mte_tagged(&res.f.attrs) = true;
-        }
 
         res.f.pte_attrs = res.cacheattrs.attrs;
         res.f.shareability = res.cacheattrs.shareability;
--
2.25.1

The ARMv8.2-TTS2UXN feature extends the XN field in stage 2
translation table descriptors from just bit [54] to bits [54:53],
allowing stage 2 to control execution permissions separately for EL0
and EL1. Implement the new semantics of the XN field and enable
the feature for our 'max' CPU.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-5-peter.maydell@linaro.org
---
 target/arm/cpu.h    | 15 +++++++++++++++
 target/arm/cpu.c    |  1 +
 target/arm/cpu64.c  |  2 ++
 target/arm/helper.c | 37 +++++++++++++++++++++++++++++------
 4 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
     return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
 }
 
+static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
+}
+
 /*
  * 64-bit feature tests via id registers.
  */
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
 }
 
+static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
+}
+
 /*
  * Feature tests for "does this exist in either 32-bit or 64-bit?"
  */
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
     return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
 }
 
+static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
+}
+
 /*
  * Forward to the above feature tests given an ARMCPU pointer.
  */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
             t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
             t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
             t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
+            t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
             cpu->isar.id_mmfr4 = t;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
         t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
         t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
         t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
+        t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
         cpu->isar.id_aa64mmfr1 = t;
 
         t = cpu->isar.id_aa64mmfr2;
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
         u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
         u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
         u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
+        u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
         cpu->isar.id_mmfr4 = u;
 
         u = cpu->isar.id_aa64dfr0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
  *
  * @env: CPUARMState
  * @s2ap: The 2-bit stage2 access permissions (S2AP)
- * @xn: XN (execute-never) bit
+ * @xn: XN (execute-never) bits
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
  */
-static int get_S2prot(CPUARMState *env, int s2ap, int xn)
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
 {
     int prot = 0;
 
@@ -XXX,XX +XXX,XX @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn)
     if (s2ap & 2) {
         prot |= PAGE_WRITE;
     }
-    if (!xn) {
-        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+
+    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
+        switch (xn) {
+        case 0:
             prot |= PAGE_EXEC;
+            break;
+        case 1:
+            if (s1_is_el0) {
+                prot |= PAGE_EXEC;
+            }
+            break;
+        case 2:
+            break;
+        case 3:
+            if (!s1_is_el0) {
+                prot |= PAGE_EXEC;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    } else {
+        if (!extract32(xn, 1, 1)) {
+            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+                prot |= PAGE_EXEC;
+            }
         }
     }
     return prot;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
 
     ap = extract32(attrs, 4, 2);
-    xn = extract32(attrs, 12, 1);
 
     if (mmu_idx == ARMMMUIdx_Stage2) {
         ns = true;
-        *prot = get_S2prot(env, ap, xn);
+        xn = extract32(attrs, 11, 2);
+        *prot = get_S2prot(env, ap, xn, s1_is_el0);
     } else {
         ns = extract32(attrs, 3, 1);
+        xn = extract32(attrs, 12, 1);
         pxn = extract32(attrs, 11, 1);
         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
     }
--
2.20.1
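The XN[1:0] mapping that the get_S2prot() switch above implements, condensed into a standalone sketch (the helper name is illustrative; the mapping itself comes straight from the patch):

    /*
     * Stage 2 XN bits [54:53] with TTS2UXN:
     *   0b00 -> executable at EL1 and EL0
     *   0b01 -> executable at EL0 only
     *   0b10 -> not executable at all
     *   0b11 -> executable at EL1 only
     */
    static bool s2_exec_allowed(unsigned xn, bool s1_is_el0)
    {
        switch (xn & 3) {
        case 0: return true;
        case 1: return s1_is_el0;
        case 2: return false;
        default: return !s1_is_el0;
        }
    }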
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Remove the inclusion of arm_gicv3_common.h; it already gets
included via xlnx-versal.h.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-2-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-versal.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/arm/boot.h"
 #include "kvm_arm.h"
 #include "hw/misc/unimp.h"
-#include "hw/intc/arm_gicv3_common.h"
 #include "hw/arm/xlnx-versal.h"
 #include "hw/char/pl011.h"
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Add a field to TARGET_PAGE_ENTRY_EXTRA to hold the guarded bit.
In is_guarded_page, use probe_access_full instead of just guessing
that the tlb entry is still present. Also handles the FIXME about
executing from device memory.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-param.h     |  9 +++++----
 target/arm/cpu.h           | 13 -------------
 target/arm/internals.h     |  1 +
 target/arm/ptw.c           |  7 ++++---
 target/arm/translate-a64.c | 21 ++++++++++-----------
 5 files changed, 20 insertions(+), 31 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
  *
  * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
  * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
- * For shareability, as in the SH field of the VMSAv8-64 PTEs.
+ * For shareability and guarded, as in the SH and GP fields respectively
+ * of the VMSAv8-64 PTEs.
  */
 # define TARGET_PAGE_ENTRY_EXTRA  \
-     uint8_t pte_attrs;           \
-     uint8_t shareability;
-
+    uint8_t pte_attrs;            \
+    uint8_t shareability;         \
+    bool guarded;
 #endif
 
 #define NB_MMU_MODES 8
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c. */
 extern const uint64_t pred_esz_masks[5];
 
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
-    return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
  * Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMCacheAttrs {
     unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
     bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
 } ARMCacheAttrs;
 
 /* Fields that are valid upon success. */
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
          */
         result->f.attrs.secure = false;
     }
-    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
-    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(&result->f.attrs) = true;
+
+    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+        result->f.guarded = guarded;
     }
 
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
 #ifdef CONFIG_USER_ONLY
     return page_get_flags(addr) & PAGE_BTI;
 #else
+    CPUTLBEntryFull *full;
+    void *host;
     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
-    unsigned int index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    int flags;
 
     /*
      * We test this immediately after reading an insn, which means
-     * that any normal page must be in the TLB. The only exception
-     * would be for executing from flash or device memory, which
-     * does not retain the TLB entry.
-     *
-     * FIXME: Assume false for those, for now. We could use
-     * arm_cpu_get_phys_page_attrs_debug to re-read the page
-     * table entry even for that case.
+     * that the TLB entry must be present and valid, and thus this
+     * access will never raise an exception.
      */
-    return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
+    flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
+                              false, &host, &full, 0);
+    assert(!(flags & TLB_INVALID_MASK));
+
+    return full->guarded;
 #endif
 }
--
2.25.1
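For reference, the guarded bit that the BTI patch above caches in the TLB originates in the stage 1 translation table: with FEAT_BTI, GP is bit [50] of a VMSAv8-64 block/page descriptor. A one-line illustrative helper (an assumption for illustration, not code from the patch):

    static inline bool descriptor_gp_bit(uint64_t desc)
    {
        return (desc >> 50) & 1;   /* GP, the guarded-page bit */
    }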
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Embed the APUs into the SoC type.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-8-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h |  2 +-
 hw/arm/xlnx-versal-virt.c    |  4 ++--
 hw/arm/xlnx-versal.c         | 19 +++++--------------
 3 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
     struct {
         struct {
             MemoryRegion mr;
-            ARMCPU *cpu[XLNX_VERSAL_NR_ACPUS];
+            ARMCPU cpu[XLNX_VERSAL_NR_ACPUS];
             GICv3State gic;
         } apu;
     } fpd;
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
     s->binfo.get_dtb = versal_virt_get_dtb;
     s->binfo.modify_dtb = versal_virt_modify_dtb;
     if (machine->kernel_filename) {
-        arm_load_kernel(s->soc.fpd.apu.cpu[0], machine, &s->binfo);
+        arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
     } else {
-        AddressSpace *as = arm_boot_address_space(s->soc.fpd.apu.cpu[0],
+        AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0],
                                                   &s->binfo);
         /* Some boot-loaders (e.g u-boot) don't like blobs at address 0 (NULL).
          * Offset things by 4K.  */
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
 
     for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) {
         Object *obj;
-        char *name;
-
-        obj = object_new(XLNX_VERSAL_ACPU_TYPE);
-        if (!obj) {
-            error_report("Unable to create apu.cpu[%d] of type %s",
-                         i, XLNX_VERSAL_ACPU_TYPE);
-            exit(EXIT_FAILURE);
-        }
-
-        name = g_strdup_printf("apu-cpu[%d]", i);
-        object_property_add_child(OBJECT(s), name, obj, &error_fatal);
-        g_free(name);
 
+        object_initialize_child(OBJECT(s), "apu-cpu[*]",
+                                &s->fpd.apu.cpu[i], sizeof(s->fpd.apu.cpu[i]),
+                                XLNX_VERSAL_ACPU_TYPE, &error_abort, NULL);
+        obj = OBJECT(&s->fpd.apu.cpu[i]);
         object_property_set_int(obj, s->cfg.psci_conduit,
                                 "psci-conduit", &error_abort);
         if (i) {
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
         object_property_set_link(obj, OBJECT(&s->fpd.apu.mr), "memory",
                                  &error_abort);
         object_property_set_bool(obj, true, "realized", &error_fatal);
-        s->fpd.apu.cpu[i] = ARM_CPU(obj);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_gic(Versal *s, qemu_irq *pic)
 
     for (i = 0; i < nr_apu_cpus; i++) {
-        DeviceState *cpudev = DEVICE(s->fpd.apu.cpu[i]);
+        DeviceState *cpudev = DEVICE(&s->fpd.apu.cpu[i]);
         int ppibase = XLNX_VERSAL_NR_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
         qemu_irq maint_irq;
         int ti;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Not yet used, but add mmu indexes for 1-1 mapping
to physical addresses.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-param.h |  2 +-
 target/arm/cpu.h       |  7 ++++++-
 target/arm/ptw.c       | 19 +++++++++++++++++--
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
     bool guarded;
 #endif
 
-#define NB_MMU_MODES 8
+#define NB_MMU_MODES 10
 
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *     EL2 EL2&0 +PAN
  *     EL2 (aka NS PL2)
  *     EL3 (aka S PL1)
+ *     Physical (NS & S)
  *
- * for a total of 8 different mmu_idx.
+ * for a total of 10 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish EL0 and EL1 (and
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
     ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
 
+    /* TLBs with 1-1 mapping to the physical address spaces. */
+    ARMMMUIdx_Phys_NS = 8 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Phys_S = 9 | ARM_MMU_IDX_A,
+
     /*
      * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     case ARMMMUIdx_E3:
         break;
 
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_S:
+        /* No translation for physical address spaces. */
+        return true;
+
     default:
         g_assert_not_reached();
     }
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
 {
     uint8_t memattr = 0x00;    /* Device nGnRnE */
     uint8_t shareability = 0;  /* non-sharable */
+    int r_el;
 
-    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
-        int r_el = regime_el(env, mmu_idx);
+    switch (mmu_idx) {
+    case ARMMMUIdx_Stage2:
+    case ARMMMUIdx_Stage2_S:
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_S:
+        break;
 
+    default:
+        r_el = regime_el(env, mmu_idx);
         if (arm_el_is_aa64(env, r_el)) {
             int pamax = arm_pamax(env_archcpu(env));
             uint64_t tcr = env->cp15.tcr_el[r_el];
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
             shareability = 2; /* outer sharable */
         }
         result->cacheattrs.is_s2_format = false;
+        break;
     }
 
     result->f.phys_addr = address;
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         is_secure = arm_is_secure_below_el3(env);
         break;
     case ARMMMUIdx_Stage2:
+    case ARMMMUIdx_Phys_NS:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         break;
     case ARMMMUIdx_E3:
     case ARMMMUIdx_Stage2_S:
+    case ARMMMUIdx_Phys_S:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
--
2.25.1
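The payoff for the new 1-1 indexes comes later in this queue ("target/arm: Use softmmu tlbs for page table walking"): descriptor loads during a walk can then go through an ordinary softmmu TLB keyed by physical address instead of re-resolving the memory region each time. A hypothetical sketch of such a use -- the helper and its use of cpu_ldq_mmuidx_ra() are assumptions for illustration, not code from this patch:

    static uint64_t ptw_load_sketch(CPUARMState *env, uint64_t paddr,
                                    bool secure, uintptr_t ra)
    {
        ARMMMUIdx idx = secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
        /* for the Phys_* indexes the "virtual" address is the physical one */
        return cpu_ldq_mmuidx_ra(env, paddr, arm_to_core_mmu_idx(idx), ra);
    }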
We define ARMMMUIdx_Stage2 as being an MMU index which uses a QEMU
TLB. However we never actually use the TLB -- all stage 2 lookups
are done by direct calls to get_phys_addr_lpae() followed by a
physical address load via address_space_ld*().

Remove Stage2 from the list of ARM MMU indexes which correspond to
real core MMU indexes, and instead put it in the set of "NOTLB" ARM
MMU indexes.

This allows us to drop NB_MMU_MODES to 11. It also means we can
safely add support for the ARMv8.2-TTS2UXN extension, which adds
permission bits to the stage 2 descriptors that define execute
permission separately for EL0 and EL1; supporting that while keeping
Stage2 in a QEMU TLB would require us to use separate TLBs for
"Stage2 for an EL0 access" and "Stage2 for an EL1 access", which is a
lot of extra complication given we aren't even using the QEMU TLB.

In the process of updating the comment on our MMU index use,
fix a couple of other minor errors:
 * NS EL2 EL2&0 was missing from the list in the comment
 * some text hadn't been updated from when we bumped NB_MMU_MODES
   above 8

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-2-peter.maydell@linaro.org
---
 target/arm/cpu-param.h |   2 +-
 target/arm/cpu.h       |  21 +++++---
 target/arm/helper.c    | 112 ++++-------------------------------------
 3 files changed, 27 insertions(+), 108 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
 # define TARGET_PAGE_BITS_MIN 10
 #endif
 
-#define NB_MMU_MODES 12
+#define NB_MMU_MODES 11
 
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *  handling via the TLB. The only way to do a stage 1 translation without
  *  the immediate stage 2 translation is via the ATS or AT system insns,
  *  which can be slow-pathed and always do a page table walk.
+ *  The only use of stage 2 translations is either as part of an s1+2
+ *  lookup or when loading the descriptors during a stage 1 page table walk,
+ *  and in both those cases we don't use the TLB.
  * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
  *  translation regimes, because they map reasonably well to each other
  *  and they can't both be active at the same time.
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
  * NS EL1 EL1&0 stage 1+2 +PAN
  * NS EL0 EL2&0
+ * NS EL2 EL2&0
  * NS EL2 EL2&0 +PAN
  * NS EL2 (aka NS PL2)
  * S EL0 EL1&0 (aka S PL0)
  * S EL1 EL1&0 (not used if EL3 is 32 bit)
  * S EL1 EL1&0 +PAN
  * S EL3 (aka S PL1)
- * NS EL1&0 stage 2
  *
- * for a total of 12 different mmu_idx.
+ * for a total of 11 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  * are not quite the same -- different CPU types (most notably M profile
  * vs A/R profile) would like to use MMU indexes with different semantics,
  * but since we don't ever need to use all of those in a single CPU we
- * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
+ * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
+ * modes + total number of M profile MMU modes". The lower bits of
  * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
  * the same for any particular CPU.
  * Variables of type ARMMUIdx are always full values, and the core
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_SE10_1_PAN = 9 | ARM_MMU_IDX_A,
     ARMMMUIdx_SE3 = 10 | ARM_MMU_IDX_A,
 
-    ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
-
     /*
      * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+    /*
+     * Not allocated a TLB: used only for second stage of an S12 page
+     * table walk, or for descriptor loads during first stage of an S1
+     * page table walk. Note that if we ever want to have a TLB for this
+     * then various TLB flush insns which currently are no-ops or flush
+     * only stage 1 MMU indexes will need to change to flush stage 2.
+     */
+    ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
 
     /*
      * M-profile.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(SE10_1),
     TO_CORE_BIT(SE10_1_PAN),
     TO_CORE_BIT(SE3),
-    TO_CORE_BIT(Stage2),
 
     TO_CORE_BIT(MUser),
     TO_CORE_BIT(MPriv),
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
     tlb_flush_by_mmuidx(cs,
                         ARMMMUIdxBit_E10_1 |
                         ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0 |
-                        ARMMMUIdxBit_Stage2);
+                        ARMMMUIdxBit_E10_0);
 }
 
 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
     tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                         ARMMMUIdxBit_E10_1 |
                                         ARMMMUIdxBit_E10_1_PAN |
-                                        ARMMMUIdxBit_E10_0 |
-                                        ARMMMUIdxBit_Stage2);
+                                        ARMMMUIdxBit_E10_0);
 }
 
-static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    /* Invalidate by IPA. This has to invalidate any structures that
-     * contain only stage 2 translation information, but does not need
-     * to apply to structures that contain combined stage 1 and stage 2
-     * translation information.
-     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
-     */
-    CPUState *cs = env_cpu(env);
-    uint64_t pageaddr;
-
-    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
-        return;
-    }
-
-    pageaddr = sextract64(value << 12, 0, 40);
-
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
-static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                               uint64_t value)
-{
-    CPUState *cs = env_cpu(env);
-    uint64_t pageaddr;
-
-    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
-        return;
-    }
-
-    pageaddr = sextract64(value << 12, 0, 40);
-
-    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                             ARMMMUIdxBit_Stage2);
-}
-
 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
         tlb_flush_by_mmuidx(cs,
                             ARMMMUIdxBit_E10_1 |
                             ARMMMUIdxBit_E10_1_PAN |
-                            ARMMMUIdxBit_E10_0 |
-                            ARMMMUIdxBit_Stage2);
+                            ARMMMUIdxBit_E10_0);
         raw_write(env, ri, value);
     }
 }
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
         return ARMMMUIdxBit_SE10_1 |
                ARMMMUIdxBit_SE10_1_PAN |
                ARMMMUIdxBit_SE10_0;
-    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
-        return ARMMMUIdxBit_E10_1 |
-               ARMMMUIdxBit_E10_1_PAN |
-               ARMMMUIdxBit_E10_0 |
-               ARMMMUIdxBit_Stage2;
     } else {
         return ARMMMUIdxBit_E10_1 |
                ARMMMUIdxBit_E10_1_PAN |
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                              ARMMMUIdxBit_SE3);
 }
 
-static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                    uint64_t value)
-{
-    /* Invalidate by IPA. This has to invalidate any structures that
-     * contain only stage 2 translation information, but does not need
-     * to apply to structures that contain combined stage 1 and stage 2
-     * translation information.
-     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
-     */
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
-    uint64_t pageaddr;
-
-    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
-        return;
-    }
-
-    pageaddr = sextract64(value << 12, 0, 48);
-
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
-static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                      uint64_t value)
-{
-    CPUState *cs = env_cpu(env);
-    uint64_t pageaddr;
-
-    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
-        return;
-    }
-
-    pageaddr = sextract64(value << 12, 0, 48);
-
-    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                             ARMMMUIdxBit_Stage2);
-}
-
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
 {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_vae1_write },
     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NO_RAW,
-      .writefn = tlbi_aa64_ipas2e1is_write },
+      .access = PL2_W, .type = ARM_CP_NOP },
     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NO_RAW,
-      .writefn = tlbi_aa64_ipas2e1is_write },
+      .access = PL2_W, .type = ARM_CP_NOP },
     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
       .access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_alle1is_write },
     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NO_RAW,
-      .writefn = tlbi_aa64_ipas2e1_write },
+      .access = PL2_W, .type = ARM_CP_NOP },
     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NO_RAW,
-      .writefn = tlbi_aa64_ipas2e1_write },

From: Richard Henderson <richard.henderson@linaro.org>

We had been marking this ARM_MMU_IDX_NOTLB, move it to a real tlb.
Flush the tlb when invalidating stage 1+2 translations. Re-use
alle1_tlbmask() for other instances of EL1&0 + Stage2.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221011031911.2408754-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-param.h |   2 +-
 target/arm/cpu.h       |  23 ++++---
 target/arm/helper.c    | 151 ++++++++++++++++++++++++++++++-----------
 3 files changed, 127 insertions(+), 49 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
     bool guarded;
 #endif
 
-#define NB_MMU_MODES 10
+#define NB_MMU_MODES 12
 
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *     EL2 (aka NS PL2)
  *     EL3 (aka S PL1)
  *     Physical (NS & S)
+ *     Stage2 (NS & S)
  *
- * for a total of 10 different mmu_idx.
+ * for a total of 12 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish EL0 and EL1 (and
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_Phys_NS = 8 | ARM_MMU_IDX_A,
     ARMMMUIdx_Phys_S = 9 | ARM_MMU_IDX_A,
 
+    /*
+     * Used for second stage of an S12 page table walk, or for descriptor
+     * loads during first stage of an S1 page table walk. Note that both
+     * are in use simultaneously for SecureEL2: the security state for
+     * the S2 ptw is selected by the NS bit from the S1 ptw.
+     */
+    ARMMMUIdx_Stage2 = 10 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Stage2_S = 11 | ARM_MMU_IDX_A,
+
     /*
      * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
-    /*
-     * Not allocated a TLB: used only for second stage of an S12 page
-     * table walk, or for descriptor loads during first stage of an S1
-     * page table walk. Note that if we ever want to have a TLB for this
-     * then various TLB flush insns which currently are no-ops or flush
-     * only stage 1 MMU indexes will need to change to flush stage 2.
-     */
-    ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,
 
     /*
      * M-profile.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(E20_2),
     TO_CORE_BIT(E20_2_PAN),
     TO_CORE_BIT(E3),
+    TO_CORE_BIT(Stage2),
+    TO_CORE_BIT(Stage2_S),
 
     TO_CORE_BIT(MUser),
     TO_CORE_BIT(MPriv),
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     raw_write(env, ri, value);
 }
 
+static int alle1_tlbmask(CPUARMState *env)
+{
+    /*
+     * Note that the 'ALL' scope must invalidate both stage 1 and
+     * stage 2 translations, whereas most other scopes only invalidate
+     * stage 1 translations.
+     */
+    return (ARMMMUIdxBit_E10_1 |
+            ARMMMUIdxBit_E10_1_PAN |
+            ARMMMUIdxBit_E10_0 |
+            ARMMMUIdxBit_Stage2 |
+            ARMMMUIdxBit_Stage2_S);
+}
+
+
 /* IS variants of TLB operations must affect all cores */
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
 
-    tlb_flush_by_mmuidx(cs,
-                        ARMMMUIdxBit_E10_1 |
-                        ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
 }
 
 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
 
-    tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                        ARMMMUIdxBit_E10_1 |
-                                        ARMMMUIdxBit_E10_1_PAN |
-                                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
 }
 
@@ -XXX,XX +XXX,XX @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                              ARMMMUIdxBit_E2);
 }
 
+static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
+static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
 static const ARMCPRegInfo cp_reginfo[] = {
     /* Define the secure and non-secure FCSE identifier CP registers
      * separately because there is no secure bank in V8 (no _EL3). This allows
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     /*
      * A change in VMID to the stage2 page table (Stage2) invalidates
-     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
+     * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0).
      */
     if (raw_read(env, ri) != value) {
-        uint16_t mask = ARMMMUIdxBit_E10_1 |
-                        ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0;
-        tlb_flush_by_mmuidx(cs, mask);
+        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
         raw_write(env, ri, value);
     }
 }
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     }
 }
 
-static int alle1_tlbmask(CPUARMState *env)
-{
-    /*
-     * Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
-    return (ARMMMUIdxBit_E10_1 |
-            ARMMMUIdxBit_E10_1_PAN |
-            ARMMMUIdxBit_E10_0);
-}
-
 static int e2_tlbmask(CPUARMState *env)
 {
     return (ARMMMUIdxBit_E20_0 |
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                              ARMMMUIdxBit_E3, bits);
 }
 
+static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
+{
+    /*
+     * The MSB of value is the NS field, which only applies if SEL2
+     * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
+     */
+    return (value >= 0
+            && cpu_isar_feature(aa64_sel2, env_archcpu(env))
+            && arm_is_secure_below_el3(env)
+            ? ARMMMUIdxBit_Stage2_S
+            : ARMMMUIdxBit_Stage2);
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    int mask = ipas2e1_tlbmask(env, value);
+    uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+    if (tlb_force_broadcast(env)) {
+        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+    } else {
+        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+    }
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                      uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    int mask = ipas2e1_tlbmask(env, value);
+    uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+}
+
 #ifdef TARGET_AARCH64
 typedef struct {
     uint64_t base;
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
 
     do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
 }
+
+static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     uint64_t value)
+{
+    do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
+                  tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       uint64_t value)
+{
+    do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
+}
 #endif
 
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
 {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_vae1_write },
     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1is_write },
     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1is_write },
     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
       .access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_alle1is_write },
     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1_write },
     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
275
+ .access = PL2_W, .type = ARM_CP_NOP },
286
+ .writefn = tlbi_aa64_ipas2e1_write },
276
{ .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
287
{ .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
277
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
288
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
278
.access = PL2_W, .type = ARM_CP_NO_RAW,
289
.access = PL2_W, .type = ARM_CP_NO_RAW,
279
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
290
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
280
.writefn = tlbimva_hyp_is_write },
291
.writefn = tlbimva_hyp_is_write },
281
{ .name = "TLBIIPAS2",
292
{ .name = "TLBIIPAS2",
282
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
293
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
283
- .type = ARM_CP_NO_RAW, .access = PL2_W,
294
- .type = ARM_CP_NOP, .access = PL2_W },
284
- .writefn = tlbiipas2_write },
295
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
285
+ .type = ARM_CP_NOP, .access = PL2_W },
296
+ .writefn = tlbiipas2_hyp_write },
286
{ .name = "TLBIIPAS2IS",
297
{ .name = "TLBIIPAS2IS",
287
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
298
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
288
- .type = ARM_CP_NO_RAW, .access = PL2_W,
299
- .type = ARM_CP_NOP, .access = PL2_W },
289
- .writefn = tlbiipas2_is_write },
300
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
290
+ .type = ARM_CP_NOP, .access = PL2_W },
301
+ .writefn = tlbiipas2is_hyp_write },
291
{ .name = "TLBIIPAS2L",
302
{ .name = "TLBIIPAS2L",
292
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
303
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
293
- .type = ARM_CP_NO_RAW, .access = PL2_W,
304
- .type = ARM_CP_NOP, .access = PL2_W },
294
- .writefn = tlbiipas2_write },
305
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
295
+ .type = ARM_CP_NOP, .access = PL2_W },
306
+ .writefn = tlbiipas2_hyp_write },
296
{ .name = "TLBIIPAS2LIS",
307
{ .name = "TLBIIPAS2LIS",
297
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
308
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
298
- .type = ARM_CP_NO_RAW, .access = PL2_W,
309
- .type = ARM_CP_NOP, .access = PL2_W },
299
- .writefn = tlbiipas2_is_write },
310
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
300
+ .type = ARM_CP_NOP, .access = PL2_W },
311
+ .writefn = tlbiipas2is_hyp_write },
301
/* 32 bit cache operations */
312
/* 32 bit cache operations */
302
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
313
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
303
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
314
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
315
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
316
.writefn = tlbi_aa64_rvae1_write },
317
{ .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
318
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
319
- .access = PL2_W, .type = ARM_CP_NOP },
320
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
321
+ .writefn = tlbi_aa64_ripas2e1is_write },
322
{ .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
323
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
324
- .access = PL2_W, .type = ARM_CP_NOP },
325
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
326
+ .writefn = tlbi_aa64_ripas2e1is_write },
327
{ .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
328
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
329
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
330
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
331
.writefn = tlbi_aa64_rvae2is_write },
332
{ .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
333
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
334
- .access = PL2_W, .type = ARM_CP_NOP },
335
- { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
336
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
337
+ .writefn = tlbi_aa64_ripas2e1_write },
338
+ { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
339
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
340
- .access = PL2_W, .type = ARM_CP_NOP },
341
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
342
+ .writefn = tlbi_aa64_ripas2e1_write },
343
{ .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
344
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
345
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
304
--
346
--
305
2.20.1
347
2.25.1
306
307
diff view generated by jsdifflib
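As a standalone illustration of the address decode used by the new
tlbi_aa64_ipas2e1_write() above (not part of the patch; sextract64() is
re-implemented locally here so the snippet compiles on its own):

    #include <stdint.h>

    /* Local stand-in for QEMU's sextract64() from qemu/bitops.h. */
    static int64_t sextract64(uint64_t value, int start, int length)
    {
        return ((int64_t)(value << (64 - length - start))) >> (64 - length);
    }

    /*
     * TLBI IPAS2E1 carries the IPA page number in the low bits of the
     * written value; shifting it left by 12 and sign-extracting 56 bits
     * recovers the page address to invalidate, exactly as the new
     * writefn does (the old code only extracted 48 bits).
     */
    static uint64_t ipas2_pageaddr(uint64_t value)
    {
        return (uint64_t)sextract64(value << 12, 0, 56);
    }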
For ARMv8.2-TTS2UXN, the stage 2 page table walk wants to know
whether the stage 1 access is for EL0 or not, because whether
exec permission is given can depend on whether this is an EL0
or EL1 access. Add a new argument to get_phys_addr_lpae() so
the call sites can pass this information in.

Since get_phys_addr_lpae() doesn't already have a doc comment,
add one so we have a place to put the documentation of the
semantics of the new s1_is_el0 argument.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-4-peter.maydell@linaro.org
---
 target/arm/helper.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@

 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr,
                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
     }

     ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
+                             false,
                              &s2pa, &txattrs, &s2prot, &s2size, fi,
                              pcacheattrs);
     if (ret) {
@@ -XXX,XX +XXX,XX @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
     };
 }

+/**
+ * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a long-format
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * the WnR bit is never set (the caller must do this).
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
+ *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
+ *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr,
                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,

         /* S1 is done. Now do S2 translation. */
         ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
+                                 mmu_idx == ARMMMUIdx_E10_0,
                                  phys_ptr, attrs, &s2_prot,
                                  page_size, fi,
                                  cacheattrs != NULL ? &cacheattrs2 : NULL);
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     }

     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
+        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                   phys_ptr, attrs, prot, page_size,
                                   fi, cacheattrs);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

Compare only the VMID field when considering whether we need to flush.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221011031911.2408754-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * A change in VMID to the stage2 page table (Stage2) invalidates
      * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0).
      */
-    if (raw_read(env, ri) != value) {
+    if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
-        raw_write(env, ri, value);
     }
+    raw_write(env, ri, value);
 }

 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
--
2.25.1
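A standalone sketch of the new comparison (illustration only, not part of
either patch): the VMID lives in bits [63:48] of VTTBR_EL2 (only the low
8 of those without 16-bit VMIDs), so XOR-ing the old and new values and
testing that field is enough to decide whether a flush is needed; a write
that merely moves the table base address changes nothing the TLB tags on.

    #include <stdbool.h>
    #include <stdint.h>

    static bool vttbr_vmid_changed(uint64_t old_vttbr, uint64_t new_vttbr)
    {
        /* equivalent to extract64(old ^ new, 48, 16) != 0 */
        return (((old_vttbr ^ new_vttbr) >> 48) & 0xffff) != 0;
    }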
Convert VCMLA (scalar) in the 2reg-scalar-ext group to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-9-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode   |  5 +++++
 target/arm/translate-neon.inc.c | 40 +++++++++++++++++++++++++++++++++
 target/arm/translate.c          | 26 +--------------------
 3 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@ VFML         1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
 VFML         1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
+
+VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
+             vn=%vn_dp vd=%vd_dp size=0
+VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
                        gen_helper_gvec_fmlal_a32);
     return true;
 }
+
+static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
+{
+    gen_helper_gvec_3_ptr *fn_gvec_ptr;
+    int opr_sz;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_vcma, s)) {
+        return false;
+    }
+    if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vd | a->vn) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
+                   : gen_helper_gvec_fcmlah_idx);
+    opr_sz = (1 + a->q) * 8;
+    fpst = get_fpstatus_ptr(1);
+    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(1, a->vn),
+                       vfp_reg_offset(1, a->vm),
+                       fpst, opr_sz, opr_sz,
+                       (a->index << 2) | a->rot, fn_gvec_ptr);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
     bool is_long = false, q = extract32(insn, 6, 1);
     bool ptr_is_env = false;

-    if ((insn & 0xff000f10) == 0xfe000800) {
-        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
-        int rot = extract32(insn, 20, 2);
-        int size = extract32(insn, 23, 1);
-        int index;
-
-        if (!dc_isar_feature(aa32_vcma, s)) {
-            return 1;
-        }
-        if (size == 0) {
-            if (!dc_isar_feature(aa32_fp16_arith, s)) {
-                return 1;
-            }
-            /* For fp16, rm is just Vm, and index is M. */
-            rm = extract32(insn, 0, 4);
-            index = extract32(insn, 5, 1);
-        } else {
-            /* For fp32, rm is the usual M:Vm, and index is 0. */
-            VFP_DREG_M(rm, insn);
-            index = 0;
-        }
-        data = (index << 2) | rot;
-        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
-                       : gen_helper_gvec_fcmlah_idx);
-    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
+    if ((insn & 0xffb00f00) == 0xfe200d00) {
         /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
         int u = extract32(insn, 4, 1);

--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

Consolidate most of the inputs and outputs of S1_ptw_translate
into a single structure.  Plumb this through arm_ld*_ptw from
the controlling get_phys_addr_* routine.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221011031911.2408754-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 140 ++++++++++++++++++++++++++---------------------
 1 file changed, 79 insertions(+), 61 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #include "idau.h"

-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, bool s1_is_el0,
+typedef struct S1Translate {
+    ARMMMUIdx in_mmu_idx;
+    bool in_secure;
+    bool out_secure;
+    hwaddr out_phys;
+} S1Translate;
+
+static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
+                               uint64_t address,
+                               MMUAccessType access_type, bool s1_is_el0,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));

@@ -XXX,XX +XXX,XX @@ static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
 }

 /* Translate a S1 pagetable walk through S2 if needed. */
-static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
-                               hwaddr addr, bool *is_secure_ptr,
-                               ARMMMUFaultInfo *fi)
+static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
+                             hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    bool is_secure = *is_secure_ptr;
+    bool is_secure = ptw->in_secure;
     ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;

-    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
+    if (arm_mmu_idx_is_stage1_of_2(ptw->in_mmu_idx) &&
         !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
         GetPhysAddrResult s2 = {};
+        S1Translate s2ptw = {
+            .in_mmu_idx = s2_mmu_idx,
+            .in_secure = is_secure,
+        };
         uint64_t hcr;
         int ret;

-        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
-                                 is_secure, false, &s2, fi);
+        ret = get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
+                                 false, &s2, fi);
         if (ret) {
             assert(fi->type != ARMFault_None);
             fi->s2addr = addr;
             fi->stage2 = true;
             fi->s1ptw = true;
             fi->s1ns = !is_secure;
-            return ~0;
+            return false;
         }

         hcr = arm_hcr_el2_eff_secstate(env, is_secure);
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             fi->stage2 = true;
             fi->s1ptw = true;
             fi->s1ns = !is_secure;
-            return ~0;
+            return false;
         }

         if (arm_is_secure_below_el3(env)) {
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
         } else {
             is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
         }
-        *is_secure_ptr = is_secure;
     } else {
         assert(!is_secure);
     }

         addr = s2.f.phys_addr;
     }
-    return addr;
+
+    ptw->out_secure = is_secure;
+    ptw->out_phys = addr;
+    return true;
 }

 /* All loads done in the course of a page table walk go through here. */
-static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
-                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
+                            ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
     MemTxAttrs attrs = {};
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
     AddressSpace *as;
     uint32_t data;

-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
+    if (!S1_ptw_translate(env, ptw, addr, fi)) {
         return 0;
     }
-    if (regime_translation_big_endian(env, mmu_idx)) {
+    addr = ptw->out_phys;
+    attrs.secure = ptw->out_secure;
+    as = arm_addressspace(cs, attrs);
+    if (regime_translation_big_endian(env, ptw->in_mmu_idx)) {
         data = address_space_ldl_be(as, addr, attrs, &result);
     } else {
         data = address_space_ldl_le(as, addr, attrs, &result);
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
     return 0;
 }

-static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
-                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
+                            ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
     MemTxAttrs attrs = {};
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
     AddressSpace *as;
     uint64_t data;

-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
+    if (!S1_ptw_translate(env, ptw, addr, fi)) {
         return 0;
     }
-    if (regime_translation_big_endian(env, mmu_idx)) {
+    addr = ptw->out_phys;
+    attrs.secure = ptw->out_secure;
+    as = arm_addressspace(cs, attrs);
+    if (regime_translation_big_endian(env, ptw->in_mmu_idx)) {
         data = address_space_ldq_be(as, addr, attrs, &result);
     } else {
         data = address_space_ldq_le(as, addr, attrs, &result);
@@ -XXX,XX +XXX,XX @@ static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
 }

-static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             bool is_secure, GetPhysAddrResult *result,
-                             ARMMMUFaultInfo *fi)
+static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
+                             uint32_t address, MMUAccessType access_type,
+                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     int level = 1;
     uint32_t table;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,

     /* Pagetable walk. */
     /* Lookup l1 descriptor. */
-    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
         /* Section translation fault if page walk is disabled by PD0 or PD1 */
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+    desc = arm_ldl_ptw(env, ptw, table, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
     type = (desc & 3);
     domain = (desc >> 5) & 0x0f;
-    if (regime_el(env, mmu_idx) == 1) {
+    if (regime_el(env, ptw->in_mmu_idx) == 1) {
         dacr = env->cp15.dacr_ns;
     } else {
         dacr = env->cp15.dacr_s;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             /* Fine pagetable. */
             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
         }
-        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+        desc = arm_ldl_ptw(env, ptw, table, fi);
         if (fi->type != ARMFault_None) {
             goto do_fault;
         }
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             g_assert_not_reached();
         }
     }
-    result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
     result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
     if (!(result->f.prot & (1 << access_type))) {
         /* Access permission fault. */
@@ -XXX,XX +XXX,XX @@ do_fault:
     return true;
 }

-static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             bool is_secure, GetPhysAddrResult *result,
-                             ARMMMUFaultInfo *fi)
+static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
+                             uint32_t address, MMUAccessType access_type,
+                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     int level = 1;
     uint32_t table;
     uint32_t desc;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+    desc = arm_ldl_ptw(env, ptw, table, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         ns = extract32(desc, 3, 1);
         /* Lookup l2 entry. */
         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
-        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+        desc = arm_ldl_ptw(env, ptw, table, fi);
         if (fi->type != ARMFault_None) {
             goto do_fault;
         }
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
  *  * the WnR bit is never set (the caller must do this).
  *
  * @env: CPUARMState
+ * @ptw: Current and next stage parameters for the walk.
  * @address: virtual address to get physical address for
  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @mmu_idx: MMU index indicating required translation regime
- * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
- *             table walk), must be true if this is stage 2 of a stage 1+2
+ * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
+ *             (so this is a stage 2 page table walk),
+ *             must be true if this is stage 2 of a stage 1+2
  *             walk for an EL0 access. If @mmu_idx is anything else,
  *             @s1_is_el0 is ignored.
  * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, bool s1_is_el0,
+static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
+                               uint64_t address,
+                               MMUAccessType access_type, bool s1_is_el0,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
+    bool is_secure = ptw->in_secure;
     /* Read an LPAE long-descriptor translation table. */
     ARMFaultType fault_type = ARMFault_Translation;
     uint32_t level;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
     descaddr |= (address >> (stride * (4 - level))) & indexmask;
     descaddr &= ~7ULL;
     nstable = extract32(tableattrs, 4, 1);
-    descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
+    ptw->in_secure = !nstable;
+    descriptor = arm_ldq_ptw(env, ptw, descaddr, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                                ARMMMUFaultInfo *fi)
 {
     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+    S1Translate ptw;

     if (mmu_idx != s1_mmu_idx) {
         /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         int ret;
         bool ipa_secure, s2walk_secure;
         ARMCacheAttrs cacheattrs1;
-        ARMMMUIdx s2_mmu_idx;
         bool is_el0;
         uint64_t hcr;

@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
             s2walk_secure = false;
         }

-        s2_mmu_idx = (s2walk_secure
-                      ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
+        ptw.in_mmu_idx =
+            s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+        ptw.in_secure = s2walk_secure;
         is_el0 = mmu_idx == ARMMMUIdx_E10_0;

         /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         cacheattrs1 = result->cacheattrs;
         memset(result, 0, sizeof(*result));

-        ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
-                                 s2walk_secure, is_el0, result, fi);
+        ret = get_phys_addr_lpae(env, &ptw, ipa, access_type,
+                                 is_el0, result, fi);
         fi->s2addr = ipa;

         /* Combine the S1 and S2 perms. */
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
     }

+    ptw.in_mmu_idx = mmu_idx;
+    ptw.in_secure = is_secure;
+
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
-                                  is_secure, false, result, fi);
+        return get_phys_addr_lpae(env, &ptw, address, access_type, false,
+                                  result, fi);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
-        return get_phys_addr_v6(env, address, access_type, mmu_idx,
-                                is_secure, result, fi);
+        return get_phys_addr_v6(env, &ptw, address, access_type, result, fi);
     } else {
-        return get_phys_addr_v5(env, address, access_type, mmu_idx,
-                                is_secure, result, fi);
+        return get_phys_addr_v5(env, &ptw, address, access_type, result, fi);
     }
 }

--
2.25.1
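A minimal sketch of the calling convention this patch introduces (a
fragment mirroring the hunks above, not independently compilable QEMU
code): callers fill in the in_* fields and read the out_* fields back,
instead of passing the secure flag in and out by pointer.

    S1Translate s2ptw = {
        .in_mmu_idx = s2_mmu_idx,   /* regime used to walk the tables */
        .in_secure  = is_secure,    /* PA space the walk starts in */
    };

    if (!S1_ptw_translate(env, &s2ptw, addr, fi)) {
        /* failure: fi->type, fi->s2addr etc. already describe the fault */
        return 0;
    }
    addr         = s2ptw.out_phys;    /* translated descriptor address */
    attrs.secure = s2ptw.out_secure;  /* PA space chosen by the S2 walk */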
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

hw/arm: versal: Add support for the RTC.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-10-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h |  8 ++++++++
 hw/arm/xlnx-versal.c         | 21 +++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/char/pl011.h"
 #include "hw/dma/xlnx-zdma.h"
 #include "hw/net/cadence_gem.h"
+#include "hw/rtc/xlnx-zynqmp-rtc.h"

 #define TYPE_XLNX_VERSAL "xlnx-versal"
 #define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
         struct {
             SDHCIState sd[XLNX_VERSAL_NR_SDS];
         } iou;
+
+        XlnxZynqMPRTC rtc;
     } pmc;

     struct {
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
 #define VERSAL_GEM1_IRQ_0          58
 #define VERSAL_GEM1_WAKE_IRQ_0     59
 #define VERSAL_ADMA_IRQ_0          60
+#define VERSAL_RTC_APB_ERR_IRQ     121
 #define VERSAL_SD0_IRQ_0           126
+#define VERSAL_RTC_ALARM_IRQ       142
+#define VERSAL_RTC_SECONDS_IRQ     143

 /* Architecturally reserved IRQs suitable for virtualization. */
 #define VERSAL_RSVD_IRQ_FIRST 111
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
 #define MM_PMC_SD0_SIZE     0x10000
 #define MM_PMC_CRP          0xf1260000U
 #define MM_PMC_CRP_SIZE     0x10000
+#define MM_PMC_RTC          0xf12a0000
+#define MM_PMC_RTC_SIZE     0x10000
 #endif
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_sds(Versal *s, qemu_irq *pic)
     }
 }

+static void versal_create_rtc(Versal *s, qemu_irq *pic)
+{
+    SysBusDevice *sbd;
+    MemoryRegion *mr;
+
+    sysbus_init_child_obj(OBJECT(s), "rtc", &s->pmc.rtc, sizeof(s->pmc.rtc),
+                          TYPE_XLNX_ZYNQMP_RTC);
+    sbd = SYS_BUS_DEVICE(&s->pmc.rtc);
+    qdev_init_nofail(DEVICE(sbd));
+
+    mr = sysbus_mmio_get_region(sbd, 0);
+    memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr);
+
+    /*
+     * TODO: Connect the ALARM and SECONDS interrupts once our RTC model
+     * supports them.
+     */
+    sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]);
+}
+
 /* This takes the board allocated linear DDR memory and creates aliases
  * for each split DDR range/aperture on the Versal address map.
  */
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
     versal_create_gems(s, pic);
     versal_create_admas(s, pic);
     versal_create_sds(s, pic);
+    versal_create_rtc(s, pic);
     versal_map_ddr(s);
     versal_unimp(s);

--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

Before using softmmu page tables for the ptw, plumb down
a debug parameter so that we can query page table entries
from gdbstub without modifying cpu state.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 55 ++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 18 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 typedef struct S1Translate {
     ARMMMUIdx in_mmu_idx;
     bool in_secure;
+    bool in_debug;
     bool out_secure;
     hwaddr out_phys;
 } S1Translate;
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_secure = is_secure,
+            .in_debug = ptw->in_debug,
         };
         uint64_t hcr;
         int ret;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
     return 0;
 }

-bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, GetPhysAddrResult *result,
-                               ARMMMUFaultInfo *fi)
+static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+                                      target_ulong address,
+                                      MMUAccessType access_type,
+                                      GetPhysAddrResult *result,
+                                      ARMMMUFaultInfo *fi)
 {
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
-    S1Translate ptw;
+    bool is_secure = ptw->in_secure;

     if (mmu_idx != s1_mmu_idx) {
         /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         bool is_el0;
         uint64_t hcr;

-        ret = get_phys_addr_with_secure(env, address, access_type,
-                                        s1_mmu_idx, is_secure, result, fi);
+        ptw->in_mmu_idx = s1_mmu_idx;
+        ret = get_phys_addr_with_struct(env, ptw, address, access_type,
+                                        result, fi);

         /* If S1 fails or S2 is disabled, return early. */
         if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
             s2walk_secure = false;
         }

-        ptw.in_mmu_idx =
+        ptw->in_mmu_idx =
             s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
-        ptw.in_secure = s2walk_secure;
+        ptw->in_secure = s2walk_secure;
         is_el0 = mmu_idx == ARMMMUIdx_E10_0;

         /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         cacheattrs1 = result->cacheattrs;
         memset(result, 0, sizeof(*result));

-        ret = get_phys_addr_lpae(env, &ptw, ipa, access_type,
+        ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
                                  is_el0, result, fi);
         fi->s2addr = ipa;

@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
     }

-    ptw.in_mmu_idx = mmu_idx;
-    ptw.in_secure = is_secure;
-
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, &ptw, address, access_type, false,
+        return get_phys_addr_lpae(env, ptw, address, access_type, false,
                                   result, fi);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
-        return get_phys_addr_v6(env, &ptw, address, access_type, result, fi);
+        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
     } else {
-        return get_phys_addr_v5(env, &ptw, address, access_type, result, fi);
+        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
     }
 }

+bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool is_secure, GetPhysAddrResult *result,
+                               ARMMMUFaultInfo *fi)
+{
+    S1Translate ptw = {
+        .in_mmu_idx = mmu_idx,
+        .in_secure = is_secure,
+    };
+    return get_phys_addr_with_struct(env, &ptw, address, access_type,
+                                     result, fi);
+}
+
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
+    S1Translate ptw = {
+        .in_mmu_idx = arm_mmu_idx(env),
+        .in_secure = arm_is_secure(env),
+        .in_debug = true,
+    };
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
     bool ret;

-    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
+    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
     *attrs = res.f.attrs;

     if (ret) {
--
2.25.1
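A fragment showing how the new flag is intended to be used (it mirrors the
arm_cpu_get_phys_page_attrs_debug() hunk above; illustration only, not a
further change to the patch):

    /*
     * gdbstub lookups must not disturb guest-visible state, so the walk
     * is flagged as a debug access; later patches use this to avoid
     * touching the softmmu TLB on such walks.
     */
    S1Translate ptw = {
        .in_mmu_idx = arm_mmu_idx(env),
        .in_secure  = arm_is_secure(env),
        .in_debug   = true,
    };
    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD,
                                    &res, &fi);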
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Add support for the RTC.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-12-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-versal-virt.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -XXX,XX +XXX,XX @@ static void fdt_add_sd_nodes(VersalVirt *s)
     }
 }

+static void fdt_add_rtc_node(VersalVirt *s)
+{
+    const char compat[] = "xlnx,zynqmp-rtc";
+    const char interrupt_names[] = "alarm\0sec";
+    char *name = g_strdup_printf("/rtc@%x", MM_PMC_RTC);
+
+    qemu_fdt_add_subnode(s->fdt, name);
+
+    qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
+                           GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_ALARM_IRQ,
+                           GIC_FDT_IRQ_FLAGS_LEVEL_HI,
+                           GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_SECONDS_IRQ,
+                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+    qemu_fdt_setprop(s->fdt, name, "interrupt-names",
+                     interrupt_names, sizeof(interrupt_names));
+    qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
+                                 2, MM_PMC_RTC, 2, MM_PMC_RTC_SIZE);
+    qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
+    g_free(name);
+}
+
 static void fdt_nop_memory_nodes(void *fdt, Error **errp)
 {
     Error *err = NULL;
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
     fdt_add_timer_nodes(s);
     fdt_add_zdma_nodes(s);
     fdt_add_sd_nodes(s);
+    fdt_add_rtc_node(s);
     fdt_add_cpu_nodes(s, psci_conduit);
     fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
     fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

Hoist this test out of arm_ld[lq]_ptw into S1_ptw_translate.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
     bool in_secure;
     bool in_debug;
     bool out_secure;
+    bool out_be;
     hwaddr out_phys;
 } S1Translate;

@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,

     ptw->out_secure = is_secure;
     ptw->out_phys = addr;
+    ptw->out_be = regime_translation_big_endian(env, ptw->in_mmu_idx);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
     addr = ptw->out_phys;
     attrs.secure = ptw->out_secure;
     as = arm_addressspace(cs, attrs);
-    if (regime_translation_big_endian(env, ptw->in_mmu_idx)) {
+    if (ptw->out_be) {
         data = address_space_ldl_be(as, addr, attrs, &result);
     } else {
         data = address_space_ldl_le(as, addr, attrs, &result);
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
     addr = ptw->out_phys;
     attrs.secure = ptw->out_secure;
     as = arm_addressspace(cs, attrs);
-    if (regime_translation_big_endian(env, ptw->in_mmu_idx)) {
+    if (ptw->out_be) {
         data = address_space_ldq_be(as, addr, attrs, &result);
     } else {
         data = address_space_ldq_le(as, addr, attrs, &result);
--
2.25.1
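A short fragment showing the shape of the PTE loaders after this change
(mirrors the arm_ldl_ptw() hunk above; illustration only): the endianness
decision is now captured once per walk, alongside the other out_* results,
rather than re-derived from the mmu_idx at every descriptor load.

    if (!S1_ptw_translate(env, ptw, addr, fi)) {
        return 0;
    }
    /* out_be was latched by S1_ptw_translate for this walk */
    data = ptw->out_be
         ? address_space_ldl_be(as, ptw->out_phys, attrs, &result)
         : address_space_ldl_le(as, ptw->out_phys, attrs, &result);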
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

MIDR_EL1 is a 64-bit system register with the top 32 bits being RES0.
Represent it in QEMU's ARMCPU struct with a uint64_t, not a
uint32_t.

This fixes an error when compiling with -Werror=conversion
because we were manipulating the register value using a
local uint64_t variable:

  target/arm/cpu64.c: In function ‘aarch64_max_initfn’:
  target/arm/cpu64.c:628:21: error: conversion from ‘uint64_t’ {aka ‘long unsigned int’} to ‘uint32_t’ {aka ‘unsigned int’} may change value [-Werror=conversion]
    628 |     cpu->midr = t;
        |                 ^

and future-proofs us against a possible future architecture
change using some of the top 32 bits.

Suggested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20200428172634.29707-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 2 +-
 target/arm/cpu.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
         uint64_t id_aa64dfr0;
         uint64_t id_aa64dfr1;
     } isar;
-    uint32_t midr;
+    uint64_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint32_t ctr;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo arm_cpus[] = {
 static Property arm_cpu_properties[] = {
     DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
     DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
-    DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
+    DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
     DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                        mp_affinity, ARM64_AFFINITY_INVALID),
     DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

So far, limit the change to S1_ptw_translate, arm_ldl_ptw, and
arm_ldq_ptw.  Use probe_access_full to find the host address,
and if one is found, use a direct host load.  If the probe fails,
we've got our fault info already.  On the off chance that page
tables are not in RAM, continue to use the address_space_ld*
functions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h        |   5 +
 target/arm/ptw.c        | 196 +++++++++++++++++++++++++---------------
 target/arm/tlb_helper.c |  17 +++-
 3 files changed, 144 insertions(+), 74 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMTBFlags {
     target_ulong flags2;
 } CPUARMTBFlags;

+typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
+
 typedef struct CPUArchState {
     /* Regs for current mode. */
     uint32_t regs[16];
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
     struct CPUBreakpoint *cpu_breakpoint[16];
     struct CPUWatchpoint *cpu_watchpoint[16];

+    /* Optional fault info across tlb lookup. */
+    ARMMMUFaultInfo *tlb_fi;
+
     /* Fields up to this point are cleared by a CPU reset */
     struct {} end_reset_fields;

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "qemu/log.h"
 #include "qemu/range.h"
+#include "exec/exec-all.h"
 #include "cpu.h"
 #include "internals.h"
 #include "idau.h"
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
     bool out_secure;
     bool out_be;
     hwaddr out_phys;
+    void *out_host;
 } S1Translate;

 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 }

-static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
+static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
      * For an S1 page table walk, the stage 1 attributes are always
@@ -XXX,XX +XXX,XX @@ static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
      * when cacheattrs.attrs bit [2] is 0.
      */
-    assert(cacheattrs.is_s2_format);
     if (hcr & HCR_FWB) {
-        return (cacheattrs.attrs & 0x4) == 0;
+        return (attrs & 0x4) == 0;
     } else {
-        return (cacheattrs.attrs & 0xc) == 0;
+        return (attrs & 0xc) == 0;
     }
 }

@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
     bool is_secure = ptw->in_secure;
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+    bool s2_phys = false;
+    uint8_t pte_attrs;
+    bool pte_secure;

-    if (arm_mmu_idx_is_stage1_of_2(ptw->in_mmu_idx) &&
-        !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
-        GetPhysAddrResult s2 = {};
-        S1Translate s2ptw = {
-            .in_mmu_idx = s2_mmu_idx,
-            .in_secure = is_secure,
-            .in_debug = ptw->in_debug,
-        };
-        uint64_t hcr;
-        int ret;
+    if (!arm_mmu_idx_is_stage1_of_2(mmu_idx)
+        || regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
+        s2_mmu_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+        s2_phys = true;
+    }

-        ret = get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
-                                 false, &s2, fi);
-        if (ret) {
-            assert(fi->type != ARMFault_None);
-            fi->s2addr = addr;
-            fi->stage2 = true;
-            fi->s1ptw = true;
-            fi->s1ns = !is_secure;
-            return false;
+    if (unlikely(ptw->in_debug)) {
+        /*
+         * From gdbstub, do not use softmmu so that we don't modify the
+         * state of the cpu at all, including softmmu tlb contents.
+         */
+        if (s2_phys) {
+            ptw->out_phys = addr;
+            pte_attrs = 0;
+            pte_secure = is_secure;
+        } else {
+            S1Translate s2ptw = {
+                .in_mmu_idx = s2_mmu_idx,
+                .in_secure = is_secure,
+                .in_debug = true,
+            };
+            GetPhysAddrResult s2 = { };
+            if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
+                                    false, &s2, fi)) {
+                goto fail;
+            }
+            ptw->out_phys = s2.f.phys_addr;
+            pte_attrs = s2.cacheattrs.attrs;
+            pte_secure = s2.f.attrs.secure;
         }
+        ptw->out_host = NULL;
+    } else {
+        CPUTLBEntryFull *full;
+        int flags;

-        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-        if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
+        env->tlb_fi = fi;
+        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
+                                  arm_to_core_mmu_idx(s2_mmu_idx),
+                                  true, &ptw->out_host, &full, 0);
+        env->tlb_fi = NULL;
+
+        if (unlikely(flags & TLB_INVALID_MASK)) {
+            goto fail;
+        }
+        ptw->out_phys = full->phys_addr;
+        pte_attrs = full->pte_attrs;
+        pte_secure = full->attrs.secure;
+    }
+
+    if (!s2_phys) {
+        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+
+        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
             /*
              * PTW set and S1 walk touched S2 Device memory:
              * generate Permission fault.
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
             fi->s1ns = !is_secure;
             return false;
         }
-
-        if (arm_is_secure_below_el3(env)) {
-            /* Check if page table walk is to secure or non-secure PA space. */
-            if (is_secure) {
-                is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
-            } else {
-                is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
-            }
-        } else {
-            assert(!is_secure);
-        }
-
-        addr = s2.f.phys_addr;
     }

-    ptw->out_secure = is_secure;
-    ptw->out_phys = addr;
-    ptw->out_be = regime_translation_big_endian(env, ptw->in_mmu_idx);
+    /* Check if page table walk is to secure or non-secure PA space. */
+    ptw->out_secure = (is_secure
+                       && !(pte_secure
+                            ? env->cp15.vstcr_el2 & VSTCR_SW
+                            : env->cp15.vtcr_el2 & VTCR_NSW));
+    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
     return true;
+
+ fail:
+    assert(fi->type != ARMFault_None);
+    fi->s2addr = addr;
+    fi->stage2 = true;
+    fi->s1ptw = true;
+    fi->s1ns = !is_secure;
+    return false;
 }

 /* All loads done in the course of a page table walk go through here. */
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
                             ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
     uint32_t data;

     if (!S1_ptw_translate(env, ptw, addr, fi)) {
+        /* Failure. */
+        assert(fi->s1ptw);
         return 0;
     }
-    addr = ptw->out_phys;
-    attrs.secure = ptw->out_secure;
-    as = arm_addressspace(cs, attrs);
-    if (ptw->out_be) {
-        data = address_space_ldl_be(as, addr, attrs, &result);
+
+    if (likely(ptw->out_host)) {
+        /* Page tables are in RAM, and we have the host address. */
+        if (ptw->out_be) {
+            data = ldl_be_p(ptw->out_host);
+        } else {
+            data = ldl_le_p(ptw->out_host);
+        }
     } else {
-        data = address_space_ldl_le(as, addr, attrs, &result);
+        /* Page tables are in MMIO. */
+        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        AddressSpace *as = arm_addressspace(cs, attrs);
+        MemTxResult result = MEMTX_OK;
+
+        if (ptw->out_be) {
+            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
+        } else {
+            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
+        }
+        if (unlikely(result != MEMTX_OK)) {
+            fi->type = ARMFault_SyncExternalOnWalk;
+            fi->ea = arm_extabort_type(result);
+            return 0;
+        }
     }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
+    return data;
 }

 static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
                             ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
     uint64_t data;

     if (!S1_ptw_translate(env, ptw, addr, fi)) {
+        /* Failure. */
+        assert(fi->s1ptw);
         return 0;
     }
-    addr = ptw->out_phys;
-    attrs.secure = ptw->out_secure;
-    as = arm_addressspace(cs, attrs);
-    if (ptw->out_be) {
-        data = address_space_ldq_be(as, addr, attrs, &result);
+
+    if (likely(ptw->out_host)) {
+        /* Page tables are in RAM, and we have the host address. */
+        if (ptw->out_be) {
+            data = ldq_be_p(ptw->out_host);
+        } else {
+            data = ldq_le_p(ptw->out_host);
+        }
     } else {
-        data = address_space_ldq_le(as, addr, attrs, &result);
+        /* Page tables are in MMIO. */
+        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        AddressSpace *as = arm_addressspace(cs, attrs);
+        MemTxResult result = MEMTX_OK;
+
+        if (ptw->out_be) {
+            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
+        } else {
+            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
+        }
+        if (unlikely(result != MEMTX_OK)) {
+            fi->type = ARMFault_SyncExternalOnWalk;
+            fi->ea = arm_extabort_type(result);
+            return 0;
+        }
     }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
+    return data;
 }

 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       bool probe, uintptr_t retaddr)
 {
     ARMCPU *cpu = ARM_CPU(cs);
-    ARMMMUFaultInfo fi = {};
     GetPhysAddrResult res = {};
+    ARMMMUFaultInfo local_fi, *fi;
     int ret;

+    /*
+     * Allow S1_ptw_translate to see any fault generated here.
+     * Since this may recurse, read and clear.
+     */
+    fi = cpu->env.tlb_fi;
+    if (fi) {
+        cpu->env.tlb_fi = NULL;
+    } else {
+        fi = memset(&local_fi, 0, sizeof(local_fi));
+    }
+
     /*
      * Walk the page table and (if the mapping exists) add the page
      * to the TLB. On success, return true. Otherwise, if probing,
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
      */
     ret = get_phys_addr(&cpu->env, address, access_type,
                         core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &res, &fi);
+                        &res, fi);
     if (likely(!ret)) {
         /*
          * Map a single [sub]page. Regions smaller than our declared
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else {
         /* now we have a real cpu fault */
         cpu_restore_state(cs, retaddr, true);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
 }
 #else
--
2.25.1
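As a standalone illustration of the device-memory test that the patch above
reworks (not part of the patch; names and the boolean parameter are local
to this snippet): stage-2 descriptor attributes identify Device memory as
attrs[3:2] == 0b00 normally, or attrs[2] == 0 when HCR_EL2.FWB is set,
which is exactly what the new S2_attrs_are_device() computes now that it
takes the raw 8-bit attrs instead of a full ARMCacheAttrs.

    #include <stdbool.h>
    #include <stdint.h>

    static bool s2_attrs_are_device(bool hcr_fwb, uint8_t attrs)
    {
        return hcr_fwb ? (attrs & 0x4) == 0 : (attrs & 0xc) == 0;
    }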
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Fix typo xlnx-ve -> xlnx-versal.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-4-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-versal-virt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
     psci_conduit = QEMU_PSCI_CONDUIT_SMC;


From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 191 +++++++++++++++++++++++++----------------------
 1 file changed, 100 insertions(+), 91 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));

+static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+                                      target_ulong address,
+                                      MMUAccessType access_type,
+                                      GetPhysAddrResult *result,
+                                      ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
+
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
 static const uint8_t pamax_map[] = {
     [0] = 32,
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
     return 0;
 }

+static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
+                                   target_ulong address,
+                                   MMUAccessType access_type,
+                                   GetPhysAddrResult *result,
+                                   ARMMMUFaultInfo *fi)
+{
+    hwaddr ipa;
+    int s1_prot;
+    int ret;
+    bool is_secure = ptw->in_secure;
+    bool ipa_secure, s2walk_secure;
+    ARMCacheAttrs cacheattrs1;
+    bool is_el0;
+    uint64_t hcr;
+
+    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+
+    /* If S1 fails or S2 is disabled, return early. */
+    if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+        return ret;
+    }
+
+    ipa = result->f.phys_addr;
+    ipa_secure = result->f.attrs.secure;
+    if (is_secure) {
+        /* Select TCR based on the NS bit from the S1 walk. */
+        s2walk_secure = !(ipa_secure
+                          ? env->cp15.vstcr_el2 & VSTCR_SW
+                          : env->cp15.vtcr_el2 & VTCR_NSW);
+    } else {
+        assert(!ipa_secure);
+        s2walk_secure = false;
+    }
+
+    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+    ptw->in_secure = s2walk_secure;
+
+    /*
+     * S1 is done, now do S2 translation.
+     * Save the stage1 results so that we may merge prot and cacheattrs later.
+     */
+    s1_prot = result->f.prot;
+    cacheattrs1 = result->cacheattrs;
+    memset(result, 0, sizeof(*result));
+
+    ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
+    fi->s2addr = ipa;
+
+    /* Combine the S1 and S2 perms. */
+    result->f.prot &= s1_prot;
+
+    /* If S2 fails, return early. */
+    if (ret) {
+        return ret;
+    }
+
+    /* Combine the S1 and S2 cache attributes. */
+    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    if (hcr & HCR_DC) {
93
+ /*
94
+ * HCR.DC forces the first stage attributes to
95
+ * Normal Non-Shareable,
96
+ * Inner Write-Back Read-Allocate Write-Allocate,
97
+ * Outer Write-Back Read-Allocate Write-Allocate.
98
+ * Do not overwrite Tagged within attrs.
99
+ */
100
+ if (cacheattrs1.attrs != 0xf0) {
101
+ cacheattrs1.attrs = 0xff;
102
+ }
103
+ cacheattrs1.shareability = 0;
104
+ }
105
+ result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
106
+ result->cacheattrs);
107
+
108
+ /*
109
+ * Check if IPA translates to secure or non-secure PA space.
110
+ * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
111
+ */
112
+ result->f.attrs.secure =
113
+ (is_secure
114
+ && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
115
+ && (ipa_secure
116
+ || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
117
+
118
+ return 0;
119
+}
120
+
121
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
122
target_ulong address,
123
MMUAccessType access_type,
124
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
125
if (mmu_idx != s1_mmu_idx) {
126
/*
127
* Call ourselves recursively to do the stage 1 and then stage 2
128
- * translations if mmu_idx is a two-stage regime.
129
+ * translations if mmu_idx is a two-stage regime, and EL2 present.
130
+ * Otherwise, a stage1+stage2 translation is just stage 1.
131
*/
132
+ ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
133
if (arm_feature(env, ARM_FEATURE_EL2)) {
134
- hwaddr ipa;
135
- int s1_prot;
136
- int ret;
137
- bool ipa_secure, s2walk_secure;
138
- ARMCacheAttrs cacheattrs1;
139
- bool is_el0;
140
- uint64_t hcr;
141
-
142
- ptw->in_mmu_idx = s1_mmu_idx;
143
- ret = get_phys_addr_with_struct(env, ptw, address, access_type,
144
- result, fi);
145
-
146
- /* If S1 fails or S2 is disabled, return early. */
147
- if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
148
- is_secure)) {
149
- return ret;
150
- }
151
-
152
- ipa = result->f.phys_addr;
153
- ipa_secure = result->f.attrs.secure;
154
- if (is_secure) {
155
- /* Select TCR based on the NS bit from the S1 walk. */
156
- s2walk_secure = !(ipa_secure
157
- ? env->cp15.vstcr_el2 & VSTCR_SW
158
- : env->cp15.vtcr_el2 & VTCR_NSW);
159
- } else {
160
- assert(!ipa_secure);
161
- s2walk_secure = false;
162
- }
163
-
164
- ptw->in_mmu_idx =
165
- s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
166
- ptw->in_secure = s2walk_secure;
167
- is_el0 = mmu_idx == ARMMMUIdx_E10_0;
168
-
169
- /*
170
- * S1 is done, now do S2 translation.
171
- * Save the stage1 results so that we may merge
172
- * prot and cacheattrs later.
173
- */
174
- s1_prot = result->f.prot;
175
- cacheattrs1 = result->cacheattrs;
176
- memset(result, 0, sizeof(*result));
177
-
178
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
179
- is_el0, result, fi);
180
- fi->s2addr = ipa;
181
-
182
- /* Combine the S1 and S2 perms. */
183
- result->f.prot &= s1_prot;
184
-
185
- /* If S2 fails, return early. */
186
- if (ret) {
187
- return ret;
188
- }
189
-
190
- /* Combine the S1 and S2 cache attributes. */
191
- hcr = arm_hcr_el2_eff_secstate(env, is_secure);
192
- if (hcr & HCR_DC) {
193
- /*
194
- * HCR.DC forces the first stage attributes to
195
- * Normal Non-Shareable,
196
- * Inner Write-Back Read-Allocate Write-Allocate,
197
- * Outer Write-Back Read-Allocate Write-Allocate.
198
- * Do not overwrite Tagged within attrs.
199
- */
200
- if (cacheattrs1.attrs != 0xf0) {
201
- cacheattrs1.attrs = 0xff;
202
- }
203
- cacheattrs1.shareability = 0;
204
- }
205
- result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
206
- result->cacheattrs);
207
-
208
- /*
209
- * Check if IPA translates to secure or non-secure PA space.
210
- * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
211
- */
212
- result->f.attrs.secure =
213
- (is_secure
214
- && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
215
- && (ipa_secure
216
- || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
217
-
218
- return 0;
219
- } else {
220
- /*
221
- * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
222
- */
223
- mmu_idx = stage_1_mmu_idx(mmu_idx);
224
+ return get_phys_addr_twostage(env, ptw, address, access_type,
225
+ result, fi);
226
}
21
}
227
}
22
228
23
- sysbus_init_child_obj(OBJECT(machine), "xlnx-ve", &s->soc,
24
+ sysbus_init_child_obj(OBJECT(machine), "xlnx-versal", &s->soc,
25
sizeof(s->soc), TYPE_XLNX_VERSAL);
26
object_property_set_link(OBJECT(&s->soc), OBJECT(machine->ram),
27
"ddr", &error_abort);
28
--
229
--
29
2.20.1
230
2.25.1
30
31
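A small aside on get_phys_addr_twostage() above: the "result->f.prot &=
s1_prot" step means the final permissions are the intersection of what stage 1
and stage 2 each allow. A toy model, with made-up flag values rather than
QEMU's PAGE_* bits:

#include <assert.h>

enum { PROT_READ = 1, PROT_WRITE = 2, PROT_EXEC = 4 };

int main(void)
{
    int s1_prot = PROT_READ | PROT_WRITE;   /* stage 1 walk result */
    int s2_prot = PROT_READ | PROT_EXEC;    /* stage 2 walk result */
    int final_prot = s2_prot & s1_prot;     /* as in the patch */

    assert(final_prot == PROT_READ);        /* only what both stages allow */
    return 0;
}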
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Add support for SD.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-9-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h | 12 ++++++++++++
 hw/arm/xlnx-versal.c | 31 +++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@
 
 #include "hw/sysbus.h"
 #include "hw/arm/boot.h"
+#include "hw/sd/sdhci.h"
 #include "hw/intc/arm_gicv3.h"
 #include "hw/char/pl011.h"
 #include "hw/dma/xlnx-zdma.h"
@@ -XXX,XX +XXX,XX @@
 #define XLNX_VERSAL_NR_UARTS 2
 #define XLNX_VERSAL_NR_GEMS 2
 #define XLNX_VERSAL_NR_ADMAS 8
+#define XLNX_VERSAL_NR_SDS 2
 #define XLNX_VERSAL_NR_IRQS 192
 
 typedef struct Versal {
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
         } iou;
     } lpd;
 
+    /* The Platform Management Controller subsystem.  */
+    struct {
+        struct {
+            SDHCIState sd[XLNX_VERSAL_NR_SDS];
+        } iou;
+    } pmc;
+
     struct {
         MemoryRegion *mr_ddr;
         uint32_t psci_conduit;
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
 #define VERSAL_GEM1_IRQ_0 58
 #define VERSAL_GEM1_WAKE_IRQ_0 59
 #define VERSAL_ADMA_IRQ_0 60
+#define VERSAL_SD0_IRQ_0 126
 
 /* Architecturally reserved IRQs suitable for virtualization.  */
 #define VERSAL_RSVD_IRQ_FIRST 111
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
 #define MM_FPD_CRF 0xfd1a0000U
 #define MM_FPD_CRF_SIZE 0x140000
 
+#define MM_PMC_SD0 0xf1040000U
+#define MM_PMC_SD0_SIZE 0x10000
 #define MM_PMC_CRP 0xf1260000U
 #define MM_PMC_CRP_SIZE 0x10000
 #endif
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
     }
 }
 
+#define SDHCI_CAPABILITIES  0x280737ec6481 /* Same as on ZynqMP.  */
+static void versal_create_sds(Versal *s, qemu_irq *pic)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(s->pmc.iou.sd); i++) {
+        DeviceState *dev;
+        MemoryRegion *mr;
+
+        sysbus_init_child_obj(OBJECT(s), "sd[*]",
+                              &s->pmc.iou.sd[i], sizeof(s->pmc.iou.sd[i]),
+                              TYPE_SYSBUS_SDHCI);
+        dev = DEVICE(&s->pmc.iou.sd[i]);
+
+        object_property_set_uint(OBJECT(dev),
+                                 3, "sd-spec-version", &error_fatal);
+        object_property_set_uint(OBJECT(dev), SDHCI_CAPABILITIES, "capareg",
+                                 &error_fatal);
+        object_property_set_uint(OBJECT(dev), UHS_I, "uhs", &error_fatal);
+        qdev_init_nofail(dev);
+
+        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+        memory_region_add_subregion(&s->mr_ps,
+                                    MM_PMC_SD0 + i * MM_PMC_SD0_SIZE, mr);
+
+        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
+                           pic[VERSAL_SD0_IRQ_0 + i * 2]);
+    }
+}
+
 /* This takes the board allocated linear DDR memory and creates aliases
  * for each split DDR range/aperture on the Versal address map.
  */
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
     versal_create_uarts(s, pic);
     versal_create_gems(s, pic);
     versal_create_admas(s, pic);
+    versal_create_sds(s, pic);
     versal_map_ddr(s);
     versal_unimp(s);
 
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

The return type of the functions is already bool, but in a few
instances we used an integer type with the return statement.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
     result->f.lg_page_size = TARGET_PAGE_BITS;
     result->cacheattrs.shareability = shareability;
     result->cacheattrs.attrs = memattr;
-    return 0;
+    return false;
 }
 
 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 {
     hwaddr ipa;
     int s1_prot;
-    int ret;
     bool is_secure = ptw->in_secure;
-    bool ipa_secure, s2walk_secure;
+    bool ret, ipa_secure, s2walk_secure;
     ARMCacheAttrs cacheattrs1;
     bool is_el0;
     uint64_t hcr;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
          && (ipa_secure
              || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
 
-    return 0;
+    return false;
 }
 
 static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
--
2.25.1
Convert the Neon "load/store multiple structures" insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-12-peter.maydell@linaro.org
---
 target/arm/neon-ls.decode | 7 ++
 target/arm/translate-neon.inc.c | 124 ++++++++++++++++++++++++++++++++
 target/arm/translate.c | 91 +----------------------
 3 files changed, 133 insertions(+), 89 deletions(-)

diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@
 # 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
 # This file works on the A32 encoding only; calling code for T32 has to
 # transform the insn into the A32 version first.
+
+%vd_dp  22:1 12:4
+
+# Neon load/store multiple structures
+
+VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
+               vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
                          gen_helper_gvec_fmlal_idx_a32);
     return true;
 }
+
+static struct {
+    int nregs;
+    int interleave;
+    int spacing;
+} const neon_ls_element_type[11] = {
+    {1, 4, 1},
+    {1, 4, 2},
+    {4, 1, 1},
+    {2, 2, 2},
+    {1, 3, 1},
+    {1, 3, 2},
+    {3, 1, 1},
+    {1, 1, 1},
+    {1, 2, 1},
+    {1, 2, 2},
+    {2, 1, 1}
+};
+
+static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
+                                      int stride)
+{
+    if (rm != 15) {
+        TCGv_i32 base;
+
+        base = load_reg(s, rn);
+        if (rm == 13) {
+            tcg_gen_addi_i32(base, base, stride);
+        } else {
+            TCGv_i32 index;
+            index = load_reg(s, rm);
+            tcg_gen_add_i32(base, base, index);
+            tcg_temp_free_i32(index);
+        }
+        store_reg(s, rn, base);
+    }
+}
+
+static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
+{
+    /* Neon load/store multiple structures */
+    int nregs, interleave, spacing, reg, n;
+    MemOp endian = s->be_data;
+    int mmu_idx = get_mem_index(s);
+    int size = a->size;
+    TCGv_i64 tmp64;
+    TCGv_i32 addr, tmp;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+    if (a->itype > 10) {
+        return false;
+    }
+    /* Catch UNDEF cases for bad values of align field */
+    switch (a->itype & 0xc) {
+    case 4:
+        if (a->align >= 2) {
+            return false;
+        }
+        break;
+    case 8:
+        if (a->align == 3) {
+            return false;
+        }
+        break;
+    default:
+        break;
+    }
+    nregs = neon_ls_element_type[a->itype].nregs;
+    interleave = neon_ls_element_type[a->itype].interleave;
+    spacing = neon_ls_element_type[a->itype].spacing;
+    if (size == 3 && (interleave | spacing) != 1) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /* For our purposes, bytes are always little-endian. */
+    if (size == 0) {
+        endian = MO_LE;
+    }
+    /*
+     * Consecutive little-endian elements from a single register
+     * can be promoted to a larger little-endian operation.
+     */
+    if (interleave == 1 && endian == MO_LE) {
+        size = 3;
+    }
+    tmp64 = tcg_temp_new_i64();
+    addr = tcg_temp_new_i32();
+    tmp = tcg_const_i32(1 << size);
+    load_reg_var(s, addr, a->rn);
+    for (reg = 0; reg < nregs; reg++) {
+        for (n = 0; n < 8 >> size; n++) {
+            int xs;
+            for (xs = 0; xs < interleave; xs++) {
+                int tt = a->vd + reg + spacing * xs;
+
+                if (a->l) {
+                    gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+                    neon_store_element64(tt, n, size, tmp64);
+                } else {
+                    neon_load_element64(tmp64, tt, n, size);
+                    gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+                }
+                tcg_gen_add_i32(addr, addr, tmp);
+            }
+        }
+    }
+    tcg_temp_free_i32(addr);
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i64(tmp64);
+
+    gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
 }
 
-static struct {
-    int nregs;
-    int interleave;
-    int spacing;
-} const neon_ls_element_type[11] = {
-    {1, 4, 1},
-    {1, 4, 2},
-    {4, 1, 1},
-    {2, 2, 2},
-    {1, 3, 1},
-    {1, 3, 2},
-    {3, 1, 1},
-    {1, 1, 1},
-    {1, 2, 1},
-    {1, 2, 2},
-    {2, 1, 1}
-};
-
 /* Translate a NEON load/store element instruction.  Return nonzero if the
    instruction is invalid.  */
 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
 {
     int rd, rn, rm;
-    int op;
     int nregs;
-    int interleave;
-    int spacing;
     int stride;
     int size;
     int reg;
     int load;
-    int n;
     int vec_size;
-    int mmu_idx;
-    MemOp endian;
     TCGv_i32 addr;
     TCGv_i32 tmp;
-    TCGv_i32 tmp2;
-    TCGv_i64 tmp64;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     rn = (insn >> 16) & 0xf;
     rm = insn & 0xf;
     load = (insn & (1 << 21)) != 0;
-    endian = s->be_data;
-    mmu_idx = get_mem_index(s);
     if ((insn & (1 << 23)) == 0) {
-        /* Load store all elements.  */
-        op = (insn >> 8) & 0xf;
-        size = (insn >> 6) & 3;
-        if (op > 10)
-            return 1;
-        /* Catch UNDEF cases for bad values of align field */
-        switch (op & 0xc) {
-        case 4:
-            if (((insn >> 5) & 1) == 1) {
-                return 1;
-            }
-            break;
-        case 8:
-            if (((insn >> 4) & 3) == 3) {
-                return 1;
-            }
-            break;
-        default:
-            break;
-        }
-        nregs = neon_ls_element_type[op].nregs;
-        interleave = neon_ls_element_type[op].interleave;
-        spacing = neon_ls_element_type[op].spacing;
-        if (size == 3 && (interleave | spacing) != 1) {
-            return 1;
-        }
-        /* For our purposes, bytes are always little-endian.  */
-        if (size == 0) {
-            endian = MO_LE;
-        }
-        /* Consecutive little-endian elements from a single register
-         * can be promoted to a larger little-endian operation.
-         */
-        if (interleave == 1 && endian == MO_LE) {
-            size = 3;
-        }
-        tmp64 = tcg_temp_new_i64();
-        addr = tcg_temp_new_i32();
-        tmp2 = tcg_const_i32(1 << size);
-        load_reg_var(s, addr, rn);
-        for (reg = 0; reg < nregs; reg++) {
-            for (n = 0; n < 8 >> size; n++) {
-                int xs;
-                for (xs = 0; xs < interleave; xs++) {
-                    int tt = rd + reg + spacing * xs;
-
-                    if (load) {
-                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
-                        neon_store_element64(tt, n, size, tmp64);
-                    } else {
-                        neon_load_element64(tmp64, tt, n, size);
-                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
-                    }
-                    tcg_gen_add_i32(addr, addr, tmp2);
-                }
-            }
-        }
-        tcg_temp_free_i32(addr);
-        tcg_temp_free_i32(tmp2);
-        tcg_temp_free_i64(tmp64);
-        stride = nregs * interleave * 8;
+        /* Load store all elements  -- handled already by decodetree */
+        return 1;
     } else {
         size = (insn >> 10) & 3;
         if (size == 3) {
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

A simple helper to retrieve the length of the current insn.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h | 5 +++++
 target/arm/translate-vfp.c | 2 +-
 target/arm/translate.c | 5 ++---
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
     s->insn_start = NULL;
 }
 
+static inline int curr_insn_len(DisasContext *s)
+{
+    return s->base.pc_next - s->pc_curr;
+}
+
 /* is_jmp field values */
 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 /* CPU state was modified dynamically; exit to main loop for interrupts. */
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
     if (s->sme_trap_nonstreaming) {
         gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                            syn_smetrap(SME_ET_Streaming,
-                                       s->base.pc_next - s->pc_curr == 2));
+                                       curr_insn_len(s) == 2));
         return false;
     }
 
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
     /* ISS not valid if writeback */
     if (p && !w) {
         ret = rd;
-        if (s->base.pc_next - s->pc_curr == 2) {
+        if (curr_insn_len(s) == 2) {
             ret |= ISSIs16Bit;
         }
     } else {
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* nothing more to generate */
         break;
     case DISAS_WFI:
-        gen_helper_wfi(cpu_env,
-                       tcg_constant_i32(dc->base.pc_next - dc->pc_curr));
+        gen_helper_wfi(cpu_env, tcg_constant_i32(curr_insn_len(dc)));
         /*
          * The helper doesn't necessarily throw an exception, but we
          * must go back to the main loop to check for interrupts anyway.
--
2.25.1
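For readers new to decodetree: the VLDST_multiple pattern in neon-ls.decode
above is compiled by scripts/decodetree.py into field-extraction code roughly
like the hand-rolled sketch below. The extract32() helper mirrors QEMU's, the
example encoding is made up for illustration, and the split %vd_dp field shows
how the D bit (insn bit 22) becomes the high bit of the register number.

#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t v, int start, int len)
{
    return (v >> start) & ((1u << len) - 1);
}

int main(void)
{
    uint32_t insn = 0xf420070f;             /* e.g. VLD1 {d0}, [r0] */
    int l     = extract32(insn, 21, 1);     /* l:1     */
    int rn    = extract32(insn, 16, 4);     /* rn:4    */
    int itype = extract32(insn, 8, 4);      /* itype:4 */
    int size  = extract32(insn, 6, 2);      /* size:2  */
    int align = extract32(insn, 4, 2);      /* align:2 */
    int rm    = extract32(insn, 0, 4);      /* rm:4    */
    /* %vd_dp 22:1 12:4 -- D bit supplies the high bit of the reg number */
    int vd    = (extract32(insn, 22, 1) << 4) | extract32(insn, 12, 4);

    printf("l=%d rn=%d itype=%d size=%d align=%d rm=%d vd=%d\n",
           l, rn, itype, size, align, rm, vd);
    return 0;
}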
From: Fredrik Strupe <fredrik@strupe.net>

According to Arm ARM, VQDMULL is only valid when U=0, while having
U=1 is unallocated.

Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
Fixes: 695272dcb976 ("target-arm: Handle UNDEF cases for Neon 3-regs-different-widths")
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         {0, 0, 0, 0}, /* VMLSL */
         {0, 0, 0, 9}, /* VQDMLSL */
         {0, 0, 0, 0}, /* Integer VMULL */
-        {0, 0, 0, 1}, /* VQDMULL */
+        {0, 0, 0, 9}, /* VQDMULL */
         {0, 0, 0, 0xa}, /* Polynomial VMULL */
         {0, 0, 0, 7}, /* Reserved: always UNDEF */
     };
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on
absolute values by passing in pc difference.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 40 ++++++++++++++++++++------------------
 target/arm/translate.c | 10 ++++++----
 2 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
     return translator_use_goto_tb(&s->base, dest);
 }
 
-static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
+static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
 {
+    uint64_t dest = s->pc_curr + diff;
+
     if (use_goto_tb(s, dest)) {
         tcg_gen_goto_tb(n);
         gen_a64_set_pc_im(dest);
@@ -XXX,XX +XXX,XX @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
  */
 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 {
-    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
+    int64_t diff = sextract32(insn, 0, 26) * 4;
 
     if (insn & (1U << 31)) {
         /* BL Branch with link */
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 
     /* B Branch / BL Branch with link */
     reset_btype(s);
-    gen_goto_tb(s, 0, addr);
+    gen_goto_tb(s, 0, diff);
 }
 
 /* Compare and branch (immediate)
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int sf, op, rt;
-    uint64_t addr;
+    int64_t diff;
     TCGLabel *label_match;
     TCGv_i64 tcg_cmp;
 
     sf = extract32(insn, 31, 1);
     op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
     rt = extract32(insn, 0, 5);
-    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+    diff = sextract32(insn, 5, 19) * 4;
 
     tcg_cmp = read_cpu_reg(s, rt, sf);
     label_match = gen_new_label();
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                         tcg_cmp, 0, label_match);
 
-    gen_goto_tb(s, 0, s->base.pc_next);
+    gen_goto_tb(s, 0, 4);
     gen_set_label(label_match);
-    gen_goto_tb(s, 1, addr);
+    gen_goto_tb(s, 1, diff);
 }
 
 /* Test and branch (immediate)
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
 static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int bit_pos, op, rt;
-    uint64_t addr;
+    int64_t diff;
     TCGLabel *label_match;
     TCGv_i64 tcg_cmp;
 
     bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
     op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
-    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
+    diff = sextract32(insn, 5, 14) * 4;
     rt = extract32(insn, 0, 5);
 
     tcg_cmp = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                         tcg_cmp, 0, label_match);
     tcg_temp_free_i64(tcg_cmp);
-    gen_goto_tb(s, 0, s->base.pc_next);
+    gen_goto_tb(s, 0, 4);
     gen_set_label(label_match);
-    gen_goto_tb(s, 1, addr);
+    gen_goto_tb(s, 1, diff);
 }
 
 /* Conditional branch (immediate)
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int cond;
-    uint64_t addr;
+    int64_t diff;
 
     if ((insn & (1 << 4)) || (insn & (1 << 24))) {
         unallocated_encoding(s);
         return;
     }
-    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+    diff = sextract32(insn, 5, 19) * 4;
     cond = extract32(insn, 0, 4);
 
     reset_btype(s);
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
         /* genuinely conditional branches */
         TCGLabel *label_match = gen_new_label();
         arm_gen_test_cc(cond, label_match);
-        gen_goto_tb(s, 0, s->base.pc_next);
+        gen_goto_tb(s, 0, 4);
         gen_set_label(label_match);
-        gen_goto_tb(s, 1, addr);
+        gen_goto_tb(s, 1, diff);
     } else {
         /* 0xe and 0xf are both "always" conditions */
-        gen_goto_tb(s, 0, addr);
+        gen_goto_tb(s, 0, diff);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * any pending interrupts immediately.
          */
         reset_btype(s);
-        gen_goto_tb(s, 0, s->base.pc_next);
+        gen_goto_tb(s, 0, 4);
         return;
 
     case 7: /* SB */
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * MB and end the TB instead.
          */
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-        gen_goto_tb(s, 0, s->base.pc_next);
+        gen_goto_tb(s, 0, 4);
         return;
 
     default:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     switch (dc->base.is_jmp) {
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
-        gen_goto_tb(dc, 1, dc->base.pc_next);
+        gen_goto_tb(dc, 1, 4);
         break;
     default:
     case DISAS_UPDATE_EXIT:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_ptr(void)
  * cpu_loop_exec. Any live exit_requests will be processed as we
  * enter the next TB.
  */
-static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *s, int n, int diff)
 {
+    target_ulong dest = s->pc_curr + diff;
+
     if (translator_use_goto_tb(&s->base, dest)) {
         tcg_gen_goto_tb(n);
         gen_set_pc_im(s, dest);
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
          *    gen_jmp();
          * on the second call to gen_jmp().
          */
-        gen_goto_tb(s, tbno, dest);
+        gen_goto_tb(s, tbno, dest - s->pc_curr);
         break;
     case DISAS_UPDATE_NOCHAIN:
     case DISAS_UPDATE_EXIT:
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     switch (dc->base.is_jmp) {
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
-        gen_goto_tb(dc, 1, dc->base.pc_next);
+        gen_goto_tb(dc, 1, curr_insn_len(dc));
         break;
     case DISAS_UPDATE_NOCHAIN:
         gen_set_pc_im(dc, dc->base.pc_next);
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
             gen_set_pc_im(dc, dc->base.pc_next);
             gen_singlestep_exception(dc);
         } else {
-            gen_goto_tb(dc, 1, dc->base.pc_next);
+            gen_goto_tb(dc, 1, curr_insn_len(dc));
         }
     }
 }
--
2.25.1
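The point of passing a displacement instead of an absolute target in
gen_goto_tb() above is that the emitted code no longer bakes in the
translation-time PC, which is what TARGET_TB_PCREL needs so that one
translated block can be reused at several virtual addresses. A toy model with
stand-in names, not QEMU's API:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void goto_tb(uint64_t pc_curr, int64_t diff)
{
    /* Resolved from the current PC, not frozen at translation time. */
    uint64_t dest = pc_curr + diff;
    printf("branch to 0x%" PRIx64 "\n", dest);
}

int main(void)
{
    int64_t diff = 19 * 4;          /* e.g. sextract32(insn, 5, 19) * 4 */
    goto_tb(0x40000000, diff);      /* same insn bytes ...              */
    goto_tb(0x7f000000, diff);      /* ... reused at a different PC     */
    return 0;
}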
Deleted patch

The access_type argument to get_phys_addr_lpae() is an MMUAccessType;
use the enum constant MMU_DATA_LOAD rather than a literal 0 when we
call it in S1_ptw_translate().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-3-peter.maydell@linaro.org
---
 target/arm/helper.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             pcacheattrs = &cacheattrs;
         }
 
-        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
-                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
+        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
+                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
+                                 pcacheattrs);
         if (ret) {
             assert(fi->type != ARMFault_None);
             fi->s2addr = addr;
--
2.20.1
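The change above is purely about readability: the enum constant and the
literal compile identically. A stand-in enum mirroring the shape of QEMU's
MMUAccessType makes the point:

/* Stand-in mirroring QEMU's MMUAccessType values, not the real header. */
typedef enum {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;

static int walk(MMUAccessType t) { return (int)t; }

int main(void)
{
    return walk(MMU_DATA_LOAD);   /* reads as intent; walk(0) compiles the same */
}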
Deleted patch

In aarch64_max_initfn() we update both 32-bit and 64-bit ID
registers. The intended pattern is that for 64-bit ID registers we
use FIELD_DP64 and the uint64_t 't' register, while 32-bit ID
registers use FIELD_DP32 and the uint32_t 'u' register. For
ID_AA64DFR0 we accidentally used 'u', meaning that the top 32 bits of
this 64-bit ID register would end up always zero. Luckily at the
moment that's what they should be anyway, so this bug has no visible
effects.

Use the right-sized variable.

Fixes: 3bec78447a958d481991
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200423110915.10527-1-peter.maydell@linaro.org
---
 target/arm/cpu64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
         u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
         cpu->isar.id_mmfr4 = u;
 
-        u = cpu->isar.id_aa64dfr0;
-        u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
-        cpu->isar.id_aa64dfr0 = u;
+        t = cpu->isar.id_aa64dfr0;
+        t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
+        cpu->isar.id_aa64dfr0 = t;
 
         u = cpu->isar.id_dfr0;
         u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
--
2.20.1
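The bug class fixed above is easy to reproduce outside QEMU: depositing a
field of a 64-bit ID register through a 32-bit temporary silently drops bits
[63:32]. The dp64() helper below is a simplified stand-in for the FIELD_DP64
macro, not the real definition:

#include <assert.h>
#include <stdint.h>

/* Deposit 'v' into bits [sh, sh+len) of r, like a simplified FIELD_DP64. */
static uint64_t dp64(uint64_t r, int sh, int len, uint64_t v)
{
    uint64_t m = ((1ull << len) - 1) << sh;
    return (r & ~m) | ((v << sh) & m);
}

int main(void)
{
    uint64_t id_reg = 0xdead0000ull << 32;   /* nonzero upper half */
    uint32_t u = (uint32_t)id_reg;           /* wrong-width temp: truncates */
    uint64_t t = id_reg;                     /* right-width temp */

    assert(dp64(u, 8, 4, 5) >> 32 == 0);            /* upper half lost */
    assert(dp64(t, 8, 4, 5) >> 32 == 0xdead0000ull);/* upper half preserved */
    return 0;
}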
Deleted patch

From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Move misplaced comment.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-3-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-versal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
 
         obj = object_new(XLNX_VERSAL_ACPU_TYPE);
         if (!obj) {
-            /* Secondary CPUs start in PSCI powered-down state */
             error_report("Unable to create apu.cpu[%d] of type %s",
                          i, XLNX_VERSAL_ACPU_TYPE);
             exit(EXIT_FAILURE);
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
         object_property_set_int(obj, s->cfg.psci_conduit,
                                 "psci-conduit", &error_abort);
         if (i) {
+            /* Secondary CPUs start in PSCI powered-down state */
             object_property_set_bool(obj, true,
                                      "start-powered-off", &error_abort);
         }
--
2.20.1
Deleted patch

From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Embed the UARTs into the SoC type.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-5-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h | 3 ++-
 hw/arm/xlnx-versal.c | 12 ++++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/sysbus.h"
 #include "hw/arm/boot.h"
 #include "hw/intc/arm_gicv3.h"
+#include "hw/char/pl011.h"
 
 #define TYPE_XLNX_VERSAL "xlnx-versal"
 #define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
     MemoryRegion mr_ocm;
 
         struct {
-            SysBusDevice *uart[XLNX_VERSAL_NR_UARTS];
+            PL011State uart[XLNX_VERSAL_NR_UARTS];
             SysBusDevice *gem[XLNX_VERSAL_NR_GEMS];
             SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
         } iou;
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@
 #include "kvm_arm.h"
 #include "hw/misc/unimp.h"
 #include "hw/arm/xlnx-versal.h"
-#include "hw/char/pl011.h"
 
 #define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72")
 #define GEM_REVISION 0x40070106
@@ -XXX,XX +XXX,XX @@ static void versal_create_uarts(Versal *s, qemu_irq *pic)
         DeviceState *dev;
         MemoryRegion *mr;
 
-        dev = qdev_create(NULL, TYPE_PL011);
-        s->lpd.iou.uart[i] = SYS_BUS_DEVICE(dev);
+        sysbus_init_child_obj(OBJECT(s), name,
+                              &s->lpd.iou.uart[i], sizeof(s->lpd.iou.uart[i]),
+                              TYPE_PL011);
+        dev = DEVICE(&s->lpd.iou.uart[i]);
         qdev_prop_set_chr(dev, "chardev", serial_hd(i));
-        object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
         qdev_init_nofail(dev);
 
-        mr = sysbus_mmio_get_region(s->lpd.iou.uart[i], 0);
+        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
         memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
 
-        sysbus_connect_irq(s->lpd.iou.uart[i], 0, pic[irqs[i]]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
         g_free(name);
     }
 }
--
2.20.1
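For readers unfamiliar with the embedding idiom used above (and in the next
two patches): holding the child by value ties its storage and lifetime to the
SoC container, where the old code allocated each UART separately and kept only
a pointer. A rough plain-C analogy, not QOM:

typedef struct Uart { int irq; } Uart;

/* Before: the SoC holds pointers; each UART is a separate allocation. */
typedef struct SocBefore { Uart *uart[2]; } SocBefore;

/* After: the UARTs are embedded, so their storage lives inside the SoC. */
typedef struct SocAfter  { Uart  uart[2]; } SocAfter;

int main(void)
{
    SocAfter soc = { 0 };
    soc.uart[0].irq = 18;   /* no separate allocation or free to manage */
    return soc.uart[0].irq == 18 ? 0 : 1;
}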
Deleted patch

From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Embed the GEMs into the SoC type.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-6-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h | 3 ++-
 hw/arm/xlnx-versal.c | 15 ++++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/arm/boot.h"
 #include "hw/intc/arm_gicv3.h"
 #include "hw/char/pl011.h"
+#include "hw/net/cadence_gem.h"
 
 #define TYPE_XLNX_VERSAL "xlnx-versal"
 #define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
 
         struct {
             PL011State uart[XLNX_VERSAL_NR_UARTS];
-            SysBusDevice *gem[XLNX_VERSAL_NR_GEMS];
+            CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
             SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
         } iou;
     } lpd;
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_gems(Versal *s, qemu_irq *pic)
         DeviceState *dev;
         MemoryRegion *mr;
 
-        dev = qdev_create(NULL, "cadence_gem");
-        s->lpd.iou.gem[i] = SYS_BUS_DEVICE(dev);
-        object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
+        sysbus_init_child_obj(OBJECT(s), name,
+                              &s->lpd.iou.gem[i], sizeof(s->lpd.iou.gem[i]),
+                              TYPE_CADENCE_GEM);
+        dev = DEVICE(&s->lpd.iou.gem[i]);
         if (nd->used) {
             qemu_check_nic_model(nd, "cadence_gem");
             qdev_set_nic_properties(dev, nd);
         }
-        object_property_set_int(OBJECT(s->lpd.iou.gem[i]),
+        object_property_set_int(OBJECT(dev),
                                 2, "num-priority-queues",
                                 &error_abort);
-        object_property_set_link(OBJECT(s->lpd.iou.gem[i]),
+        object_property_set_link(OBJECT(dev),
                                  OBJECT(&s->mr_ps), "dma",
                                  &error_abort);
         qdev_init_nofail(dev);
 
-        mr = sysbus_mmio_get_region(s->lpd.iou.gem[i], 0);
+        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
         memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
 
-        sysbus_connect_irq(s->lpd.iou.gem[i], 0, pic[irqs[i]]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
         g_free(name);
     }
 }
--
2.20.1
Deleted patch

From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Embed the ADMAs into the SoC type.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-7-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-versal.h | 3 ++-
 hw/arm/xlnx-versal.c | 14 +++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/arm/boot.h"
 #include "hw/intc/arm_gicv3.h"
 #include "hw/char/pl011.h"
+#include "hw/dma/xlnx-zdma.h"
 #include "hw/net/cadence_gem.h"
 
 #define TYPE_XLNX_VERSAL "xlnx-versal"
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
         struct {
             PL011State uart[XLNX_VERSAL_NR_UARTS];
             CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
-            SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
+            XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
         } iou;
     } lpd;
 
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
         DeviceState *dev;
         MemoryRegion *mr;
 
-        dev = qdev_create(NULL, "xlnx.zdma");
-        s->lpd.iou.adma[i] = SYS_BUS_DEVICE(dev);
-        object_property_set_int(OBJECT(s->lpd.iou.adma[i]), 128, "bus-width",
-                                &error_abort);
-        object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
+        sysbus_init_child_obj(OBJECT(s), name,
+                              &s->lpd.iou.adma[i], sizeof(s->lpd.iou.adma[i]),
+                              TYPE_XLNX_ZDMA);
+        dev = DEVICE(&s->lpd.iou.adma[i]);
+        object_property_set_int(OBJECT(dev), 128, "bus-width", &error_abort);
         qdev_init_nofail(dev);
 
-        mr = sysbus_mmio_get_region(s->lpd.iou.adma[i], 0);
+        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
         memory_region_add_subregion(&s->mr_ps,
                                     MM_ADMA_CH0 + i * MM_ADMA_CH0_SIZE, mr);
 
-        sysbus_connect_irq(s->lpd.iou.adma[i], 0, pic[VERSAL_ADMA_IRQ_0 + i]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_ADMA_IRQ_0 + i]);
         g_free(name);
     }
 }
--
2.20.1
Convert the Neon 3-reg-same VADD and VSUB insns to decodetree.

Note that we don't need the neon_3r_sizes[op] check here because all
size values are OK for VADD and VSUB; we'll add this when we convert
the first insn that has size restrictions.

For this we need one of the GVecGen*Fn typedefs currently in
translate-a64.h; move them all to translate.h as a block so they
are visible to the 32-bit decoder.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-15-peter.maydell@linaro.org
---
 target/arm/translate-a64.h | 9 --------
 target/arm/translate.h | 9 ++++++++
 target/arm/neon-dp.decode | 17 +++++++++++++++
 target/arm/translate-neon.inc.c | 38 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 14 ++++--------
 5 files changed, 68 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
 
 bool disas_sve(DisasContext *, uint32_t);
 
-/* Note that the gvec expanders operate on offsets + sizes. */
-typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
-typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
-                         uint32_t, uint32_t);
-typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
-                        uint32_t, uint32_t, uint32_t);
-typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
-                        uint32_t, uint32_t, uint32_t);
-
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 #define dc_isar_feature(name, ctx) \
     ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
 
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
+typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
+                         uint32_t, uint32_t);
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
+typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@
 #
 # This file is processed by scripts/decodetree.py
 #
+# VFP/Neon register fields; same as vfp.decode
+%vm_dp  5:1 0:4
+%vn_dp  7:1 16:4
+%vd_dp  22:1 12:4
 
 # Encodings for Neon data processing instructions where the T32 encoding
 # is a simple transformation of the A32 encoding.
@@ -XXX,XX +XXX,XX @@
 # 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
 # This file works on the A32 encoding only; calling code for T32 has to
 # transform the insn into the A32 version first.
+
+######################################################################
+# 3-reg-same grouping:
+# 1111 001 U 0 D sz:2 Vn:4 Vd:4 opc:4 N Q M op Vm:4
+######################################################################
+
+&3same vm vn vd q size
+
+@3same           .... ... . . . size:2 .... .... .... . q:1 . . .... \
+                 &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VADD_3s          1111 001 0 0 . .. .... .... 1000 . . . 0 ....      @3same
+VSUB_3s          1111 001 1 0 . .. .... .... 1000 . . . 0 ....      @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
 
     return true;
 }
+
+static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
+{
+    int vec_size = a->q ? 16 : 8;
+    int rd_ofs = neon_reg_offset(a->vd, 0);
+    int rn_ofs = neon_reg_offset(a->vn, 0);
+    int rm_ofs = neon_reg_offset(a->vm, 0);
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vn | a->vm | a->vd) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
+    return true;
+}
+
+#define DO_3SAME(INSN, FUNC)                                            \
+    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
+    {                                                                   \
+        return do_3same(s, a, FUNC);                                    \
+    }
+
+DO_3SAME(VADD, tcg_gen_gvec_add)
+DO_3SAME(VSUB, tcg_gen_gvec_sub)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         }
         return 0;
 
-    case NEON_3R_VADD_VSUB:
-        if (u) { /* Sub */
-            tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-        } else { /* Add */
-            tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-        }
-        return 0;
-
+    case NEON_3R_VADD_VSUB:
+        /* Already handled by decodetree */
+        return 1;
+
     default:
         break;
     }
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on
absolute values by passing in pc difference.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a32.h | 2 +-
 target/arm/translate.h | 6 ++--
 target/arm/translate-a64.c | 32 +++++++++---------
 target/arm/translate-vfp.c | 2 +-
 target/arm/translate.c | 68 ++++++++++++++++++++------------------
 5 files changed, 56 insertions(+), 54 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -XXX,XX +XXX,XX @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop);
 TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs);
 void gen_set_cpsr(TCGv_i32 var, uint32_t mask);
 void gen_set_condexec(DisasContext *s);
-void gen_set_pc_im(DisasContext *s, target_ulong val);
+void gen_update_pc(DisasContext *s, target_long diff);
 void gen_lookup_tb(DisasContext *s);
 long vfp_reg_offset(bool dp, unsigned reg);
 long neon_full_reg_offset(unsigned reg);
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline int curr_insn_len(DisasContext *s)
  * For instructions which want an immediate exit to the main loop, as opposed
  * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
  * doesn't write the PC on exiting the translation loop so you need to ensure
- * something (gen_a64_set_pc_im or runtime helper) has done so before we reach
+ * something (gen_a64_update_pc or runtime helper) has done so before we reach
  * return from cpu_tb_exec.
  */
 #define DISAS_EXIT      DISAS_TARGET_9
@@ -XXX,XX +XXX,XX @@ static inline int curr_insn_len(DisasContext *s)
 
 #ifdef TARGET_AARCH64
 void a64_translate_init(void);
-void gen_a64_set_pc_im(uint64_t val);
+void gen_a64_update_pc(DisasContext *s, target_long diff);
 extern const TranslatorOps aarch64_translator_ops;
 #else
 static inline void a64_translate_init(void)
 {
 }
 
-static inline void gen_a64_set_pc_im(uint64_t val)
+static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
 {
 }
 #endif
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void reset_btype(DisasContext *s)
     }
 }
 
-void gen_a64_set_pc_im(uint64_t val)
+void gen_a64_update_pc(DisasContext *s, target_long diff)
 {
-    tcg_gen_movi_i64(cpu_pc, val);
+    tcg_gen_movi_i64(cpu_pc, s->pc_curr + diff);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
 
 static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
 {
-    gen_a64_set_pc_im(pc);
+    gen_a64_update_pc(s, pc - s->pc_curr);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
 
 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
 {
-    gen_a64_set_pc_im(s->pc_curr);
+    gen_a64_update_pc(s, 0);
     gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
 
     if (use_goto_tb(s, dest)) {
         tcg_gen_goto_tb(n);
-        gen_a64_set_pc_im(dest);
+        gen_a64_update_pc(s, diff);
         tcg_gen_exit_tb(s->base.tb, n);
         s->base.is_jmp = DISAS_NORETURN;
     } else {
-        gen_a64_set_pc_im(dest);
+        gen_a64_update_pc(s, diff);
         if (s->ss_active) {
             gen_step_complete_exception(s);
         } else {
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         uint32_t syndrome;
 
         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
         gen_helper_access_check_cp_reg(cpu_env,
                                        tcg_constant_ptr(ri),
                                        tcg_constant_i32(syndrome),
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
          * The readfn or writefn might raise an exception;
          * synchronize the CPU state in case it does.
          */
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
     }
 
     /* Handle special cases first */
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
         /* The pre HVC helper handles cases when HVC gets trapped
          * as an undefined insn by runtime configuration.
          */
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
         gen_helper_pre_hvc(cpu_env);
         gen_ss_advance(s);
         gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             break;
         }
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
         gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
         gen_ss_advance(s);
         gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          */
         switch (dc->base.is_jmp) {
         default:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_EXIT:
         case DISAS_JUMP:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
             break;
         default:
         case DISAS_UPDATE_EXIT:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_EXIT:
             tcg_gen_exit_tb(NULL, 0);
             break;
         case DISAS_UPDATE_NOCHAIN:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_JUMP:
             tcg_gen_lookup_and_goto_ptr();
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         case DISAS_SWI:
             break;
         case DISAS_WFE:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_wfe(cpu_env);
             break;
         case DISAS_YIELD:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_yield(cpu_env);
             break;
         case DISAS_WFI:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
              * This is a special case because we don't want to just halt
              * the CPU if trying to debug across a WFI.
              */
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_wfi(cpu_env, tcg_constant_i32(4));
             /*
              * The helper doesn't necessarily throw an exception, but we
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
     case ARM_VFP_FPSID:
         if (s->current_el == 1) {
             gen_set_condexec(s);
-            gen_set_pc_im(s, s->pc_curr);
+            gen_update_pc(s, 0);
             gen_helper_check_hcr_el2_trap(cpu_env,
                                           tcg_constant_i32(a->rt),
                                           tcg_constant_i32(a->reg));
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_set_condexec(DisasContext *s)
     }
 }
 
-void gen_set_pc_im(DisasContext *s, target_ulong val)
+void gen_update_pc(DisasContext *s, target_long diff)
 {
-    tcg_gen_movi_i32(cpu_R[15], val);
+    tcg_gen_movi_i32(cpu_R[15], s->pc_curr + diff);
 }
 
 /* Set PC and Thumb state from var.  var is marked as dead.  */
@@ -XXX,XX +XXX,XX @@ static inline void gen_bxns(DisasContext *s, int rm)
 
     /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
      * we need to sync state before calling it, but:
-     *  - we don't need to do gen_set_pc_im() because the bxns helper will
+     *  - we don't need to do gen_update_pc() because the bxns helper will
      *    always set the PC itself
      *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
      *    unless it's outside an IT block or the last insn in an IT block,
@@ -XXX,XX +XXX,XX @@ static inline void gen_blxns(DisasContext *s, int rm)
      * We do however need to set the PC, because the blxns helper reads it.
      * The blxns helper may throw an exception.
      */
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     gen_helper_v7m_blxns(cpu_env, var);
     tcg_temp_free_i32(var);
     s->base.is_jmp = DISAS_EXIT;
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * as an undefined insn by runtime configuration (ie before
      * the insn really executes).
      */
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_pre_hvc(cpu_env);
     /* Otherwise we will treat this as a real exception which
      * happens after execution of the insn. (The distinction matters
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * for single stepping.)
      */
     s->svc_imm = imm16;
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_HVC;
 }
 
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
     /* As with HVC, we may take an exception either before or after
      * the insn executes.
      */
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa32_smc()));
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_SMC;
 }
 
 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
 {
     gen_set_condexec(s);
-    gen_set_pc_im(s, pc);
+    gen_update_pc(s, pc - s->pc_curr);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn_el_v(DisasContext *s, uint64_t pc, int excp,
                                     uint32_t syn, TCGv_i32 tcg_el)
 {
     if (s->aarch64) {
-        gen_a64_set_pc_im(pc);
+        gen_a64_update_pc(s, pc - s->pc_curr);
     } else {
         gen_set_condexec(s);
-        gen_set_pc_im(s, pc);
+        gen_update_pc(s, pc - s->pc_curr);
286
+ gen_update_pc(s, pc - s->pc_curr);
287
}
288
gen_exception_el_v(excp, syn, tcg_el);
289
s->base.is_jmp = DISAS_NORETURN;
290
@@ -XXX,XX +XXX,XX @@ void gen_exception_insn_el(DisasContext *s, uint64_t pc, int excp,
291
void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn)
292
{
293
if (s->aarch64) {
294
- gen_a64_set_pc_im(pc);
295
+ gen_a64_update_pc(s, pc - s->pc_curr);
296
} else {
297
gen_set_condexec(s);
298
- gen_set_pc_im(s, pc);
299
+ gen_update_pc(s, pc - s->pc_curr);
300
}
301
gen_exception(excp, syn);
302
s->base.is_jmp = DISAS_NORETURN;
303
@@ -XXX,XX +XXX,XX @@ void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn)
304
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
305
{
306
gen_set_condexec(s);
307
- gen_set_pc_im(s, s->pc_curr);
308
+ gen_update_pc(s, 0);
309
gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syn));
310
s->base.is_jmp = DISAS_NORETURN;
311
}
312
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, int diff)
313
314
if (translator_use_goto_tb(&s->base, dest)) {
315
tcg_gen_goto_tb(n);
316
- gen_set_pc_im(s, dest);
317
+ gen_update_pc(s, diff);
318
tcg_gen_exit_tb(s->base.tb, n);
319
} else {
320
- gen_set_pc_im(s, dest);
321
+ gen_update_pc(s, diff);
322
gen_goto_ptr();
323
}
324
s->base.is_jmp = DISAS_NORETURN;
325
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, int diff)
326
/* Jump, specifying which TB number to use if we gen_goto_tb() */
327
static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
328
{
329
+ int diff = dest - s->pc_curr;
330
+
331
if (unlikely(s->ss_active)) {
332
/* An indirect jump so that we still trigger the debug exception. */
333
- gen_set_pc_im(s, dest);
334
+ gen_update_pc(s, diff);
335
s->base.is_jmp = DISAS_JUMP;
336
return;
337
}
338
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
339
* gen_jmp();
340
* on the second call to gen_jmp().
341
*/
342
- gen_goto_tb(s, tbno, dest - s->pc_curr);
343
+ gen_goto_tb(s, tbno, diff);
344
break;
345
case DISAS_UPDATE_NOCHAIN:
346
case DISAS_UPDATE_EXIT:
347
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
348
* Avoid using goto_tb so we really do exit back to the main loop
349
* and don't chain to another TB.
350
*/
351
- gen_set_pc_im(s, dest);
352
+ gen_update_pc(s, diff);
353
gen_goto_ptr();
354
s->base.is_jmp = DISAS_NORETURN;
355
break;
356
@@ -XXX,XX +XXX,XX @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
357
358
/* Sync state because msr_banked() can raise exceptions */
359
gen_set_condexec(s);
360
- gen_set_pc_im(s, s->pc_curr);
361
+ gen_update_pc(s, 0);
362
tcg_reg = load_reg(s, rn);
363
gen_helper_msr_banked(cpu_env, tcg_reg,
364
tcg_constant_i32(tgtmode),
365
@@ -XXX,XX +XXX,XX @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
366
367
/* Sync state because mrs_banked() can raise exceptions */
368
gen_set_condexec(s);
369
- gen_set_pc_im(s, s->pc_curr);
370
+ gen_update_pc(s, 0);
371
tcg_reg = tcg_temp_new_i32();
372
gen_helper_mrs_banked(tcg_reg, cpu_env,
373
tcg_constant_i32(tgtmode),
374
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
141
}
375
}
142
return 0;
376
143
377
gen_set_condexec(s);
144
- case NEON_3R_VADD_VSUB:
378
- gen_set_pc_im(s, s->pc_curr);
145
- if (u) {
379
+ gen_update_pc(s, 0);
146
- tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
380
gen_helper_access_check_cp_reg(cpu_env,
147
- vec_size, vec_size);
381
tcg_constant_ptr(ri),
148
- } else {
382
tcg_constant_i32(syndrome),
149
- tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
383
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
150
- vec_size, vec_size);
384
* synchronize the CPU state in case it does.
151
- }
385
*/
152
- return 0;
386
gen_set_condexec(s);
153
-
387
- gen_set_pc_im(s, s->pc_curr);
154
case NEON_3R_VQADD:
388
+ gen_update_pc(s, 0);
155
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
156
rn_ofs, rm_ofs, vec_size, vec_size,
157
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
158
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
159
u ? &ushl_op[size] : &sshl_op[size]);
160
return 0;
161
+
162
+ case NEON_3R_VADD_VSUB:
163
+ /* Already handled by decodetree */
164
+ return 1;
165
}
389
}
166
390
167
if (size == 3) {
391
/* Handle special cases first */
392
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
393
unallocated_encoding(s);
394
return;
395
}
396
- gen_set_pc_im(s, s->base.pc_next);
397
+ gen_update_pc(s, curr_insn_len(s));
398
s->base.is_jmp = DISAS_WFI;
399
return;
400
default:
401
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
402
addr = tcg_temp_new_i32();
403
/* get_r13_banked() will raise an exception if called from System mode */
404
gen_set_condexec(s);
405
- gen_set_pc_im(s, s->pc_curr);
406
+ gen_update_pc(s, 0);
407
gen_helper_get_r13_banked(addr, cpu_env, tcg_constant_i32(mode));
408
switch (amode) {
409
case 0: /* DA */
410
@@ -XXX,XX +XXX,XX @@ static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
411
* scheduling of other vCPUs.
412
*/
413
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
414
- gen_set_pc_im(s, s->base.pc_next);
415
+ gen_update_pc(s, curr_insn_len(s));
416
s->base.is_jmp = DISAS_YIELD;
417
}
418
return true;
419
@@ -XXX,XX +XXX,XX @@ static bool trans_WFE(DisasContext *s, arg_WFE *a)
420
* implemented so we can't sleep like WFI does.
421
*/
422
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
423
- gen_set_pc_im(s, s->base.pc_next);
424
+ gen_update_pc(s, curr_insn_len(s));
425
s->base.is_jmp = DISAS_WFE;
426
}
427
return true;
428
@@ -XXX,XX +XXX,XX @@ static bool trans_WFE(DisasContext *s, arg_WFE *a)
429
static bool trans_WFI(DisasContext *s, arg_WFI *a)
430
{
431
/* For WFI, halt the vCPU until an IRQ. */
432
- gen_set_pc_im(s, s->base.pc_next);
433
+ gen_update_pc(s, curr_insn_len(s));
434
s->base.is_jmp = DISAS_WFI;
435
return true;
436
}
437
@@ -XXX,XX +XXX,XX @@ static bool trans_SVC(DisasContext *s, arg_SVC *a)
438
(a->imm == semihost_imm)) {
439
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
440
} else {
441
- gen_set_pc_im(s, s->base.pc_next);
442
+ gen_update_pc(s, curr_insn_len(s));
443
s->svc_imm = a->imm;
444
s->base.is_jmp = DISAS_SWI;
445
}
446
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
447
case DISAS_TOO_MANY:
448
case DISAS_UPDATE_EXIT:
449
case DISAS_UPDATE_NOCHAIN:
450
- gen_set_pc_im(dc, dc->base.pc_next);
451
+ gen_update_pc(dc, curr_insn_len(dc));
452
/* fall through */
453
default:
454
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
455
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
456
gen_goto_tb(dc, 1, curr_insn_len(dc));
457
break;
458
case DISAS_UPDATE_NOCHAIN:
459
- gen_set_pc_im(dc, dc->base.pc_next);
460
+ gen_update_pc(dc, curr_insn_len(dc));
461
/* fall through */
462
case DISAS_JUMP:
463
gen_goto_ptr();
464
break;
465
case DISAS_UPDATE_EXIT:
466
- gen_set_pc_im(dc, dc->base.pc_next);
467
+ gen_update_pc(dc, curr_insn_len(dc));
468
/* fall through */
469
default:
470
/* indicate that the hash table must be used to find the next TB */
471
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
472
gen_set_label(dc->condlabel);
473
gen_set_condexec(dc);
474
if (unlikely(dc->ss_active)) {
475
- gen_set_pc_im(dc, dc->base.pc_next);
476
+ gen_update_pc(dc, curr_insn_len(dc));
477
gen_singlestep_exception(dc);
478
} else {
479
gen_goto_tb(dc, 1, curr_insn_len(dc));
168
--
480
--
169
2.20.1
481
2.25.1
170
482
171
483
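To make the calling-convention change above concrete, here is a toy,
self-contained C sketch of the difference between the old absolute-PC
interface and the new relative one. It is illustrative only: the struct
and function names are invented for the example and are not QEMU's APIs.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the translator's per-insn state (hypothetical). */
    typedef struct {
        uint32_t pc_curr;  /* address of the insn being translated */
        int insn_len;      /* 2 for a narrow Thumb insn, else 4 */
    } Ctx;

    static int curr_insn_len(const Ctx *s) { return s->insn_len; }

    /* Old style: the caller computes an absolute target PC. */
    static uint32_t set_pc_im(uint32_t val) { return val; }

    /* New style: the caller passes a displacement from pc_curr. */
    static uint32_t update_pc(const Ctx *s, int32_t diff)
    {
        return s->pc_curr + diff;
    }

    int main(void)
    {
        Ctx s = { .pc_curr = 0x8000, .insn_len = 4 };
        /* Both styles compute the same PC; only the bookkeeping moves. */
        printf("re-execute insn: old=%#x new=%#x\n",
               set_pc_im(s.pc_curr), update_pc(&s, 0));
        printf("continue after:  old=%#x new=%#x\n",
               set_pc_im(s.pc_curr + curr_insn_len(&s)),
               update_pc(&s, curr_insn_len(&s)));
        return 0;
    }

The two calls mirror the patterns in the patch: a displacement of 0
re-executes the current instruction (the pre-execution trap points such
as the HVC/SMC checks), while curr_insn_len() continues after it (SVC,
WFI, WFE, and the tb_stop paths).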
We're going to want at least some of the NeonGen* typedefs
for the refactored 32-bit Neon decoder, so move them all
to translate.h since it makes more sense to keep them in
one group.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-23-peter.maydell@linaro.org
---
 target/arm/translate.h     | 17 +++++++++++++++++
 target/arm/translate-a64.c | 17 -----------------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
 typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                         uint32_t, uint32_t, uint32_t);
 
+/* Function prototype for gen_ functions for calling Neon helpers */
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
+typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
+typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
+typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
+typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
+typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ typedef struct AArch64DecodeTable {
     AArch64DecodeFn *disas_fn;
 } AArch64DecodeTable;
 
-/* Function prototype for gen_ functions for calling Neon helpers */
-typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
-typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
-typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
-typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
-typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
-typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
-typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
-typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
-typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
-typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
-typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
-typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
-typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
-typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
-
 /* initialize TCG globals. */
 void a64_translate_init(void)
 {
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h        |  5 +++--
 target/arm/translate-a64.c    | 28 ++++++++++-------------
 target/arm/translate-m-nocp.c |  6 ++---
 target/arm/translate-mve.c    |  2 +-
 target/arm/translate-vfp.c    |  6 ++---
 target/arm/translate.c        | 42 +++++++++++++++++------------------
 6 files changed, 43 insertions(+), 46 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
 void arm_gen_test_cc(int cc, TCGLabel *label);
 MemOp pow2_align(unsigned i);
 void unallocated_encoding(DisasContext *s);
-void gen_exception_insn_el(DisasContext *s, uint64_t pc, int excp,
+void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                            uint32_t syn, uint32_t target_el);
-void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn);
+void gen_exception_insn(DisasContext *s, target_long pc_diff,
+                        int excp, uint32_t syn);
 
 /* Return state of Alternate Half-precision flag, caller frees result */
 static inline TCGv_i32 get_ahp_flag(void)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check_only(DisasContext *s)
     assert(!s->fp_access_checked);
     s->fp_access_checked = true;
 
-    gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+    gen_exception_insn_el(s, 0, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false, 0),
                           s->fp_excp_el);
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
         return false;
     }
     if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_Streaming, false));
         return false;
     }
@@ -XXX,XX +XXX,XX @@ bool sve_access_check(DisasContext *s)
             goto fail_exit;
         }
     } else if (s->sve_excp_el) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el(s, 0, EXCP_UDEF,
                               syn_sve_access_trap(), s->sve_excp_el);
         goto fail_exit;
     }
@@ -XXX,XX +XXX,XX @@ bool sve_access_check(DisasContext *s)
 static bool sme_access_check(DisasContext *s)
 {
     if (s->sme_excp_el) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el(s, 0, EXCP_UDEF,
                               syn_smetrap(SME_ET_AccessTrap, false),
                               s->sme_excp_el);
         return false;
@@ -XXX,XX +XXX,XX @@ bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
         return false;
     }
     if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_NotStreaming, false));
         return false;
     }
     if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_InactiveZA, false));
         return false;
     }
@@ -XXX,XX +XXX,XX @@ static void gen_sysreg_undef(DisasContext *s, bool isread,
     } else {
         syndrome = syn_uncategorized();
     }
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
+    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
 }
 
 /* MRS - move from system register
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
         switch (op2_ll) {
         case 1:                                                     /* SVC */
             gen_ss_advance(s);
-            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
-                               syn_aa64_svc(imm16));
+            gen_exception_insn(s, 4, EXCP_SWI, syn_aa64_svc(imm16));
             break;
         case 2:                                                     /* HVC */
             if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
             gen_a64_update_pc(s, 0);
             gen_helper_pre_hvc(cpu_env);
             gen_ss_advance(s);
-            gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
-                                  syn_aa64_hvc(imm16), 2);
+            gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
             break;
         case 3:                                                     /* SMC */
             if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
             gen_a64_update_pc(s, 0);
             gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
             gen_ss_advance(s);
-            gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
-                                  syn_aa64_smc(imm16), 3);
+            gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
             break;
         default:
             unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * Illegal execution state. This has priority over BTI
          * exceptions, but comes after instruction abort exceptions.
          */
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
+        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         if (s->btype != 0
             && s->guarded_page
             && !btype_destination_ok(insn, s->bt, s->btype)) {
-            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                               syn_btitrap(s->btype));
+            gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
             return;
         }
     } else {
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
     tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
 
     if (s->fp_excp_el != 0) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+        gen_exception_insn_el(s, 0, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_NOCP(DisasContext *s, arg_nocp *a)
     }
 
     if (a->cp != 10) {
-        gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized());
+        gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized());
         return true;
     }
 
     if (s->fp_excp_el != 0) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+        gen_exception_insn_el(s, 0, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
         return true;
     }
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ bool mve_eci_check(DisasContext *s)
         return true;
     default:
         /* Reserved value: INVSTATE UsageFault */
-        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
+        gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
         return false;
     }
 }
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
         int coproc = arm_dc_feature(s, ARM_FEATURE_V8) ? 0 : 0xa;
         uint32_t syn = syn_fp_access_trap(1, 0xe, false, coproc);
 
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF, syn, s->fp_excp_el);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn, s->fp_excp_el);
         return false;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
      * appear to be any insns which touch VFP which are allowed.
      */
     if (s->sme_trap_nonstreaming) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_Streaming,
                                        curr_insn_len(s) == 2));
         return false;
@@ -XXX,XX +XXX,XX @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
          * the encoding space handled by the patterns in m-nocp.decode,
          * and for them we may need to raise NOCP here.
          */
-        gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+        gen_exception_insn_el(s, 0, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
         return false;
     }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception(int excp, uint32_t syndrome)
                                         tcg_constant_i32(syndrome));
 }
 
-static void gen_exception_insn_el_v(DisasContext *s, uint64_t pc, int excp,
-                                    uint32_t syn, TCGv_i32 tcg_el)
+static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
+                                    int excp, uint32_t syn, TCGv_i32 tcg_el)
 {
     if (s->aarch64) {
-        gen_a64_update_pc(s, pc - s->pc_curr);
+        gen_a64_update_pc(s, pc_diff);
     } else {
         gen_set_condexec(s);
-        gen_update_pc(s, pc - s->pc_curr);
+        gen_update_pc(s, pc_diff);
     }
     gen_exception_el_v(excp, syn, tcg_el);
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-void gen_exception_insn_el(DisasContext *s, uint64_t pc, int excp,
+void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                            uint32_t syn, uint32_t target_el)
 {
-    gen_exception_insn_el_v(s, pc, excp, syn, tcg_constant_i32(target_el));
+    gen_exception_insn_el_v(s, pc_diff, excp, syn,
+                            tcg_constant_i32(target_el));
 }
 
-void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn)
+void gen_exception_insn(DisasContext *s, target_long pc_diff,
+                        int excp, uint32_t syn)
 {
     if (s->aarch64) {
-        gen_a64_update_pc(s, pc - s->pc_curr);
+        gen_a64_update_pc(s, pc_diff);
     } else {
         gen_set_condexec(s);
-        gen_update_pc(s, pc - s->pc_curr);
+        gen_update_pc(s, pc_diff);
     }
     gen_exception(excp, syn);
     s->base.is_jmp = DISAS_NORETURN;
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
 void unallocated_encoding(DisasContext *s)
 {
     /* Unallocated and reserved encodings are uncategorized */
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized());
+    gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
 }
 
 /* Force a TB lookup after an instruction that changes the CPU state. */
@@ -XXX,XX +XXX,XX @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
             tcg_el = tcg_constant_i32(3);
         }
 
-        gen_exception_insn_el_v(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el_v(s, 0, EXCP_UDEF,
                                 syn_uncategorized(), tcg_el);
         tcg_temp_free_i32(tcg_el);
         return false;
@@ -XXX,XX +XXX,XX @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
 
 undef:
     /* If we get here then some access check did not pass */
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized());
+    gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
      * For the UNPREDICTABLE cases we choose to UNDEF.
      */
     if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
-                              syn_uncategorized(), 3);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
          * Do the check-and-raise-exception by hand.
          */
         if (s->fp_excp_el) {
-            gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+            gen_exception_insn_el(s, 0, EXCP_NOCP,
                                   syn_uncategorized(), s->fp_excp_el);
             return true;
         }
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
         tmp = load_cpu_field(v7m.ltpsize);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc);
         tcg_temp_free_i32(tmp);
-        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
+        gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
         gen_set_label(skipexc);
     }
 
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
          * UsageFault exception.
          */
         if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
+            gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
             return;
         }
 
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
          * Illegal execution state. This has priority over BTI
          * exceptions, but comes after instruction abort exceptions.
          */
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
+        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * Illegal execution state. This has priority over BTI
          * exceptions, but comes after instruction abort exceptions.
          */
-        gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF, syn_illegalstate());
+        gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          */
         tcg_remove_ops_after(dc->insn_eci_rewind);
         dc->condjmp = 0;
-        gen_exception_insn(dc, dc->pc_curr, EXCP_INVSTATE,
-                           syn_uncategorized());
+        gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
     }
 
     arm_post_translate_insn(dc);
--
2.25.1
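A short aside on the NeonGen* typedefs moved above: they are function
pointer types, which let translator code pick a helper out of a table
and call it through one common signature. The following toy C program
(hypothetical names, not QEMU code) shows the pattern in miniature.

    #include <stdint.h>
    #include <stdio.h>

    /* cf. NeonGenTwoOpFn: one result computed from two operands */
    typedef uint32_t (*TwoOpFn)(uint32_t, uint32_t);

    static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
    static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }

    /* A decoder would index this by a field of the instruction. */
    static const TwoOpFn table[2] = { op_add, op_sub };

    int main(void)
    {
        for (unsigned sel = 0; sel < 2; sel++) {
            printf("sel=%u -> %u\n", sel, table[sel](7, 3));
        }
        return 0;
    }

Keeping the typedefs in translate.h means both the AArch64 decoder and
the new AArch32 Neon decoder can share tables built on these types.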
Add the infrastructure for building and invoking a decodetree decoder
for the AArch32 Neon encodings. At the moment the new decoder covers
nothing, so we always fall back to the existing hand-written decode.

We follow the same pattern we did for the VFP decodetree conversion
(commit 78e138bc1f672c145ef6ace74617d and following): code that deals
with Neon will be moving gradually out to translate-neon.inc.c,
which we #include into translate.c.

In order to share the decode files between A32 and T32, we
split Neon into 3 parts:
 * data-processing
 * load-store
 * 'shared' encodings

The first two groups of instructions have similar but not identical
A32 and T32 encodings, so we need to manually transform the T32
encoding into the A32 one before calling the decoder; the third group
covers the Neon instructions which are identical in A32 and T32.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-4-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode       | 29 ++++++++++++++++++++++++++
 target/arm/neon-ls.decode       | 29 ++++++++++++++++++++++++++
 target/arm/neon-shared.decode   | 27 +++++++++++++++++++++++++
 target/arm/translate-neon.inc.c | 32 +++++++++++++++++++++++++++++
 target/arm/translate.c          | 36 +++++++++++++++++++++++++++++++--
 target/arm/Makefile.objs        | 18 +++++++++++++++++
 6 files changed, 169 insertions(+), 2 deletions(-)
 create mode 100644 target/arm/neon-dp.decode
 create mode 100644 target/arm/neon-ls.decode
 create mode 100644 target/arm/neon-shared.decode
 create mode 100644 target/arm/translate-neon.inc.c

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@
+# AArch32 Neon data-processing instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# Encodings for Neon data processing instructions where the T32 encoding
+# is a simple transformation of the A32 encoding.
+# More specifically, this file covers instructions where the A32 encoding is
+#   0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+# and the T32 encoding is
+#   0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+# This file works on the A32 encoding only; calling code for T32 has to
+# transform the insn into the A32 version first.
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@
+# AArch32 Neon load/store instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# Encodings for Neon load/store instructions where the T32 encoding
+# is a simple transformation of the A32 encoding.
+# More specifically, this file covers instructions where the A32 encoding is
+#   0b1111_0100_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
+# and the T32 encoding is
+#   0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
+# This file works on the A32 encoding only; calling code for T32 has to
+# transform the insn into the A32 version first.
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@
+# AArch32 Neon instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# Encodings for Neon instructions whose encoding is the same for
+# both A32 and T32.
+
+# More specifically, this covers:
+# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
+# 3same ext:       0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM translation: AArch32 Neon instructions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Copyright (c) 2020 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This file is intended to be included from translate.c; it uses
+ * some macros and definitions provided by that file.
+ * It might be possible to convert it to a standalone .c file eventually.
+ */
+
+/* Include the generated Neon decoder */
+#include "decode-neon-dp.inc.c"
+#include "decode-neon-ls.inc.c"
+#include "decode-neon-shared.inc.c"
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
 
 #define ARM_CP_RW_BIT   (1 << 20)
 
-/* Include the VFP decoder */
+/* Include the VFP and Neon decoders */
 #include "translate-vfp.inc.c"
+#include "translate-neon.inc.c"
 
 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
 {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     /* Unconditional instructions. */
     /* TODO: Perhaps merge these into one decodetree output file. */
     if (disas_a32_uncond(s, insn) ||
-        disas_vfp_uncond(s, insn)) {
+        disas_vfp_uncond(s, insn) ||
+        disas_neon_dp(s, insn) ||
+        disas_neon_ls(s, insn) ||
+        disas_neon_shared(s, insn)) {
         return;
     }
     /* fall back to legacy decoder */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             ARCH(6T2);
     }
 
+    if ((insn & 0xef000000) == 0xef000000) {
+        /*
+         * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+         * transform into
+         * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+         */
+        uint32_t a32_insn = (insn & 0xe2ffffff) |
+            ((insn & (1 << 28)) >> 4) | (1 << 28);
+
+        if (disas_neon_dp(s, a32_insn)) {
+            return;
+        }
+    }
+
+    if ((insn & 0xff100000) == 0xf9000000) {
+        /*
+         * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
+         * transform into
+         * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
+         */
+        uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
+
+        if (disas_neon_ls(s, a32_insn)) {
+            return;
+        }
+    }
+
     /*
      * TODO: Perhaps merge these into one decodetree output file.
      * Note disas_vfp is written for a32 with cond field in the
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
      */
     if (disas_t32(s, insn) ||
         disas_vfp_uncond(s, insn) ||
+        disas_neon_shared(s, insn) ||
         ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
         return;
     }
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ target/arm/decode-sve.inc.c: $(SRC_PATH)/target/arm/sve.decode $(DECODETREE)
 	  $(PYTHON) $(DECODETREE) --decode disas_sve -o $@ $<,\
 	  "GEN", $(TARGET_DIR)$@)
 
+target/arm/decode-neon-shared.inc.c: $(SRC_PATH)/target/arm/neon-shared.decode $(DECODETREE)
+	$(call quiet-command,\
+	  $(PYTHON) $(DECODETREE) --static-decode disas_neon_shared -o $@ $<,\
+	  "GEN", $(TARGET_DIR)$@)
+
+target/arm/decode-neon-dp.inc.c: $(SRC_PATH)/target/arm/neon-dp.decode $(DECODETREE)
+	$(call quiet-command,\
+	  $(PYTHON) $(DECODETREE) --static-decode disas_neon_dp -o $@ $<,\
+	  "GEN", $(TARGET_DIR)$@)
+
+target/arm/decode-neon-ls.inc.c: $(SRC_PATH)/target/arm/neon-ls.decode $(DECODETREE)
+	$(call quiet-command,\
+	  $(PYTHON) $(DECODETREE) --static-decode disas_neon_ls -o $@ $<,\
+	  "GEN", $(TARGET_DIR)$@)
+
 target/arm/decode-vfp.inc.c: $(SRC_PATH)/target/arm/vfp.decode $(DECODETREE)
 	$(call quiet-command,\
 	  $(PYTHON) $(DECODETREE) --static-decode disas_vfp -o $@ $<,\
@@ -XXX,XX +XXX,XX @@ target/arm/decode-t16.inc.c: $(SRC_PATH)/target/arm/t16.decode $(DECODETREE)
 	  "GEN", $(TARGET_DIR)$@)
 
 target/arm/translate-sve.o: target/arm/decode-sve.inc.c
+target/arm/translate.o: target/arm/decode-neon-shared.inc.c
+target/arm/translate.o: target/arm/decode-neon-dp.inc.c
+target/arm/translate.o: target/arm/decode-neon-ls.inc.c
 target/arm/translate.o: target/arm/decode-vfp.inc.c
 target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
 target/arm/translate.o: target/arm/decode-a32.inc.c
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.
Since we always pass dc->pc_curr, fold the arithmetic to zero displacement.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c |  6 +++---
 target/arm/translate.c     | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
 }
 
-static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
+static void gen_exception_internal_insn(DisasContext *s, int excp)
 {
-    gen_a64_update_pc(s, pc - s->pc_curr);
+    gen_a64_update_pc(s, 0);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
          * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
          */
         if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
-            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+            gen_exception_internal_insn(s, EXCP_SEMIHOST);
         } else {
             unallocated_encoding(s);
         }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
     s->base.is_jmp = DISAS_SMC;
 }
 
-static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
+static void gen_exception_internal_insn(DisasContext *s, int excp)
 {
     gen_set_condexec(s);
-    gen_update_pc(s, pc - s->pc_curr);
+    gen_update_pc(s, 0);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
      */
     if (semihosting_enabled(s->current_el != 0) &&
         (imm == (s->thumb ? 0x3c : 0xf000))) {
-        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, EXCP_SEMIHOST);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
     if (arm_dc_feature(s, ARM_FEATURE_M) &&
         semihosting_enabled(s->current_el == 0) &&
         (a->imm == 0xab)) {
-        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, EXCP_SEMIHOST);
     } else {
         gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_SVC(DisasContext *s, arg_SVC *a)
     if (!arm_dc_feature(s, ARM_FEATURE_M) &&
         semihosting_enabled(s->current_el == 0) &&
         (a->imm == semihost_imm)) {
-        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, EXCP_SEMIHOST);
     } else {
         gen_update_pc(s, curr_insn_len(s));
         s->svc_imm = a->imm;
--
2.25.1
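The T32-to-A32 transformation described above is easy to check in
isolation. This standalone C harness (the test values are arbitrary;
only the masking expression is taken from the patch) verifies that the
p bit moves from bit 28 down to bit 24 while the rest of the encoding
is preserved.

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as in disas_thumb2_insn() above. */
    static uint32_t t32_to_a32_neon_dp(uint32_t insn)
    {
        return (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
    }

    int main(void)
    {
        /* T32 0b1110_1111_... (p=0) must become A32 0b1111_0010_... */
        printf("%08x -> %08x (expect f2001234)\n",
               0xef001234u, t32_to_a32_neon_dp(0xef001234u));
        /* T32 0b1111_1111_... (p=1) must become A32 0b1111_0011_... */
        printf("%08x -> %08x (expect f3001234)\n",
               0xff001234u, t32_to_a32_neon_dp(0xff001234u));
        return 0;
    }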
Convert the VFM[AS]L (scalar) insns in the 2reg-scalar-ext group
to decodetree. These are the last ones in the group so we can remove
all the legacy decode for the group.

Note that in disas_thumb2_insn() the parts of this encoding space
where the decodetree decoder returns false will correctly be directed
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
into disas_coproc_insn() by mistake.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-11-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode   |   7 +++
 target/arm/translate-neon.inc.c |  32 ++++++++++
 target/arm/translate.c          | 107 +-------------------------------
 3 files changed, 40 insertions(+), 106 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar   1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
 
 VDOT_scalar    1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
                vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+%vfml_scalar_q0_rm 0:3 5:1
+%vfml_scalar_q1_index 5:1 3:1
+VFML_scalar    1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
+               rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
+VFML_scalar    1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
+               index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
     tcg_temp_free_ptr(fpst);
     return true;
 }
+
+static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
+{
+    int opr_sz;
+
+    if (!dc_isar_feature(aa32_fhm, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
+        return false;
+    }
+
+    if (a->vd & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    opr_sz = (1 + a->q) * 8;
+    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(a->q, a->vn),
+                       vfp_reg_offset(a->q, a->rm),
+                       cpu_env, opr_sz, opr_sz,
+                       (a->index << 2) | a->s, /* is_2 == 0 */
+                       gen_helper_gvec_fmlal_idx_a32);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
 }
 
 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
-#define VFP_SREG(insn, bigbit, smallbit) \
-  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
     if (dc_isar_feature(aa32_simd_r32, s)) { \
         reg = (((insn) >> (bigbit)) & 0x0f) \
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
         reg = ((insn) >> (bigbit)) & 0x0f; \
     }} while (0)
 
-#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
-#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
-#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
 
 static void gen_neon_dup_low16(TCGv_i32 var)
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     return 0;
 }
 
-/* Advanced SIMD two registers and a scalar extension.
- *  31             24   23  22   20   16   12  11   10   9    8        3     0
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
- * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
- *
- */
-
-static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
-{
-    gen_helper_gvec_3 *fn_gvec = NULL;
-    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
-    int rd, rn, rm, opr_sz, data;
-    int off_rn, off_rm;
-    bool is_long = false, q = extract32(insn, 6, 1);
-    bool ptr_is_env = false;
-
-    if ((insn & 0xffa00f10) == 0xfe000810) {
-        /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
-        int is_s = extract32(insn, 20, 1);
-        int vm20 = extract32(insn, 0, 3);
-        int vm3 = extract32(insn, 3, 1);
-        int m = extract32(insn, 5, 1);
-        int index;
-
-        if (!dc_isar_feature(aa32_fhm, s)) {
-            return 1;
-        }
-        if (q) {
-            rm = vm20;
-            index = m * 2 + vm3;
-        } else {
-            rm = vm20 * 2 + m;
-            index = vm3;
-        }
-        is_long = true;
-        data = (index << 2) | is_s; /* is_2 == 0 */
-        fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
-        ptr_is_env = true;
-    } else {
-        return 1;
-    }
-
-    VFP_DREG_D(rd, insn);
-    if (rd & q) {
-        return 1;
-    }
-    if (q || !is_long) {
-        VFP_DREG_N(rn, insn);
-        if (rn & q & !is_long) {
-            return 1;
-        }
-        off_rn = vfp_reg_offset(1, rn);
-        off_rm = vfp_reg_offset(1, rm);
-    } else {
-        rn = VFP_SREG_N(insn);
-        off_rn = vfp_reg_offset(0, rn);
-        off_rm = vfp_reg_offset(0, rm);
-    }
-    if (s->fp_excp_el) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
-        return 0;
-    }
-    if (!s->vfp_enabled) {
-        return 1;
-    }
-
-    opr_sz = (1 + q) * 8;
-    if (fn_gvec_ptr) {
-        TCGv_ptr ptr;
-        if (ptr_is_env) {
-            ptr = cpu_env;
-        } else {
-            ptr = get_fpstatus_ptr(1);
-        }
-        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
-                           opr_sz, opr_sz, data, fn_gvec_ptr);
-        if (!ptr_is_env) {
-            tcg_temp_free_ptr(ptr);
-        }
-    } else {
-        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
-                           opr_sz, opr_sz, data, fn_gvec);
-    }
-    return 0;
-}
-
 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
 {
     int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             }
         }
-    } else if ((insn & 0x0f000a00) == 0x0e000800
-               && arm_dc_feature(s, ARM_FEATURE_V8)) {
-        if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
-            goto illegal_op;
-        }
-        return;
     }
     goto illegal_op;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
         }
         break;
     }
-    if ((insn & 0xff000a00) == 0xfe000800
-        && arm_dc_feature(s, ARM_FEATURE_V8)) {
-        /* The Thumb2 and ARM encodings are identical.  */
-        if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
-            goto illegal_op;
-        }
-    } else if (((insn >> 24) & 3) == 3) {
+    if (((insn >> 24) & 3) == 3) {
         /* Translate into the equivalent ARM encoding. */
         insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
         if (disas_neon_data_insn(s, insn)) {
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static uint32_t read_pc(DisasContext *s)
     return s->pc_curr + (s->thumb ? 4 : 8);
 }
 
+/* The pc_curr difference for an architectural jump. */
+static target_long jmp_diff(DisasContext *s, target_long diff)
+{
+    return diff + (s->thumb ? 4 : 8);
+}
+
 /* Set a variable to the value of a CPU register. */
 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
 {
@@ -XXX,XX +XXX,XX @@ static void gen_goto_ptr(void)
  * cpu_loop_exec. Any live exit_requests will be processed as we
  * enter the next TB.
  */
-static void gen_goto_tb(DisasContext *s, int n, int diff)
+static void gen_goto_tb(DisasContext *s, int n, target_long diff)
 {
     target_ulong dest = s->pc_curr + diff;
 
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, int diff)
 }
 
 /* Jump, specifying which TB number to use if we gen_goto_tb() */
-static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
+static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
 {
-    int diff = dest - s->pc_curr;
-
     if (unlikely(s->ss_active)) {
         /* An indirect jump so that we still trigger the debug exception. */
         gen_update_pc(s, diff);
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
     }
 }
 
-static inline void gen_jmp(DisasContext *s, uint32_t dest)
+static inline void gen_jmp(DisasContext *s, target_long diff)
 {
-    gen_jmp_tb(s, dest, 0);
+    gen_jmp_tb(s, diff, 0);
 }
 
 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
@@ -XXX,XX +XXX,XX @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
 
 static bool trans_B(DisasContext *s, arg_i *a)
 {
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
         return true;
     }
     arm_skip_unless(s, a->cond);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
 
 static bool trans_BL(DisasContext *s, arg_i *a)
 {
     tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
     }
     tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
     store_cpu_field_constant(!s->thumb, thumb);
-    gen_jmp(s, (read_pc(s) & ~3) + a->imm);
+    /* This jump is computed from an aligned PC: subtract off the low bits. */
+    gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
          * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
          */
     }
-    gen_jmp_tb(s, s->base.pc_next, 1);
+    gen_jmp_tb(s, curr_insn_len(s), 1);
 
     gen_set_label(nextlabel);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
 
     if (a->f) {
         /* Loop-forever: just jump back to the loop start */
-        gen_jmp(s, read_pc(s) - a->imm);
+        gen_jmp(s, jmp_diff(s, -a->imm));
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
         tcg_temp_free_i32(decr);
     }
     /* Jump back to the loop start */
-    gen_jmp(s, read_pc(s) - a->imm);
+    gen_jmp(s, jmp_diff(s, -a->imm));
 
     gen_set_label(loopend);
     if (a->tp) {
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
         store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
     }
     /* End TB, continuing to following insn */
-    gen_jmp_tb(s, s->base.pc_next, 1);
+    gen_jmp_tb(s, curr_insn_len(s), 1);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
     tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
                         tmp, 0, s->condlabel);
     tcg_temp_free_i32(tmp);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
 
--
2.25.1
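The two VFML_scalar decode lines above pack Rm and the scalar index
differently depending on Q, mirroring the legacy decode being deleted
(rm = vm20 * 2 + m for Q=0, index = m * 2 + vm3 for Q=1). A toy decoder
(hypothetical harness; only the field layout comes from the patch)
makes the two layouts explicit:

    #include <stdint.h>
    #include <stdio.h>

    /* Same semantics as QEMU's extract32(value, start, length). */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    static void decode_vfml_scalar(uint32_t insn)
    {
        int q    = extract32(insn, 6, 1);
        int vm20 = extract32(insn, 0, 3);
        int vm3  = extract32(insn, 3, 1);
        int m    = extract32(insn, 5, 1);
        int rm, index;

        if (q) {            /* %vfml_scalar_q1_index 5:1 3:1 */
            rm = vm20;
            index = m * 2 + vm3;
        } else {            /* %vfml_scalar_q0_rm 0:3 5:1 */
            rm = vm20 * 2 + m;
            index = vm3;
        }
        printf("q=%d rm=%d index=%d\n", q, rm, index);
    }

    int main(void)
    {
        decode_vfml_scalar(0x68);  /* q=1: rm=0, index=3 */
        decode_vfml_scalar(0x28);  /* q=0: rm=1, index=1 */
        return 0;
    }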
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Add support for SD.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-11-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-versal-virt.c | 46 +++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/arm/sysbus-fdt.h"
 #include "hw/arm/fdt.h"
 #include "cpu.h"
+#include "hw/qdev-properties.h"
 #include "hw/arm/xlnx-versal.h"

 #define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt")
@@ -XXX,XX +XXX,XX @@ static void fdt_add_zdma_nodes(VersalVirt *s)
     }
 }

+static void fdt_add_sd_nodes(VersalVirt *s)
+{
+    const char clocknames[] = "clk_xin\0clk_ahb";
+    const char compat[] = "arasan,sdhci-8.9a";
+    int i;
+
+    for (i = ARRAY_SIZE(s->soc.pmc.iou.sd) - 1; i >= 0; i--) {
+        uint64_t addr = MM_PMC_SD0 + MM_PMC_SD0_SIZE * i;
+        char *name = g_strdup_printf("/sdhci@%" PRIx64, addr);
+
+        qemu_fdt_add_subnode(s->fdt, name);
+
+        qemu_fdt_setprop_cells(s->fdt, name, "clocks",
+                               s->phandle.clk_25Mhz, s->phandle.clk_25Mhz);
+        qemu_fdt_setprop(s->fdt, name, "clock-names",
+                         clocknames, sizeof(clocknames));
+        qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
+                               GIC_FDT_IRQ_TYPE_SPI, VERSAL_SD0_IRQ_0 + i * 2,
+                               GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+        qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
+                                     2, addr, 2, MM_PMC_SD0_SIZE);
+        qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
+        g_free(name);
+    }
+}
+
 static void fdt_nop_memory_nodes(void *fdt, Error **errp)
 {
     Error *err = NULL;
@@ -XXX,XX +XXX,XX @@ static void create_virtio_regions(VersalVirt *s)
 }

+static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
+{
+    BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
+    DeviceState *card;
+
+    card = qdev_create(qdev_get_child_bus(DEVICE(sd), "sd-bus"), TYPE_SD_CARD);
+    object_property_add_child(OBJECT(sd), "card[*]", OBJECT(card),
+                              &error_fatal);
+    qdev_prop_set_drive(card, "drive", blk, &error_fatal);
+    object_property_set_bool(OBJECT(card), true, "realized", &error_fatal);
+}
+
 static void versal_virt_init(MachineState *machine)
 {
     VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine);
     int psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
+    int i;

     /*
      * If the user provides an Operating System to be loaded, we expect them
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
     fdt_add_gic_nodes(s);
     fdt_add_timer_nodes(s);
     fdt_add_zdma_nodes(s);
+    fdt_add_sd_nodes(s);
     fdt_add_cpu_nodes(s, psci_conduit);
     fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
     fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
     memory_region_add_subregion_overlap(get_system_memory(),
                                         0, &s->soc.fpd.apu.mr, 0);

+    /* Plugin SD cards. */
+    for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
+        sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
+    }
+
     s->binfo.ram_size = machine->ram_size;
     s->binfo.loader_start = 0x0;
     s->binfo.get_dtb = versal_virt_get_dtb;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 41 +++++++++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void reset_btype(DisasContext *s)
     }
 }

+static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
+{
+    tcg_gen_movi_i64(dest, s->pc_curr + diff);
+}
+
 void gen_a64_update_pc(DisasContext *s, target_long diff)
 {
-    tcg_gen_movi_i64(cpu_pc, s->pc_curr + diff);
+    gen_pc_plus_diff(s, cpu_pc, diff);
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)

     if (insn & (1U << 31)) {
         /* BL Branch with link */
-        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+        gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
     }

     /* B Branch / BL Branch with link */
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         default:
             goto do_unallocated;
         }
-        gen_a64_set_pc(s, dst);
         /* BLR also needs to load return address */
         if (opc == 1) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+            TCGv_i64 lr = cpu_reg(s, 30);
+            if (dst == lr) {
+                TCGv_i64 tmp = new_tmp_a64(s);
+                tcg_gen_mov_i64(tmp, dst);
+                dst = tmp;
+            }
+            gen_pc_plus_diff(s, lr, curr_insn_len(s));
         }
+        gen_a64_set_pc(s, dst);
         break;

     case 8: /* BRAA */
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         } else {
             dst = cpu_reg(s, rn);
         }
-        gen_a64_set_pc(s, dst);
         /* BLRAA also needs to load return address */
         if (opc == 9) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+            TCGv_i64 lr = cpu_reg(s, 30);
+            if (dst == lr) {
+                TCGv_i64 tmp = new_tmp_a64(s);
+                tcg_gen_mov_i64(tmp, dst);
+                dst = tmp;
+            }
+            gen_pc_plus_diff(s, lr, curr_insn_len(s));
         }
+        gen_a64_set_pc(s, dst);
         break;

     case 4: /* ERET */
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)

     tcg_rt = cpu_reg(s, rt);

-    clean_addr = tcg_constant_i64(s->pc_curr + imm);
+    clean_addr = new_tmp_a64(s);
+    gen_pc_plus_diff(s, clean_addr, imm);
     if (is_vector) {
         do_fp_ld(s, rt, clean_addr, size);
     } else {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst(DisasContext *s, uint32_t insn)
 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
 {
     unsigned int page, rd;
-    uint64_t base;
-    uint64_t offset;
+    int64_t offset;

     page = extract32(insn, 31, 1);
     /* SignExtend(immhi:immlo) -> offset */
     offset = sextract64(insn, 5, 19);
     offset = offset << 2 | extract32(insn, 29, 2);
     rd = extract32(insn, 0, 5);
-    base = s->pc_curr;

     if (page) {
         /* ADRP (page based) */
-        base &= ~0xfff;
         offset <<= 12;
+        /* The page offset is ok for TARGET_TB_PCREL. */
+        offset -= s->pc_curr & 0xfff;
     }

-    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
+    gen_pc_plus_diff(s, cpu_reg(s, rd), offset);
 }

 /*
--
2.25.1
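The ADRP change above leans on a simple algebraic identity: aligning the PC down to a page boundary and then adding the page offset is the same as adding (offset - (pc & 0xfff)) to the unaligned PC, which is what lets the result be expressed purely as a displacement from pc_curr. A minimal sketch of the identity (illustration only, with hypothetical pc and offset values):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t pc = 0x40001234;            /* hypothetical pc_curr */
    int64_t off = (int64_t)0x7f << 12;   /* hypothetical ADRP page offset */
    uint64_t old_form = (pc & ~0xfffull) + off;       /* base &= ~0xfff */
    uint64_t new_form = pc + (off - (pc & 0xfff));    /* pure displacement */
    assert(old_form == new_form);
    return 0;
}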
Deleted patch
Somewhere along the line we accidentally added a duplicate
"using D16-D31 when they don't exist" check to do_vfm_dp()
(probably an artifact of a patch series rebase). Remove it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200430181003.21682-2-peter.maydell@linaro.org
---
 target/arm/translate-vfp.inc.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
         return false;
     }

-    /* UNDEF accesses to D16-D31 if they don't exist. */
-    if (!dc_isar_feature(aa32_simd_r32, s) &&
-        ((a->vd | a->vn | a->vm) & 0x10)) {
-        return false;
-    }
-
     if (!vfp_access_check(s)) {
         return true;
     }
--
2.20.1
Deleted patch
We were accidentally permitting decode of Thumb Neon insns even if
the CPU didn't have the FEATURE_NEON bit set, because the feature
check was being done before the call to disas_neon_data_insn() and
disas_neon_ls_insn() in the Arm decoder but was omitted from the
Thumb decoder. Push the feature bit check down into the called
functions so it is done for both Arm and Thumb encodings.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200430181003.21682-3-peter.maydell@linaro.org
---
 target/arm/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     TCGv_i32 tmp2;
     TCGv_i64 tmp64;

+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return 1;
+    }
+
     /* FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     TCGv_ptr ptr1, ptr2, ptr3;
     TCGv_i64 tmp64;

+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return 1;
+    }
+
     /* FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)

     if (((insn >> 25) & 7) == 1) {
         /* NEON Data processing.  */
-        if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
-            goto illegal_op;
-        }
-
         if (disas_neon_data_insn(s, insn)) {
             goto illegal_op;
         }
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     }
     if ((insn & 0x0f100000) == 0x04000000) {
         /* NEON load/store.  */
-        if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
-            goto illegal_op;
-        }
-
         if (disas_neon_ls_insn(s, insn)) {
             goto illegal_op;
         }
--
2.20.1
Deleted patch
Convert the VCMLA (vector) insns in the 3same extension group to
decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-5-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode | 11 ++++++++++
 target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 11 +---------
 3 files changed, 49 insertions(+), 10 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@
 # More specifically, this covers:
 # 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
 # 3same ext:       0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
+
+# VFP/Neon register fields; same as vfp.decode
+%vm_dp  5:1 0:4
+%vm_sp  0:4 5:1
+%vn_dp  7:1 16:4
+%vn_sp  16:4 7:1
+%vd_dp  22:1 12:4
+%vd_sp  12:4 22:1
+
+VCMLA          1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
+               vm=%vm_dp vn=%vn_dp vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@
 #include "decode-neon-dp.inc.c"
 #include "decode-neon-ls.inc.c"
 #include "decode-neon-shared.inc.c"
+
+static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
+{
+    int opr_sz;
+    TCGv_ptr fpst;
+    gen_helper_gvec_3_ptr *fn_gvec_ptr;
+
+    if (!dc_isar_feature(aa32_vcma, s)
+        || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vn | a->vm | a->vd) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    opr_sz = (1 + a->q) * 8;
+    fpst = get_fpstatus_ptr(1);
+    fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
+    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(1, a->vn),
+                       vfp_reg_offset(1, a->vm),
+                       fpst, opr_sz, opr_sz, a->rot,
+                       fn_gvec_ptr);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
     bool is_long = false, q = extract32(insn, 6, 1);
     bool ptr_is_env = false;

-    if ((insn & 0xfe200f10) == 0xfc200800) {
-        /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
-        int size = extract32(insn, 20, 1);
-        data = extract32(insn, 23, 2); /* rot */
-        if (!dc_isar_feature(aa32_vcma, s)
-            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
-            return 1;
-        }
-        fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
-    } else if ((insn & 0xfea00f10) == 0xfc800800) {
+    if ((insn & 0xfea00f10) == 0xfc800800) {
         /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 24, 1); /* rot */
--
2.20.1
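For readers new to decodetree: a field definition such as "%vm_dp 5:1 0:4" concatenates its sub-fields most-significant-first, so the Vm register number for a double-precision operand is the M bit (insn[5]) stacked on top of insn[3:0]. A minimal sketch of the equivalent manual extraction (illustration only; the real extraction code is generated by scripts/decodetree.py):

#include <assert.h>
#include <stdint.h>

static unsigned extract32(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

/* equivalent of the decodetree field "%vm_dp 5:1 0:4" */
static unsigned vm_dp(uint32_t insn)
{
    return (extract32(insn, 5, 1) << 4) | extract32(insn, 0, 4);
}

int main(void)
{
    /* hypothetical encoding with M=1 and Vm=0b0010, i.e. register d18 */
    uint32_t insn = (1u << 5) | 0x2;
    assert(vm_dp(insn) == 18);
    return 0;
}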
Deleted patch
Convert the VCADD (vector) insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-6-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode | 3 +++
 target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 11 +---------
 3 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@

 VCMLA          1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
                vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VCADD          1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
+               vm=%vm_dp vn=%vn_dp vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
     tcg_temp_free_ptr(fpst);
     return true;
 }
+
+static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
+{
+    int opr_sz;
+    TCGv_ptr fpst;
+    gen_helper_gvec_3_ptr *fn_gvec_ptr;
+
+    if (!dc_isar_feature(aa32_vcma, s)
+        || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vn | a->vm | a->vd) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    opr_sz = (1 + a->q) * 8;
+    fpst = get_fpstatus_ptr(1);
+    fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
+    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(1, a->vn),
+                       vfp_reg_offset(1, a->vm),
+                       fpst, opr_sz, opr_sz, a->rot,
+                       fn_gvec_ptr);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
     bool is_long = false, q = extract32(insn, 6, 1);
     bool ptr_is_env = false;

-    if ((insn & 0xfea00f10) == 0xfc800800) {
-        /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
-        int size = extract32(insn, 20, 1);
-        data = extract32(insn, 24, 1); /* rot */
-        if (!dc_isar_feature(aa32_vcma, s)
-            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
-            return 1;
-        }
-        fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
-    } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
+    if ((insn & 0xfeb00f00) == 0xfc200d00) {
         /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
         bool u = extract32(insn, 4, 1);
         if (!dc_isar_feature(aa32_dp, s)) {
--
2.20.1
Deleted patch
Convert the V[US]DOT (vector) insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-7-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode | 4 ++++
 target/arm/translate-neon.inc.c | 32 ++++++++++++++++++++++++++++++++
 target/arm/translate.c | 9 +--------
 3 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@ VCMLA          1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \

 VCADD          1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
                vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+# VUDOT and VSDOT
+VDOT           1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
+               vm=%vm_dp vn=%vn_dp vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
     tcg_temp_free_ptr(fpst);
     return true;
 }
+
+static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
+{
+    int opr_sz;
+    gen_helper_gvec_3 *fn_gvec;
+
+    if (!dc_isar_feature(aa32_dp, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vn | a->vm | a->vd) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    opr_sz = (1 + a->q) * 8;
+    fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
+    tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(1, a->vn),
+                       vfp_reg_offset(1, a->vm),
+                       opr_sz, opr_sz, 0, fn_gvec);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
     bool is_long = false, q = extract32(insn, 6, 1);
     bool ptr_is_env = false;

-    if ((insn & 0xfeb00f00) == 0xfc200d00) {
-        /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
-        bool u = extract32(insn, 4, 1);
-        if (!dc_isar_feature(aa32_dp, s)) {
-            return 1;
-        }
-        fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
-    } else if ((insn & 0xff300f10) == 0xfc200810) {
+    if ((insn & 0xff300f10) == 0xfc200810) {
         /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
         int is_s = extract32(insn, 23, 1);
         if (!dc_isar_feature(aa32_fhm, s)) {
--
2.20.1
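For context on what the gvec helpers selected above compute: VSDOT/VUDOT accumulate, into each 32-bit destination element, the dot product of the four corresponding byte pairs of the sources. A hedged reference-semantics sketch (this is the architectural operation, not the helper implementation; all values are hypothetical):

#include <assert.h>
#include <stdint.h>

/* signed dot product: d[e] += sum of n.byte[4e+i] * m.byte[4e+i] */
static void sdot_ref(int32_t *d, const int8_t *n, const int8_t *m, int elts)
{
    for (int e = 0; e < elts; e++) {
        for (int i = 0; i < 4; i++) {
            d[e] += n[4 * e + i] * m[4 * e + i];
        }
    }
}

int main(void)
{
    int8_t n[4] = { 1, 2, 3, 4 }, m[4] = { 5, 6, 7, 8 };
    int32_t d[1] = { 10 };       /* accumulator starts non-zero */
    sdot_ref(d, n, m, 1);
    assert(d[0] == 10 + 5 + 12 + 21 + 32);   /* 80 */
    return 0;
}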
Convert the Neon "load/store single structure to one lane" insns to
decodetree.

As this is the last set of insns in the neon load/store group,
we can remove the whole disas_neon_ls_insn() function.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-14-peter.maydell@linaro.org
---
 target/arm/neon-ls.decode | 11 +++
 target/arm/translate-neon.inc.c | 89 +++++++++++++++++++
 target/arm/translate.c | 147 --------------------------------
 3 files changed, 100 insertions(+), 147 deletions(-)

diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@ VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \

 VLD_all_lanes  1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
                vd=%vd_dp
+
+# Neon load/store single structure to one lane
+%imm1_5_p1 5:1 !function=plus1
+%imm1_6_p1 6:1 !function=plus1
+
+VLDST_single   1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
+               vd=%vd_dp size=0 stride=1
+VLDST_single   1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
+               vd=%vd_dp size=1 stride=%imm1_5_p1
+VLDST_single   1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
+               vd=%vd_dp size=2 stride=%imm1_6_p1
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@
  * It might be possible to convert it to a standalone .c file eventually.
  */

+static inline int plus1(DisasContext *s, int x)
+{
+    return x + 1;
+}
+
 /* Include the generated Neon decoder */
 #include "decode-neon-dp.inc.c"
 #include "decode-neon-ls.inc.c"
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)

     return true;
 }
+
+static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
+{
+    /* Neon load/store single structure to one lane */
+    int reg;
+    int nregs = a->n + 1;
+    int vd = a->vd;
+    TCGv_i32 addr, tmp;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+
+    /* Catch the UNDEF cases. This is unavoidably a bit messy. */
+    switch (nregs) {
+    case 1:
+        if (((a->align & (1 << a->size)) != 0) ||
+            (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
+            return false;
+        }
+        break;
+    case 3:
+        if ((a->align & 1) != 0) {
+            return false;
+        }
+        /* fall through */
+    case 2:
+        if (a->size == 2 && (a->align & 2) != 0) {
+            return false;
+        }
+        break;
+    case 4:
+        if ((a->size == 2) && ((a->align & 3) == 3)) {
+            return false;
+        }
+        break;
+    default:
+        abort();
+    }
+    if ((vd + a->stride * (nregs - 1)) > 31) {
+        /*
+         * Attempts to write off the end of the register file are
+         * UNPREDICTABLE; we choose to UNDEF because otherwise we would
+         * access off the end of the array that holds the register data.
+         */
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    addr = tcg_temp_new_i32();
+    load_reg_var(s, addr, a->rn);
+    /*
+     * TODO: if we implemented alignment exceptions, we should check
+     * addr against the alignment encoded in a->align here.
+     */
+    for (reg = 0; reg < nregs; reg++) {
+        if (a->l) {
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+                            s->be_data | a->size);
+            neon_store_element(vd, a->reg_idx, a->size, tmp);
+        } else { /* Store */
+            neon_load_element(tmp, vd, a->reg_idx, a->size);
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
+                            s->be_data | a->size);
+        }
+        vd += a->stride;
+        tcg_gen_addi_i32(addr, addr, 1 << a->size);
+    }
+    tcg_temp_free_i32(addr);
+    tcg_temp_free_i32(tmp);
+
+    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
     tcg_temp_free_i32(rd);
 }

-
-/* Translate a NEON load/store element instruction.  Return nonzero if the
-   instruction is invalid.  */
-static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
-{
-    int rd, rn, rm;
-    int nregs;
-    int stride;
-    int size;
-    int reg;
-    int load;
-    TCGv_i32 addr;
-    TCGv_i32 tmp;
-
-    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
-        return 1;
-    }
-
-    /* FIXME: this access check should not take precedence over UNDEF
-     * for invalid encodings; we will generate incorrect syndrome information
-     * for attempts to execute invalid vfp/neon encodings with FP disabled.
-     */
-    if (s->fp_excp_el) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
-        return 0;
-    }
-
-    if (!s->vfp_enabled)
-        return 1;
-    VFP_DREG_D(rd, insn);
-    rn = (insn >> 16) & 0xf;
-    rm = insn & 0xf;
-    load = (insn & (1 << 21)) != 0;
-    if ((insn & (1 << 23)) == 0) {
-        /* Load store all elements -- handled already by decodetree */
-        return 1;
-    } else {
-        size = (insn >> 10) & 3;
-        if (size == 3) {
-            /* Load single element to all lanes -- handled by decodetree */
-            return 1;
-        } else {
-            /* Single element. */
-            int idx = (insn >> 4) & 0xf;
-            int reg_idx;
-            switch (size) {
-            case 0:
-                reg_idx = (insn >> 5) & 7;
-                stride = 1;
-                break;
-            case 1:
-                reg_idx = (insn >> 6) & 3;
-                stride = (insn & (1 << 5)) ? 2 : 1;
-                break;
-            case 2:
-                reg_idx = (insn >> 7) & 1;
-                stride = (insn & (1 << 6)) ? 2 : 1;
-                break;
-            default:
-                abort();
-            }
-            nregs = ((insn >> 8) & 3) + 1;
-            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
-            switch (nregs) {
-            case 1:
-                if (((idx & (1 << size)) != 0) ||
-                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
-                    return 1;
-                }
-                break;
-            case 3:
-                if ((idx & 1) != 0) {
-                    return 1;
-                }
-                /* fall through */
-            case 2:
-                if (size == 2 && (idx & 2) != 0) {
-                    return 1;
-                }
-                break;
-            case 4:
-                if ((size == 2) && ((idx & 3) == 3)) {
-                    return 1;
-                }
-                break;
-            default:
-                abort();
-            }
-            if ((rd + stride * (nregs - 1)) > 31) {
-                /* Attempts to write off the end of the register file
-                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
-                 * the neon_load_reg() would write off the end of the array.
-                 */
-                return 1;
-            }
-            tmp = tcg_temp_new_i32();
-            addr = tcg_temp_new_i32();
-            load_reg_var(s, addr, rn);
-            for (reg = 0; reg < nregs; reg++) {
-                if (load) {
-                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                                    s->be_data | size);
-                    neon_store_element(rd, reg_idx, size, tmp);
-                } else { /* Store */
-                    neon_load_element(tmp, rd, reg_idx, size);
-                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
-                                    s->be_data | size);
-                }
-                rd += stride;
-                tcg_gen_addi_i32(addr, addr, 1 << size);
-            }
-            tcg_temp_free_i32(addr);
-            tcg_temp_free_i32(tmp);
-            stride = nregs * (1 << size);
-        }
-    }
-    if (rm != 15) {
-        TCGv_i32 base;
-
-        base = load_reg(s, rn);
-        if (rm == 13) {
-            tcg_gen_addi_i32(base, base, stride);
-        } else {
-            TCGv_i32 index;
-            index = load_reg(s, rm);
-            tcg_gen_add_i32(base, base, index);
-            tcg_temp_free_i32(index);
-        }
-        store_reg(s, rn, base);
-    }
-    return 0;
-}
-
 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
         }
         return;
     }
-    if ((insn & 0x0f100000) == 0x04000000) {
-        /* NEON load/store.  */
-        if (disas_neon_ls_insn(s, insn)) {
-            goto illegal_op;
-        }
-        return;
-    }
     if ((insn & 0x0e000f00) == 0x0c000100) {
         if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
             /* iWMMXt register transfer.  */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
         }
         break;
     case 12:
-        if ((insn & 0x01100000) == 0x01000000) {
-            if (disas_neon_ls_insn(s, insn)) {
-                goto illegal_op;
-            }
-            break;
-        }
         goto illegal_op;
     default:
     illegal_op:
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
     }
 }

-/* The architectural value of PC.  */
-static uint32_t read_pc(DisasContext *s)
-{
-    return s->pc_curr + (s->thumb ? 4 : 8);
-}
-
 /* The pc_curr difference for an architectural jump. */
 static target_long jmp_diff(DisasContext *s, target_long diff)
 {
     return diff + (s->thumb ? 4 : 8);
 }

+static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
+{
+    tcg_gen_movi_i32(var, s->pc_curr + diff);
+}
+
 /* Set a variable to the value of a CPU register.  */
 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
 {
     if (reg == 15) {
-        tcg_gen_movi_i32(var, read_pc(s));
+        gen_pc_plus_diff(s, var, jmp_diff(s, 0));
     } else {
         tcg_gen_mov_i32(var, cpu_R[reg]);
     }
 }
@@ -XXX,XX +XXX,XX @@ TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
     TCGv_i32 tmp = tcg_temp_new_i32();

     if (reg == 15) {
-        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+        /*
+         * This address is computed from an aligned PC:
+         * subtract off the low bits.
+         */
+        gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
     } else {
         tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
     }
@@ -XXX,XX +XXX,XX @@ void unallocated_encoding(DisasContext *s)
 /* Force a TB lookup after an instruction that changes the CPU state.  */
 void gen_lookup_tb(DisasContext *s)
 {
-    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
+    gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
     s->base.is_jmp = DISAS_EXIT;
 }

@@ -XXX,XX +XXX,XX @@ static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
         return false;
     }
     tmp = load_reg(s, a->rm);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
     gen_bx(s, tmp);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)

 static bool trans_BL(DisasContext *s, arg_i *a)
 {
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
     gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
     if (s->thumb && (a->imm & 2)) {
         return false;
     }
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
     store_cpu_field_constant(!s->thumb, thumb);
     /* This jump is computed from an aligned PC: subtract off the low bits. */
     gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
@@ -XXX,XX +XXX,XX @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
 {
     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
-    tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
+    gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)

     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
     tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
     gen_bx(s, tmp);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
     tmp = tcg_temp_new_i32();
     tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
     tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
     gen_bx(s, tmp);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
     tcg_gen_add_i32(addr, addr, tmp);

     gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
-    tcg_temp_free_i32(addr);

     tcg_gen_add_i32(tmp, tmp, tmp);
-    tcg_gen_addi_i32(tmp, tmp, read_pc(s));
+    gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
+    tcg_gen_add_i32(tmp, tmp, addr);
+    tcg_temp_free_i32(addr);
     store_reg(s, 15, tmp);
     return true;
 }
--
2.25.1
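The "subtract off the low bits" comment in add_reg_for_lit above is another arithmetic identity: with the T32 architectural PC being pc_curr + 4, Align(PC, 4) + ofs equals pc_curr + ((ofs - (pc_curr & 3)) + 4). A hedged standalone sketch of that equivalence (illustration only; pc_curr and ofs values are hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint32_t pc_curr = 0x1000; pc_curr < 0x1008; pc_curr += 2) {
        int32_t ofs = 0x30;              /* hypothetical literal offset */
        uint32_t read_pc = pc_curr + 4;  /* architectural PC in Thumb */
        /* old form: align the PC, then add the offset */
        uint32_t old_form = (read_pc & ~3u) + ofs;
        /* new form: fold the alignment into the displacement */
        uint32_t new_form = pc_curr + ((ofs - (pc_curr & 3)) + 4);
        assert(old_form == new_form);
    }
    return 0;
}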
Convert the Neon VMUL, VMLA, VMLS and VSHL insns in the
3-reg-same grouping to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-20-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode | 9 +++++++
 target/arm/translate-neon.inc.c | 44 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 28 +++------------------
 3 files changed, 56 insertions(+), 25 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@ VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
 VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
 VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same

+VSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 0 .... @3same
+VSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 0 .... @3same
+
 VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
 VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
 VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
@@ -XXX,XX +XXX,XX @@ VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same

 VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
 VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
+
+VMLA_3s 1111 001 0 0 . .. .... .... 1001 . . . 0 .... @3same
+VMLS_3s 1111 001 1 0 . .. .... .... 1001 . . . 0 .... @3same
+
+VMUL_3s 1111 001 0 0 . .. .... .... 1001 . . . 1 .... @3same
+VMUL_p_3s 1111 001 1 0 . .. .... .... 1001 . . . 1 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
 DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
 DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
 DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
+DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)

 #define DO_3SAME_CMP(INSN, COND) \
     static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
@@ -XXX,XX +XXX,XX @@ DO_3SAME_GVEC4(VQADD_S, sqadd_op)
 DO_3SAME_GVEC4(VQADD_U, uqadd_op)
 DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
 DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
+
+static void gen_VMUL_p_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                          uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz,
+                       0, gen_helper_gvec_pmul_b);
+}
+
+static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
+{
+    if (a->size != 0) {
+        return false;
+    }
+    return do_3same(s, a, gen_VMUL_p_3s);
+}
+
+#define DO_3SAME_GVEC3_NO_SZ_3(INSN, OPARRAY) \
+    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+                                uint32_t rn_ofs, uint32_t rm_ofs, \
+                                uint32_t oprsz, uint32_t maxsz) \
+    { \
+        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, \
+                       oprsz, maxsz, &OPARRAY[vece]); \
+    } \
+    DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
+
+
+DO_3SAME_GVEC3_NO_SZ_3(VMLA, mla_op)
+DO_3SAME_GVEC3_NO_SZ_3(VMLS, mls_op)
+
+#define DO_3SAME_GVEC3_SHIFT(INSN, OPARRAY) \
+    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+                                uint32_t rn_ofs, uint32_t rm_ofs, \
+                                uint32_t oprsz, uint32_t maxsz) \
+    { \
+        /* Note the operation is vshl vd,vm,vn */ \
+        tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, \
+                       oprsz, maxsz, &OPARRAY[vece]); \
+    } \
+    DO_3SAME(INSN, gen_##INSN##_3s)
+
+DO_3SAME_GVEC3_SHIFT(VSHL_S, sshl_op)
+DO_3SAME_GVEC3_SHIFT(VSHL_U, ushl_op)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         }
         return 1;

-    case NEON_3R_VMUL: /* VMUL */
-        if (u) {
-            /* Polynomial case allows only P8.  */
-            if (size != 0) {
-                return 1;
-            }
-            tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
-                               0, gen_helper_gvec_pmul_b);
-        } else {
-            tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-        }
-        return 0;
-
-    case NEON_3R_VML: /* VMLA, VMLS */
-        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
-                       u ? &mls_op[size] : &mla_op[size]);
-        return 0;
-
-    case NEON_3R_VSHL:
-        /* Note the operation is vshl vd,vm,vn */
-        tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
-                       u ? &ushl_op[size] : &sshl_op[size]);
-        return 0;
-
     case NEON_3R_VADD_VSUB:
     case NEON_3R_LOGIC:
     case NEON_3R_VMAX:
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     case NEON_3R_VCGE:
     case NEON_3R_VQADD:
     case NEON_3R_VQSUB:
+    case NEON_3R_VMUL:
+    case NEON_3R_VML:
+    case NEON_3R_VSHL:
         /* Already handled by decodetree */
         return 1;
     }
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221020030641.2066807-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-param.h | 2 +
 target/arm/translate.h | 50 +++++++++++++++-
 target/arm/cpu.c | 23 ++++----
 target/arm/translate-a64.c | 64 +++++++++++++-------
 target/arm/translate-m-nocp.c | 2 +-
 target/arm/translate.c | 108 +++++++++++++++++++++++-----------
 6 files changed, 178 insertions(+), 71 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
 # define TARGET_PAGE_BITS_VARY
 # define TARGET_PAGE_BITS_MIN  10

+# define TARGET_TB_PCREL 1
+
 /*
  * Cache the attrs and shareability fields from the page table entry.
  *
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@


 /* internal defines */
+
+/*
+ * Save pc_save across a branch, so that we may restore the value from
+ * before the branch at the point the label is emitted.
+ */
+typedef struct DisasLabel {
+    TCGLabel *label;
+    target_ulong pc_save;
+} DisasLabel;
+
 typedef struct DisasContext {
     DisasContextBase base;
     const ARMISARegisters *isar;

     /* The address of the current instruction being translated. */
     target_ulong pc_curr;
+    /*
+     * For TARGET_TB_PCREL, the full value of cpu_pc is not known
+     * (although the page offset is known).  For convenience, the
+     * translation loop uses the full virtual address that triggered
+     * the translation, from base.pc_start through pc_curr.
+     * For efficiency, we do not update cpu_pc for every instruction.
+     * Instead, pc_save has the value of pc_curr at the time of the
+     * last update to cpu_pc, which allows us to compute the addend
+     * needed to bring cpu_pc current: pc_curr - pc_save.
+     * If cpu_pc now contains the destination of an indirect branch,
+     * pc_save contains -1 to indicate that relative updates are no
+     * longer possible.
+     */
+    target_ulong pc_save;
     target_ulong page_start;
     uint32_t insn;
     /* Nonzero if this instruction has been conditionally skipped. */
     int condjmp;
     /* The label that will be jumped to when the instruction is skipped. */
-    TCGLabel *condlabel;
+    DisasLabel condlabel;
     /* Thumb-2 conditional execution bits. */
     int condexec_mask;
     int condexec_cond;
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
      * after decode (ie after any UNDEF checks)
      */
     bool eci_handled;
-    /* TCG op to rewind to if this turns out to be an invalid ECI state */
-    TCGOp *insn_eci_rewind;
     int sctlr_b;
     MemOp be_data;
 #if !defined(CONFIG_USER_ONLY)
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  */
 uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);

+/*
+ * gen_disas_label:
+ * Create a label and cache a copy of pc_save.
+ */
+static inline DisasLabel gen_disas_label(DisasContext *s)
+{
+    return (DisasLabel){
+        .label = gen_new_label(),
+        .pc_save = s->pc_save,
+    };
+}
+
+/*
+ * set_disas_label:
+ * Emit a label and restore the cached copy of pc_save.
+ */
+static inline void set_disas_label(DisasContext *s, DisasLabel l)
+{
+    gen_set_label(l.label);
+    s->pc_save = l.pc_save;
+}
+
 /*
  * Helpers for implementing sets of trans_* functions.
  * Defer the implementation of NAME to FUNC, with optional extra arguments.
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr arm_cpu_get_pc(CPUState *cs)
 void arm_cpu_synchronize_from_tb(CPUState *cs,
                                  const TranslationBlock *tb)
 {
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-
-    /*
-     * It's OK to look at env for the current mode here, because it's
-     * never possible for an AArch64 TB to chain to an AArch32 TB.
-     */
-    if (is_a64(env)) {
-        env->pc = tb_pc(tb);
-    } else {
-        env->regs[15] = tb_pc(tb);
+    /* The program counter is always up to date with TARGET_TB_PCREL. */
+    if (!TARGET_TB_PCREL) {
+        CPUARMState *env = cs->env_ptr;
+        /*
+         * It's OK to look at env for the current mode here, because it's
+         * never possible for an AArch64 TB to chain to an AArch32 TB.
+         */
+        if (is_a64(env)) {
+            env->pc = tb_pc(tb);
+        } else {
+            env->regs[15] = tb_pc(tb);
+        }
     }
 }
 #endif /* CONFIG_TCG */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void reset_btype(DisasContext *s)

 static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
 {
-    tcg_gen_movi_i64(dest, s->pc_curr + diff);
+    assert(s->pc_save != -1);
+    if (TARGET_TB_PCREL) {
+        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
+    } else {
+        tcg_gen_movi_i64(dest, s->pc_curr + diff);
+    }
 }

 void gen_a64_update_pc(DisasContext *s, target_long diff)
 {
     gen_pc_plus_diff(s, cpu_pc, diff);
+    s->pc_save = s->pc_curr + diff;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
      * then loading an address into the PC will clear out any tag.
      */
     gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
+    s->pc_save = -1;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest)

 static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
 {
-    uint64_t dest = s->pc_curr + diff;
-
-    if (use_goto_tb(s, dest)) {
-        tcg_gen_goto_tb(n);
-        gen_a64_update_pc(s, diff);
+    if (use_goto_tb(s, s->pc_curr + diff)) {
+        /*
+         * For pcrel, the pc must always be up-to-date on entry to
+         * the linked TB, so that it can use simple additions for all
+         * further adjustments.  For !pcrel, the linked TB is compiled
+         * to know its full virtual address, so we can delay the
+         * update to pc to the unlinked path.  A long chain of links
+         * can thus avoid many updates to the PC.
+         */
+        if (TARGET_TB_PCREL) {
+            gen_a64_update_pc(s, diff);
+            tcg_gen_goto_tb(n);
+        } else {
+            tcg_gen_goto_tb(n);
+            gen_a64_update_pc(s, diff);
+        }
         tcg_gen_exit_tb(s->base.tb, n);
         s->base.is_jmp = DISAS_NORETURN;
     } else {
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int sf, op, rt;
     int64_t diff;
-    TCGLabel *label_match;
+    DisasLabel match;
     TCGv_i64 tcg_cmp;

     sf = extract32(insn, 31, 1);
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
     diff = sextract32(insn, 5, 19) * 4;

     tcg_cmp = read_cpu_reg(s, rt, sf);
-    label_match = gen_new_label();
-
     reset_btype(s);
-    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
-                        tcg_cmp, 0, label_match);

+    match = gen_disas_label(s);
+    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+                        tcg_cmp, 0, match.label);
     gen_goto_tb(s, 0, 4);
-    gen_set_label(label_match);
+    set_disas_label(s, match);
     gen_goto_tb(s, 1, diff);
 }

@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int bit_pos, op, rt;
     int64_t diff;
-    TCGLabel *label_match;
+    DisasLabel match;
     TCGv_i64 tcg_cmp;

     bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)

     tcg_cmp = tcg_temp_new_i64();
     tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
-    label_match = gen_new_label();

     reset_btype(s);

+    match = gen_disas_label(s);
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
-                        tcg_cmp, 0, label_match);
+                        tcg_cmp, 0, match.label);
     tcg_temp_free_i64(tcg_cmp);
     gen_goto_tb(s, 0, 4);
-    gen_set_label(label_match);
+    set_disas_label(s, match);
     gen_goto_tb(s, 1, diff);
 }

@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
     reset_btype(s);
     if (cond < 0x0e) {
         /* genuinely conditional branches */
-        TCGLabel *label_match = gen_new_label();
-        arm_gen_test_cc(cond, label_match);
+        DisasLabel match = gen_disas_label(s);
+        arm_gen_test_cc(cond, match.label);
         gen_goto_tb(s, 0, 4);
-        gen_set_label(label_match);
+        set_disas_label(s, match);
         gen_goto_tb(s, 1, diff);
     } else {
         /* 0xe and 0xf are both "always" conditions */
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,

     dc->isar = &arm_cpu->isar;
     dc->condjmp = 0;
-
+    dc->pc_save = dc->base.pc_first;
     dc->aarch64 = true;
     dc->thumb = false;
     dc->sctlr_b = 0;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
+    target_ulong pc_arg = dc->base.pc_next;

-    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
+    if (TARGET_TB_PCREL) {
+        pc_arg &= ~TARGET_PAGE_MASK;
+    }
+    tcg_gen_insn_start(pc_arg, 0, 0);
     dc->insn_start = tcg_last_op();
 }

diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
     tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
     tcg_gen_or_i32(sfpa, sfpa, aspen);
     arm_gen_condlabel(s);
-    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label);

     if (s->fp_excp_el != 0) {
         gen_exception_insn_el(s, 0, EXCP_NOCP,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
 void arm_gen_condlabel(DisasContext *s)
 {
     if (!s->condjmp) {
-        s->condlabel = gen_new_label();
+        s->condlabel = gen_disas_label(s);
         s->condjmp = 1;
     }
 }
@@ -XXX,XX +XXX,XX @@ static target_long jmp_diff(DisasContext *s, target_long diff)

 static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
 {
-    tcg_gen_movi_i32(var, s->pc_curr + diff);
+    assert(s->pc_save != -1);
+    if (TARGET_TB_PCREL) {
+        tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
+    } else {
+        tcg_gen_movi_i32(var, s->pc_curr + diff);
+    }
 }

 /* Set a variable to the value of a CPU register.  */
@@ -XXX,XX +XXX,XX @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var)
          */
         tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
         s->base.is_jmp = DISAS_JUMP;
+        s->pc_save = -1;
     } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
         /* For M-profile SP bits [1:0] are always zero */
         tcg_gen_andi_i32(var, var, ~3);
@@ -XXX,XX +XXX,XX @@ void gen_set_condexec(DisasContext *s)

 void gen_update_pc(DisasContext *s, target_long diff)
 {
-    tcg_gen_movi_i32(cpu_R[15], s->pc_curr + diff);
+    gen_pc_plus_diff(s, cpu_R[15], diff);
+    s->pc_save = s->pc_curr + diff;
 }

 /* Set PC and Thumb state from var. var is marked as dead. */
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
     tcg_gen_andi_i32(cpu_R[15], var, ~1);
     tcg_gen_andi_i32(var, var, 1);
     store_cpu_field(var, thumb);
+    s->pc_save = -1;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
 static inline void gen_bx_excret_final_code(DisasContext *s)
 {
     /* Generate the code to finish possible exception return and end the TB */
-    TCGLabel *excret_label = gen_new_label();
+    DisasLabel excret_label = gen_disas_label(s);
     uint32_t min_magic;

     if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     }

     /* Is the new PC value in the magic range indicating exception return? */
-    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
     /* No: end the TB as we would for a DISAS_JMP */
     if (s->ss_active) {
         gen_singlestep_exception(s);
     } else {
         tcg_gen_exit_tb(NULL, 0);
     }
-    gen_set_label(excret_label);
+    set_disas_label(s, excret_label);
     /* Yes: this is an exception return.
      * At this point in runtime env->regs[15] and env->thumb will hold
      * the exception-return magic number, which do_v7m_exception_exit()
@@ -XXX,XX +XXX,XX @@ static void gen_goto_ptr(void)
  */
 static void gen_goto_tb(DisasContext *s, int n, target_long diff)
 {
-    target_ulong dest = s->pc_curr + diff;
-
-    if (translator_use_goto_tb(&s->base, dest)) {
-        tcg_gen_goto_tb(n);
-        gen_update_pc(s, diff);
+    if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
+        /*
+         * For pcrel, the pc must always be up-to-date on entry to
+         * the linked TB, so that it can use simple additions for all
+         * further adjustments.  For !pcrel, the linked TB is compiled
+         * to know its full virtual address, so we can delay the
+         * update to pc to the unlinked path.  A long chain of links
+         * can thus avoid many updates to the PC.
+         */
+        if (TARGET_TB_PCREL) {
+            gen_update_pc(s, diff);
+            tcg_gen_goto_tb(n);
+        } else {
+            tcg_gen_goto_tb(n);
+            gen_update_pc(s, diff);
+        }
         tcg_gen_exit_tb(s->base.tb, n);
     } else {
         gen_update_pc(s, diff);
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
 static void arm_skip_unless(DisasContext *s, uint32_t cond)
 {
     arm_gen_condlabel(s);
-    arm_gen_test_cc(cond ^ 1, s->condlabel);
+    arm_gen_test_cc(cond ^ 1, s->condlabel.label);
 }


@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
 {
     /* M-profile low-overhead while-loop start */
     TCGv_i32 tmp;
-    TCGLabel *nextlabel;
+    DisasLabel nextlabel;

     if (!dc_isar_feature(aa32_lob, s)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
         }
     }

-    nextlabel = gen_new_label();
-    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel);
+    nextlabel = gen_disas_label(s);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
     tmp = load_reg(s, a->rn);
     store_reg(s, 14, tmp);
     if (a->size != 4) {
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
     }
     gen_jmp_tb(s, curr_insn_len(s), 1);

-    gen_set_label(nextlabel);
+    set_disas_label(s, nextlabel);
     gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
      * any faster.
      */
     TCGv_i32 tmp;
-    TCGLabel *loopend;
+    DisasLabel loopend;
     bool fpu_active;

     if (!dc_isar_feature(aa32_lob, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)

     if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
         /* Need to do a runtime check for LTPSIZE != 4 */
-        TCGLabel *skipexc = gen_new_label();
+        DisasLabel skipexc = gen_disas_label(s);
         tmp = load_cpu_field(v7m.ltpsize);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc);
+        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
         tcg_temp_free_i32(tmp);
         gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
-        gen_set_label(skipexc);
+        set_disas_label(s, skipexc);
     }

     if (a->f) {
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
      * loop decrement value is 1. For LETP we need to calculate the decrement
      * value from LTPSIZE.
      */
-    loopend = gen_new_label();
+    loopend = gen_disas_label(s);
     if (!a->tp) {
-        tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend);
+        tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
         tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
     } else {
         /*
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
         tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
         tcg_temp_free_i32(ltpsize);

-        tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend);
+        tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);

         tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
         tcg_temp_free_i32(decr);
@@ -XXX,XX +XXX,XX @@ static bool trans_LE(DisasContext *s, arg_LE *a)
     /* Jump back to the loop start */
     gen_jmp(s, jmp_diff(s, -a->imm));

-    gen_set_label(loopend);
+    set_disas_label(s, loopend);
     if (a->tp) {
         /* Exits from tail-pred loops must reset LTPSIZE to 4 */
         store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
@@ -XXX,XX +XXX,XX @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a)

     arm_gen_condlabel(s);
     tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
-                        tmp, 0, s->condlabel);
+                        tmp, 0, s->condlabel.label);
     tcg_temp_free_i32(tmp);
     gen_jmp(s, jmp_diff(s, a->imm));
     return true;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)

     dc->isar = &cpu->isar;
     dc->condjmp = 0;
-
+    dc->pc_save = dc->base.pc_first;
     dc->aarch64 = false;
     dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
     dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
      */
     dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
     dc->eci_handled = false;
-    dc->insn_eci_rewind = NULL;
     if (condexec & 0xf) {
         dc->condexec_mask = (condexec & 0xf) << 1;
         dc->condexec_cond = condexec >> 4;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
      * fields here.
      */
     uint32_t condexec_bits;
+    target_ulong pc_arg = dc->base.pc_next;

+    if (TARGET_TB_PCREL) {
+        pc_arg &= ~TARGET_PAGE_MASK;
+    }
     if (dc->eci) {
         condexec_bits = dc->eci << 4;
     } else {
         condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
     }
-    tcg_gen_insn_start(dc->base.pc_next, condexec_bits, 0);
+    tcg_gen_insn_start(pc_arg, condexec_bits, 0);
     dc->insn_start = tcg_last_op();
 }

@@ -XXX,XX +XXX,XX @@ static bool arm_check_ss_active(DisasContext *dc)

 static void arm_post_translate_insn(DisasContext *dc)
 {
-    if (dc->condjmp && !dc->base.is_jmp) {
-        gen_set_label(dc->condlabel);
+    if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
+        if (dc->pc_save != dc->condlabel.pc_save) {
+            gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
+        }
+        gen_set_label(dc->condlabel.label);
         dc->condjmp = 0;
     }
     translator_loop_temp_check(&dc->base);
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     uint32_t pc = dc->base.pc_next;
     uint32_t insn;
     bool is_16bit;
+    /* TCG op to rewind to if this turns out to be an invalid ECI state */
+    TCGOp *insn_eci_rewind = NULL;
+    target_ulong insn_eci_pc_save = -1;

     /* Misaligned thumb PC is architecturally impossible. */
     assert((dc->base.pc_next & 1) == 0);
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         * insn" case. We will rewind to the marker (ie throwing away
         * all the generated code) and instead emit "take exception".
         */
-        dc->insn_eci_rewind = tcg_last_op();
+        insn_eci_rewind = tcg_last_op();
+        insn_eci_pc_save = dc->pc_save;
     }

     if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         * Insn wasn't valid for ECI/ICI at all: undo what we
         * just generated and instead emit an exception
}
592
593
if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
594
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
595
* Insn wasn't valid for ECI/ICI at all: undo what we
596
* just generated and instead emit an exception
597
*/
598
- tcg_remove_ops_after(dc->insn_eci_rewind);
599
+ tcg_remove_ops_after(insn_eci_rewind);
600
+ dc->pc_save = insn_eci_pc_save;
601
dc->condjmp = 0;
602
gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
603
}
604
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
605
606
if (dc->condjmp) {
607
/* "Condition failed" instruction codepath for the branch/trap insn */
608
- gen_set_label(dc->condlabel);
609
+ set_disas_label(dc, dc->condlabel);
610
gen_set_condexec(dc);
611
if (unlikely(dc->ss_active)) {
612
gen_update_pc(dc, curr_insn_len(dc));
613
@@ -XXX,XX +XXX,XX @@ void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
614
target_ulong *data)
615
{
616
if (is_a64(env)) {
617
- env->pc = data[0];
618
+ if (TARGET_TB_PCREL) {
619
+ env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
620
+ } else {
621
+ env->pc = data[0];
622
+ }
623
env->condexec_bits = 0;
624
env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
625
} else {
626
- env->regs[15] = data[0];
627
+ if (TARGET_TB_PCREL) {
628
+ env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
629
+ } else {
630
+ env->regs[15] = data[0];
631
+ }
632
env->condexec_bits = data[1];
633
env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
634
}
142
--
635
--
143
2.20.1
636
2.25.1
144
145
diff view generated by jsdifflib
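A background note on the TARGET_TB_PCREL patch above (the arm_tr_insn_start() and restore_state_to_opc() hunks): once insn_start records only the offset of the insn within its guest page, unwinding has to rebuild the full PC by combining that saved offset with a page base taken from the PC the CPU is currently executing. In isolation the rule looks like this; an illustrative sketch only, not QEMU code, with an invented helper name:

    #include <stdint.h>

    /*
     * Sketch of the PCREL unwind rule: 'saved' is the in-page offset
     * recorded at insn_start (data[0] above); 'cur_pc' contributes
     * only its page base.
     */
    static uint64_t pcrel_restore_pc(uint64_t cur_pc, uint64_t saved,
                                     uint64_t target_page_mask)
    {
        return (cur_pc & target_page_mask) | saved;
    }

The non-PCREL branch of the real code keeps the old behaviour, where data[0] is the complete PC.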
Convert the VFM[AS]L (vector) insns to decodetree. This is the last
insn in the legacy decoder for the 3same_ext group, so we can
delete the legacy decoder function for the group entirely.

Note that in disas_thumb2_insn() the parts of this encoding space
where the decodetree decoder returns false will correctly be directed
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
into disas_coproc_insn() by mistake.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-8-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode   |  6 +++
 target/arm/translate-neon.inc.c | 31 +++++++++++
 target/arm/translate.c          | 92 +--------------------------------
 3 files changed, 38 insertions(+), 91 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@ VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
 # VUDOT and VSDOT
 VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
      vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+# VFM[AS]L
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
+     vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
+     vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
                        opr_sz, opr_sz, 0, fn_gvec);
     return true;
 }
+
+static bool trans_VFML(DisasContext *s, arg_VFML *a)
+{
+    int opr_sz;
+
+    if (!dc_isar_feature(aa32_fhm, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        (a->vd & 0x10)) {
+        return false;
+    }
+
+    if (a->vd & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    opr_sz = (1 + a->q) * 8;
+    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(a->q, a->vn),
+                       vfp_reg_offset(a->q, a->vm),
+                       cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
+                       gen_helper_gvec_fmlal_a32);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     return 0;
 }

-/* Advanced SIMD three registers of the same length extension.
- *  31          25    23  22    20   16   12  11  10   9    8        3     0
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
- * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
- */
-static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
-{
-    gen_helper_gvec_3 *fn_gvec = NULL;
-    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
-    int rd, rn, rm, opr_sz;
-    int data = 0;
-    int off_rn, off_rm;
-    bool is_long = false, q = extract32(insn, 6, 1);
-    bool ptr_is_env = false;
-
-    if ((insn & 0xff300f10) == 0xfc200810) {
-        /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
-        int is_s = extract32(insn, 23, 1);
-        if (!dc_isar_feature(aa32_fhm, s)) {
-            return 1;
-        }
-        is_long = true;
-        data = is_s; /* is_2 == 0 */
-        fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
-        ptr_is_env = true;
-    } else {
-        return 1;
-    }
-
-    VFP_DREG_D(rd, insn);
-    if (rd & q) {
-        return 1;
-    }
-    if (q || !is_long) {
-        VFP_DREG_N(rn, insn);
-        VFP_DREG_M(rm, insn);
-        if ((rn | rm) & q & !is_long) {
-            return 1;
-        }
-        off_rn = vfp_reg_offset(1, rn);
-        off_rm = vfp_reg_offset(1, rm);
-    } else {
-        rn = VFP_SREG_N(insn);
-        rm = VFP_SREG_M(insn);
-        off_rn = vfp_reg_offset(0, rn);
-        off_rm = vfp_reg_offset(0, rm);
-    }
-
-    if (s->fp_excp_el) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
-        return 0;
-    }
-    if (!s->vfp_enabled) {
-        return 1;
-    }
-
-    opr_sz = (1 + q) * 8;
-    if (fn_gvec_ptr) {
-        TCGv_ptr ptr;
-        if (ptr_is_env) {
-            ptr = cpu_env;
-        } else {
-            ptr = get_fpstatus_ptr(1);
-        }
-        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
-                           opr_sz, opr_sz, data, fn_gvec_ptr);
-        if (!ptr_is_env) {
-            tcg_temp_free_ptr(ptr);
-        }
-    } else {
-        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
-                           opr_sz, opr_sz, data, fn_gvec);
-    }
-    return 0;
-}
-
 /* Advanced SIMD two registers and a scalar extension.
  *  31            24   23  22   20   16   12  11  10   9    8        3     0
  * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             }
         }
     }
-    } else if ((insn & 0x0e000a00) == 0x0c000800
-               && arm_dc_feature(s, ARM_FEATURE_V8)) {
-        if (disas_neon_insn_3same_ext(s, insn)) {
-            goto illegal_op;
-        }
-        return;
     } else if ((insn & 0x0f000a00) == 0x0e000800
                && arm_dc_feature(s, ARM_FEATURE_V8)) {
         if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
         }
         break;
     }
-    if ((insn & 0xfe000a00) == 0xfc000800
+    if ((insn & 0xff000a00) == 0xfe000800
         && arm_dc_feature(s, ARM_FEATURE_V8)) {
         /* The Thumb2 and ARM encodings are identical. */
-        if (disas_neon_insn_3same_ext(s, insn)) {
-            goto illegal_op;
-        }
-    } else if ((insn & 0xff000a00) == 0xfe000800
-               && arm_dc_feature(s, ARM_FEATURE_V8)) {
-        /* The Thumb2 and ARM encodings are identical. */
         if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
             goto illegal_op;
         }
--
2.20.1

Currently the microdrive code uses device_legacy_reset() to reset
itself, and has its reset method call reset on the IDE bus as the
last thing it does. Switch to using device_cold_reset().

The only concrete microdrive device is the TYPE_DSCM1XXXX; it is not
command-line pluggable, so it is used only by the old pxa2xx Arm
boards 'akita', 'borzoi', 'spitz', 'terrier' and 'tosa'.

You might think that this would result in the IDE bus being
reset automatically, but it does not, because the IDEBus type
does not set the BusClass::reset method. Instead the controller
must explicitly call ide_bus_reset(). We therefore leave that
call in md_reset().

Note also that because the PCMCIA card device is a direct subclass of
TYPE_DEVICE and we don't model the PCMCIA controller-to-card
interface as a qbus, PCMCIA cards are not on any qbus and so they
don't get reset when the system is reset. The reset only happens via
the dscm1xxxx_attach() and dscm1xxxx_detach() functions during
machine creation.

Because our aim here is merely to try to get rid of calls to the
device_legacy_reset() function, we leave these other dubious
reset-related issues alone. (They all stem from this code being
absolutely ancient.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20221013174042.1602926-1-peter.maydell@linaro.org
---
 hw/ide/microdrive.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ide/microdrive.c
+++ b/hw/ide/microdrive.c
@@ -XXX,XX +XXX,XX @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value)
     case 0x00:    /* Configuration Option Register */
         s->opt = value & 0xcf;
         if (value & OPT_SRESET) {
-            device_legacy_reset(DEVICE(s));
+            device_cold_reset(DEVICE(s));
         }
         md_interrupt_update(s);
         break;
@@ -XXX,XX +XXX,XX @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value)
     case 0xe:    /* Device Control */
         s->ctrl = value;
         if (value & CTRL_SRST) {
-            device_legacy_reset(DEVICE(s));
+            device_cold_reset(DEVICE(s));
         }
         md_interrupt_update(s);
         break;
@@ -XXX,XX +XXX,XX @@ static int dscm1xxxx_attach(PCMCIACardState *card)
     md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8);
     md->io_base = 0x0;

-    device_legacy_reset(DEVICE(md));
+    device_cold_reset(DEVICE(md));
     md_interrupt_update(md);

     return 0;
@@ -XXX,XX +XXX,XX @@ static int dscm1xxxx_detach(PCMCIACardState *card)
 {
     MicroDriveState *md = MICRODRIVE(card);

-    device_legacy_reset(DEVICE(md));
+    device_cold_reset(DEVICE(md));
--
2.25.1
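To make the ordering described in the commit message concrete, the device reset handler after this change has roughly the following shape. This is an illustrative sketch rather than a quote of hw/ide/microdrive.c; in particular the 'bus' field name and the trimmed body are assumptions:

    /* Sketch only: approximate shape of md_reset() after the change. */
    static void md_reset(DeviceState *dev)
    {
        MicroDriveState *md = MICRODRIVE(dev);   /* field layout assumed */

        /* ... clear the card's own register state ... */

        /*
         * IDEBus does not set BusClass::reset, so resetting the device
         * does not reset the IDE bus automatically; do it explicitly,
         * as the last thing the reset method does.
         */
        ide_bus_reset(&md->bus);
    }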
Deleted patch

Convert the V[US]DOT (scalar) insns in the 2reg-scalar-ext group
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-10-peter.maydell@linaro.org
---
 target/arm/neon-shared.decode   |  3 +++
 target/arm/translate-neon.inc.c | 35 +++++++++++++++++++++++++++++++++
 target/arm/translate.c          | 13 +-----------
 3 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
              vn=%vn_dp vd=%vd_dp size=0
 VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
+
+VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
+            vm=%vm_dp vn=%vn_dp vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
     tcg_temp_free_ptr(fpst);
     return true;
 }
+
+static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
+{
+    gen_helper_gvec_3 *fn_gvec;
+    int opr_sz;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_dp, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn) & 0x10)) {
+        return false;
+    }
+
+    if ((a->vd | a->vn) & a->q) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
+    opr_sz = (1 + a->q) * 8;
+    fpst = get_fpstatus_ptr(1);
+    tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
+                       vfp_reg_offset(1, a->vn),
+                       vfp_reg_offset(1, a->rm),
+                       opr_sz, opr_sz, a->index, fn_gvec);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
     bool is_long = false, q = extract32(insn, 6, 1);
     bool ptr_is_env = false;

-    if ((insn & 0xffb00f00) == 0xfe200d00) {
-        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
-        int u = extract32(insn, 4, 1);
-
-        if (!dc_isar_feature(aa32_dp, s)) {
-            return 1;
-        }
-        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
-        /* rm is just Vm, and index is M. */
-        data = extract32(insn, 5, 1); /* index */
-        rm = extract32(insn, 0, 4);
-    } else if ((insn & 0xffa00f10) == 0xfe000810) {
+    if ((insn & 0xffa00f10) == 0xfe000810) {
         /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
         int is_s = extract32(insn, 20, 1);
         int vm20 = extract32(insn, 0, 3);
--
2.20.1
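As a reference for what gen_helper_gvec_udot_idx_b/sdot_idx_b compute in the patch above: each 32-bit lane of Vd accumulates the dot product of the corresponding four bytes of Vn with one fixed group of four bytes of Vm selected by the index bit. A plain-C illustration, not the QEMU helper itself; the function and parameter names are invented:

    #include <stdint.h>

    /*
     * Sketch of VSDOT (by element), element size 8: 'lanes' is 2 for
     * a D register, 4 for a Q register.
     */
    static void sdot_idx_b_ref(int32_t *vd, const int8_t *vn,
                               const int8_t *vm, int index, int lanes)
    {
        const int8_t *grp = vm + 4 * index;   /* the one selected group */

        for (int i = 0; i < lanes; i++) {
            for (int j = 0; j < 4; j++) {
                vd[i] += vn[4 * i + j] * grp[j];
            }
        }
    }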
Deleted patch

Convert the Neon "load single structure to all lanes" insns to
decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-13-peter.maydell@linaro.org
---
 target/arm/neon-ls.decode       |  5 +++
 target/arm/translate-neon.inc.c | 73 +++++++++++++++++++++++++++++++++
 target/arm/translate.c          | 55 +------------------------
 3 files changed, 80 insertions(+), 53 deletions(-)

diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@

 VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
                vd=%vd_dp
+
+# Neon load single element to all lanes
+
+VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
+              vd=%vd_dp
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
     gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
     return true;
 }
+
+static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
+{
+    /* Neon load single structure to all lanes */
+    int reg, stride, vec_size;
+    int vd = a->vd;
+    int size = a->size;
+    int nregs = a->n + 1;
+    TCGv_i32 addr, tmp;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+
+    if (size == 3) {
+        if (nregs != 4 || a->a == 0) {
+            return false;
+        }
+        /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
+        size = 2;
+    }
+    if (nregs == 1 && a->a == 1 && size == 0) {
+        return false;
+    }
+    if (nregs == 3 && a->a == 1) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * VLD1 to all lanes: T bit indicates how many Dregs to write.
+     * VLD2/3/4 to all lanes: T bit indicates register stride.
+     */
+    stride = a->t ? 2 : 1;
+    vec_size = nregs == 1 ? stride * 8 : 8;
+
+    tmp = tcg_temp_new_i32();
+    addr = tcg_temp_new_i32();
+    load_reg_var(s, addr, a->rn);
+    for (reg = 0; reg < nregs; reg++) {
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+                        s->be_data | size);
+        if ((vd & 1) && vec_size == 16) {
+            /*
+             * We cannot write 16 bytes at once because the
+             * destination is unaligned.
+             */
+            tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                                 8, 8, tmp);
+            tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
+                             neon_reg_offset(vd, 0), 8, 8);
+        } else {
+            tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                                 vec_size, vec_size, tmp);
+        }
+        tcg_gen_addi_i32(addr, addr, 1 << size);
+        vd += stride;
+    }
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i32(addr);
+
+    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     int size;
     int reg;
     int load;
-    int vec_size;
     TCGv_i32 addr;
     TCGv_i32 tmp;

@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     } else {
         size = (insn >> 10) & 3;
         if (size == 3) {
-            /* Load single element to all lanes. */
-            int a = (insn >> 4) & 1;
-            if (!load) {
-                return 1;
-            }
-            size = (insn >> 6) & 3;
-            nregs = ((insn >> 8) & 3) + 1;
-
-            if (size == 3) {
-                if (nregs != 4 || a == 0) {
-                    return 1;
-                }
-                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
-                size = 2;
-            }
-            if (nregs == 1 && a == 1 && size == 0) {
-                return 1;
-            }
-            if (nregs == 3 && a == 1) {
-                return 1;
-            }
-            addr = tcg_temp_new_i32();
-            load_reg_var(s, addr, rn);
-
-            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
-             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
-             */
-            stride = (insn & (1 << 5)) ? 2 : 1;
-            vec_size = nregs == 1 ? stride * 8 : 8;
-
-            tmp = tcg_temp_new_i32();
-            for (reg = 0; reg < nregs; reg++) {
-                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                                s->be_data | size);
-                if ((rd & 1) && vec_size == 16) {
-                    /* We cannot write 16 bytes at once because the
-                     * destination is unaligned.
-                     */
-                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
-                                         8, 8, tmp);
-                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
-                                     neon_reg_offset(rd, 0), 8, 8);
-                } else {
-                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
-                                         vec_size, vec_size, tmp);
-                }
-                tcg_gen_addi_i32(addr, addr, 1 << size);
-                rd += stride;
-            }
-            tcg_temp_free_i32(tmp);
-            tcg_temp_free_i32(addr);
-            stride = (1 << size) * nregs;
+            /* Load single element to all lanes -- handled by decodetree */
+            return 1;
         } else {
             /* Single element. */
             int idx = (insn >> 4) & 0xf;
--
2.20.1
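The tcg_gen_gvec_dup_i32() calls above do the "replicate to every lane" work; per D register the effect is just the following (illustrative C with an invented helper name, not QEMU code):

    #include <stdint.h>
    #include <string.h>

    /*
     * Sketch only: copy one loaded element of 'esize' bytes (1, 2 or 4)
     * into every lane of a 64-bit D register.
     */
    static void dup_to_dreg(uint8_t dreg[8], const uint8_t *elem, int esize)
    {
        for (int ofs = 0; ofs < 8; ofs += esize) {
            memcpy(dreg + ofs, elem, esize);
        }
    }

The unaligned-Q-register branch in trans_VLD_all_lanes() amounts to doing this once per D register instead of as a single 16-byte gvec dup.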
Deleted patch

Convert the Neon logic ops in the 3-reg-same grouping to decodetree.
Note that for the logic ops the 'size' field forms part of their
decode and the actual operations are always bitwise.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-16-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode       | 12 +++++++++++
 target/arm/translate-neon.inc.c | 19 +++++++++++++++++
 target/arm/translate.c          | 38 +--------------------------------
 3 files changed, 32 insertions(+), 37 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@
 @3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
        &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp

+@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
+             &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0
+
+VAND_3s 1111 001 0 0 . 00 .... .... 0001 ... 1 .... @3same_logic
+VBIC_3s 1111 001 0 0 . 01 .... .... 0001 ... 1 .... @3same_logic
+VORR_3s 1111 001 0 0 . 10 .... .... 0001 ... 1 .... @3same_logic
+VORN_3s 1111 001 0 0 . 11 .... .... 0001 ... 1 .... @3same_logic
+VEOR_3s 1111 001 1 0 . 00 .... .... 0001 ... 1 .... @3same_logic
+VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
+VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
+VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
+
 VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
 VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)

 DO_3SAME(VADD, tcg_gen_gvec_add)
 DO_3SAME(VSUB, tcg_gen_gvec_sub)
+DO_3SAME(VAND, tcg_gen_gvec_and)
+DO_3SAME(VBIC, tcg_gen_gvec_andc)
+DO_3SAME(VORR, tcg_gen_gvec_or)
+DO_3SAME(VORN, tcg_gen_gvec_orc)
+DO_3SAME(VEOR, tcg_gen_gvec_xor)
+
+/* These insns are all gvec_bitsel but with the inputs in various orders. */
+#define DO_3SAME_BITSEL(INSN, O1, O2, O3)                               \
+    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,        \
+                                uint32_t rn_ofs, uint32_t rm_ofs,      \
+                                uint32_t oprsz, uint32_t maxsz)        \
+    {                                                                  \
+        tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz);   \
+    }                                                                  \
+    DO_3SAME(INSN, gen_##INSN##_3s)
+
+DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
+DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
+DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         }
         return 1;

-    case NEON_3R_LOGIC: /* Logic ops. */
-        switch ((u << 2) | size) {
-        case 0: /* VAND */
-            tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-            break;
-        case 1: /* VBIC */
-            tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
-                              vec_size, vec_size);
-            break;
-        case 2: /* VORR */
-            tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
-                            vec_size, vec_size);
-            break;
-        case 3: /* VORN */
-            tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-            break;
-        case 4: /* VEOR */
-            tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-            break;
-        case 5: /* VBSL */
-            tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
-                                vec_size, vec_size);
-            break;
-        case 6: /* VBIT */
-            tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
-                                vec_size, vec_size);
-            break;
-        case 7: /* VBIF */
-            tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
-                                vec_size, vec_size);
-            break;
-        }
-        return 0;
-
     case NEON_3R_VQADD:
         tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                        rn_ofs, rm_ofs, vec_size, vec_size,
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         return 0;

     case NEON_3R_VADD_VSUB:
+    case NEON_3R_LOGIC:
         /* Already handled by decodetree */
         return 1;
     }
--
2.20.1
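The single operation behind VBSL, VBIT and VBIF, written out: bits come from the first data operand where the mask bit is set, otherwise from the second. The three insns differ only in which of rd/rn/rm plays which role, exactly as the DO_3SAME_BITSEL operand orders above encode. An illustrative scalar form of what tcg_gen_gvec_bitsel() applies vector-wide:

    #include <stdint.h>

    /* Sketch only: the bitwise-select kernel shared by VBSL/VBIT/VBIF. */
    static uint64_t bitsel(uint64_t mask, uint64_t a, uint64_t b)
    {
        return (a & mask) | (b & ~mask);
    }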
Deleted patch

Convert the Neon 3-reg-same VMAX and VMIN insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-17-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode       |  5 +++++
 target/arm/translate-neon.inc.c | 14 ++++++++++++++
 target/arm/translate.c          | 21 ++-------------------
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
 VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
 VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic

+VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
+VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
+VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
+VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
+
 VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
 VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ DO_3SAME(VEOR, tcg_gen_gvec_xor)
 DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
 DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
 DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
+
+#define DO_3SAME_NO_SZ_3(INSN, FUNC)                              \
+    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+    {                                                            \
+        if (a->size == 3) {                                      \
+            return false;                                        \
+        }                                                        \
+        return do_3same(s, a, FUNC);                             \
+    }
+
+DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
+DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
+DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
+DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
         return 0;

-    case NEON_3R_VMAX:
-        if (u) {
-            tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
-                              vec_size, vec_size);
-        } else {
-            tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
-                              vec_size, vec_size);
-        }
-        return 0;
-    case NEON_3R_VMIN:
-        if (u) {
-            tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
-                              vec_size, vec_size);
-        } else {
-            tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
-                              vec_size, vec_size);
-        }
-        return 0;
-
     case NEON_3R_VSHL:
         /* Note the operation is vshl vd,vm,vn */
         tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)

     case NEON_3R_VADD_VSUB:
     case NEON_3R_LOGIC:
+    case NEON_3R_VMAX:
+    case NEON_3R_VMIN:
         /* Already handled by decodetree */
         return 1;
     }
--
2.20.1
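Hand-expanding the patch's DO_3SAME_NO_SZ_3 macro for one of the four insns makes the pattern concrete; this is just the macro above written out for VMAX_S:

    static bool trans_VMAX_S_3s(DisasContext *s, arg_3same *a)
    {
        if (a->size == 3) {
            /* size 0b11 is not a valid element size for these insns */
            return false;
        }
        return do_3same(s, a, tcg_gen_gvec_smax);
    }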
Deleted patch

Convert the Neon comparison ops in the 3-reg-same grouping
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-18-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode       |  8 ++++++++
 target/arm/translate-neon.inc.c | 22 ++++++++++++++++++++++
 target/arm/translate.c          | 23 +++--------------------
 3 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
 VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
 VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic

+VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
+VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
+VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
+VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
+
 VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
 VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
 VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
@@ -XXX,XX +XXX,XX @@ VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same

 VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
 VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
+
+VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
+VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
 DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
 DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
 DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
+
+#define DO_3SAME_CMP(INSN, COND)                                        \
+    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,        \
+                                uint32_t rn_ofs, uint32_t rm_ofs,      \
+                                uint32_t oprsz, uint32_t maxsz)        \
+    {                                                                  \
+        tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
+    }                                                                  \
+    DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
+
+DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
+DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
+DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
+DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
+DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
+
+static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                        uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
+}
+DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                        u ? &mls_op[size] : &mla_op[size]);
         return 0;

-    case NEON_3R_VTST_VCEQ:
-        if (u) { /* VCEQ */
-            tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
-                             vec_size, vec_size);
-        } else { /* VTST */
-            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
-                           vec_size, vec_size, &cmtst_op[size]);
-        }
-        return 0;
-
-    case NEON_3R_VCGT:
-        tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
-                         rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
-        return 0;
-
-    case NEON_3R_VCGE:
-        tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
-                         rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
-        return 0;
-
     case NEON_3R_VSHL:
         /* Note the operation is vshl vd,vm,vn */
         tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     case NEON_3R_LOGIC:
     case NEON_3R_VMAX:
     case NEON_3R_VMIN:
+    case NEON_3R_VTST_VCEQ:
+    case NEON_3R_VCGT:
+    case NEON_3R_VCGE:
         /* Already handled by decodetree */
         return 1;
     }
--
2.20.1
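Reference semantics for the two compare flavours converted above, as illustrative per-element C rather than the gvec expansion: a comparison that holds sets the whole lane to ones, and VTST, via cmtst_op, does the same for a non-zero AND:

    #include <stdint.h>

    /* Sketch only: per-lane results for VCEQ and VTST on 64-bit lanes. */
    static uint64_t vceq_ref(uint64_t n, uint64_t m)
    {
        return n == m ? ~(uint64_t)0 : 0;
    }

    static uint64_t vtst_ref(uint64_t n, uint64_t m)
    {
        return (n & m) != 0 ? ~(uint64_t)0 : 0;
    }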
Deleted patch

Convert the Neon VQADD/VQSUB insns in the 3-reg-same grouping
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-19-peter.maydell@linaro.org
---
 target/arm/neon-dp.decode       |  6 ++++++
 target/arm/translate-neon.inc.c | 15 +++++++++++++++
 target/arm/translate.c          | 14 ++------------
 3 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@
 @3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
        &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp

+VQADD_S_3s 1111 001 0 0 . .. .... .... 0000 . . . 1 .... @3same
+VQADD_U_3s 1111 001 1 0 . .. .... .... 0000 . . . 1 .... @3same
+
 @3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
              &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0

@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
 VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
 VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic

+VQSUB_S_3s 1111 001 0 0 . .. .... .... 0010 . . . 1 .... @3same
+VQSUB_U_3s 1111 001 1 0 . .. .... .... 0010 . . . 1 .... @3same
+
 VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
 VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
 VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
     tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
 }
 DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
+
+#define DO_3SAME_GVEC4(INSN, OPARRAY)                                   \
+    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,        \
+                                uint32_t rn_ofs, uint32_t rm_ofs,      \
+                                uint32_t oprsz, uint32_t maxsz)        \
+    {                                                                  \
+        tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),          \
+                       rn_ofs, rm_ofs, oprsz, maxsz, &OPARRAY[vece]);  \
+    }                                                                  \
+    DO_3SAME(INSN, gen_##INSN##_3s)
+
+DO_3SAME_GVEC4(VQADD_S, sqadd_op)
+DO_3SAME_GVEC4(VQADD_U, uqadd_op)
+DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
+DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     }
     return 1;

-    case NEON_3R_VQADD:
-        tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
-                       rn_ofs, rm_ofs, vec_size, vec_size,
-                       (u ? uqadd_op : sqadd_op) + size);
-        return 0;
-
-    case NEON_3R_VQSUB:
-        tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
-                       rn_ofs, rm_ofs, vec_size, vec_size,
-                       (u ? uqsub_op : sqsub_op) + size);
-        return 0;
-
     case NEON_3R_VMUL: /* VMUL */
         if (u) {
             /* Polynomial case allows only P8. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     case NEON_3R_VTST_VCEQ:
     case NEON_3R_VCGT:
     case NEON_3R_VCGE:
+    case NEON_3R_VQADD:
+    case NEON_3R_VQSUB:
         /* Already handled by decodetree */
         return 1;
     }
--
2.20.1
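For reference, the saturating semantics these ops implement, including the sticky QC flag that the tcg_gen_gvec_4() expansion above wires in via the vfp.qc operand. An illustrative scalar C version for one lane size, not the TCG op itself:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch only: unsigned saturating add of one byte lane; '*qc'
     * models the sticky FPSCR.QC saturation flag.
     */
    static uint8_t uqadd8_ref(uint8_t a, uint8_t b, bool *qc)
    {
        unsigned r = (unsigned)a + b;

        if (r > UINT8_MAX) {
            *qc = true;
            return UINT8_MAX;
        }
        return (uint8_t)r;
    }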