Most of this is the Neon decodetree patches, followed by Edgar's versal cleanups.

thanks
-- PMM

The following changes since commit 2ef486e76d64436be90f7359a3071fb2a56ce835:

  Merge remote-tracking branch 'remotes/marcel/tags/rdma-pull-request' into staging (2020-05-03 14:12:56 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200504

for you to fetch changes up to 9aefc6cf9b73f66062d2f914a0136756e7a28211:

  target/arm: Move gen_ function typedefs to translate.h (2020-05-04 12:59:26 +0100)

----------------------------------------------------------------
target-arm queue:
 * Start of conversion of Neon insns to decodetree
 * versal board: support SD and RTC
 * Implement ARMv8.2-TTS2UXN
 * Make VQDMULL undefined when U=1
 * Some minor code cleanups

----------------------------------------------------------------
Edgar E. Iglesias (11):
      hw/arm: versal: Remove inclusion of arm_gicv3_common.h
      hw/arm: versal: Move misplaced comment
      hw/arm: versal-virt: Fix typo xlnx-ve -> xlnx-versal
      hw/arm: versal: Embed the UARTs into the SoC type
      hw/arm: versal: Embed the GEMs into the SoC type
      hw/arm: versal: Embed the ADMAs into the SoC type
      hw/arm: versal: Embed the APUs into the SoC type
      hw/arm: versal: Add support for SD
      hw/arm: versal: Add support for the RTC
      hw/arm: versal-virt: Add support for SD
      hw/arm: versal-virt: Add support for the RTC

Fredrik Strupe (1):
      target/arm: Make VQDMULL undefined when U=1

Peter Maydell (25):
      target/arm: Don't use a TLB for ARMMMUIdx_Stage2
      target/arm: Use enum constant in get_phys_addr_lpae() call
      target/arm: Add new 's1_is_el0' argument to get_phys_addr_lpae()
      target/arm: Implement ARMv8.2-TTS2UXN
      target/arm: Use correct variable for setting 'max' cpu's ID_AA64DFR0
      target/arm/translate-vfp.inc.c: Remove duplicate simd_r32 check
      target/arm: Don't allow Thumb Neon insns without FEATURE_NEON
      target/arm: Add stubs for AArch32 Neon decodetree
      target/arm: Convert VCMLA (vector) to decodetree
      target/arm: Convert VCADD (vector) to decodetree
      target/arm: Convert V[US]DOT (vector) to decodetree
      target/arm: Convert VFM[AS]L (vector) to decodetree
      target/arm: Convert VCMLA (scalar) to decodetree
      target/arm: Convert V[US]DOT (scalar) to decodetree
      target/arm: Convert VFM[AS]L (scalar) to decodetree
      target/arm: Convert Neon load/store multiple structures to decodetree
      target/arm: Convert Neon 'load single structure to all lanes' to decodetree
      target/arm: Convert Neon 'load/store single structure' to decodetree
      target/arm: Convert Neon 3-reg-same VADD/VSUB to decodetree
      target/arm: Convert Neon 3-reg-same logic ops to decodetree
      target/arm: Convert Neon 3-reg-same VMAX/VMIN to decodetree
      target/arm: Convert Neon 3-reg-same comparisons to decodetree
      target/arm: Convert Neon 3-reg-same VQADD/VQSUB to decodetree
      target/arm: Convert Neon 3-reg-same VMUL, VMLA, VMLS, VSHL to decodetree
      target/arm: Move gen_ function typedefs to translate.h

Philippe Mathieu-Daudé (2):
      hw/arm/mps2-tz: Use TYPE_IOTKIT instead of hardcoded string
      target/arm: Use uint64_t for midr field in CPU state struct

 include/hw/arm/xlnx-versal.h    |  31 +-
 target/arm/cpu-param.h          |   2 +-
 target/arm/cpu.h                |  38 ++-
 target/arm/translate-a64.h      |   9 -
 target/arm/translate.h          |  26 ++
 target/arm/neon-dp.decode       |  86 +++++
 target/arm/neon-ls.decode       |  52 +++
 target/arm/neon-shared.decode   |  66 ++++
 hw/arm/mps2-tz.c                |   2 +-
 hw/arm/xlnx-versal-virt.c       |  74 ++++-
 hw/arm/xlnx-versal.c            | 115 +++++--
 target/arm/cpu.c                |   3 +-
 target/arm/cpu64.c              |   8 +-
 target/arm/helper.c             | 183 ++++------
 target/arm/translate-a64.c      |  17 -
 target/arm/translate-neon.inc.c | 714 +++++++++++++++++++++++++++++++++++++++
 target/arm/translate-vfp.inc.c  |   6 -
 target/arm/translate.c          | 716 +++-------------------------------------
 target/arm/Makefile.objs        |  18 +
 19 files changed, 1302 insertions(+), 864 deletions(-)
 create mode 100644 target/arm/neon-dp.decode
 create mode 100644 target/arm/neon-ls.decode
 create mode 100644 target/arm/neon-shared.decode
 create mode 100644 target/arm/translate-neon.inc.c
From: Fredrik Strupe <fredrik@strupe.net>

According to Arm ARM, VQDMULL is only valid when U=0, while having
U=1 is unallocated.

Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
Fixes: 695272dcb976 ("target-arm: Handle UNDEF cases for Neon 3-regs-different-widths")
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{0, 0, 0, 0}, /* VMLSL */
{0, 0, 0, 9}, /* VQDMLSL */
{0, 0, 0, 0}, /* Integer VMULL */
- {0, 0, 0, 1}, /* VQDMULL */
+ {0, 0, 0, 9}, /* VQDMULL */
{0, 0, 0, 0xa}, /* Polynomial VMULL */
{0, 0, 0, 7}, /* Reserved: always UNDEF */
};
--
2.20.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

By using the TYPE_* definitions for devices, we can:
- quickly find where devices are used with 'git-grep'
- easily rename a device (one-line change).

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200428154650.21991-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/mps2-tz.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
exit(EXIT_FAILURE);
}

- sysbus_init_child_obj(OBJECT(machine), "iotkit", &mms->iotkit,
+ sysbus_init_child_obj(OBJECT(machine), TYPE_IOTKIT, &mms->iotkit,
sizeof(mms->iotkit), mmc->armsse_type);
iotkitdev = DEVICE(&mms->iotkit);
object_property_set_link(OBJECT(&mms->iotkit), OBJECT(system_memory),
--
2.20.1
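As a small aside on the pattern used above: keeping the QOM type name behind a single TYPE_* macro means the string exists in exactly one place. A minimal standalone sketch of the idea (illustrative only, plain C, not QEMU API):

    /* Sketch: one macro is the single source of truth for the name, so a
     * rename is a one-line change and 'git grep TYPE_IOTKIT' finds all uses. */
    #include <stdio.h>
    #include <string.h>

    #define TYPE_IOTKIT "iotkit"

    static void create_child(const char *name)
    {
        printf("creating child object '%s'\n", name);
    }

    int main(void)
    {
        create_child(TYPE_IOTKIT);             /* instead of a bare "iotkit" literal */
        return strcmp(TYPE_IOTKIT, "iotkit");  /* 0: same string, by construction */
    }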
We define ARMMMUIdx_Stage2 as being an MMU index which uses a QEMU
TLB. However we never actually use the TLB -- all stage 2 lookups
are done by direct calls to get_phys_addr_lpae() followed by a
physical address load via address_space_ld*().

Remove Stage2 from the list of ARM MMU indexes which correspond to
real core MMU indexes, and instead put it in the set of "NOTLB" ARM
MMU indexes.

This allows us to drop NB_MMU_MODES to 11. It also means we can
safely add support for the ARMv8.3-TTS2UXN extension, which adds
permission bits to the stage 2 descriptors which define execute
permission separatel for EL0 and EL1; supporting that while keeping
Stage2 in a QEMU TLB would require us to use separate TLBs for
"Stage2 for an EL0 access" and "Stage2 for an EL1 access", which is a
lot of extra complication given we aren't even using the QEMU TLB.

In the process of updating the comment on our MMU index use,
fix a couple of other minor errors:
* NS EL2 EL2&0 was missing from the list in the comment
* some text hadn't been updated from when we bumped NB_MMU_MODES
above 8

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-2-peter.maydell@linaro.org
---
target/arm/cpu-param.h | 2 +-
target/arm/cpu.h | 21 +++++---
target/arm/helper.c | 112 ++++-------------------------------------
3 files changed, 27 insertions(+), 108 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -XXX,XX +XXX,XX @@
# define TARGET_PAGE_BITS_MIN 10
#endif

-#define NB_MMU_MODES 12
+#define NB_MMU_MODES 11

#endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* handling via the TLB. The only way to do a stage 1 translation without
* the immediate stage 2 translation is via the ATS or AT system insns,
* which can be slow-pathed and always do a page table walk.
+ * The only use of stage 2 translations is either as part of an s1+2
+ * lookup or when loading the descriptors during a stage 1 page table walk,
+ * and in both those cases we don't use the TLB.
* 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
* translation regimes, because they map reasonably well to each other
* and they can't both be active at the same time.
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* NS EL1 EL1&0 stage 1+2 (aka NS PL1)
* NS EL1 EL1&0 stage 1+2 +PAN
* NS EL0 EL2&0
+ * NS EL2 EL2&0
* NS EL2 EL2&0 +PAN
* NS EL2 (aka NS PL2)
* S EL0 EL1&0 (aka S PL0)
* S EL1 EL1&0 (not used if EL3 is 32 bit)
* S EL1 EL1&0 +PAN
* S EL3 (aka S PL1)
- * NS EL1&0 stage 2
*
- * for a total of 12 different mmu_idx.
+ * for a total of 11 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* are not quite the same -- different CPU types (most notably M profile
* vs A/R profile) would like to use MMU indexes with different semantics,
* but since we don't ever need to use all of those in a single CPU we
- * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
+ * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
+ * modes + total number of M profile MMU modes". The lower bits of
* ARMMMUIdx are the core TLB mmu index, and the higher bits are always
* the same for any particular CPU.
* Variables of type ARMMUIdx are always full values, and the core
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
ARMMMUIdx_SE10_1_PAN = 9 | ARM_MMU_IDX_A,
ARMMMUIdx_SE3 = 10 | ARM_MMU_IDX_A,

- ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
-
/*
* These are not allocated TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+ /*
+ * Not allocated a TLB: used only for second stage of an S12 page
+ * table walk, or for descriptor loads during first stage of an S1
+ * page table walk. Note that if we ever want to have a TLB for this
+ * then various TLB flush insns which currently are no-ops or flush
+ * only stage 1 MMU indexes will need to change to flush stage 2.
+ */
+ ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,

/*
* M-profile.
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(SE10_1),
TO_CORE_BIT(SE10_1_PAN),
TO_CORE_BIT(SE3),
- TO_CORE_BIT(Stage2),

TO_CORE_BIT(MUser),
TO_CORE_BIT(MPriv),
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0 |
- ARMMMUIdxBit_Stage2);
+ ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx_all_cpus_synced(cs,
ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0 |
- ARMMMUIdxBit_Stage2);
+ ARMMMUIdxBit_E10_0);
}

-static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by IPA. This has to invalidate any structures that
- * contain only stage 2 translation information, but does not need
- * to apply to structures that contain combined stage 1 and stage 2
- * translation information.
- * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
- */
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
- return;
- }
-
- pageaddr = sextract64(value << 12, 0, 40);
-
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
-static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
- return;
- }
-
- pageaddr = sextract64(value << 12, 0, 40);
-
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_Stage2);
-}
-
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0 |
- ARMMMUIdxBit_Stage2);
+ ARMMMUIdxBit_E10_0);
raw_write(env, ri, value);
}
}
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
return ARMMMUIdxBit_SE10_1 |
ARMMMUIdxBit_SE10_1_PAN |
ARMMMUIdxBit_SE10_0;
- } else if (arm_feature(env, ARM_FEATURE_EL2)) {
- return ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0 |
- ARMMMUIdxBit_Stage2;
} else {
return ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMMMUIdxBit_SE3);
}

-static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by IPA. This has to invalidate any structures that
- * contain only stage 2 translation information, but does not need
- * to apply to structures that contain combined stage 1 and stage 2
- * translation information.
- * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
- */
- ARMCPU *cpu = env_archcpu(env);
- CPUState *cs = CPU(cpu);
- uint64_t pageaddr;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
- return;
- }
-
- pageaddr = sextract64(value << 12, 0, 48);
-
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
-static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
- return;
- }
-
- pageaddr = sextract64(value << 12, 0, 48);
-
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_Stage2);
-}
-
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1is_write },
+ .access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1is_write },
+ .access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
.access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.writefn = tlbi_aa64_alle1is_write },
{ .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1_write },
+ .access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1_write },
+ .access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
.access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.writefn = tlbimva_hyp_is_write },
{ .name = "TLBIIPAS2",
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_write },
+ .type = ARM_CP_NOP, .access = PL2_W },
{ .name = "TLBIIPAS2IS",
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_is_write },
+ .type = ARM_CP_NOP, .access = PL2_W },
{ .name = "TLBIIPAS2L",
.cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_write },
+ .type = ARM_CP_NOP, .access = PL2_W },
{ .name = "TLBIIPAS2LIS",
.cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_is_write },
+ .type = ARM_CP_NOP, .access = PL2_W },
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
--
2.20.1
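For readers not familiar with the TLB-backed vs "NOTLB" split the patch above relies on, here is a minimal standalone model of the idea (values and names are illustrative simplifications, not the real QEMU definitions):

    /* Sketch: a flag bit says whether an MMU index is backed by a core QEMU
     * TLB; NOTLB indexes (like Stage2 after this patch) never touch the TLB,
     * so they do not count towards NB_MMU_MODES. */
    #include <assert.h>
    #include <stdbool.h>

    #define MMU_IDX_TLB          0x10   /* has a core TLB */
    #define MMU_IDX_NOTLB        0x20   /* no TLB allocated */
    #define MMU_IDX_COREIDX_MASK 0x0f

    typedef enum {
        Idx_E10_0     = 0 | MMU_IDX_TLB,
        Idx_E10_1     = 1 | MMU_IDX_TLB,
        Idx_Stage1_E0 = 0 | MMU_IDX_NOTLB,
        Idx_Stage2    = 3 | MMU_IDX_NOTLB,   /* stage 2: walked directly, no TLB */
    } MMUIdx;

    static bool has_tlb(MMUIdx idx) { return idx & MMU_IDX_TLB; }

    static int core_index(MMUIdx idx)
    {
        assert(has_tlb(idx));               /* only TLB-backed indexes have one */
        return idx & MMU_IDX_COREIDX_MASK;
    }

    int main(void)
    {
        assert(!has_tlb(Idx_Stage2));       /* nothing to flush for stage 2 */
        assert(core_index(Idx_E10_1) == 1);
        return 0;
    }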
The access_type argument to get_phys_addr_lpae() is an MMUAccessType;
use the enum constant MMU_DATA_LOAD rather than a literal 0 when we
call it in S1_ptw_translate().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-3-peter.maydell@linaro.org
---
target/arm/helper.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
pcacheattrs = &cacheattrs;
}

- ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
- &txattrs, &s2prot, &s2size, fi, pcacheattrs);
+ ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
+ &s2pa, &txattrs, &s2prot, &s2size, fi,
+ pcacheattrs);
if (ret) {
assert(fi->type != ARMFault_None);
fi->s2addr = addr;
--
2.20.1
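A quick illustration of why the named enumerator is preferable even though it has the value 0 (a simplified sketch, not the real prototype):

    /* Sketch: MMU_DATA_LOAD documents intent at the call site; the literal 0
     * compiles to the same thing but says nothing to the reader. */
    #include <stdio.h>

    typedef enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH } MMUAccessType;

    static const char *describe(MMUAccessType access_type)
    {
        return access_type == MMU_DATA_LOAD  ? "load"  :
               access_type == MMU_DATA_STORE ? "store" : "fetch";
    }

    int main(void)
    {
        printf("%s\n", describe(MMU_DATA_LOAD));   /* reads as "a data load" */
        return 0;
    }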
For ARMv8.2-TTS2UXN, the stage 2 page table walk wants to know
whether the stage 1 access is for EL0 or not, because whether
exec permission is given can depend on whether this is an EL0
or EL1 access. Add a new argument to get_phys_addr_lpae() so
the call sites can pass this information in.

Since get_phys_addr_lpae() doesn't already have a doc comment,
add one so we have a place to put the documentation of the
semantics of the new s1_is_el0 argument.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-4-peter.maydell@linaro.org
---
target/arm/helper.c | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
}

ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
+ false,
&s2pa, &txattrs, &s2prot, &s2size, fi,
pcacheattrs);
if (ret) {
@@ -XXX,XX +XXX,XX @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
};
}

+/**
+ * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a long-format
+ * DFSR/IFSR fault register, with the following caveats:
+ * * the WnR bit is never set (the caller must do this).
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
+ * walk), must be true if this is stage 2 of a stage 1+2 walk for an
+ * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored.
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,

/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
+ mmu_idx == ARMMMUIdx_E10_0,
phys_ptr, attrs, &s2_prot,
page_size, fi,
cacheattrs != NULL ? &cacheattrs2 : NULL);
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}

if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, address, access_type, mmu_idx,
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
phys_ptr, attrs, prot, page_size,
fi, cacheattrs);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
--
2.20.1
The ARMv8.2-TTS2UXN feature extends the XN field in stage 2
translation table descriptors from just bit [54] to bits [54:53],
allowing stage 2 to control execution permissions separately for EL0
and EL1. Implement the new semantics of the XN field and enable
the feature for our 'max' CPU.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200330210400.11724-5-peter.maydell@linaro.org
---
target/arm/cpu.h | 15 +++++++++++++++
target/arm/cpu.c | 1 +
target/arm/cpu64.c | 2 ++
target/arm/helper.c | 37 +++++++++++++++++++++++++++++++------
4 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
}

+static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
+}
+
/*
* 64-bit feature tests via id registers.
*/
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
}

+static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
+}
+
/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
}

+static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
+}
+
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
cpu->isar.id_mmfr4 = t;
}
#endif
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
+ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
cpu->isar.id_aa64mmfr1 = t;

t = cpu->isar.id_aa64mmfr2;
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
+ u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
cpu->isar.id_mmfr4 = u;

u = cpu->isar.id_aa64dfr0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
*
* @env: CPUARMState
* @s2ap: The 2-bit stage2 access permissions (S2AP)
- * @xn: XN (execute-never) bit
+ * @xn: XN (execute-never) bits
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
*/
-static int get_S2prot(CPUARMState *env, int s2ap, int xn)
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
int prot = 0;

@@ -XXX,XX +XXX,XX @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn)
if (s2ap & 2) {
prot |= PAGE_WRITE;
}
- if (!xn) {
- if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+
+ if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
+ switch (xn) {
+ case 0:
prot |= PAGE_EXEC;
+ break;
+ case 1:
+ if (s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ case 2:
+ break;
+ case 3:
+ if (!s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ if (!extract32(xn, 1, 1)) {
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
}
}
return prot;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
}

ap = extract32(attrs, 4, 2);
- xn = extract32(attrs, 12, 1);

if (mmu_idx == ARMMMUIdx_Stage2) {
ns = true;
- *prot = get_S2prot(env, ap, xn);
+ xn = extract32(attrs, 11, 2);
+ *prot = get_S2prot(env, ap, xn, s1_is_el0);
} else {
ns = extract32(attrs, 3, 1);
+ xn = extract32(attrs, 12, 1);
pxn = extract32(attrs, 11, 1);
*prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
}
--
2.20.1
In aarch64_max_initfn() we update both 32-bit and 64-bit ID
registers. The intended pattern is that for 64-bit ID registers we
use FIELD_DP64 and the uint64_t 't' register, while 32-bit ID
registers use FIELD_DP32 and the uint32_t 'u' register. For
ID_AA64DFR0 we accidentally used 'u', meaning that the top 32 bits of
this 64-bit ID register would end up always zero. Luckily at the
moment that's what they should be anyway, so this bug has no visible
effects.

Use the right-sized variable.

Fixes: 3bec78447a958d481991
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200423110915.10527-1-peter.maydell@linaro.org
---
target/arm/cpu64.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
cpu->isar.id_mmfr4 = u;

- u = cpu->isar.id_aa64dfr0;
- u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
- cpu->isar.id_aa64dfr0 = u;
+ t = cpu->isar.id_aa64dfr0;
+ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
+ cpu->isar.id_aa64dfr0 = t;

u = cpu->isar.id_dfr0;
u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
--
2.20.1
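As a standalone illustration of the two-bit stage 2 XN semantics implemented by the TTS2UXN patch above (simplified; the real get_S2prot() also folds in S2AP and the pre-TTS2UXN behaviour):

    /* Sketch: descriptor bits [54:53] (XN[1:0]) select which exception level
     * may execute from the page when stage 2 is in control. */
    #include <assert.h>
    #include <stdbool.h>

    static bool s2_exec_allowed(int xn, bool s1_is_el0)
    {
        switch (xn & 3) {
        case 0: return true;         /* executable at EL1 and EL0 */
        case 1: return s1_is_el0;    /* executable at EL0 only */
        case 2: return false;        /* not executable */
        case 3: return !s1_is_el0;   /* executable at EL1 only */
        }
        return false;
    }

    int main(void)
    {
        assert(s2_exec_allowed(0, true)  && s2_exec_allowed(0, false));
        assert(s2_exec_allowed(1, true)  && !s2_exec_allowed(1, false));
        assert(!s2_exec_allowed(2, true) && !s2_exec_allowed(2, false));
        assert(!s2_exec_allowed(3, true) && s2_exec_allowed(3, false));
        return 0;
    }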
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

MIDR_EL1 is a 64-bit system register with the top 32-bit being RES0.
Represent it in QEMU's ARMCPU struct with a uint64_t, not a
uint32_t.

This fixes an error when compiling with -Werror=conversion
because we were manipulating the register value using a
local uint64_t variable:

target/arm/cpu64.c: In function ‘aarch64_max_initfn’:
target/arm/cpu64.c:628:21: error: conversion from ‘uint64_t’ {aka ‘long unsigned int’} to ‘uint32_t’ {aka ‘unsigned int’} may change value [-Werror=conversion]
628 | cpu->midr = t;
| ^

and future-proofs us against a possible future architecture
change using some of the top 32 bits.

Suggested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20200428172634.29707-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 2 +-
target/arm/cpu.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
} isar;
- uint32_t midr;
+ uint64_t midr;
uint32_t revidr;
uint32_t reset_fpsid;
uint32_t ctr;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo arm_cpus[] = {
static Property arm_cpu_properties[] = {
DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
- DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
+ DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
mp_affinity, ARM64_AFFINITY_INVALID),
DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
--
2.20.1
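A reduced example of the narrowing-conversion diagnostic the patch above addresses (illustrative only; struct and value are made up):

    /* Sketch: assigning a 64-bit working value into a 32-bit field is what
     * -Wconversion flags; widening the field removes the conversion entirely. */
    #include <stdint.h>
    #include <stdio.h>

    struct cpu_old { uint32_t midr; };
    struct cpu_new { uint64_t midr; };

    int main(void)
    {
        uint64_t t = 0x410fd083;          /* built up with 64-bit field helpers */
        struct cpu_old o;
        struct cpu_new n;
        o.midr = (uint32_t)t;             /* needs an explicit narrowing cast */
        n.midr = t;                       /* no conversion, no warning */
        printf("%u %llu\n", o.midr, (unsigned long long)n.midr);
        return 0;
    }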
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Remove inclusion of arm_gicv3_common.h, this already gets
included via xlnx-versal.h.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-2-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/xlnx-versal.c | 1 -
1 file changed, 1 deletion(-)

diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@
#include "hw/arm/boot.h"
#include "kvm_arm.h"
#include "hw/misc/unimp.h"
-#include "hw/intc/arm_gicv3_common.h"
#include "hw/arm/xlnx-versal.h"
#include "hw/char/pl011.h"

--
2.20.1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>

Move misplaced comment.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20200427181649.26851-3-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/xlnx-versal.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)

obj = object_new(XLNX_VERSAL_ACPU_TYPE);
if (!obj) {
- /* Secondary CPUs start in PSCI powered-down state */
error_report("Unable to create apu.cpu[%d] of type %s",
i, XLNX_VERSAL_ACPU_TYPE);
exit(EXIT_FAILURE);
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
object_property_set_int(obj, s->cfg.psci_conduit,
"psci-conduit", &error_abort);
if (i) {
+ /* Secondary CPUs start in PSCI powered-down state */
object_property_set_bool(obj, true,
"start-powered-off", &error_abort);
}
}
--
2.20.1
New patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
2
3
Fix typo xlnx-ve -> xlnx-versal.
4
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-4-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
hw/arm/xlnx-versal-virt.c | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
15
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/xlnx-versal-virt.c
18
+++ b/hw/arm/xlnx-versal-virt.c
19
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
20
psci_conduit = QEMU_PSCI_CONDUIT_SMC;
21
}
22
23
- sysbus_init_child_obj(OBJECT(machine), "xlnx-ve", &s->soc,
24
+ sysbus_init_child_obj(OBJECT(machine), "xlnx-versal", &s->soc,
25
sizeof(s->soc), TYPE_XLNX_VERSAL);
26
object_property_set_link(OBJECT(&s->soc), OBJECT(machine->ram),
27
"ddr", &error_abort);
28
--
29
2.20.1
30
31
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
This has been enabled in the linux kernel since v3.11
3
Embed the UARTs into the SoC type.
4
(commit d50240a5f6cea, 2013-09-03,
5
"arm64: mm: permit use of tagged pointers at EL0").
6
4
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
Message-id: 20190204132126.3255-5-richard.henderson@linaro.org
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-5-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
---
12
target/arm/cpu.c | 6 ++++++
13
include/hw/arm/xlnx-versal.h | 3 ++-
13
1 file changed, 6 insertions(+)
14
hw/arm/xlnx-versal.c | 12 ++++++------
15
2 files changed, 8 insertions(+), 7 deletions(-)
14
16
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
17
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.c
19
--- a/include/hw/arm/xlnx-versal.h
18
+++ b/target/arm/cpu.c
20
+++ b/include/hw/arm/xlnx-versal.h
19
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
21
@@ -XXX,XX +XXX,XX @@
20
env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
22
#include "hw/sysbus.h"
21
env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
23
#include "hw/arm/boot.h"
22
env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
24
#include "hw/intc/arm_gicv3.h"
23
+ /*
25
+#include "hw/char/pl011.h"
24
+ * Enable TBI0 and TBI1. While the real kernel only enables TBI0,
26
25
+ * turning on both here will produce smaller code and otherwise
27
#define TYPE_XLNX_VERSAL "xlnx-versal"
26
+ * make no difference to the user-level emulation.
28
#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
27
+ */
29
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
28
+ env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
30
MemoryRegion mr_ocm;
29
#else
31
30
/* Reset into the highest available EL */
32
struct {
31
if (arm_feature(env, ARM_FEATURE_EL3)) {
33
- SysBusDevice *uart[XLNX_VERSAL_NR_UARTS];
34
+ PL011State uart[XLNX_VERSAL_NR_UARTS];
35
SysBusDevice *gem[XLNX_VERSAL_NR_GEMS];
36
SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
37
} iou;
38
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/hw/arm/xlnx-versal.c
41
+++ b/hw/arm/xlnx-versal.c
42
@@ -XXX,XX +XXX,XX @@
43
#include "kvm_arm.h"
44
#include "hw/misc/unimp.h"
45
#include "hw/arm/xlnx-versal.h"
46
-#include "hw/char/pl011.h"
47
48
#define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72")
49
#define GEM_REVISION 0x40070106
50
@@ -XXX,XX +XXX,XX @@ static void versal_create_uarts(Versal *s, qemu_irq *pic)
51
DeviceState *dev;
52
MemoryRegion *mr;
53
54
- dev = qdev_create(NULL, TYPE_PL011);
55
- s->lpd.iou.uart[i] = SYS_BUS_DEVICE(dev);
56
+ sysbus_init_child_obj(OBJECT(s), name,
57
+ &s->lpd.iou.uart[i], sizeof(s->lpd.iou.uart[i]),
58
+ TYPE_PL011);
59
+ dev = DEVICE(&s->lpd.iou.uart[i]);
60
qdev_prop_set_chr(dev, "chardev", serial_hd(i));
61
- object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
62
qdev_init_nofail(dev);
63
64
- mr = sysbus_mmio_get_region(s->lpd.iou.uart[i], 0);
65
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
66
memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
67
68
- sysbus_connect_irq(s->lpd.iou.uart[i], 0, pic[irqs[i]]);
69
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
70
g_free(name);
71
}
72
}
32
--
73
--
33
2.20.1
74
2.20.1
34
75
35
76
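
For reference on the "3ULL << 37" value above: TBI0 is TCR_EL1 bit 37 and TBI1 is bit 38, and with both set the top byte (bits [63:56]) of a virtual address is ignored for translation, so user code can keep a software tag there. A small standalone illustration of the effect, plain C rather than QEMU code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tcr_tbi = 3ULL << 37;              /* TBI0 (bit 37) | TBI1 (bit 38) */
        uint64_t addr    = 0x0000aaaadeadbeefULL;   /* ordinary user-space address */
        uint64_t tagged  = addr | (0x5aULL << 56);  /* software tag in bits [63:56] */

        /* With TBI enabled, translation only considers bits [55:0]. */
        uint64_t used = tagged & ((1ULL << 56) - 1);

        printf("TCR TBI bits:   %#llx\n", (unsigned long long)tcr_tbi);
        printf("tagged pointer: %#018llx\n", (unsigned long long)tagged);
        printf("address used:   %#018llx\n", (unsigned long long)used);
        return 0;
    }
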
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Embed the GEMs into the SoC type.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20190201195404.30486-3-richard.henderson@linaro.org
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-6-edgar.iglesias@gmail.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
12
---
8
tests/tcg/aarch64/Makefile.target | 6 +++++-
13
include/hw/arm/xlnx-versal.h | 3 ++-
9
tests/tcg/aarch64/pauth-1.c | 23 +++++++++++++++++++++++
14
hw/arm/xlnx-versal.c | 15 ++++++++-------
10
2 files changed, 28 insertions(+), 1 deletion(-)
15
2 files changed, 10 insertions(+), 8 deletions(-)
11
create mode 100644 tests/tcg/aarch64/pauth-1.c
12
16
13
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
17
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/tests/tcg/aarch64/Makefile.target
19
--- a/include/hw/arm/xlnx-versal.h
16
+++ b/tests/tcg/aarch64/Makefile.target
20
+++ b/include/hw/arm/xlnx-versal.h
17
@@ -XXX,XX +XXX,XX @@ VPATH         += $(AARCH64_SRC)
18
# we don't build any of the ARM tests
19
AARCH64_TESTS=$(filter-out $(ARM_TESTS), $(TESTS))
20
AARCH64_TESTS+=fcvt
21
-TESTS:=$(AARCH64_TESTS)
22
23
fcvt: LDFLAGS+=-lm
24
25
run-fcvt: fcvt
26
    $(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
27
    $(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)
28
+
29
+AARCH64_TESTS += pauth-1
30
+run-pauth-%: QEMU += -cpu max
31
+
32
+TESTS:=$(AARCH64_TESTS)
33
diff --git a/tests/tcg/aarch64/pauth-1.c b/tests/tcg/aarch64/pauth-1.c
34
new file mode 100644
35
index XXXXXXX..XXXXXXX
36
--- /dev/null
37
+++ b/tests/tcg/aarch64/pauth-1.c
38
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@
39
+#include <assert.h>
22
#include "hw/arm/boot.h"
40
+#include <sys/prctl.h>
23
#include "hw/intc/arm_gicv3.h"
41
+
24
#include "hw/char/pl011.h"
42
+asm(".arch armv8.4-a");
25
+#include "hw/net/cadence_gem.h"
43
+
26
44
+#ifndef PR_PAC_RESET_KEYS
27
#define TYPE_XLNX_VERSAL "xlnx-versal"
45
+#define PR_PAC_RESET_KEYS 54
28
#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
46
+#define PR_PAC_APDAKEY (1 << 2)
29
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
47
+#endif
30
48
+
31
struct {
49
+int main()
32
PL011State uart[XLNX_VERSAL_NR_UARTS];
50
+{
33
- SysBusDevice *gem[XLNX_VERSAL_NR_GEMS];
51
+ int x;
34
+ CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
52
+ void *p0 = &x, *p1, *p2;
35
SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
53
+
36
} iou;
54
+ asm volatile("pacdza %0" : "=r"(p1) : "0"(p0));
37
} lpd;
55
+ prctl(PR_PAC_RESET_KEYS, PR_PAC_APDAKEY, 0, 0, 0);
38
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
56
+ asm volatile("pacdza %0" : "=r"(p2) : "0"(p0));
39
index XXXXXXX..XXXXXXX 100644
57
+
40
--- a/hw/arm/xlnx-versal.c
58
+ assert(p1 != p0);
41
+++ b/hw/arm/xlnx-versal.c
59
+ assert(p1 != p2);
42
@@ -XXX,XX +XXX,XX @@ static void versal_create_gems(Versal *s, qemu_irq *pic)
60
+ return 0;
43
DeviceState *dev;
61
+}
44
MemoryRegion *mr;
45
46
- dev = qdev_create(NULL, "cadence_gem");
47
- s->lpd.iou.gem[i] = SYS_BUS_DEVICE(dev);
48
- object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
49
+ sysbus_init_child_obj(OBJECT(s), name,
50
+ &s->lpd.iou.gem[i], sizeof(s->lpd.iou.gem[i]),
51
+ TYPE_CADENCE_GEM);
52
+ dev = DEVICE(&s->lpd.iou.gem[i]);
53
if (nd->used) {
54
qemu_check_nic_model(nd, "cadence_gem");
55
qdev_set_nic_properties(dev, nd);
56
}
57
- object_property_set_int(OBJECT(s->lpd.iou.gem[i]),
58
+ object_property_set_int(OBJECT(dev),
59
2, "num-priority-queues",
60
&error_abort);
61
- object_property_set_link(OBJECT(s->lpd.iou.gem[i]),
62
+ object_property_set_link(OBJECT(dev),
63
OBJECT(&s->mr_ps), "dma",
64
&error_abort);
65
qdev_init_nofail(dev);
66
67
- mr = sysbus_mmio_get_region(s->lpd.iou.gem[i], 0);
68
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
69
memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
70
71
- sysbus_connect_irq(s->lpd.iou.gem[i], 0, pic[irqs[i]]);
72
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
73
g_free(name);
74
}
75
}
62
--
76
--
63
2.20.1
77
2.20.1
64
78
65
79
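
The UART and GEM conversions above (and the ADMA one that follows) all share the same shape: the child device's state struct is embedded in the SoC struct and initialized in place with sysbus_init_child_obj(), instead of being allocated separately via qdev_create() and attached as a child. A condensed sketch of the pattern is below; the MySoC type and MY_SOC macro are made up, and the fragment is only meaningful inside a QEMU tree of this vintage.

    #include "qemu/osdep.h"
    #include "hw/sysbus.h"
    #include "hw/char/pl011.h"

    typedef struct MySoC {
        SysBusDevice parent_obj;
        PL011State uart;                  /* embedded, not a pointer */
    } MySoC;

    #define MY_SOC(obj) OBJECT_CHECK(MySoC, (obj), "my-soc")

    static void my_soc_init(Object *obj)
    {
        MySoC *s = MY_SOC(obj);

        /* Initialize the embedded child in place; no separate allocation. */
        sysbus_init_child_obj(obj, "uart", &s->uart, sizeof(s->uart),
                              TYPE_PL011);
    }

The realize step then works on DEVICE(&s->uart) and SYS_BUS_DEVICE(&s->uart) to set properties, map the MMIO region and wire the IRQ, exactly as the patches above do.
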
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
These bits can be used to cache target-specific data in cputlb
3
Embed the ADMAs into the SoC type.
4
read from the page tables.
5
4
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Message-id: 20190128223118.5255-5-richard.henderson@linaro.org
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-7-edgar.iglesias@gmail.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
include/exec/memattrs.h | 10 ++++++++++
13
include/hw/arm/xlnx-versal.h | 3 ++-
12
1 file changed, 10 insertions(+)
14
hw/arm/xlnx-versal.c | 14 +++++++-------
15
2 files changed, 9 insertions(+), 8 deletions(-)
13
16
14
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
17
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/memattrs.h
19
--- a/include/hw/arm/xlnx-versal.h
17
+++ b/include/exec/memattrs.h
20
+++ b/include/hw/arm/xlnx-versal.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs {
21
@@ -XXX,XX +XXX,XX @@
19
unsigned int user:1;
22
#include "hw/arm/boot.h"
20
/* Requester ID (for MSI for example) */
23
#include "hw/intc/arm_gicv3.h"
21
unsigned int requester_id:16;
24
#include "hw/char/pl011.h"
22
+ /*
25
+#include "hw/dma/xlnx-zdma.h"
23
+ * The following are target-specific page-table bits. These are not
26
#include "hw/net/cadence_gem.h"
24
+ * related to actual memory transactions at all. However, this structure
27
25
+ * is part of the tlb_fill interface, cached in the cputlb structure,
28
#define TYPE_XLNX_VERSAL "xlnx-versal"
26
+ * and has unused bits. These fields will be read by target-specific
29
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
27
+ * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
30
struct {
28
+ */
31
PL011State uart[XLNX_VERSAL_NR_UARTS];
29
+ unsigned int target_tlb_bit0 : 1;
32
CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
30
+ unsigned int target_tlb_bit1 : 1;
33
- SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
31
+ unsigned int target_tlb_bit2 : 1;
34
+ XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
32
} MemTxAttrs;
35
} iou;
33
36
} lpd;
34
/* Bus masters which don't specify any attributes will get this,
37
38
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/hw/arm/xlnx-versal.c
41
+++ b/hw/arm/xlnx-versal.c
42
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
43
DeviceState *dev;
44
MemoryRegion *mr;
45
46
- dev = qdev_create(NULL, "xlnx.zdma");
47
- s->lpd.iou.adma[i] = SYS_BUS_DEVICE(dev);
48
- object_property_set_int(OBJECT(s->lpd.iou.adma[i]), 128, "bus-width",
49
- &error_abort);
50
- object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
51
+ sysbus_init_child_obj(OBJECT(s), name,
52
+ &s->lpd.iou.adma[i], sizeof(s->lpd.iou.adma[i]),
53
+ TYPE_XLNX_ZDMA);
54
+ dev = DEVICE(&s->lpd.iou.adma[i]);
55
+ object_property_set_int(OBJECT(dev), 128, "bus-width", &error_abort);
56
qdev_init_nofail(dev);
57
58
- mr = sysbus_mmio_get_region(s->lpd.iou.adma[i], 0);
59
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
60
memory_region_add_subregion(&s->mr_ps,
61
MM_ADMA_CH0 + i * MM_ADMA_CH0_SIZE, mr);
62
63
- sysbus_connect_irq(s->lpd.iou.adma[i], 0, pic[VERSAL_ADMA_IRQ_0 + i]);
64
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_ADMA_IRQ_0 + i]);
65
g_free(name);
66
}
67
}
35
--
68
--
36
2.20.1
69
2.20.1
37
70
38
71
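
The point of the new MemTxAttrs fields above is that the attribute word already has spare bit-field slots, so caching a few target-defined page-table bits there costs no extra memory. A standalone toy illustration of that idea follows; the struct below is not QEMU's MemTxAttrs, it just copies the fields visible in the patch.

    #include <stdio.h>

    /* Illustration only: spare bit-field slots carry target flags
     * without growing the per-page attribute word. */
    typedef struct {
        unsigned int user : 1;
        unsigned int requester_id : 16;
        unsigned int target_tlb_bit0 : 1;
        unsigned int target_tlb_bit1 : 1;
        unsigned int target_tlb_bit2 : 1;
    } PageAttrs;

    int main(void)
    {
        PageAttrs a = { .target_tlb_bit0 = 1 };

        printf("sizeof(PageAttrs) = %zu bytes, bit0 = %u\n",
               sizeof(a), (unsigned)a.target_tlb_bit0);
        return 0;
    }
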
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
This will allow TBI to be used in user-only mode, as well as
3
Embed the APUs into the SoC type.
4
avoid ping-ponging the softmmu TLB when TBI is in use. It
5
will also enable other armv8 extensions.
6
4
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
Message-id: 20190204132126.3255-3-richard.henderson@linaro.org
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-8-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
---
12
target/arm/translate-a64.c | 217 ++++++++++++++++++++-----------------
13
include/hw/arm/xlnx-versal.h | 2 +-
13
1 file changed, 116 insertions(+), 101 deletions(-)
14
hw/arm/xlnx-versal-virt.c | 4 ++--
15
hw/arm/xlnx-versal.c | 19 +++++--------------
16
3 files changed, 8 insertions(+), 17 deletions(-)
14
17
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
18
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
20
--- a/include/hw/arm/xlnx-versal.h
18
+++ b/target/arm/translate-a64.c
21
+++ b/include/hw/arm/xlnx-versal.h
19
@@ -XXX,XX +XXX,XX @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
22
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
20
gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
23
struct {
21
}
24
struct {
22
25
MemoryRegion mr;
23
+/*
26
- ARMCPU *cpu[XLNX_VERSAL_NR_ACPUS];
24
+ * Return a "clean" address for ADDR according to TBID.
27
+ ARMCPU cpu[XLNX_VERSAL_NR_ACPUS];
25
+ * This is always a fresh temporary, as we need to be able to
28
GICv3State gic;
26
+ * increment this independently of a dirty write-back address.
29
} apu;
27
+ */
30
} fpd;
28
+static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
31
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
29
+{
32
index XXXXXXX..XXXXXXX 100644
30
+ TCGv_i64 clean = new_tmp_a64(s);
33
--- a/hw/arm/xlnx-versal-virt.c
31
+ gen_top_byte_ignore(s, clean, addr, s->tbid);
34
+++ b/hw/arm/xlnx-versal-virt.c
32
+ return clean;
35
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
33
+}
36
s->binfo.get_dtb = versal_virt_get_dtb;
34
+
37
s->binfo.modify_dtb = versal_virt_modify_dtb;
35
typedef struct DisasCompare64 {
38
if (machine->kernel_filename) {
36
TCGCond cond;
39
- arm_load_kernel(s->soc.fpd.apu.cpu[0], machine, &s->binfo);
37
TCGv_i64 value;
40
+ arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
38
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
39
TCGv_i64 tcg_rs = cpu_reg(s, rs);
40
TCGv_i64 tcg_rt = cpu_reg(s, rt);
41
int memidx = get_mem_index(s);
42
- TCGv_i64 addr = cpu_reg_sp(s, rn);
43
+ TCGv_i64 clean_addr;
44
45
if (rn == 31) {
46
gen_check_sp_alignment(s);
47
}
48
- tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
49
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
50
+ tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
51
size | MO_ALIGN | s->be_data);
52
}
53
54
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
55
TCGv_i64 s2 = cpu_reg(s, rs + 1);
56
TCGv_i64 t1 = cpu_reg(s, rt);
57
TCGv_i64 t2 = cpu_reg(s, rt + 1);
58
- TCGv_i64 addr = cpu_reg_sp(s, rn);
59
+ TCGv_i64 clean_addr;
60
int memidx = get_mem_index(s);
61
62
if (rn == 31) {
63
gen_check_sp_alignment(s);
64
}
65
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
66
67
if (size == 2) {
68
TCGv_i64 cmp = tcg_temp_new_i64();
69
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
70
tcg_gen_concat32_i64(cmp, s2, s1);
71
}
72
73
- tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
74
+ tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
75
MO_64 | MO_ALIGN | s->be_data);
76
tcg_temp_free_i64(val);
77
78
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
79
if (HAVE_CMPXCHG128) {
80
TCGv_i32 tcg_rs = tcg_const_i32(rs);
81
if (s->be_data == MO_LE) {
82
- gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
83
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs,
84
+ clean_addr, t1, t2);
85
} else {
86
- gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
87
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs,
88
+ clean_addr, t1, t2);
89
}
90
tcg_temp_free_i32(tcg_rs);
91
} else {
92
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
93
TCGv_i64 zero = tcg_const_i64(0);
94
95
/* Load the two words, in memory order. */
96
- tcg_gen_qemu_ld_i64(d1, addr, memidx,
97
+ tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
98
MO_64 | MO_ALIGN_16 | s->be_data);
99
- tcg_gen_addi_i64(a2, addr, 8);
100
- tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
101
+ tcg_gen_addi_i64(a2, clean_addr, 8);
102
+ tcg_gen_qemu_ld_i64(d2, clean_addr, memidx, MO_64 | s->be_data);
103
104
/* Compare the two words, also in memory order. */
105
tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
106
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
107
/* If compare equal, write back new data, else write back old data. */
108
tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
109
tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
110
- tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
111
+ tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
112
tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
113
tcg_temp_free_i64(a2);
114
tcg_temp_free_i64(c1);
115
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
116
int is_lasr = extract32(insn, 15, 1);
117
int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
118
int size = extract32(insn, 30, 2);
119
- TCGv_i64 tcg_addr;
120
+ TCGv_i64 clean_addr;
121
122
switch (o2_L_o1_o0) {
123
case 0x0: /* STXR */
124
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
125
if (is_lasr) {
126
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
127
}
128
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
129
- gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
130
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
131
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
132
return;
133
134
case 0x4: /* LDXR */
135
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
136
if (rn == 31) {
137
gen_check_sp_alignment(s);
138
}
139
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
140
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
141
s->is_ldex = true;
142
- gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
143
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
144
if (is_lasr) {
145
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
146
}
147
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
148
gen_check_sp_alignment(s);
149
}
150
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
151
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
152
- do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
153
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
154
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
155
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
156
return;
157
158
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
159
if (rn == 31) {
160
gen_check_sp_alignment(s);
161
}
162
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
163
- do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
164
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
165
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
166
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
167
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
168
return;
169
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
170
if (is_lasr) {
171
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
172
}
173
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
174
- gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
175
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
176
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
177
return;
178
}
179
if (rt2 == 31
180
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
181
if (rn == 31) {
182
gen_check_sp_alignment(s);
183
}
184
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
185
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
186
s->is_ldex = true;
187
- gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
188
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
189
if (is_lasr) {
190
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
191
}
192
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
193
int opc = extract32(insn, 30, 2);
194
bool is_signed = false;
195
int size = 2;
196
- TCGv_i64 tcg_rt, tcg_addr;
197
+ TCGv_i64 tcg_rt, clean_addr;
198
199
if (is_vector) {
200
if (opc == 3) {
201
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
202
203
tcg_rt = cpu_reg(s, rt);
204
205
- tcg_addr = tcg_const_i64((s->pc - 4) + imm);
206
+ clean_addr = tcg_const_i64((s->pc - 4) + imm);
207
if (is_vector) {
208
- do_fp_ld(s, rt, tcg_addr, size);
209
+ do_fp_ld(s, rt, clean_addr, size);
210
} else {
41
} else {
211
/* Only unsigned 32bit loads target 32bit registers. */
42
- AddressSpace *as = arm_boot_address_space(s->soc.fpd.apu.cpu[0],
212
bool iss_sf = opc != 0;
43
+ AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0],
213
44
&s->binfo);
214
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
45
/* Some boot-loaders (e.g u-boot) don't like blobs at address 0 (NULL).
215
+ do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
46
* Offset things by 4K. */
216
true, rt, iss_sf, false);
47
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
217
}
48
index XXXXXXX..XXXXXXX 100644
218
- tcg_temp_free_i64(tcg_addr);
49
--- a/hw/arm/xlnx-versal.c
219
+ tcg_temp_free_i64(clean_addr);
50
+++ b/hw/arm/xlnx-versal.c
220
}
51
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
221
52
222
/*
53
for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) {
223
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
54
Object *obj;
224
bool postindex = false;
55
- char *name;
225
bool wback = false;
226
227
- TCGv_i64 tcg_addr; /* calculated address */
228
+ TCGv_i64 clean_addr, dirty_addr;
229
+
230
int size;
231
232
if (opc == 3) {
233
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
234
gen_check_sp_alignment(s);
235
}
236
237
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
238
-
56
-
239
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
57
- obj = object_new(XLNX_VERSAL_ACPU_TYPE);
240
if (!postindex) {
58
- if (!obj) {
241
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
59
- error_report("Unable to create apu.cpu[%d] of type %s",
242
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
60
- i, XLNX_VERSAL_ACPU_TYPE);
243
}
61
- exit(EXIT_FAILURE);
244
+ clean_addr = clean_data_tbi(s, dirty_addr);
62
- }
245
63
-
246
if (is_vector) {
64
- name = g_strdup_printf("apu-cpu[%d]", i);
247
if (is_load) {
65
- object_property_add_child(OBJECT(s), name, obj, &error_fatal);
248
- do_fp_ld(s, rt, tcg_addr, size);
66
- g_free(name);
249
+ do_fp_ld(s, rt, clean_addr, size);
67
250
} else {
68
+ object_initialize_child(OBJECT(s), "apu-cpu[*]",
251
- do_fp_st(s, rt, tcg_addr, size);
69
+ &s->fpd.apu.cpu[i], sizeof(s->fpd.apu.cpu[i]),
252
+ do_fp_st(s, rt, clean_addr, size);
70
+ XLNX_VERSAL_ACPU_TYPE, &error_abort, NULL);
253
}
71
+ obj = OBJECT(&s->fpd.apu.cpu[i]);
254
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
72
object_property_set_int(obj, s->cfg.psci_conduit,
255
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
73
"psci-conduit", &error_abort);
256
if (is_load) {
74
if (i) {
257
- do_fp_ld(s, rt2, tcg_addr, size);
75
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
258
+ do_fp_ld(s, rt2, clean_addr, size);
76
object_property_set_link(obj, OBJECT(&s->fpd.apu.mr), "memory",
259
} else {
77
&error_abort);
260
- do_fp_st(s, rt2, tcg_addr, size);
78
object_property_set_bool(obj, true, "realized", &error_fatal);
261
+ do_fp_st(s, rt2, clean_addr, size);
79
- s->fpd.apu.cpu[i] = ARM_CPU(obj);
262
}
263
} else {
264
TCGv_i64 tcg_rt = cpu_reg(s, rt);
265
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
266
/* Do not modify tcg_rt before recognizing any exception
267
* from the second load.
268
*/
269
- do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
270
+ do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
271
false, 0, false, false);
272
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
273
- do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
274
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
275
+ do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
276
false, 0, false, false);
277
278
tcg_gen_mov_i64(tcg_rt, tmp);
279
tcg_temp_free_i64(tmp);
280
} else {
281
- do_gpr_st(s, tcg_rt, tcg_addr, size,
282
+ do_gpr_st(s, tcg_rt, clean_addr, size,
283
false, 0, false, false);
284
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
285
- do_gpr_st(s, tcg_rt2, tcg_addr, size,
286
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
287
+ do_gpr_st(s, tcg_rt2, clean_addr, size,
288
false, 0, false, false);
289
}
290
}
291
292
if (wback) {
293
if (postindex) {
294
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
295
- } else {
296
- tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
297
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
298
}
299
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
300
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
301
}
80
}
302
}
81
}
303
82
304
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
83
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_gic(Versal *s, qemu_irq *pic)
305
bool post_index;
306
bool writeback;
307
308
- TCGv_i64 tcg_addr;
309
+ TCGv_i64 clean_addr, dirty_addr;
310
311
if (is_vector) {
312
size |= (opc & 2) << 1;
313
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
314
if (rn == 31) {
315
gen_check_sp_alignment(s);
316
}
84
}
317
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
85
318
86
for (i = 0; i < nr_apu_cpus; i++) {
319
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
87
- DeviceState *cpudev = DEVICE(s->fpd.apu.cpu[i]);
320
if (!post_index) {
88
+ DeviceState *cpudev = DEVICE(&s->fpd.apu.cpu[i]);
321
- tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
89
int ppibase = XLNX_VERSAL_NR_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
322
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
90
qemu_irq maint_irq;
323
}
91
int ti;
324
+ clean_addr = clean_data_tbi(s, dirty_addr);
325
326
if (is_vector) {
327
if (is_store) {
328
- do_fp_st(s, rt, tcg_addr, size);
329
+ do_fp_st(s, rt, clean_addr, size);
330
} else {
331
- do_fp_ld(s, rt, tcg_addr, size);
332
+ do_fp_ld(s, rt, clean_addr, size);
333
}
334
} else {
335
TCGv_i64 tcg_rt = cpu_reg(s, rt);
336
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
337
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
338
339
if (is_store) {
340
- do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
341
+ do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
342
iss_valid, rt, iss_sf, false);
343
} else {
344
- do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
345
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
346
is_signed, is_extended, memidx,
347
iss_valid, rt, iss_sf, false);
348
}
349
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
350
if (writeback) {
351
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
352
if (post_index) {
353
- tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
354
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
355
}
356
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
357
+ tcg_gen_mov_i64(tcg_rn, dirty_addr);
358
}
359
}
360
361
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
362
bool is_store = false;
363
bool is_extended = false;
364
365
- TCGv_i64 tcg_rm;
366
- TCGv_i64 tcg_addr;
367
+ TCGv_i64 tcg_rm, clean_addr, dirty_addr;
368
369
if (extract32(opt, 1, 1) == 0) {
370
unallocated_encoding(s);
371
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
372
if (rn == 31) {
373
gen_check_sp_alignment(s);
374
}
375
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
376
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
377
378
tcg_rm = read_cpu_reg(s, rm, 1);
379
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
380
381
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
382
+ tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
383
+ clean_addr = clean_data_tbi(s, dirty_addr);
384
385
if (is_vector) {
386
if (is_store) {
387
- do_fp_st(s, rt, tcg_addr, size);
388
+ do_fp_st(s, rt, clean_addr, size);
389
} else {
390
- do_fp_ld(s, rt, tcg_addr, size);
391
+ do_fp_ld(s, rt, clean_addr, size);
392
}
393
} else {
394
TCGv_i64 tcg_rt = cpu_reg(s, rt);
395
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
396
if (is_store) {
397
- do_gpr_st(s, tcg_rt, tcg_addr, size,
398
+ do_gpr_st(s, tcg_rt, clean_addr, size,
399
true, rt, iss_sf, false);
400
} else {
401
- do_gpr_ld(s, tcg_rt, tcg_addr, size,
402
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
403
is_signed, is_extended,
404
true, rt, iss_sf, false);
405
}
406
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
407
unsigned int imm12 = extract32(insn, 10, 12);
408
unsigned int offset;
409
410
- TCGv_i64 tcg_addr;
411
+ TCGv_i64 clean_addr, dirty_addr;
412
413
bool is_store;
414
bool is_signed = false;
415
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
416
if (rn == 31) {
417
gen_check_sp_alignment(s);
418
}
419
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
420
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
421
offset = imm12 << size;
422
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
423
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
424
+ clean_addr = clean_data_tbi(s, dirty_addr);
425
426
if (is_vector) {
427
if (is_store) {
428
- do_fp_st(s, rt, tcg_addr, size);
429
+ do_fp_st(s, rt, clean_addr, size);
430
} else {
431
- do_fp_ld(s, rt, tcg_addr, size);
432
+ do_fp_ld(s, rt, clean_addr, size);
433
}
434
} else {
435
TCGv_i64 tcg_rt = cpu_reg(s, rt);
436
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
437
if (is_store) {
438
- do_gpr_st(s, tcg_rt, tcg_addr, size,
439
+ do_gpr_st(s, tcg_rt, clean_addr, size,
440
true, rt, iss_sf, false);
441
} else {
442
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
443
+ do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
444
true, rt, iss_sf, false);
445
}
446
}
447
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
448
int rs = extract32(insn, 16, 5);
449
int rn = extract32(insn, 5, 5);
450
int o3_opc = extract32(insn, 12, 4);
451
- TCGv_i64 tcg_rn, tcg_rs;
452
+ TCGv_i64 tcg_rs, clean_addr;
453
AtomicThreeOpFn *fn;
454
455
if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
456
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
457
if (rn == 31) {
458
gen_check_sp_alignment(s);
459
}
460
- tcg_rn = cpu_reg_sp(s, rn);
461
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
462
tcg_rs = read_cpu_reg(s, rs, true);
463
464
if (o3_opc == 1) { /* LDCLR */
465
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
466
/* The tcg atomic primitives are all full barriers. Therefore we
467
* can ignore the Acquire and Release bits of this instruction.
468
*/
469
- fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
470
+ fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
471
s->be_data | size | MO_ALIGN);
472
}
473
474
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
475
bool is_wback = extract32(insn, 11, 1);
476
bool use_key_a = !extract32(insn, 23, 1);
477
int offset;
478
- TCGv_i64 tcg_addr, tcg_rt;
479
+ TCGv_i64 clean_addr, dirty_addr, tcg_rt;
480
481
if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
482
unallocated_encoding(s);
483
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
484
if (rn == 31) {
485
gen_check_sp_alignment(s);
486
}
487
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
488
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
489
490
if (s->pauth_active) {
491
if (use_key_a) {
492
- gen_helper_autda(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
493
+ gen_helper_autda(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
494
} else {
495
- gen_helper_autdb(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
496
+ gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
497
}
498
}
499
500
/* Form the 10-bit signed, scaled offset. */
501
offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
502
offset = sextract32(offset << size, 0, 10 + size);
503
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
504
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
505
+
506
+ /* Note that "clean" and "dirty" here refer to TBI not PAC. */
507
+ clean_addr = clean_data_tbi(s, dirty_addr);
508
509
tcg_rt = cpu_reg(s, rt);
510
-
511
- do_gpr_ld(s, tcg_rt, tcg_addr, size, /* is_signed */ false,
512
+ do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
513
/* extend */ false, /* iss_valid */ !is_wback,
514
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
515
516
if (is_wback) {
517
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
518
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
519
}
520
}
521
522
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
523
bool is_store = !extract32(insn, 22, 1);
524
bool is_postidx = extract32(insn, 23, 1);
525
bool is_q = extract32(insn, 30, 1);
526
- TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
527
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
528
TCGMemOp endian = s->be_data;
529
530
int ebytes; /* bytes per element */
531
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
532
elements = (is_q ? 16 : 8) / ebytes;
533
534
tcg_rn = cpu_reg_sp(s, rn);
535
- tcg_addr = tcg_temp_new_i64();
536
- tcg_gen_mov_i64(tcg_addr, tcg_rn);
537
+ clean_addr = clean_data_tbi(s, tcg_rn);
538
tcg_ebytes = tcg_const_i64(ebytes);
539
540
for (r = 0; r < rpt; r++) {
541
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
542
for (xs = 0; xs < selem; xs++) {
543
int tt = (rt + r + xs) % 32;
544
if (is_store) {
545
- do_vec_st(s, tt, e, tcg_addr, size, endian);
546
+ do_vec_st(s, tt, e, clean_addr, size, endian);
547
} else {
548
- do_vec_ld(s, tt, e, tcg_addr, size, endian);
549
+ do_vec_ld(s, tt, e, clean_addr, size, endian);
550
}
551
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
552
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
553
}
554
}
555
}
556
+ tcg_temp_free_i64(tcg_ebytes);
557
558
if (!is_store) {
559
/* For non-quad operations, setting a slice of the low
560
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
561
562
if (is_postidx) {
563
if (rm == 31) {
564
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
565
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
566
} else {
567
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
568
}
569
}
570
- tcg_temp_free_i64(tcg_ebytes);
571
- tcg_temp_free_i64(tcg_addr);
572
}
573
574
/* AdvSIMD load/store single structure
575
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
576
bool replicate = false;
577
int index = is_q << 3 | S << 2 | size;
578
int ebytes, xs;
579
- TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
580
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
581
582
if (extract32(insn, 31, 1)) {
583
unallocated_encoding(s);
584
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
585
}
586
587
tcg_rn = cpu_reg_sp(s, rn);
588
- tcg_addr = tcg_temp_new_i64();
589
- tcg_gen_mov_i64(tcg_addr, tcg_rn);
590
+ clean_addr = clean_data_tbi(s, tcg_rn);
591
tcg_ebytes = tcg_const_i64(ebytes);
592
593
for (xs = 0; xs < selem; xs++) {
594
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
595
/* Load and replicate to all elements */
596
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
597
598
- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
599
+ tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
600
get_mem_index(s), s->be_data + scale);
601
tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
602
(is_q + 1) * 8, vec_full_reg_size(s),
603
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
604
} else {
605
/* Load/store one element per register */
606
if (is_load) {
607
- do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
608
+ do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
609
} else {
610
- do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
611
+ do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
612
}
613
}
614
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
615
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
616
rt = (rt + 1) % 32;
617
}
618
+ tcg_temp_free_i64(tcg_ebytes);
619
620
if (is_postidx) {
621
if (rm == 31) {
622
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
623
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
624
} else {
625
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
626
}
627
}
628
- tcg_temp_free_i64(tcg_ebytes);
629
- tcg_temp_free_i64(tcg_addr);
630
}
631
632
/* Loads and stores */
633
--
92
--
634
2.20.1
93
2.20.1
635
94
636
95
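
On the "clean" versus "dirty" naming introduced above: the dirty value, which may still carry a tag in bits [63:56], is what register writeback keeps, while clean_data_tbi() strips the top byte according to TBID before the value is used as a memory address. A standalone sketch of that stripping in plain C (not the TCG code), for the case where top-byte-ignore applies to both halves of the address space:

    #include <stdint.h>
    #include <stdio.h>

    /* Strip the top byte: bit 55 selects whether the ignored byte is
     * treated as 0x00 (low half) or 0xff (high half of the VA space). */
    static uint64_t clean_tbi(uint64_t dirty)
    {
        if (dirty & (1ULL << 55)) {
            return dirty | 0xff00000000000000ULL;
        }
        return dirty & 0x00ffffffffffffffULL;
    }

    int main(void)
    {
        uint64_t dirty = (0x5aULL << 56) | 0x0000aaaadeadbeefULL;

        printf("dirty address: %#018llx\n", (unsigned long long)dirty);
        printf("clean address: %#018llx\n", (unsigned long long)clean_tbi(dirty));
        return 0;
    }
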
1
Enables, but does not turn on, TBI for CONFIG_USER_ONLY.
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Add support for SD.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20190204132126.3255-4-richard.henderson@linaro.org
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
[PMM: adjusted #ifdeffery to placate clang, which otherwise complains
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
about static functions that are unused in the CONFIG_USER_ONLY build]
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-9-edgar.iglesias@gmail.com
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
11
---
10
target/arm/internals.h | 21 --------------------
12
include/hw/arm/xlnx-versal.h | 12 ++++++++++++
11
target/arm/helper.c | 45 ++++++++++++++++++++++--------------------
13
hw/arm/xlnx-versal.c | 31 +++++++++++++++++++++++++++++++
12
2 files changed, 24 insertions(+), 42 deletions(-)
14
2 files changed, 43 insertions(+)
13
15
14
diff --git a/target/arm/internals.h b/target/arm/internals.h
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/internals.h
18
--- a/include/hw/arm/xlnx-versal.h
17
+++ b/target/arm/internals.h
19
+++ b/include/hw/arm/xlnx-versal.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
20
@@ -XXX,XX +XXX,XX @@
19
bool using64k : 1;
21
20
} ARMVAParameters;
22
#include "hw/sysbus.h"
21
23
#include "hw/arm/boot.h"
22
-#ifdef CONFIG_USER_ONLY
24
+#include "hw/sd/sdhci.h"
23
-static inline ARMVAParameters aa64_va_parameters_both(CPUARMState *env,
25
#include "hw/intc/arm_gicv3.h"
24
- uint64_t va,
26
#include "hw/char/pl011.h"
25
- ARMMMUIdx mmu_idx)
27
#include "hw/dma/xlnx-zdma.h"
26
-{
28
@@ -XXX,XX +XXX,XX @@
27
- return (ARMVAParameters) {
29
#define XLNX_VERSAL_NR_UARTS 2
28
- /* 48-bit address space */
30
#define XLNX_VERSAL_NR_GEMS 2
29
- .tsz = 16,
31
#define XLNX_VERSAL_NR_ADMAS 8
30
- /* We can't handle tagged addresses properly in user-only mode */
32
+#define XLNX_VERSAL_NR_SDS 2
31
- .tbi = false,
33
#define XLNX_VERSAL_NR_IRQS 192
32
- };
34
33
-}
35
typedef struct Versal {
34
-
36
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
35
-static inline ARMVAParameters aa64_va_parameters(CPUARMState *env,
37
} iou;
36
- uint64_t va,
38
} lpd;
37
- ARMMMUIdx mmu_idx, bool data)
39
38
-{
40
+ /* The Platform Management Controller subsystem. */
39
- return aa64_va_parameters_both(env, va, mmu_idx);
41
+ struct {
40
-}
42
+ struct {
41
-#else
43
+ SDHCIState sd[XLNX_VERSAL_NR_SDS];
42
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
44
+ } iou;
43
ARMMMUIdx mmu_idx);
45
+ } pmc;
44
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
46
+
45
ARMMMUIdx mmu_idx, bool data);
47
struct {
46
-#endif
48
MemoryRegion *mr_ddr;
47
49
uint32_t psci_conduit;
50
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
51
#define VERSAL_GEM1_IRQ_0 58
52
#define VERSAL_GEM1_WAKE_IRQ_0 59
53
#define VERSAL_ADMA_IRQ_0 60
54
+#define VERSAL_SD0_IRQ_0 126
55
56
/* Architecturally reserved IRQs suitable for virtualization. */
57
#define VERSAL_RSVD_IRQ_FIRST 111
58
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
59
#define MM_FPD_CRF 0xfd1a0000U
60
#define MM_FPD_CRF_SIZE 0x140000
61
62
+#define MM_PMC_SD0 0xf1040000U
63
+#define MM_PMC_SD0_SIZE 0x10000
64
#define MM_PMC_CRP 0xf1260000U
65
#define MM_PMC_CRP_SIZE 0x10000
48
#endif
66
#endif
49
diff --git a/target/arm/helper.c b/target/arm/helper.c
67
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
50
index XXXXXXX..XXXXXXX 100644
68
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/helper.c
69
--- a/hw/arm/xlnx-versal.c
52
+++ b/target/arm/helper.c
70
+++ b/hw/arm/xlnx-versal.c
53
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rbit)(uint32_t x)
71
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
54
return revbit32(x);
55
}
56
57
-#if defined(CONFIG_USER_ONLY)
58
+#ifdef CONFIG_USER_ONLY
59
60
/* These should probably raise undefined insn exceptions. */
61
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
62
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
63
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
64
}
72
}
65
}
73
}
66
+#endif /* !CONFIG_USER_ONLY */
74
67
75
+#define SDHCI_CAPABILITIES 0x280737ec6481 /* Same as on ZynqMP. */
68
/* Return the exception level which controls this address translation regime */
76
+static void versal_create_sds(Versal *s, qemu_irq *pic)
69
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
77
+{
70
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
78
+ int i;
71
}
72
}
73
74
+#ifndef CONFIG_USER_ONLY
75
+
79
+
76
/* Return the SCTLR value which controls this address translation regime */
80
+ for (i = 0; i < ARRAY_SIZE(s->pmc.iou.sd); i++) {
77
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
81
+ DeviceState *dev;
78
{
82
+ MemoryRegion *mr;
79
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_big_endian(CPUARMState *env,
83
+
80
return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
84
+ sysbus_init_child_obj(OBJECT(s), "sd[*]",
81
}
85
+ &s->pmc.iou.sd[i], sizeof(s->pmc.iou.sd[i]),
82
86
+ TYPE_SYSBUS_SDHCI);
83
+/* Return the TTBR associated with this translation regime */
87
+ dev = DEVICE(&s->pmc.iou.sd[i]);
84
+static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
88
+
85
+ int ttbrn)
89
+ object_property_set_uint(OBJECT(dev),
86
+{
90
+ 3, "sd-spec-version", &error_fatal);
87
+ if (mmu_idx == ARMMMUIdx_S2NS) {
91
+ object_property_set_uint(OBJECT(dev), SDHCI_CAPABILITIES, "capareg",
88
+ return env->cp15.vttbr_el2;
92
+ &error_fatal);
89
+ }
93
+ object_property_set_uint(OBJECT(dev), UHS_I, "uhs", &error_fatal);
90
+ if (ttbrn == 0) {
94
+ qdev_init_nofail(dev);
91
+ return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
95
+
92
+ } else {
96
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
93
+ return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
97
+ memory_region_add_subregion(&s->mr_ps,
98
+ MM_PMC_SD0 + i * MM_PMC_SD0_SIZE, mr);
99
+
100
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
101
+ pic[VERSAL_SD0_IRQ_0 + i * 2]);
94
+ }
102
+ }
95
+}
103
+}
96
+
104
+
97
+#endif /* !CONFIG_USER_ONLY */
105
/* This takes the board allocated linear DDR memory and creates aliases
98
+
106
* for each split DDR range/aperture on the Versal address map.
99
/* Return the TCR controlling this translation regime */
107
*/
100
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
108
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
101
{
109
versal_create_uarts(s, pic);
102
@@ -XXX,XX +XXX,XX @@ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
110
versal_create_gems(s, pic);
103
return mmu_idx;
111
versal_create_admas(s, pic);
104
}
112
+ versal_create_sds(s, pic);
105
113
versal_map_ddr(s);
106
-/* Return the TTBR associated with this translation regime */
114
versal_unimp(s);
107
-static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
115
108
- int ttbrn)
109
-{
110
- if (mmu_idx == ARMMMUIdx_S2NS) {
111
- return env->cp15.vttbr_el2;
112
- }
113
- if (ttbrn == 0) {
114
- return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
115
- } else {
116
- return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
117
- }
118
-}
119
-
120
/* Return true if the translation regime is using LPAE format page tables */
121
static inline bool regime_using_lpae_format(CPUARMState *env,
122
ARMMMUIdx mmu_idx)
123
@@ -XXX,XX +XXX,XX @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
124
return regime_using_lpae_format(env, mmu_idx);
125
}
126
127
+#ifndef CONFIG_USER_ONLY
128
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
129
{
130
switch (mmu_idx) {
131
@@ -XXX,XX +XXX,XX @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
132
133
return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
134
}
135
+#endif /* !CONFIG_USER_ONLY */
136
137
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
138
ARMMMUIdx mmu_idx)
139
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
140
return ret;
141
}
142
143
+#ifndef CONFIG_USER_ONLY
144
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
145
ARMMMUIdx mmu_idx)
146
{
147
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
148
*pc = env->pc;
149
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
150
151
-#ifndef CONFIG_USER_ONLY
152
- /*
153
- * Get control bits for tagged addresses. Note that the
154
- * translator only uses this for instruction addresses.
155
- */
156
+ /* Get control bits for tagged addresses. */
157
{
158
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
159
ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
160
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
161
flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
162
flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
163
}
164
-#endif
165
166
if (cpu_isar_feature(aa64_sve, cpu)) {
167
int sve_el = sve_exception_el(env, current_el);
168
--
116
--
169
2.20.1
117
2.20.1
170
118
171
119
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
hw/arm: versal: Add support for the RTC.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20190128223118.5255-9-richard.henderson@linaro.org
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-10-edgar.iglesias@gmail.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
11
---
8
target/arm/translate-a64.c | 37 ++++++++++++++++++++++++++++++++++++-
12
include/hw/arm/xlnx-versal.h | 8 ++++++++
9
1 file changed, 36 insertions(+), 1 deletion(-)
13
hw/arm/xlnx-versal.c | 21 +++++++++++++++++++++
14
2 files changed, 29 insertions(+)
10
15
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
12
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
18
--- a/include/hw/arm/xlnx-versal.h
14
+++ b/target/arm/translate-a64.c
19
+++ b/include/hw/arm/xlnx-versal.h
15
@@ -XXX,XX +XXX,XX @@ static void reset_btype(DisasContext *s)
20
@@ -XXX,XX +XXX,XX @@
21
#include "hw/char/pl011.h"
22
#include "hw/dma/xlnx-zdma.h"
23
#include "hw/net/cadence_gem.h"
24
+#include "hw/rtc/xlnx-zynqmp-rtc.h"
25
26
#define TYPE_XLNX_VERSAL "xlnx-versal"
27
#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
28
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
29
struct {
30
SDHCIState sd[XLNX_VERSAL_NR_SDS];
31
} iou;
32
+
33
+ XlnxZynqMPRTC rtc;
34
} pmc;
35
36
struct {
37
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
38
#define VERSAL_GEM1_IRQ_0 58
39
#define VERSAL_GEM1_WAKE_IRQ_0 59
40
#define VERSAL_ADMA_IRQ_0 60
41
+#define VERSAL_RTC_APB_ERR_IRQ 121
42
#define VERSAL_SD0_IRQ_0 126
43
+#define VERSAL_RTC_ALARM_IRQ 142
44
+#define VERSAL_RTC_SECONDS_IRQ 143
45
46
/* Architecturally reserved IRQs suitable for virtualization. */
47
#define VERSAL_RSVD_IRQ_FIRST 111
48
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
49
#define MM_PMC_SD0_SIZE 0x10000
50
#define MM_PMC_CRP 0xf1260000U
51
#define MM_PMC_CRP_SIZE 0x10000
52
+#define MM_PMC_RTC 0xf12a0000
53
+#define MM_PMC_RTC_SIZE 0x10000
54
#endif
55
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/hw/arm/xlnx-versal.c
58
+++ b/hw/arm/xlnx-versal.c
59
@@ -XXX,XX +XXX,XX @@ static void versal_create_sds(Versal *s, qemu_irq *pic)
16
}
60
}
17
}
61
}
18
62
19
+static void set_btype(DisasContext *s, int val)
63
+static void versal_create_rtc(Versal *s, qemu_irq *pic)
20
+{
64
+{
21
+ TCGv_i32 tcg_val;
65
+ SysBusDevice *sbd;
66
+ MemoryRegion *mr;
22
+
67
+
23
+ /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
68
+ sysbus_init_child_obj(OBJECT(s), "rtc", &s->pmc.rtc, sizeof(s->pmc.rtc),
24
+ tcg_debug_assert(val >= 1 && val <= 3);
69
+ TYPE_XLNX_ZYNQMP_RTC);
70
+ sbd = SYS_BUS_DEVICE(&s->pmc.rtc);
71
+ qdev_init_nofail(DEVICE(sbd));
25
+
72
+
26
+ tcg_val = tcg_const_i32(val);
73
+ mr = sysbus_mmio_get_region(sbd, 0);
27
+ tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
74
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr);
28
+ tcg_temp_free_i32(tcg_val);
75
+
29
+ s->btype = -1;
76
+ /*
77
+ * TODO: Connect the ALARM and SECONDS interrupts once our RTC model
78
+ * supports them.
79
+ */
80
+ sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]);
30
+}
81
+}
31
+
82
+
32
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
83
/* This takes the board allocated linear DDR memory and creates aliases
33
fprintf_function cpu_fprintf, int flags)
84
* for each split DDR range/aperture on the Versal address map.
34
{
85
*/
35
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
86
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
36
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
87
versal_create_gems(s, pic);
37
{
88
versal_create_admas(s, pic);
38
unsigned int opc, op2, op3, rn, op4;
89
versal_create_sds(s, pic);
39
+ unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
90
+ versal_create_rtc(s, pic);
40
TCGv_i64 dst;
91
versal_map_ddr(s);
41
TCGv_i64 modifier;
92
versal_unimp(s);
42
43
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
44
case 0: /* BR */
45
case 1: /* BLR */
46
case 2: /* RET */
47
+ btype_mod = opc;
48
switch (op3) {
49
case 0:
50
/* BR, BLR, RET */
51
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
52
default:
53
goto do_unallocated;
54
}
55
-
56
gen_a64_set_pc(s, dst);
57
/* BLR also needs to load return address */
58
if (opc == 1) {
59
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
60
if ((op3 & ~1) != 2) {
61
goto do_unallocated;
62
}
63
+ btype_mod = opc & 1;
64
if (s->pauth_active) {
65
dst = new_tmp_a64(s);
66
modifier = cpu_reg_sp(s, op4);
67
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
68
return;
69
}
70
71
+ switch (btype_mod) {
72
+ case 0: /* BR */
73
+ if (dc_isar_feature(aa64_bti, s)) {
74
+ /* BR to {x16,x17} or !guard -> 1, else 3. */
75
+ set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
76
+ }
77
+ break;
78
+
79
+ case 1: /* BLR */
80
+ if (dc_isar_feature(aa64_bti, s)) {
81
+ /* BLR sets BTYPE to 2, regardless of source guarded page. */
82
+ set_btype(s, 2);
83
+ }
84
+ break;
85
+
86
+ default: /* RET or none of the above. */
87
+ /* BTYPE will be set to 0 by normal end-of-insn processing. */
88
+ break;
89
+ }
90
+
91
s->base.is_jmp = DISAS_JUMP;
92
}
93
93
94
--
94
--
95
2.20.1
95
2.20.1
96
96
97
97
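
A quick recap of the BTYPE values assigned by the BR/BLR handling above, stated as a standalone C function that mirrors the comments in the patch (this is not QEMU code, just a restatement of the rules):

    #include <stdio.h>

    /* The BTYPE value an indirect branch leaves for the next insn. */
    static int btype_after_branch(int is_blr, int rn, int from_guarded_page)
    {
        if (is_blr) {
            return 2;                    /* BLR always sets BTYPE = 2 */
        }
        /* BR to x16/x17, or BR from an unguarded page, sets 1; else 3. */
        return (rn == 16 || rn == 17 || !from_guarded_page) ? 1 : 3;
    }

    int main(void)
    {
        printf("BR x16, guarded source   -> BTYPE %d\n", btype_after_branch(0, 16, 1));
        printf("BR x5,  guarded source   -> BTYPE %d\n", btype_after_branch(0, 5, 1));
        printf("BR x5,  unguarded source -> BTYPE %d\n", btype_after_branch(0, 5, 0));
        printf("BLR x5                   -> BTYPE %d\n", btype_after_branch(1, 5, 1));
        return 0;
    }

RET and other non-branch cases fall through to the normal end-of-insn handling, which clears BTYPE back to 0, as the next patch's reset_btype() path shows.
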
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
The branch target exception for guarded pages has high priority,
3
Add support for SD.
4
and only 8 instructions are valid for that case. Perform this
5
check before doing any other decode.
6
4
7
Clear BTYPE after all insns that neither set BTYPE nor exit via
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
exception (DISAS_NORETURN).
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
7
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Not yet handled are insns that exit via DISAS_NORETURN for some
8
Message-id: 20200427181649.26851-11-edgar.iglesias@gmail.com
11
other reason, like direct branches.
12
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20190128223118.5255-7-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
10
---
18
target/arm/internals.h | 6 ++
11
hw/arm/xlnx-versal-virt.c | 46 +++++++++++++++++++++++++++++++++++++++
19
target/arm/translate.h | 9 ++-
12
1 file changed, 46 insertions(+)
20
target/arm/translate-a64.c | 139 +++++++++++++++++++++++++++++++++++++
21
3 files changed, 152 insertions(+), 2 deletions(-)
22
13
23
diff --git a/target/arm/internals.h b/target/arm/internals.h
14
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
24
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/internals.h
16
--- a/hw/arm/xlnx-versal-virt.c
26
+++ b/target/arm/internals.h
17
+++ b/hw/arm/xlnx-versal-virt.c
27
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
18
@@ -XXX,XX +XXX,XX @@
28
EC_FPIDTRAP = 0x08,
19
#include "hw/arm/sysbus-fdt.h"
29
EC_PACTRAP = 0x09,
20
#include "hw/arm/fdt.h"
30
EC_CP14RRTTRAP = 0x0c,
21
#include "cpu.h"
31
+ EC_BTITRAP = 0x0d,
22
+#include "hw/qdev-properties.h"
32
EC_ILLEGALSTATE = 0x0e,
23
#include "hw/arm/xlnx-versal.h"
33
EC_AA32_SVC = 0x11,
24
34
EC_AA32_HVC = 0x12,
25
#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt")
35
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_pactrap(void)
26
@@ -XXX,XX +XXX,XX @@ static void fdt_add_zdma_nodes(VersalVirt *s)
36
return EC_PACTRAP << ARM_EL_EC_SHIFT;
27
}
37
}
28
}
38
29
39
+static inline uint32_t syn_btitrap(int btype)
30
+static void fdt_add_sd_nodes(VersalVirt *s)
40
+{
31
+{
41
+ return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
32
+ const char clocknames[] = "clk_xin\0clk_ahb";
42
+}
33
+ const char compat[] = "arasan,sdhci-8.9a";
34
+ int i;
43
+
35
+
44
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
36
+ for (i = ARRAY_SIZE(s->soc.pmc.iou.sd) - 1; i >= 0; i--) {
45
{
37
+ uint64_t addr = MM_PMC_SD0 + MM_PMC_SD0_SIZE * i;
46
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
38
+ char *name = g_strdup_printf("/sdhci@%" PRIx64, addr);
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
39
+
48
index XXXXXXX..XXXXXXX 100644
40
+ qemu_fdt_add_subnode(s->fdt, name);
49
--- a/target/arm/translate.h
41
+
50
+++ b/target/arm/translate.h
42
+ qemu_fdt_setprop_cells(s->fdt, name, "clocks",
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
43
+ s->phandle.clk_25Mhz, s->phandle.clk_25Mhz);
52
bool pauth_active;
44
+ qemu_fdt_setprop(s->fdt, name, "clock-names",
53
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
45
+ clocknames, sizeof(clocknames));
54
bool bt;
46
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
55
- /* A copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. */
47
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_SD0_IRQ_0 + i * 2,
56
- uint8_t btype;
48
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
57
+ /*
49
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
58
+ * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
50
+ 2, addr, 2, MM_PMC_SD0_SIZE);
59
+ * < 0, set by the current instruction.
51
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
60
+ */
52
+ g_free(name);
61
+ int8_t btype;
62
+ /* True if this page is guarded. */
63
+ bool guarded_page;
64
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
65
int c15_cpar;
66
/* TCG op of the current insn_start. */
67
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/translate-a64.c
70
+++ b/target/arm/translate-a64.c
71
@@ -XXX,XX +XXX,XX @@ static inline int get_a64_user_mem_index(DisasContext *s)
72
return arm_to_core_mmu_idx(useridx);
73
}
74
75
+static void reset_btype(DisasContext *s)
76
+{
77
+ if (s->btype != 0) {
78
+ TCGv_i32 zero = tcg_const_i32(0);
79
+ tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
80
+ tcg_temp_free_i32(zero);
81
+ s->btype = 0;
82
+ }
53
+ }
83
+}
54
+}
84
+
55
+
85
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
56
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
86
fprintf_function cpu_fprintf, int flags)
87
{
57
{
88
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
58
Error *err = NULL;
59
@@ -XXX,XX +XXX,XX @@ static void create_virtio_regions(VersalVirt *s)
89
}
60
}
90
}
61
}
91
62
92
+/**
63
+static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
93
+ * is_guarded_page:
94
+ * @env: The cpu environment
95
+ * @s: The DisasContext
96
+ *
97
+ * Return true if the page is guarded.
98
+ */
99
+static bool is_guarded_page(CPUARMState *env, DisasContext *s)
100
+{
64
+{
101
+#ifdef CONFIG_USER_ONLY
65
+ BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
102
+ return false; /* FIXME */
66
+ DeviceState *card;
103
+#else
104
+ uint64_t addr = s->base.pc_first;
105
+ int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
106
+ unsigned int index = tlb_index(env, mmu_idx, addr);
107
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
108
+
67
+
109
+ /*
68
+ card = qdev_create(qdev_get_child_bus(DEVICE(sd), "sd-bus"), TYPE_SD_CARD);
110
+ * We test this immediately after reading an insn, which means
69
+ object_property_add_child(OBJECT(sd), "card[*]", OBJECT(card),
111
+ * that any normal page must be in the TLB. The only exception
70
+ &error_fatal);
112
+ * would be for executing from flash or device memory, which
71
+ qdev_prop_set_drive(card, "drive", blk, &error_fatal);
113
+ * does not retain the TLB entry.
72
+ object_property_set_bool(OBJECT(card), true, "realized", &error_fatal);
114
+ *
115
+ * FIXME: Assume false for those, for now. We could use
116
+ * arm_cpu_get_phys_page_attrs_debug to re-read the page
117
+ * table entry even for that case.
118
+ */
119
+ return (tlb_hit(entry->addr_code, addr) &&
120
+ env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
121
+#endif
122
+}
73
+}
123
+
74
+
124
+/**
75
static void versal_virt_init(MachineState *machine)
125
+ * btype_destination_ok:
126
+ * @insn: The instruction at the branch destination
127
+ * @bt: SCTLR_ELx.BT
128
+ * @btype: PSTATE.BTYPE, and is non-zero
129
+ *
130
+ * On a guarded page, there are a limited number of insns
131
+ * that may be present at the branch target:
132
+ * - branch target identifiers,
133
+ * - paciasp, pacibsp,
134
+ * - BRK insn
135
+ * - HLT insn
136
+ * Anything else causes a Branch Target Exception.
137
+ *
138
+ * Return true if the branch is compatible, false to raise BTITRAP.
139
+ */
140
+static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
141
+{
142
+ if ((insn & 0xfffff01fu) == 0xd503201fu) {
143
+ /* HINT space */
144
+ switch (extract32(insn, 5, 7)) {
145
+ case 0b011001: /* PACIASP */
146
+ case 0b011011: /* PACIBSP */
147
+ /*
148
+ * If SCTLR_ELx.BT, then PACI*SP are not compatible
149
+ * with btype == 3. Otherwise all btype are ok.
150
+ */
151
+ return !bt || btype != 3;
152
+ case 0b100000: /* BTI */
153
+ /* Not compatible with any btype. */
154
+ return false;
155
+ case 0b100010: /* BTI c */
156
+ /* Not compatible with btype == 3 */
157
+ return btype != 3;
158
+ case 0b100100: /* BTI j */
159
+ /* Not compatible with btype == 2 */
160
+ return btype != 2;
161
+ case 0b100110: /* BTI jc */
162
+ /* Compatible with any btype. */
163
+ return true;
164
+ }
165
+ } else {
166
+ switch (insn & 0xffe0001fu) {
167
+ case 0xd4200000u: /* BRK */
168
+ case 0xd4400000u: /* HLT */
169
+ /* Give priority to the breakpoint exception. */
170
+ return true;
171
+ }
172
+ }
173
+ return false;
174
+}
175
+
176
/* C3.1 A64 instruction index by encoding */
177
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
178
{
76
{
179
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
77
VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine);
180
78
int psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
181
s->fp_access_checked = false;
79
+ int i;
182
80
183
+ if (dc_isar_feature(aa64_bti, s)) {
81
/*
184
+ if (s->base.num_insns == 1) {
82
* If the user provides an Operating System to be loaded, we expect them
185
+ /*
83
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
186
+ * At the first insn of the TB, compute s->guarded_page.
84
fdt_add_gic_nodes(s);
187
+ * We delayed computing this until successfully reading
85
fdt_add_timer_nodes(s);
188
+ * the first insn of the TB, above. This (mostly) ensures
86
fdt_add_zdma_nodes(s);
189
+ * that the softmmu tlb entry has been populated, and the
87
+ fdt_add_sd_nodes(s);
190
+ * page table GP bit is available.
88
fdt_add_cpu_nodes(s, psci_conduit);
191
+ *
89
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
192
+ * Note that we need to compute this even if btype == 0,
90
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
193
+ * because this value is used for BR instructions later
91
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
194
+ * where ENV is not available.
92
memory_region_add_subregion_overlap(get_system_memory(),
195
+ */
93
0, &s->soc.fpd.apu.mr, 0);
196
+ s->guarded_page = is_guarded_page(env, s);
94
197
+
95
+ /* Plugin SD cards. */
198
+ /* First insn can have btype set to non-zero. */
96
+ for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
199
+ tcg_debug_assert(s->btype >= 0);
97
+ sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
200
+
201
+ /*
202
+ * Note that the Branch Target Exception has fairly high
203
+ * priority -- below debugging exceptions but above most
204
+ * everything else. This allows us to handle this now
205
+ * instead of waiting until the insn is otherwise decoded.
206
+ */
207
+ if (s->btype != 0
208
+ && s->guarded_page
209
+ && !btype_destination_ok(insn, s->bt, s->btype)) {
210
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
211
+ default_exception_el(s));
212
+ return;
213
+ }
214
+ } else {
215
+ /* Not the first insn: btype must be 0. */
216
+ tcg_debug_assert(s->btype == 0);
217
+ }
218
+ }
98
+ }
219
+
99
+
220
switch (extract32(insn, 25, 4)) {
100
s->binfo.ram_size = machine->ram_size;
221
case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
101
s->binfo.loader_start = 0x0;
222
unallocated_encoding(s);
102
s->binfo.get_dtb = versal_virt_get_dtb;
223
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
224
225
/* if we allocated any temporaries, free them here */
226
free_tmp_a64(s);
227
+
228
+ /*
229
+ * After execution of most insns, btype is reset to 0.
230
+ * Note that we set btype == -1 when the insn sets btype.
231
+ */
232
+ if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
233
+ reset_btype(s);
234
+ }
235
}
236
237
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
238
--
103
--
239
2.20.1
104
2.20.1
240
105
241
106
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
2
3
This is all of the non-exception cases of DISAS_NORETURN.
3
Add support for the RTC.
4
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-id: 20190128223118.5255-8-richard.henderson@linaro.org
7
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
8
Message-id: 20200427181649.26851-12-edgar.iglesias@gmail.com
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
---
10
target/arm/translate-a64.c | 6 ++++++
11
hw/arm/xlnx-versal-virt.c | 22 ++++++++++++++++++++++
11
1 file changed, 6 insertions(+)
12
1 file changed, 22 insertions(+)
12
13
13
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
14
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-a64.c
16
--- a/hw/arm/xlnx-versal-virt.c
16
+++ b/target/arm/translate-a64.c
17
+++ b/hw/arm/xlnx-versal-virt.c
17
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
18
@@ -XXX,XX +XXX,XX @@ static void fdt_add_sd_nodes(VersalVirt *s)
18
}
19
}
19
20
/* B Branch / BL Branch with link */
21
+ reset_btype(s);
22
gen_goto_tb(s, 0, addr);
23
}
20
}
24
21
25
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
22
+static void fdt_add_rtc_node(VersalVirt *s)
26
tcg_cmp = read_cpu_reg(s, rt, sf);
23
+{
27
label_match = gen_new_label();
24
+ const char compat[] = "xlnx,zynqmp-rtc";
28
25
+ const char interrupt_names[] = "alarm\0sec";
29
+ reset_btype(s);
26
+ char *name = g_strdup_printf("/rtc@%x", MM_PMC_RTC);
30
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
31
tcg_cmp, 0, label_match);
32
33
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
34
tcg_cmp = tcg_temp_new_i64();
35
tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
36
label_match = gen_new_label();
37
+
27
+
38
+ reset_btype(s);
28
+ qemu_fdt_add_subnode(s->fdt, name);
39
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
29
+
40
tcg_cmp, 0, label_match);
30
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
41
tcg_temp_free_i64(tcg_cmp);
31
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_ALARM_IRQ,
42
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
32
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI,
43
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
33
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_SECONDS_IRQ,
44
cond = extract32(insn, 0, 4);
34
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
45
35
+ qemu_fdt_setprop(s->fdt, name, "interrupt-names",
46
+ reset_btype(s);
36
+ interrupt_names, sizeof(interrupt_names));
47
if (cond < 0x0e) {
37
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
48
/* genuinely conditional branches */
38
+ 2, MM_PMC_RTC, 2, MM_PMC_RTC_SIZE);
49
TCGLabel *label_match = gen_new_label();
39
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
50
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
40
+ g_free(name);
51
* a self-modified code correctly and also to take
41
+}
52
* any pending interrupts immediately.
42
+
53
*/
43
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
54
+ reset_btype(s);
44
{
55
gen_goto_tb(s, 0, s->pc);
45
Error *err = NULL;
56
return;
46
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
57
default:
47
fdt_add_timer_nodes(s);
48
fdt_add_zdma_nodes(s);
49
fdt_add_sd_nodes(s);
50
+ fdt_add_rtc_node(s);
51
fdt_add_cpu_nodes(s, psci_conduit);
52
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
53
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
58
--
54
--
59
2.20.1
55
2.20.1
60
56
61
57
1
The arm_boot_info struct has a skip_dtb_autoload flag: if this is
1
Somewhere along the line we accidentally added a duplicate
2
set to true by the board code then arm_load_kernel() will not
2
"using D16-D31 when they don't exist" check to do_vfm_dp()
3
load the DTB itself, but will leave this for the board code to
3
(probably an artifact of a patch series rebase). Remove it.
4
do itself later. However, the check for this is done in a
5
code path which is only executed for the case where we load
6
a kernel image file. If we're taking the "boot via firmware"
7
code path then the flag isn't honoured and the DTB is never
8
loaded.
9
10
We didn't notice this because the only real user of "boot
11
via firmware" that cares about the DTB is the virt board
12
(for UEFI boot), and that always wants skip_dtb_autoload
13
anyway. But the SBSA reference board model we're planning to
14
add will want the flag to behave correctly.
15
16
Now we've refactored the arm_load_kernel() function, the
17
fix is simple: drop the early 'return' so we fall into
18
the same "load the DTB" code the boot-direct-kernel path uses.
19
4
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
22
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
23
Message-id: 20190131112240.8395-6-peter.maydell@linaro.org
8
Message-id: 20200430181003.21682-2-peter.maydell@linaro.org
24
---
9
---
25
hw/arm/boot.c | 1 -
10
target/arm/translate-vfp.inc.c | 6 ------
26
1 file changed, 1 deletion(-)
11
1 file changed, 6 deletions(-)
27
12
28
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
13
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
29
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/arm/boot.c
15
--- a/target/arm/translate-vfp.inc.c
31
+++ b/hw/arm/boot.c
16
+++ b/target/arm/translate-vfp.inc.c
32
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
17
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
33
/* Load the kernel. */
18
return false;
34
if (!info->kernel_filename || info->firmware_loaded) {
19
}
35
arm_setup_firmware_boot(cpu, info);
20
36
- return;
21
- /* UNDEF accesses to D16-D31 if they don't exist. */
37
} else {
22
- if (!dc_isar_feature(aa32_simd_r32, s) &&
38
arm_setup_direct_kernel_boot(cpu, info);
23
- ((a->vd | a->vn | a->vm) & 0x10)) {
24
- return false;
25
- }
26
-
27
if (!vfp_access_check(s)) {
28
return true;
39
}
29
}
40
--
30
--
41
2.20.1
31
2.20.1
42
32
43
33
diff view generated by jsdifflib
New patch
1
We were accidentally permitting decode of Thumb Neon insns even if
2
the CPU didn't have the FEATURE_NEON bit set, because the feature
3
check was being done before the call to disas_neon_data_insn() and
4
disas_neon_ls_insn() in the Arm decoder but was omitted from the
5
Thumb decoder. Push the feature bit check down into the called
6
functions so it is done for both Arm and Thumb encodings.
1
7
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Message-id: 20200430181003.21682-3-peter.maydell@linaro.org
12
---
13
target/arm/translate.c | 16 ++++++++--------
14
1 file changed, 8 insertions(+), 8 deletions(-)
15
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate.c
19
+++ b/target/arm/translate.c
20
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
21
TCGv_i32 tmp2;
22
TCGv_i64 tmp64;
23
24
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
25
+ return 1;
26
+ }
27
+
28
/* FIXME: this access check should not take precedence over UNDEF
29
* for invalid encodings; we will generate incorrect syndrome information
30
* for attempts to execute invalid vfp/neon encodings with FP disabled.
31
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
32
TCGv_ptr ptr1, ptr2, ptr3;
33
TCGv_i64 tmp64;
34
35
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
36
+ return 1;
37
+ }
38
+
39
/* FIXME: this access check should not take precedence over UNDEF
40
* for invalid encodings; we will generate incorrect syndrome information
41
* for attempts to execute invalid vfp/neon encodings with FP disabled.
42
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
43
44
if (((insn >> 25) & 7) == 1) {
45
/* NEON Data processing. */
46
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
47
- goto illegal_op;
48
- }
49
-
50
if (disas_neon_data_insn(s, insn)) {
51
goto illegal_op;
52
}
53
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
54
}
55
if ((insn & 0x0f100000) == 0x04000000) {
56
/* NEON load/store. */
57
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
58
- goto illegal_op;
59
- }
60
-
61
if (disas_neon_ls_insn(s, insn)) {
62
goto illegal_op;
63
}
64
--
65
2.20.1
66
67
1
Fix the block comment style in arm_load_kernel() to QEMU's
1
Add the infrastructure for building and invoking a decodetree decoder
2
current style preferences. This will allow us to do some
2
for the AArch32 Neon encodings. At the moment the new decoder covers
3
refactoring of this function without checkpatch complaining
3
nothing, so we always fall back to the existing hand-written decode.
4
about the code-motion patches.
4
5
We follow the same pattern we did for the VFP decodetree conversion
6
(commit 78e138bc1f672c145ef6ace74617d and following): code that deals
7
with Neon will be moving gradually out to translate-neon.vfp.inc,
8
which we #include into translate.c.
9
10
In order to share the decode files between A32 and T32, we
11
split Neon into 3 parts:
12
* data-processing
13
* load-store
14
* 'shared' encodings
15
16
The first two groups of instructions have similar but not identical
17
A32 and T32 encodings, so we need to manually transform the T32
18
encoding into the A32 one before calling the decoder; the third group
19
covers the Neon instructions which are identical in A32 and T32.
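
Concretely, the T32-to-A32 transforms are plain bit manipulations on
the fetched insn word; the sketch below pulls them out of the
disas_thumb2_insn() hunk further down into standalone helpers purely
for illustration (the patch does this inline, and these helper names
are not part of the patch):

    /* T32 0b111p_1111_... -> A32 0b1111_001p_...: keep bits [31:29],
     * bit 25 and [23:0]; clear bits [27:26]; set bit 28; move the
     * old bit 28 ('p') down into bit 24. */
    static uint32_t neon_dp_t32_to_a32(uint32_t insn)
    {
        return (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
    }

    /* T32 0b1111_1001_ppp0_... -> A32 0b1111_0100_ppp0_...: replace
     * the fixed top byte, keep the rest of the encoding unchanged. */
    static uint32_t neon_ls_t32_to_a32(uint32_t insn)
    {
        return (insn & 0x00ffffff) | 0xf4000000;
    }
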
5
20
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
23
Message-id: 20200430181003.21682-4-peter.maydell@linaro.org
9
Message-id: 20190131112240.8395-2-peter.maydell@linaro.org
10
---
24
---
11
hw/arm/boot.c | 30 ++++++++++++++++++++----------
25
target/arm/neon-dp.decode | 29 ++++++++++++++++++++++++++
12
1 file changed, 20 insertions(+), 10 deletions(-)
26
target/arm/neon-ls.decode | 29 ++++++++++++++++++++++++++
13
27
target/arm/neon-shared.decode | 27 +++++++++++++++++++++++++
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
28
target/arm/translate-neon.inc.c | 32 +++++++++++++++++++++++++++++
29
target/arm/translate.c | 36 +++++++++++++++++++++++++++++++--
30
target/arm/Makefile.objs | 18 +++++++++++++++++
31
6 files changed, 169 insertions(+), 2 deletions(-)
32
create mode 100644 target/arm/neon-dp.decode
33
create mode 100644 target/arm/neon-ls.decode
34
create mode 100644 target/arm/neon-shared.decode
35
create mode 100644 target/arm/translate-neon.inc.c
36
37
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
38
new file mode 100644
39
index XXXXXXX..XXXXXXX
40
--- /dev/null
41
+++ b/target/arm/neon-dp.decode
42
@@ -XXX,XX +XXX,XX @@
43
+# AArch32 Neon data-processing instruction descriptions
44
+#
45
+# Copyright (c) 2020 Linaro, Ltd
46
+#
47
+# This library is free software; you can redistribute it and/or
48
+# modify it under the terms of the GNU Lesser General Public
49
+# License as published by the Free Software Foundation; either
50
+# version 2 of the License, or (at your option) any later version.
51
+#
52
+# This library is distributed in the hope that it will be useful,
53
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
54
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
55
+# Lesser General Public License for more details.
56
+#
57
+# You should have received a copy of the GNU Lesser General Public
58
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
59
+
60
+#
61
+# This file is processed by scripts/decodetree.py
62
+#
63
+
64
+# Encodings for Neon data processing instructions where the T32 encoding
65
+# is a simple transformation of the A32 encoding.
66
+# More specifically, this file covers instructions where the A32 encoding is
67
+# 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
68
+# and the T32 encoding is
69
+# 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
70
+# This file works on the A32 encoding only; calling code for T32 has to
71
+# transform the insn into the A32 version first.
72
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
73
new file mode 100644
74
index XXXXXXX..XXXXXXX
75
--- /dev/null
76
+++ b/target/arm/neon-ls.decode
77
@@ -XXX,XX +XXX,XX @@
78
+# AArch32 Neon load/store instruction descriptions
79
+#
80
+# Copyright (c) 2020 Linaro, Ltd
81
+#
82
+# This library is free software; you can redistribute it and/or
83
+# modify it under the terms of the GNU Lesser General Public
84
+# License as published by the Free Software Foundation; either
85
+# version 2 of the License, or (at your option) any later version.
86
+#
87
+# This library is distributed in the hope that it will be useful,
88
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
89
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
90
+# Lesser General Public License for more details.
91
+#
92
+# You should have received a copy of the GNU Lesser General Public
93
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
94
+
95
+#
96
+# This file is processed by scripts/decodetree.py
97
+#
98
+
99
+# Encodings for Neon load/store instructions where the T32 encoding
100
+# is a simple transformation of the A32 encoding.
101
+# More specifically, this file covers instructions where the A32 encoding is
102
+# 0b1111_0100_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
103
+# and the T32 encoding is
104
+# 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
105
+# This file works on the A32 encoding only; calling code for T32 has to
106
+# transform the insn into the A32 version first.
107
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
108
new file mode 100644
109
index XXXXXXX..XXXXXXX
110
--- /dev/null
111
+++ b/target/arm/neon-shared.decode
112
@@ -XXX,XX +XXX,XX @@
113
+# AArch32 Neon instruction descriptions
114
+#
115
+# Copyright (c) 2020 Linaro, Ltd
116
+#
117
+# This library is free software; you can redistribute it and/or
118
+# modify it under the terms of the GNU Lesser General Public
119
+# License as published by the Free Software Foundation; either
120
+# version 2 of the License, or (at your option) any later version.
121
+#
122
+# This library is distributed in the hope that it will be useful,
123
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
124
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
125
+# Lesser General Public License for more details.
126
+#
127
+# You should have received a copy of the GNU Lesser General Public
128
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
129
+
130
+#
131
+# This file is processed by scripts/decodetree.py
132
+#
133
+
134
+# Encodings for Neon instructions whose encoding is the same for
135
+# both A32 and T32.
136
+
137
+# More specifically, this covers:
138
+# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
139
+# 3same ext: 0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
140
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
141
new file mode 100644
142
index XXXXXXX..XXXXXXX
143
--- /dev/null
144
+++ b/target/arm/translate-neon.inc.c
145
@@ -XXX,XX +XXX,XX @@
146
+/*
147
+ * ARM translation: AArch32 Neon instructions
148
+ *
149
+ * Copyright (c) 2003 Fabrice Bellard
150
+ * Copyright (c) 2005-2007 CodeSourcery
151
+ * Copyright (c) 2007 OpenedHand, Ltd.
152
+ * Copyright (c) 2020 Linaro, Ltd.
153
+ *
154
+ * This library is free software; you can redistribute it and/or
155
+ * modify it under the terms of the GNU Lesser General Public
156
+ * License as published by the Free Software Foundation; either
157
+ * version 2 of the License, or (at your option) any later version.
158
+ *
159
+ * This library is distributed in the hope that it will be useful,
160
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
161
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
162
+ * Lesser General Public License for more details.
163
+ *
164
+ * You should have received a copy of the GNU Lesser General Public
165
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
166
+ */
167
+
168
+/*
169
+ * This file is intended to be included from translate.c; it uses
170
+ * some macros and definitions provided by that file.
171
+ * It might be possible to convert it to a standalone .c file eventually.
172
+ */
173
+
174
+/* Include the generated Neon decoder */
175
+#include "decode-neon-dp.inc.c"
176
+#include "decode-neon-ls.inc.c"
177
+#include "decode-neon-shared.inc.c"
178
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
179
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/boot.c
180
--- a/target/arm/translate.c
17
+++ b/hw/arm/boot.c
181
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
182
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
19
static const ARMInsnFixup *primary_loader;
183
20
AddressSpace *as = arm_boot_address_space(cpu, info);
184
#define ARM_CP_RW_BIT (1 << 20)
21
185
22
- /* CPU objects (unlike devices) are not automatically reset on system
186
-/* Include the VFP decoder */
23
+ /*
187
+/* Include the VFP and Neon decoders */
24
+ * CPU objects (unlike devices) are not automatically reset on system
188
#include "translate-vfp.inc.c"
25
* reset, so we must always register a handler to do so. If we're
189
+#include "translate-neon.inc.c"
26
* actually loading a kernel, the handler is also responsible for
190
27
* arranging that we start it correctly.
191
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
28
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
192
{
29
qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
193
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
194
/* Unconditional instructions. */
195
/* TODO: Perhaps merge these into one decodetree output file. */
196
if (disas_a32_uncond(s, insn) ||
197
- disas_vfp_uncond(s, insn)) {
198
+ disas_vfp_uncond(s, insn) ||
199
+ disas_neon_dp(s, insn) ||
200
+ disas_neon_ls(s, insn) ||
201
+ disas_neon_shared(s, insn)) {
202
return;
203
}
204
/* fall back to legacy decoder */
205
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
206
ARCH(6T2);
30
}
207
}
31
208
32
- /* The board code is not supposed to set secure_board_setup unless
209
+ if ((insn & 0xef000000) == 0xef000000) {
33
+ /*
210
+ /*
34
+ * The board code is not supposed to set secure_board_setup unless
211
+ * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
35
* running its code in secure mode is actually possible, and KVM
212
+ * transform into
36
* doesn't support secure.
213
+ * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
214
+ */
215
+ uint32_t a32_insn = (insn & 0xe2ffffff) |
216
+ ((insn & (1 << 28)) >> 4) | (1 << 28);
217
+
218
+ if (disas_neon_dp(s, a32_insn)) {
219
+ return;
220
+ }
221
+ }
222
+
223
+ if ((insn & 0xff100000) == 0xf9000000) {
224
+ /*
225
+ * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
226
+ * transform into
227
+ * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
228
+ */
229
+ uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
230
+
231
+ if (disas_neon_ls(s, a32_insn)) {
232
+ return;
233
+ }
234
+ }
235
+
236
/*
237
* TODO: Perhaps merge these into one decodetree output file.
238
* Note disas_vfp is written for a32 with cond field in the
239
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
37
*/
240
*/
38
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
241
if (disas_t32(s, insn) ||
39
if (!info->kernel_filename || info->firmware_loaded) {
242
disas_vfp_uncond(s, insn) ||
40
243
+ disas_neon_shared(s, insn) ||
41
if (have_dtb(info)) {
244
((insn >> 28) == 0xe && disas_vfp(s, insn))) {
42
- /* If we have a device tree blob, but no kernel to supply it to (or
43
+ /*
44
+ * If we have a device tree blob, but no kernel to supply it to (or
45
* the kernel is supposed to be loaded by the bootloader), copy the
46
* DTB to the base of RAM for the bootloader to pick up.
47
*/
48
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
49
try_decompressing_kernel = arm_feature(&cpu->env,
50
ARM_FEATURE_AARCH64);
51
52
- /* Expose the kernel, the command line, and the initrd in fw_cfg.
53
+ /*
54
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
55
* We don't process them here at all, it's all left to the
56
* firmware.
57
*/
58
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
59
}
60
}
61
62
- /* We will start from address 0 (typically a boot ROM image) in the
63
+ /*
64
+ * We will start from address 0 (typically a boot ROM image) in the
65
* same way as hardware.
66
*/
67
return;
245
return;
68
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
246
}
69
if (info->nb_cpus == 0)
247
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
70
info->nb_cpus = 1;
248
index XXXXXXX..XXXXXXX 100644
71
249
--- a/target/arm/Makefile.objs
72
- /* We want to put the initrd far enough into RAM that when the
250
+++ b/target/arm/Makefile.objs
73
+ /*
251
@@ -XXX,XX +XXX,XX @@ target/arm/decode-sve.inc.c: $(SRC_PATH)/target/arm/sve.decode $(DECODETREE)
74
+ * We want to put the initrd far enough into RAM that when the
252
     $(PYTHON) $(DECODETREE) --decode disas_sve -o $@ $<,\
75
* kernel is uncompressed it will not clobber the initrd. However
253
     "GEN", $(TARGET_DIR)$@)
76
* on boards without much RAM we must ensure that we still leave
254
77
* enough room for a decent sized initrd, and on boards with large
255
+target/arm/decode-neon-shared.inc.c: $(SRC_PATH)/target/arm/neon-shared.decode $(DECODETREE)
78
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
256
+    $(call quiet-command,\
79
kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr,
257
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_shared -o $@ $<,\
80
&elf_high_addr, elf_machine, as);
258
+     "GEN", $(TARGET_DIR)$@)
81
if (kernel_size > 0 && have_dtb(info)) {
259
+
82
- /* If there is still some room left at the base of RAM, try and put
260
+target/arm/decode-neon-dp.inc.c: $(SRC_PATH)/target/arm/neon-dp.decode $(DECODETREE)
83
+ /*
261
+    $(call quiet-command,\
84
+ * If there is still some room left at the base of RAM, try and put
262
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_dp -o $@ $<,\
85
* the DTB there like we do for images loaded with -bios or -pflash.
263
+     "GEN", $(TARGET_DIR)$@)
86
*/
264
+
87
if (elf_low_addr > info->loader_start
265
+target/arm/decode-neon-ls.inc.c: $(SRC_PATH)/target/arm/neon-ls.decode $(DECODETREE)
88
|| elf_high_addr < info->loader_start) {
266
+    $(call quiet-command,\
89
- /* Set elf_low_addr as address limit for arm_load_dtb if it may be
267
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_ls -o $@ $<,\
90
+ /*
268
+     "GEN", $(TARGET_DIR)$@)
91
+ * Set elf_low_addr as address limit for arm_load_dtb if it may be
269
+
92
* pointing into RAM, otherwise pass '0' (no limit)
270
target/arm/decode-vfp.inc.c: $(SRC_PATH)/target/arm/vfp.decode $(DECODETREE)
93
*/
271
    $(call quiet-command,\
94
if (elf_low_addr < info->loader_start) {
272
     $(PYTHON) $(DECODETREE) --static-decode disas_vfp -o $@ $<,\
95
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
273
@@ -XXX,XX +XXX,XX @@ target/arm/decode-t16.inc.c: $(SRC_PATH)/target/arm/t16.decode $(DECODETREE)
96
fixupcontext[FIXUP_BOARDID] = info->board_id;
274
     "GEN", $(TARGET_DIR)$@)
97
fixupcontext[FIXUP_BOARD_SETUP] = info->board_setup_addr;
275
98
276
target/arm/translate-sve.o: target/arm/decode-sve.inc.c
99
- /* for device tree boot, we pass the DTB directly in r2. Otherwise
277
+target/arm/translate.o: target/arm/decode-neon-shared.inc.c
100
+ /*
278
+target/arm/translate.o: target/arm/decode-neon-dp.inc.c
101
+ * for device tree boot, we pass the DTB directly in r2. Otherwise
279
+target/arm/translate.o: target/arm/decode-neon-ls.inc.c
102
* we point to the kernel args.
280
target/arm/translate.o: target/arm/decode-vfp.inc.c
103
*/
281
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
104
if (have_dtb(info)) {
282
target/arm/translate.o: target/arm/decode-a32.inc.c
105
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
106
info->write_board_setup(cpu, info);
107
}
108
109
- /* Notify devices which need to fake up firmware initialization
110
+ /*
111
+ * Notify devices which need to fake up firmware initialization
112
* that we're doing a direct kernel boot.
113
*/
114
object_child_foreach_recursive(object_get_root(),
115
--
283
--
116
2.20.1
284
2.20.1
117
285
118
286
New patch
1
Convert the VCMLA (vector) insns in the 3same extension group to
2
decodetree.
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-5-peter.maydell@linaro.org
7
---
8
target/arm/neon-shared.decode | 11 ++++++++++
9
target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 11 +---------
11
3 files changed, 49 insertions(+), 10 deletions(-)
12
13
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-shared.decode
16
+++ b/target/arm/neon-shared.decode
17
@@ -XXX,XX +XXX,XX @@
18
# More specifically, this covers:
19
# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
20
# 3same ext: 0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
21
+
22
+# VFP/Neon register fields; same as vfp.decode
23
+%vm_dp 5:1 0:4
24
+%vm_sp 0:4 5:1
25
+%vn_dp 7:1 16:4
26
+%vn_sp 16:4 7:1
27
+%vd_dp 22:1 12:4
28
+%vd_sp 12:4 22:1
29
+
30
+VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
31
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
32
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate-neon.inc.c
35
+++ b/target/arm/translate-neon.inc.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "decode-neon-dp.inc.c"
38
#include "decode-neon-ls.inc.c"
39
#include "decode-neon-shared.inc.c"
40
+
41
+static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
42
+{
43
+ int opr_sz;
44
+ TCGv_ptr fpst;
45
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
46
+
47
+ if (!dc_isar_feature(aa32_vcma, s)
48
+ || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
49
+ return false;
50
+ }
51
+
52
+ /* UNDEF accesses to D16-D31 if they don't exist. */
53
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
54
+ ((a->vd | a->vn | a->vm) & 0x10)) {
55
+ return false;
56
+ }
57
+
58
+ if ((a->vn | a->vm | a->vd) & a->q) {
59
+ return false;
60
+ }
61
+
62
+ if (!vfp_access_check(s)) {
63
+ return true;
64
+ }
65
+
66
+ opr_sz = (1 + a->q) * 8;
67
+ fpst = get_fpstatus_ptr(1);
68
+ fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
69
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
70
+ vfp_reg_offset(1, a->vn),
71
+ vfp_reg_offset(1, a->vm),
72
+ fpst, opr_sz, opr_sz, a->rot,
73
+ fn_gvec_ptr);
74
+ tcg_temp_free_ptr(fpst);
75
+ return true;
76
+}
77
diff --git a/target/arm/translate.c b/target/arm/translate.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/translate.c
80
+++ b/target/arm/translate.c
81
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
82
bool is_long = false, q = extract32(insn, 6, 1);
83
bool ptr_is_env = false;
84
85
- if ((insn & 0xfe200f10) == 0xfc200800) {
86
- /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
87
- int size = extract32(insn, 20, 1);
88
- data = extract32(insn, 23, 2); /* rot */
89
- if (!dc_isar_feature(aa32_vcma, s)
90
- || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
91
- return 1;
92
- }
93
- fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
94
- } else if ((insn & 0xfea00f10) == 0xfc800800) {
95
+ if ((insn & 0xfea00f10) == 0xfc800800) {
96
/* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
97
int size = extract32(insn, 20, 1);
98
data = extract32(insn, 24, 1); /* rot */
99
--
100
2.20.1
101
102
New patch
1
Convert the VCADD (vector) insns to decodetree.
1
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-6-peter.maydell@linaro.org
6
---
7
target/arm/neon-shared.decode | 3 +++
8
target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 11 +---------
10
3 files changed, 41 insertions(+), 10 deletions(-)
11
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-shared.decode
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@
17
18
VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
+
21
+VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
22
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
23
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/translate-neon.inc.c
26
+++ b/target/arm/translate-neon.inc.c
27
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
28
tcg_temp_free_ptr(fpst);
29
return true;
30
}
31
+
32
+static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
33
+{
34
+ int opr_sz;
35
+ TCGv_ptr fpst;
36
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
37
+
38
+ if (!dc_isar_feature(aa32_vcma, s)
39
+ || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
40
+ return false;
41
+ }
42
+
43
+ /* UNDEF accesses to D16-D31 if they don't exist. */
44
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
45
+ ((a->vd | a->vn | a->vm) & 0x10)) {
46
+ return false;
47
+ }
48
+
49
+ if ((a->vn | a->vm | a->vd) & a->q) {
50
+ return false;
51
+ }
52
+
53
+ if (!vfp_access_check(s)) {
54
+ return true;
55
+ }
56
+
57
+ opr_sz = (1 + a->q) * 8;
58
+ fpst = get_fpstatus_ptr(1);
59
+ fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
60
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
61
+ vfp_reg_offset(1, a->vn),
62
+ vfp_reg_offset(1, a->vm),
63
+ fpst, opr_sz, opr_sz, a->rot,
64
+ fn_gvec_ptr);
65
+ tcg_temp_free_ptr(fpst);
66
+ return true;
67
+}
68
diff --git a/target/arm/translate.c b/target/arm/translate.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/translate.c
71
+++ b/target/arm/translate.c
72
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
73
bool is_long = false, q = extract32(insn, 6, 1);
74
bool ptr_is_env = false;
75
76
- if ((insn & 0xfea00f10) == 0xfc800800) {
77
- /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
78
- int size = extract32(insn, 20, 1);
79
- data = extract32(insn, 24, 1); /* rot */
80
- if (!dc_isar_feature(aa32_vcma, s)
81
- || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
82
- return 1;
83
- }
84
- fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
85
- } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
86
+ if ((insn & 0xfeb00f00) == 0xfc200d00) {
87
/* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
88
bool u = extract32(insn, 4, 1);
89
if (!dc_isar_feature(aa32_dp, s)) {
90
--
91
2.20.1
92
93
New patch
1
Convert the V[US]DOT (vector) insns to decodetree.
1
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-7-peter.maydell@linaro.org
6
---
7
target/arm/neon-shared.decode | 4 ++++
8
target/arm/translate-neon.inc.c | 32 ++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 9 +--------
10
3 files changed, 37 insertions(+), 8 deletions(-)
11
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-shared.decode
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@ VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
17
18
VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
+
21
+# VUDOT and VSDOT
22
+VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
23
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
24
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-neon.inc.c
27
+++ b/target/arm/translate-neon.inc.c
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
29
tcg_temp_free_ptr(fpst);
30
return true;
31
}
32
+
33
+static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
34
+{
35
+ int opr_sz;
36
+ gen_helper_gvec_3 *fn_gvec;
37
+
38
+ if (!dc_isar_feature(aa32_dp, s)) {
39
+ return false;
40
+ }
41
+
42
+ /* UNDEF accesses to D16-D31 if they don't exist. */
43
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
44
+ ((a->vd | a->vn | a->vm) & 0x10)) {
45
+ return false;
46
+ }
47
+
48
+ if ((a->vn | a->vm | a->vd) & a->q) {
49
+ return false;
50
+ }
51
+
52
+ if (!vfp_access_check(s)) {
53
+ return true;
54
+ }
55
+
56
+ opr_sz = (1 + a->q) * 8;
57
+ fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
58
+ tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
59
+ vfp_reg_offset(1, a->vn),
60
+ vfp_reg_offset(1, a->vm),
61
+ opr_sz, opr_sz, 0, fn_gvec);
62
+ return true;
63
+}
64
diff --git a/target/arm/translate.c b/target/arm/translate.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/translate.c
67
+++ b/target/arm/translate.c
68
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
69
bool is_long = false, q = extract32(insn, 6, 1);
70
bool ptr_is_env = false;
71
72
- if ((insn & 0xfeb00f00) == 0xfc200d00) {
73
- /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
74
- bool u = extract32(insn, 4, 1);
75
- if (!dc_isar_feature(aa32_dp, s)) {
76
- return 1;
77
- }
78
- fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
79
- } else if ((insn & 0xff300f10) == 0xfc200810) {
80
+ if ((insn & 0xff300f10) == 0xfc200810) {
81
/* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
82
int is_s = extract32(insn, 23, 1);
83
if (!dc_isar_feature(aa32_fhm, s)) {
84
--
85
2.20.1
86
87
New patch
1
Convert the VFM[AS]L (vector) insns to decodetree. This is the last
2
insn in the legacy decoder for the 3same_ext group, so we can
3
delete the legacy decoder function for the group entirely.
1
4
5
Note that in disas_thumb2_insn() the parts of this encoding space
6
where the decodetree decoder returns false will correctly be directed
7
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
8
into disas_coproc_insn() by mistake.
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20200430181003.21682-8-peter.maydell@linaro.org
13
---
14
target/arm/neon-shared.decode | 6 +++
15
target/arm/translate-neon.inc.c | 31 +++++++++++
16
target/arm/translate.c | 92 +--------------------------------
17
3 files changed, 38 insertions(+), 91 deletions(-)
18
19
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/neon-shared.decode
22
+++ b/target/arm/neon-shared.decode
23
@@ -XXX,XX +XXX,XX @@ VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
24
# VUDOT and VSDOT
25
VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
26
vm=%vm_dp vn=%vn_dp vd=%vd_dp
27
+
28
+# VFM[AS]L
29
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
30
+ vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
31
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
32
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
33
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/translate-neon.inc.c
36
+++ b/target/arm/translate-neon.inc.c
37
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
38
opr_sz, opr_sz, 0, fn_gvec);
39
return true;
40
}
41
+
42
+static bool trans_VFML(DisasContext *s, arg_VFML *a)
43
+{
44
+ int opr_sz;
45
+
46
+ if (!dc_isar_feature(aa32_fhm, s)) {
47
+ return false;
48
+ }
49
+
50
+ /* UNDEF accesses to D16-D31 if they don't exist. */
51
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
52
+ (a->vd & 0x10)) {
53
+ return false;
54
+ }
55
+
56
+ if (a->vd & a->q) {
57
+ return false;
58
+ }
59
+
60
+ if (!vfp_access_check(s)) {
61
+ return true;
62
+ }
63
+
64
+ opr_sz = (1 + a->q) * 8;
65
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
66
+ vfp_reg_offset(a->q, a->vn),
67
+ vfp_reg_offset(a->q, a->vm),
68
+ cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
69
+ gen_helper_gvec_fmlal_a32);
70
+ return true;
71
+}
72
diff --git a/target/arm/translate.c b/target/arm/translate.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/arm/translate.c
75
+++ b/target/arm/translate.c
76
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
77
return 0;
78
}
79
80
-/* Advanced SIMD three registers of the same length extension.
81
- * 31 25 23 22 20 16 12 11 10 9 8 3 0
82
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
83
- * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
84
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
85
- */
86
-static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
87
-{
88
- gen_helper_gvec_3 *fn_gvec = NULL;
89
- gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
90
- int rd, rn, rm, opr_sz;
91
- int data = 0;
92
- int off_rn, off_rm;
93
- bool is_long = false, q = extract32(insn, 6, 1);
94
- bool ptr_is_env = false;
95
-
96
- if ((insn & 0xff300f10) == 0xfc200810) {
97
- /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
98
- int is_s = extract32(insn, 23, 1);
99
- if (!dc_isar_feature(aa32_fhm, s)) {
100
- return 1;
101
- }
102
- is_long = true;
103
- data = is_s; /* is_2 == 0 */
104
- fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
105
- ptr_is_env = true;
106
- } else {
107
- return 1;
108
- }
109
-
110
- VFP_DREG_D(rd, insn);
111
- if (rd & q) {
112
- return 1;
113
- }
114
- if (q || !is_long) {
115
- VFP_DREG_N(rn, insn);
116
- VFP_DREG_M(rm, insn);
117
- if ((rn | rm) & q & !is_long) {
118
- return 1;
119
- }
120
- off_rn = vfp_reg_offset(1, rn);
121
- off_rm = vfp_reg_offset(1, rm);
122
- } else {
123
- rn = VFP_SREG_N(insn);
124
- rm = VFP_SREG_M(insn);
125
- off_rn = vfp_reg_offset(0, rn);
126
- off_rm = vfp_reg_offset(0, rm);
127
- }
128
-
129
- if (s->fp_excp_el) {
130
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
131
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
132
- return 0;
133
- }
134
- if (!s->vfp_enabled) {
135
- return 1;
136
- }
137
-
138
- opr_sz = (1 + q) * 8;
139
- if (fn_gvec_ptr) {
140
- TCGv_ptr ptr;
141
- if (ptr_is_env) {
142
- ptr = cpu_env;
143
- } else {
144
- ptr = get_fpstatus_ptr(1);
145
- }
146
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
147
- opr_sz, opr_sz, data, fn_gvec_ptr);
148
- if (!ptr_is_env) {
149
- tcg_temp_free_ptr(ptr);
150
- }
151
- } else {
152
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
153
- opr_sz, opr_sz, data, fn_gvec);
154
- }
155
- return 0;
156
-}
157
-
158
/* Advanced SIMD two registers and a scalar extension.
159
* 31 24 23 22 20 16 12 11 10 9 8 3 0
160
* +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
161
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
162
}
163
}
164
}
165
- } else if ((insn & 0x0e000a00) == 0x0c000800
166
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
167
- if (disas_neon_insn_3same_ext(s, insn)) {
168
- goto illegal_op;
169
- }
170
- return;
171
} else if ((insn & 0x0f000a00) == 0x0e000800
172
&& arm_dc_feature(s, ARM_FEATURE_V8)) {
173
if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
174
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
175
}
176
break;
177
}
178
- if ((insn & 0xfe000a00) == 0xfc000800
179
+ if ((insn & 0xff000a00) == 0xfe000800
180
&& arm_dc_feature(s, ARM_FEATURE_V8)) {
181
/* The Thumb2 and ARM encodings are identical. */
182
- if (disas_neon_insn_3same_ext(s, insn)) {
183
- goto illegal_op;
184
- }
185
- } else if ((insn & 0xff000a00) == 0xfe000800
186
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
187
- /* The Thumb2 and ARM encodings are identical. */
188
if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
189
goto illegal_op;
190
}
191
--
192
2.20.1
193
194
1
The code path for booting firmware doesn't set env->boot_info. At
1
Convert VCMLA (scalar) in the 2reg-scalar-ext group to decodetree.
2
first sight this looks odd, so add a comment saying why we don't.
3
2
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
5
Message-id: 20200430181003.21682-9-peter.maydell@linaro.org
7
Message-id: 20190131112240.8395-5-peter.maydell@linaro.org
8
---
6
---
9
hw/arm/boot.c | 3 ++-
7
target/arm/neon-shared.decode | 5 +++++
10
1 file changed, 2 insertions(+), 1 deletion(-)
8
target/arm/translate-neon.inc.c | 40 +++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 26 +--------------------
10
3 files changed, 46 insertions(+), 25 deletions(-)
11
11
12
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/arm/boot.c
14
--- a/target/arm/neon-shared.decode
15
+++ b/hw/arm/boot.c
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@ static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info)
16
@@ -XXX,XX +XXX,XX @@ VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
17
17
vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
18
/*
18
VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
19
* We will start from address 0 (typically a boot ROM image) in the
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
20
- * same way as hardware.
20
+
21
+ * same way as hardware. Leave env->boot_info NULL, so that
21
+VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
22
+ * do_cpu_reset() knows it does not need to alter the PC on reset.
22
+ vn=%vn_dp vd=%vd_dp size=0
23
*/
23
+VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
24
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
25
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate-neon.inc.c
28
+++ b/target/arm/translate-neon.inc.c
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
30
gen_helper_gvec_fmlal_a32);
31
return true;
24
}
32
}
33
+
34
+static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
35
+{
36
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
37
+ int opr_sz;
38
+ TCGv_ptr fpst;
39
+
40
+ if (!dc_isar_feature(aa32_vcma, s)) {
41
+ return false;
42
+ }
43
+ if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
44
+ return false;
45
+ }
46
+
47
+ /* UNDEF accesses to D16-D31 if they don't exist. */
48
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
49
+ ((a->vd | a->vn | a->vm) & 0x10)) {
50
+ return false;
51
+ }
52
+
53
+ if ((a->vd | a->vn) & a->q) {
54
+ return false;
55
+ }
56
+
57
+ if (!vfp_access_check(s)) {
58
+ return true;
59
+ }
60
+
61
+ fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
62
+ : gen_helper_gvec_fcmlah_idx);
63
+ opr_sz = (1 + a->q) * 8;
64
+ fpst = get_fpstatus_ptr(1);
65
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
66
+ vfp_reg_offset(1, a->vn),
67
+ vfp_reg_offset(1, a->vm),
68
+ fpst, opr_sz, opr_sz,
69
+ (a->index << 2) | a->rot, fn_gvec_ptr);
70
+ tcg_temp_free_ptr(fpst);
71
+ return true;
72
+}
73
diff --git a/target/arm/translate.c b/target/arm/translate.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/translate.c
76
+++ b/target/arm/translate.c
77
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
78
bool is_long = false, q = extract32(insn, 6, 1);
79
bool ptr_is_env = false;
80
81
- if ((insn & 0xff000f10) == 0xfe000800) {
82
- /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
83
- int rot = extract32(insn, 20, 2);
84
- int size = extract32(insn, 23, 1);
85
- int index;
86
-
87
- if (!dc_isar_feature(aa32_vcma, s)) {
88
- return 1;
89
- }
90
- if (size == 0) {
91
- if (!dc_isar_feature(aa32_fp16_arith, s)) {
92
- return 1;
93
- }
94
- /* For fp16, rm is just Vm, and index is M. */
95
- rm = extract32(insn, 0, 4);
96
- index = extract32(insn, 5, 1);
97
- } else {
98
- /* For fp32, rm is the usual M:Vm, and index is 0. */
99
- VFP_DREG_M(rm, insn);
100
- index = 0;
101
- }
102
- data = (index << 2) | rot;
103
- fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
104
- : gen_helper_gvec_fcmlah_idx);
105
- } else if ((insn & 0xffb00f00) == 0xfe200d00) {
106
+ if ((insn & 0xffb00f00) == 0xfe200d00) {
107
/* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
108
int u = extract32(insn, 4, 1);
25
109
26
--
110
--
27
2.20.1
111
2.20.1
28
112
29
113
New patch
1
Convert the V[US]DOT (scalar) insns in the 2reg-scalar-ext group
2
to decodetree.
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-10-peter.maydell@linaro.org
7
---
8
target/arm/neon-shared.decode | 3 +++
9
target/arm/translate-neon.inc.c | 35 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 13 +-----------
11
3 files changed, 39 insertions(+), 12 deletions(-)
12
13
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-shared.decode
16
+++ b/target/arm/neon-shared.decode
17
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
18
vn=%vn_dp vd=%vd_dp size=0
19
VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
20
vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
21
+
22
+VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
23
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
24
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-neon.inc.c
27
+++ b/target/arm/translate-neon.inc.c
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
29
tcg_temp_free_ptr(fpst);
30
return true;
31
}
32
+
33
+static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
34
+{
35
+ gen_helper_gvec_3 *fn_gvec;
36
+ int opr_sz;
37
+ TCGv_ptr fpst;
38
+
39
+ if (!dc_isar_feature(aa32_dp, s)) {
40
+ return false;
41
+ }
42
+
43
+ /* UNDEF accesses to D16-D31 if they don't exist. */
44
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
45
+ ((a->vd | a->vn) & 0x10)) {
46
+ return false;
47
+ }
48
+
49
+ if ((a->vd | a->vn) & a->q) {
50
+ return false;
51
+ }
52
+
53
+ if (!vfp_access_check(s)) {
54
+ return true;
55
+ }
56
+
57
+ fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
58
+ opr_sz = (1 + a->q) * 8;
59
+ fpst = get_fpstatus_ptr(1);
60
+ tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
61
+ vfp_reg_offset(1, a->vn),
62
+ vfp_reg_offset(1, a->rm),
63
+ opr_sz, opr_sz, a->index, fn_gvec);
64
+ tcg_temp_free_ptr(fpst);
65
+ return true;
66
+}
67
diff --git a/target/arm/translate.c b/target/arm/translate.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/translate.c
70
+++ b/target/arm/translate.c
71
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
72
bool is_long = false, q = extract32(insn, 6, 1);
73
bool ptr_is_env = false;
74
75
- if ((insn & 0xffb00f00) == 0xfe200d00) {
76
- /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
77
- int u = extract32(insn, 4, 1);
78
-
79
- if (!dc_isar_feature(aa32_dp, s)) {
80
- return 1;
81
- }
82
- fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
83
- /* rm is just Vm, and index is M. */
84
- data = extract32(insn, 5, 1); /* index */
85
- rm = extract32(insn, 0, 4);
86
- } else if ((insn & 0xffa00f10) == 0xfe000810) {
87
+ if ((insn & 0xffa00f10) == 0xfe000810) {
88
/* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
89
int is_s = extract32(insn, 20, 1);
90
int vm20 = extract32(insn, 0, 3);
91
--
92
2.20.1
93
94
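As background for the V[US]DOT conversion above: the udot/sdot "idx" helpers the patch hooks up compute, per 32-bit destination element, a dot product of four byte pairs, with the second operand taken from the single 32-bit group of Vm selected by a->index (passed through as the gvec "data" value). A reference model of one signed element, shown as an illustration only and not the actual helper implementation:

/* Reference model of one VSDOT (scalar) result element: accumulate the
 * dot product of four int8 pairs into a 32-bit accumulator.  The 'm'
 * bytes come from the indexed 32-bit group of Vm.  (Sketch, not the
 * gen_helper_gvec_sdot_idx_b code itself.)
 */
static int32_t sdot_idx_element_ref(int32_t acc, const int8_t n[4], const int8_t m[4])
{
    int i;

    for (i = 0; i < 4; i++) {
        acc += (int32_t)n[i] * (int32_t)m[i];
    }
    return acc;
}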
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Convert the VFM[AS]L (scalar) insns in the 2reg-scalar-ext group
2
2
to decodetree. These are the last ones in the group, so we can remove
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
all the legacy decode for the group.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20190201195404.30486-2-richard.henderson@linaro.org
5
Note that in disas_thumb2_insn() the parts of this encoding space
6
where the decodetree decoder returns false will correctly be directed
7
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
8
into disas_coproc_insn() by mistake.
9
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20200430181003.21682-11-peter.maydell@linaro.org
7
---
13
---
8
linux-user/aarch64/target_syscall.h | 7 ++++++
14
target/arm/neon-shared.decode | 7 +++
9
linux-user/syscall.c | 36 +++++++++++++++++++++++++++++
15
target/arm/translate-neon.inc.c | 32 ++++++++++
10
2 files changed, 43 insertions(+)
16
target/arm/translate.c | 107 +-------------------------------
11
17
3 files changed, 40 insertions(+), 106 deletions(-)
12
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
18
19
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/aarch64/target_syscall.h
21
--- a/target/arm/neon-shared.decode
15
+++ b/linux-user/aarch64/target_syscall.h
22
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
23
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
17
#define TARGET_PR_SVE_SET_VL 50
24
18
#define TARGET_PR_SVE_GET_VL 51
25
VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
19
26
vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
+#define TARGET_PR_PAC_RESET_KEYS 54
27
+
21
+# define TARGET_PR_PAC_APIAKEY (1 << 0)
28
+%vfml_scalar_q0_rm 0:3 5:1
22
+# define TARGET_PR_PAC_APIBKEY (1 << 1)
29
+%vfml_scalar_q1_index 5:1 3:1
23
+# define TARGET_PR_PAC_APDAKEY (1 << 2)
30
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
24
+# define TARGET_PR_PAC_APDBKEY (1 << 3)
31
+ rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
25
+# define TARGET_PR_PAC_APGAKEY (1 << 4)
32
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
26
+
33
+ index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
27
void arm_init_pauth_key(ARMPACKey *key);
34
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
28
29
#endif /* AARCH64_TARGET_SYSCALL_H */
30
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
31
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
32
--- a/linux-user/syscall.c
36
--- a/target/arm/translate-neon.inc.c
33
+++ b/linux-user/syscall.c
37
+++ b/target/arm/translate-neon.inc.c
34
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
39
tcg_temp_free_ptr(fpst);
40
return true;
41
}
42
+
43
+static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
44
+{
45
+ int opr_sz;
46
+
47
+ if (!dc_isar_feature(aa32_fhm, s)) {
48
+ return false;
49
+ }
50
+
51
+ /* UNDEF accesses to D16-D31 if they don't exist. */
52
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
53
+ ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
54
+ return false;
55
+ }
56
+
57
+ if (a->vd & a->q) {
58
+ return false;
59
+ }
60
+
61
+ if (!vfp_access_check(s)) {
62
+ return true;
63
+ }
64
+
65
+ opr_sz = (1 + a->q) * 8;
66
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
67
+ vfp_reg_offset(a->q, a->vn),
68
+ vfp_reg_offset(a->q, a->rm),
69
+ cpu_env, opr_sz, opr_sz,
70
+ (a->index << 2) | a->s, /* is_2 == 0 */
71
+ gen_helper_gvec_fmlal_idx_a32);
72
+ return true;
73
+}
74
diff --git a/target/arm/translate.c b/target/arm/translate.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate.c
77
+++ b/target/arm/translate.c
78
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
79
}
80
81
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
82
-#define VFP_SREG(insn, bigbit, smallbit) \
83
- ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
84
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
85
if (dc_isar_feature(aa32_simd_r32, s)) { \
86
reg = (((insn) >> (bigbit)) & 0x0f) \
87
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
88
reg = ((insn) >> (bigbit)) & 0x0f; \
89
}} while (0)
90
91
-#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
92
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
93
-#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
94
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
95
-#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
96
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
97
98
static void gen_neon_dup_low16(TCGv_i32 var)
99
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
100
return 0;
101
}
102
103
-/* Advanced SIMD two registers and a scalar extension.
104
- * 31 24 23 22 20 16 12 11 10 9 8 3 0
105
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
106
- * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
107
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
108
- *
109
- */
110
-
111
-static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
112
-{
113
- gen_helper_gvec_3 *fn_gvec = NULL;
114
- gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
115
- int rd, rn, rm, opr_sz, data;
116
- int off_rn, off_rm;
117
- bool is_long = false, q = extract32(insn, 6, 1);
118
- bool ptr_is_env = false;
119
-
120
- if ((insn & 0xffa00f10) == 0xfe000810) {
121
- /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
122
- int is_s = extract32(insn, 20, 1);
123
- int vm20 = extract32(insn, 0, 3);
124
- int vm3 = extract32(insn, 3, 1);
125
- int m = extract32(insn, 5, 1);
126
- int index;
127
-
128
- if (!dc_isar_feature(aa32_fhm, s)) {
129
- return 1;
130
- }
131
- if (q) {
132
- rm = vm20;
133
- index = m * 2 + vm3;
134
- } else {
135
- rm = vm20 * 2 + m;
136
- index = vm3;
137
- }
138
- is_long = true;
139
- data = (index << 2) | is_s; /* is_2 == 0 */
140
- fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
141
- ptr_is_env = true;
142
- } else {
143
- return 1;
144
- }
145
-
146
- VFP_DREG_D(rd, insn);
147
- if (rd & q) {
148
- return 1;
149
- }
150
- if (q || !is_long) {
151
- VFP_DREG_N(rn, insn);
152
- if (rn & q & !is_long) {
153
- return 1;
154
- }
155
- off_rn = vfp_reg_offset(1, rn);
156
- off_rm = vfp_reg_offset(1, rm);
157
- } else {
158
- rn = VFP_SREG_N(insn);
159
- off_rn = vfp_reg_offset(0, rn);
160
- off_rm = vfp_reg_offset(0, rm);
161
- }
162
- if (s->fp_excp_el) {
163
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
164
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
165
- return 0;
166
- }
167
- if (!s->vfp_enabled) {
168
- return 1;
169
- }
170
-
171
- opr_sz = (1 + q) * 8;
172
- if (fn_gvec_ptr) {
173
- TCGv_ptr ptr;
174
- if (ptr_is_env) {
175
- ptr = cpu_env;
176
- } else {
177
- ptr = get_fpstatus_ptr(1);
178
- }
179
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
180
- opr_sz, opr_sz, data, fn_gvec_ptr);
181
- if (!ptr_is_env) {
182
- tcg_temp_free_ptr(ptr);
183
- }
184
- } else {
185
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
186
- opr_sz, opr_sz, data, fn_gvec);
187
- }
188
- return 0;
189
-}
190
-
191
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
192
{
193
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
194
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
195
}
35
}
196
}
36
}
197
}
37
return ret;
198
- } else if ((insn & 0x0f000a00) == 0x0e000800
38
+ case TARGET_PR_PAC_RESET_KEYS:
199
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
39
+ {
200
- if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
40
+ CPUARMState *env = cpu_env;
201
- goto illegal_op;
41
+ ARMCPU *cpu = arm_env_get_cpu(env);
202
- }
42
+
203
- return;
43
+ if (arg3 || arg4 || arg5) {
204
}
44
+ return -TARGET_EINVAL;
205
goto illegal_op;
45
+ }
206
}
46
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
207
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
47
+ int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
208
}
48
+ TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
209
break;
49
+ TARGET_PR_PAC_APGAKEY);
210
}
50
+ if (arg2 == 0) {
211
- if ((insn & 0xff000a00) == 0xfe000800
51
+ arg2 = all;
212
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
52
+ } else if (arg2 & ~all) {
213
- /* The Thumb2 and ARM encodings are identical. */
53
+ return -TARGET_EINVAL;
214
- if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
54
+ }
215
- goto illegal_op;
55
+ if (arg2 & TARGET_PR_PAC_APIAKEY) {
216
- }
56
+ arm_init_pauth_key(&env->apia_key);
217
- } else if (((insn >> 24) & 3) == 3) {
57
+ }
218
+ if (((insn >> 24) & 3) == 3) {
58
+ if (arg2 & TARGET_PR_PAC_APIBKEY) {
219
/* Translate into the equivalent ARM encoding. */
59
+ arm_init_pauth_key(&env->apib_key);
220
insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
60
+ }
221
if (disas_neon_data_insn(s, insn)) {
61
+ if (arg2 & TARGET_PR_PAC_APDAKEY) {
62
+ arm_init_pauth_key(&env->apda_key);
63
+ }
64
+ if (arg2 & TARGET_PR_PAC_APDBKEY) {
65
+ arm_init_pauth_key(&env->apdb_key);
66
+ }
67
+ if (arg2 & TARGET_PR_PAC_APGAKEY) {
68
+ arm_init_pauth_key(&env->apga_key);
69
+ }
70
+ return 0;
71
+ }
72
+ }
73
+ return -TARGET_EINVAL;
74
#endif /* AARCH64 */
75
case PR_GET_SECCOMP:
76
case PR_SET_SECCOMP:
77
--
222
--
78
2.20.1
223
2.20.1
79
224
80
225
New patch
1
1
Convert the Neon "load/store multiple structures" insns to decodetree.
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-12-peter.maydell@linaro.org
6
---
7
target/arm/neon-ls.decode | 7 ++
8
target/arm/translate-neon.inc.c | 124 ++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 91 +----------------------
10
3 files changed, 133 insertions(+), 89 deletions(-)
11
12
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-ls.decode
15
+++ b/target/arm/neon-ls.decode
16
@@ -XXX,XX +XXX,XX @@
17
# 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
18
# This file works on the A32 encoding only; calling code for T32 has to
19
# transform the insn into the A32 version first.
20
+
21
+%vd_dp 22:1 12:4
22
+
23
+# Neon load/store multiple structures
24
+
25
+VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
26
+ vd=%vd_dp
27
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-neon.inc.c
30
+++ b/target/arm/translate-neon.inc.c
31
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
32
gen_helper_gvec_fmlal_idx_a32);
33
return true;
34
}
35
+
36
+static struct {
37
+ int nregs;
38
+ int interleave;
39
+ int spacing;
40
+} const neon_ls_element_type[11] = {
41
+ {1, 4, 1},
42
+ {1, 4, 2},
43
+ {4, 1, 1},
44
+ {2, 2, 2},
45
+ {1, 3, 1},
46
+ {1, 3, 2},
47
+ {3, 1, 1},
48
+ {1, 1, 1},
49
+ {1, 2, 1},
50
+ {1, 2, 2},
51
+ {2, 1, 1}
52
+};
53
+
54
+static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
55
+ int stride)
56
+{
57
+ if (rm != 15) {
58
+ TCGv_i32 base;
59
+
60
+ base = load_reg(s, rn);
61
+ if (rm == 13) {
62
+ tcg_gen_addi_i32(base, base, stride);
63
+ } else {
64
+ TCGv_i32 index;
65
+ index = load_reg(s, rm);
66
+ tcg_gen_add_i32(base, base, index);
67
+ tcg_temp_free_i32(index);
68
+ }
69
+ store_reg(s, rn, base);
70
+ }
71
+}
72
+
73
+static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
74
+{
75
+ /* Neon load/store multiple structures */
76
+ int nregs, interleave, spacing, reg, n;
77
+ MemOp endian = s->be_data;
78
+ int mmu_idx = get_mem_index(s);
79
+ int size = a->size;
80
+ TCGv_i64 tmp64;
81
+ TCGv_i32 addr, tmp;
82
+
83
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
84
+ return false;
85
+ }
86
+
87
+ /* UNDEF accesses to D16-D31 if they don't exist */
88
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
89
+ return false;
90
+ }
91
+ if (a->itype > 10) {
92
+ return false;
93
+ }
94
+ /* Catch UNDEF cases for bad values of align field */
95
+ switch (a->itype & 0xc) {
96
+ case 4:
97
+ if (a->align >= 2) {
98
+ return false;
99
+ }
100
+ break;
101
+ case 8:
102
+ if (a->align == 3) {
103
+ return false;
104
+ }
105
+ break;
106
+ default:
107
+ break;
108
+ }
109
+ nregs = neon_ls_element_type[a->itype].nregs;
110
+ interleave = neon_ls_element_type[a->itype].interleave;
111
+ spacing = neon_ls_element_type[a->itype].spacing;
112
+ if (size == 3 && (interleave | spacing) != 1) {
113
+ return false;
114
+ }
115
+
116
+ if (!vfp_access_check(s)) {
117
+ return true;
118
+ }
119
+
120
+ /* For our purposes, bytes are always little-endian. */
121
+ if (size == 0) {
122
+ endian = MO_LE;
123
+ }
124
+ /*
125
+ * Consecutive little-endian elements from a single register
126
+ * can be promoted to a larger little-endian operation.
127
+ */
128
+ if (interleave == 1 && endian == MO_LE) {
129
+ size = 3;
130
+ }
131
+ tmp64 = tcg_temp_new_i64();
132
+ addr = tcg_temp_new_i32();
133
+ tmp = tcg_const_i32(1 << size);
134
+ load_reg_var(s, addr, a->rn);
135
+ for (reg = 0; reg < nregs; reg++) {
136
+ for (n = 0; n < 8 >> size; n++) {
137
+ int xs;
138
+ for (xs = 0; xs < interleave; xs++) {
139
+ int tt = a->vd + reg + spacing * xs;
140
+
141
+ if (a->l) {
142
+ gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
143
+ neon_store_element64(tt, n, size, tmp64);
144
+ } else {
145
+ neon_load_element64(tmp64, tt, n, size);
146
+ gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
147
+ }
148
+ tcg_gen_add_i32(addr, addr, tmp);
149
+ }
150
+ }
151
+ }
152
+ tcg_temp_free_i32(addr);
153
+ tcg_temp_free_i32(tmp);
154
+ tcg_temp_free_i64(tmp64);
155
+
156
+ gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
157
+ return true;
158
+}
159
diff --git a/target/arm/translate.c b/target/arm/translate.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/arm/translate.c
162
+++ b/target/arm/translate.c
163
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
164
}
165
166
167
-static struct {
168
- int nregs;
169
- int interleave;
170
- int spacing;
171
-} const neon_ls_element_type[11] = {
172
- {1, 4, 1},
173
- {1, 4, 2},
174
- {4, 1, 1},
175
- {2, 2, 2},
176
- {1, 3, 1},
177
- {1, 3, 2},
178
- {3, 1, 1},
179
- {1, 1, 1},
180
- {1, 2, 1},
181
- {1, 2, 2},
182
- {2, 1, 1}
183
-};
184
-
185
/* Translate a NEON load/store element instruction. Return nonzero if the
186
instruction is invalid. */
187
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
188
{
189
int rd, rn, rm;
190
- int op;
191
int nregs;
192
- int interleave;
193
- int spacing;
194
int stride;
195
int size;
196
int reg;
197
int load;
198
- int n;
199
int vec_size;
200
- int mmu_idx;
201
- MemOp endian;
202
TCGv_i32 addr;
203
TCGv_i32 tmp;
204
- TCGv_i32 tmp2;
205
- TCGv_i64 tmp64;
206
207
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
208
return 1;
209
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
210
rn = (insn >> 16) & 0xf;
211
rm = insn & 0xf;
212
load = (insn & (1 << 21)) != 0;
213
- endian = s->be_data;
214
- mmu_idx = get_mem_index(s);
215
if ((insn & (1 << 23)) == 0) {
216
- /* Load store all elements. */
217
- op = (insn >> 8) & 0xf;
218
- size = (insn >> 6) & 3;
219
- if (op > 10)
220
- return 1;
221
- /* Catch UNDEF cases for bad values of align field */
222
- switch (op & 0xc) {
223
- case 4:
224
- if (((insn >> 5) & 1) == 1) {
225
- return 1;
226
- }
227
- break;
228
- case 8:
229
- if (((insn >> 4) & 3) == 3) {
230
- return 1;
231
- }
232
- break;
233
- default:
234
- break;
235
- }
236
- nregs = neon_ls_element_type[op].nregs;
237
- interleave = neon_ls_element_type[op].interleave;
238
- spacing = neon_ls_element_type[op].spacing;
239
- if (size == 3 && (interleave | spacing) != 1) {
240
- return 1;
241
- }
242
- /* For our purposes, bytes are always little-endian. */
243
- if (size == 0) {
244
- endian = MO_LE;
245
- }
246
- /* Consecutive little-endian elements from a single register
247
- * can be promoted to a larger little-endian operation.
248
- */
249
- if (interleave == 1 && endian == MO_LE) {
250
- size = 3;
251
- }
252
- tmp64 = tcg_temp_new_i64();
253
- addr = tcg_temp_new_i32();
254
- tmp2 = tcg_const_i32(1 << size);
255
- load_reg_var(s, addr, rn);
256
- for (reg = 0; reg < nregs; reg++) {
257
- for (n = 0; n < 8 >> size; n++) {
258
- int xs;
259
- for (xs = 0; xs < interleave; xs++) {
260
- int tt = rd + reg + spacing * xs;
261
-
262
- if (load) {
263
- gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
264
- neon_store_element64(tt, n, size, tmp64);
265
- } else {
266
- neon_load_element64(tmp64, tt, n, size);
267
- gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
268
- }
269
- tcg_gen_add_i32(addr, addr, tmp2);
270
- }
271
- }
272
- }
273
- tcg_temp_free_i32(addr);
274
- tcg_temp_free_i32(tmp2);
275
- tcg_temp_free_i64(tmp64);
276
- stride = nregs * interleave * 8;
277
+ /* Load store all elements -- handled already by decodetree */
278
+ return 1;
279
} else {
280
size = (insn >> 10) & 3;
281
if (size == 3) {
282
--
283
2.20.1
284
285
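For reference, the gen_neon_ldst_base_update() helper added in the load/store multiple patch above implements the usual Neon post-indexing rules: Rm == 15 means no writeback, Rm == 13 means advance the base by the transfer size, and any other Rm adds that register. The assembly examples below are hypothetical illustrations chosen here, not taken from the patch:

/* Hypothetical examples of the three base-update forms.  A VLD1 of four
 * D registers transfers 4 * 8 = 32 bytes, which is the "stride" passed
 * to gen_neon_ldst_base_update():
 *
 *   vld1.8 {d0-d3}, [r1]         Rm == 15: r1 is left unchanged
 *   vld1.8 {d0-d3}, [r1]!        Rm == 13: r1 += 32
 *   vld1.8 {d0-d3}, [r1], r2     otherwise: r1 += r2
 */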
1
Factor out the "boot via firmware" code path from arm_load_kernel()
1
Convert the Neon "load single structure to all lanes" insns to
2
into its own function.
2
decodetree.
3
4
This commit only moves code around; no semantic changes.
5
3
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
6
Message-id: 20200430181003.21682-13-peter.maydell@linaro.org
9
Message-id: 20190131112240.8395-4-peter.maydell@linaro.org
10
---
7
---
11
hw/arm/boot.c | 92 +++++++++++++++++++++++++++------------------------
8
target/arm/neon-ls.decode | 5 +++
12
1 file changed, 49 insertions(+), 43 deletions(-)
9
target/arm/translate-neon.inc.c | 73 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 55 +------------------------
11
3 files changed, 80 insertions(+), 53 deletions(-)
13
12
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
13
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/boot.c
15
--- a/target/arm/neon-ls.decode
17
+++ b/hw/arm/boot.c
16
+++ b/target/arm/neon-ls.decode
18
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
17
@@ -XXX,XX +XXX,XX @@
19
}
18
19
VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
20
vd=%vd_dp
21
+
22
+# Neon load single element to all lanes
23
+
24
+VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
25
+ vd=%vd_dp
26
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/translate-neon.inc.c
29
+++ b/target/arm/translate-neon.inc.c
30
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
31
gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
32
return true;
20
}
33
}
21
34
+
22
+static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info)
35
+static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
23
+{
36
+{
24
+ /* Set up for booting firmware (which might load a kernel via fw_cfg) */
37
+ /* Neon load single structure to all lanes */
38
+ int reg, stride, vec_size;
39
+ int vd = a->vd;
40
+ int size = a->size;
41
+ int nregs = a->n + 1;
42
+ TCGv_i32 addr, tmp;
25
+
43
+
26
+ if (have_dtb(info)) {
44
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
27
+ /*
45
+ return false;
28
+ * If we have a device tree blob, but no kernel to supply it to (or
29
+ * the kernel is supposed to be loaded by the bootloader), copy the
30
+ * DTB to the base of RAM for the bootloader to pick up.
31
+ */
32
+ info->dtb_start = info->loader_start;
33
+ }
46
+ }
34
+
47
+
35
+ if (info->kernel_filename) {
48
+ /* UNDEF accesses to D16-D31 if they don't exist */
36
+ FWCfgState *fw_cfg;
49
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
37
+ bool try_decompressing_kernel;
50
+ return false;
51
+ }
38
+
52
+
39
+ fw_cfg = fw_cfg_find();
53
+ if (size == 3) {
40
+ try_decompressing_kernel = arm_feature(&cpu->env,
54
+ if (nregs != 4 || a->a == 0) {
41
+ ARM_FEATURE_AARCH64);
55
+ return false;
56
+ }
57
+ /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
58
+ size = 2;
59
+ }
60
+ if (nregs == 1 && a->a == 1 && size == 0) {
61
+ return false;
62
+ }
63
+ if (nregs == 3 && a->a == 1) {
64
+ return false;
65
+ }
42
+
66
+
43
+ /*
67
+ if (!vfp_access_check(s)) {
44
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
68
+ return true;
45
+ * We don't process them here at all, it's all left to the
46
+ * firmware.
47
+ */
48
+ load_image_to_fw_cfg(fw_cfg,
49
+ FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
50
+ info->kernel_filename,
51
+ try_decompressing_kernel);
52
+ load_image_to_fw_cfg(fw_cfg,
53
+ FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
54
+ info->initrd_filename, false);
55
+
56
+ if (info->kernel_cmdline) {
57
+ fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
58
+ strlen(info->kernel_cmdline) + 1);
59
+ fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
60
+ info->kernel_cmdline);
61
+ }
62
+ }
69
+ }
63
+
70
+
64
+ /*
71
+ /*
65
+ * We will start from address 0 (typically a boot ROM image) in the
72
+ * VLD1 to all lanes: T bit indicates how many Dregs to write.
66
+ * same way as hardware.
73
+ * VLD2/3/4 to all lanes: T bit indicates register stride.
67
+ */
74
+ */
75
+ stride = a->t ? 2 : 1;
76
+ vec_size = nregs == 1 ? stride * 8 : 8;
77
+
78
+ tmp = tcg_temp_new_i32();
79
+ addr = tcg_temp_new_i32();
80
+ load_reg_var(s, addr, a->rn);
81
+ for (reg = 0; reg < nregs; reg++) {
82
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
83
+ s->be_data | size);
84
+ if ((vd & 1) && vec_size == 16) {
85
+ /*
86
+ * We cannot write 16 bytes at once because the
87
+ * destination is unaligned.
88
+ */
89
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
90
+ 8, 8, tmp);
91
+ tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
92
+ neon_reg_offset(vd, 0), 8, 8);
93
+ } else {
94
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
95
+ vec_size, vec_size, tmp);
96
+ }
97
+ tcg_gen_addi_i32(addr, addr, 1 << size);
98
+ vd += stride;
99
+ }
100
+ tcg_temp_free_i32(tmp);
101
+ tcg_temp_free_i32(addr);
102
+
103
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
104
+
105
+ return true;
68
+}
106
+}
69
+
107
diff --git a/target/arm/translate.c b/target/arm/translate.c
70
void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
108
index XXXXXXX..XXXXXXX 100644
71
{
109
--- a/target/arm/translate.c
72
CPUState *cs;
110
+++ b/target/arm/translate.c
73
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
111
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
74
112
int size;
75
/* Load the kernel. */
113
int reg;
76
if (!info->kernel_filename || info->firmware_loaded) {
114
int load;
115
- int vec_size;
116
TCGv_i32 addr;
117
TCGv_i32 tmp;
118
119
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
120
} else {
121
size = (insn >> 10) & 3;
122
if (size == 3) {
123
- /* Load single element to all lanes. */
124
- int a = (insn >> 4) & 1;
125
- if (!load) {
126
- return 1;
127
- }
128
- size = (insn >> 6) & 3;
129
- nregs = ((insn >> 8) & 3) + 1;
77
-
130
-
78
- if (have_dtb(info)) {
131
- if (size == 3) {
79
- /*
132
- if (nregs != 4 || a == 0) {
80
- * If we have a device tree blob, but no kernel to supply it to (or
133
- return 1;
81
- * the kernel is supposed to be loaded by the bootloader), copy the
134
- }
82
- * DTB to the base of RAM for the bootloader to pick up.
135
- /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
136
- size = 2;
137
- }
138
- if (nregs == 1 && a == 1 && size == 0) {
139
- return 1;
140
- }
141
- if (nregs == 3 && a == 1) {
142
- return 1;
143
- }
144
- addr = tcg_temp_new_i32();
145
- load_reg_var(s, addr, rn);
146
-
147
- /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
148
- * VLD2/3/4 to all lanes: bit 5 indicates register stride.
83
- */
149
- */
84
- info->dtb_start = info->loader_start;
150
- stride = (insn & (1 << 5)) ? 2 : 1;
85
- }
151
- vec_size = nregs == 1 ? stride * 8 : 8;
86
-
152
-
87
- if (info->kernel_filename) {
153
- tmp = tcg_temp_new_i32();
88
- FWCfgState *fw_cfg;
154
- for (reg = 0; reg < nregs; reg++) {
89
- bool try_decompressing_kernel;
155
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
90
-
156
- s->be_data | size);
91
- fw_cfg = fw_cfg_find();
157
- if ((rd & 1) && vec_size == 16) {
92
- try_decompressing_kernel = arm_feature(&cpu->env,
158
- /* We cannot write 16 bytes at once because the
93
- ARM_FEATURE_AARCH64);
159
- * destination is unaligned.
94
-
160
- */
95
- /*
161
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
96
- * Expose the kernel, the command line, and the initrd in fw_cfg.
162
- 8, 8, tmp);
97
- * We don't process them here at all, it's all left to the
163
- tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
98
- * firmware.
164
- neon_reg_offset(rd, 0), 8, 8);
99
- */
165
- } else {
100
- load_image_to_fw_cfg(fw_cfg,
166
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
101
- FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
167
- vec_size, vec_size, tmp);
102
- info->kernel_filename,
168
- }
103
- try_decompressing_kernel);
169
- tcg_gen_addi_i32(addr, addr, 1 << size);
104
- load_image_to_fw_cfg(fw_cfg,
170
- rd += stride;
105
- FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
106
- info->initrd_filename, false);
107
-
108
- if (info->kernel_cmdline) {
109
- fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
110
- strlen(info->kernel_cmdline) + 1);
111
- fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
112
- info->kernel_cmdline);
113
- }
171
- }
114
- }
172
- tcg_temp_free_i32(tmp);
115
-
173
- tcg_temp_free_i32(addr);
116
- /*
174
- stride = (1 << size) * nregs;
117
- * We will start from address 0 (typically a boot ROM image) in the
175
+ /* Load single element to all lanes -- handled by decodetree */
118
- * same way as hardware.
176
+ return 1;
119
- */
177
} else {
120
+ arm_setup_firmware_boot(cpu, info);
178
/* Single element. */
121
return;
179
int idx = (insn >> 4) & 0xf;
122
} else {
123
arm_setup_direct_kernel_boot(cpu, info);
124
--
180
--
125
2.20.1
181
2.20.1
126
182
127
183
New patch
1
1
Convert the Neon "load/store single structure to one lane" insns to
2
decodetree.
3
4
As this is the last set of insns in the Neon load/store group,
5
we can remove the whole disas_neon_ls_insn() function.
6
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200430181003.21682-14-peter.maydell@linaro.org
10
---
11
target/arm/neon-ls.decode | 11 +++
12
target/arm/translate-neon.inc.c | 89 +++++++++++++++++++
13
target/arm/translate.c | 147 --------------------------------
14
3 files changed, 100 insertions(+), 147 deletions(-)
15
16
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/neon-ls.decode
19
+++ b/target/arm/neon-ls.decode
20
@@ -XXX,XX +XXX,XX @@ VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
21
22
VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
23
vd=%vd_dp
24
+
25
+# Neon load/store single structure to one lane
26
+%imm1_5_p1 5:1 !function=plus1
27
+%imm1_6_p1 6:1 !function=plus1
28
+
29
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
30
+ vd=%vd_dp size=0 stride=1
31
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
32
+ vd=%vd_dp size=1 stride=%imm1_5_p1
33
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
34
+ vd=%vd_dp size=2 stride=%imm1_6_p1
35
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/translate-neon.inc.c
38
+++ b/target/arm/translate-neon.inc.c
39
@@ -XXX,XX +XXX,XX @@
40
* It might be possible to convert it to a standalone .c file eventually.
41
*/
42
43
+static inline int plus1(DisasContext *s, int x)
44
+{
45
+ return x + 1;
46
+}
47
+
48
/* Include the generated Neon decoder */
49
#include "decode-neon-dp.inc.c"
50
#include "decode-neon-ls.inc.c"
51
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
52
53
return true;
54
}
55
+
56
+static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
57
+{
58
+ /* Neon load/store single structure to one lane */
59
+ int reg;
60
+ int nregs = a->n + 1;
61
+ int vd = a->vd;
62
+ TCGv_i32 addr, tmp;
63
+
64
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
65
+ return false;
66
+ }
67
+
68
+ /* UNDEF accesses to D16-D31 if they don't exist */
69
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
70
+ return false;
71
+ }
72
+
73
+ /* Catch the UNDEF cases. This is unavoidably a bit messy. */
74
+ switch (nregs) {
75
+ case 1:
76
+ if (((a->align & (1 << a->size)) != 0) ||
77
+ (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
78
+ return false;
79
+ }
80
+ break;
81
+ case 3:
82
+ if ((a->align & 1) != 0) {
83
+ return false;
84
+ }
85
+ /* fall through */
86
+ case 2:
87
+ if (a->size == 2 && (a->align & 2) != 0) {
88
+ return false;
89
+ }
90
+ break;
91
+ case 4:
92
+ if ((a->size == 2) && ((a->align & 3) == 3)) {
93
+ return false;
94
+ }
95
+ break;
96
+ default:
97
+ abort();
98
+ }
99
+ if ((vd + a->stride * (nregs - 1)) > 31) {
100
+ /*
101
+ * Attempts to write off the end of the register file are
102
+ * UNPREDICTABLE; we choose to UNDEF because otherwise we would
103
+ * access off the end of the array that holds the register data.
104
+ */
105
+ return false;
106
+ }
107
+
108
+ if (!vfp_access_check(s)) {
109
+ return true;
110
+ }
111
+
112
+ tmp = tcg_temp_new_i32();
113
+ addr = tcg_temp_new_i32();
114
+ load_reg_var(s, addr, a->rn);
115
+ /*
116
+ * TODO: if we implemented alignment exceptions, we should check
117
+ * addr against the alignment encoded in a->align here.
118
+ */
119
+ for (reg = 0; reg < nregs; reg++) {
120
+ if (a->l) {
121
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
122
+ s->be_data | a->size);
123
+ neon_store_element(vd, a->reg_idx, a->size, tmp);
124
+ } else { /* Store */
125
+ neon_load_element(tmp, vd, a->reg_idx, a->size);
126
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
127
+ s->be_data | a->size);
128
+ }
129
+ vd += a->stride;
130
+ tcg_gen_addi_i32(addr, addr, 1 << a->size);
131
+ }
132
+ tcg_temp_free_i32(addr);
133
+ tcg_temp_free_i32(tmp);
134
+
135
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
136
+
137
+ return true;
138
+}
139
diff --git a/target/arm/translate.c b/target/arm/translate.c
140
index XXXXXXX..XXXXXXX 100644
141
--- a/target/arm/translate.c
142
+++ b/target/arm/translate.c
143
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
144
tcg_temp_free_i32(rd);
145
}
146
147
-
148
-/* Translate a NEON load/store element instruction. Return nonzero if the
149
- instruction is invalid. */
150
-static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
151
-{
152
- int rd, rn, rm;
153
- int nregs;
154
- int stride;
155
- int size;
156
- int reg;
157
- int load;
158
- TCGv_i32 addr;
159
- TCGv_i32 tmp;
160
-
161
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
162
- return 1;
163
- }
164
-
165
- /* FIXME: this access check should not take precedence over UNDEF
166
- * for invalid encodings; we will generate incorrect syndrome information
167
- * for attempts to execute invalid vfp/neon encodings with FP disabled.
168
- */
169
- if (s->fp_excp_el) {
170
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
171
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
172
- return 0;
173
- }
174
-
175
- if (!s->vfp_enabled)
176
- return 1;
177
- VFP_DREG_D(rd, insn);
178
- rn = (insn >> 16) & 0xf;
179
- rm = insn & 0xf;
180
- load = (insn & (1 << 21)) != 0;
181
- if ((insn & (1 << 23)) == 0) {
182
- /* Load store all elements -- handled already by decodetree */
183
- return 1;
184
- } else {
185
- size = (insn >> 10) & 3;
186
- if (size == 3) {
187
- /* Load single element to all lanes -- handled by decodetree */
188
- return 1;
189
- } else {
190
- /* Single element. */
191
- int idx = (insn >> 4) & 0xf;
192
- int reg_idx;
193
- switch (size) {
194
- case 0:
195
- reg_idx = (insn >> 5) & 7;
196
- stride = 1;
197
- break;
198
- case 1:
199
- reg_idx = (insn >> 6) & 3;
200
- stride = (insn & (1 << 5)) ? 2 : 1;
201
- break;
202
- case 2:
203
- reg_idx = (insn >> 7) & 1;
204
- stride = (insn & (1 << 6)) ? 2 : 1;
205
- break;
206
- default:
207
- abort();
208
- }
209
- nregs = ((insn >> 8) & 3) + 1;
210
- /* Catch the UNDEF cases. This is unavoidably a bit messy. */
211
- switch (nregs) {
212
- case 1:
213
- if (((idx & (1 << size)) != 0) ||
214
- (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
215
- return 1;
216
- }
217
- break;
218
- case 3:
219
- if ((idx & 1) != 0) {
220
- return 1;
221
- }
222
- /* fall through */
223
- case 2:
224
- if (size == 2 && (idx & 2) != 0) {
225
- return 1;
226
- }
227
- break;
228
- case 4:
229
- if ((size == 2) && ((idx & 3) == 3)) {
230
- return 1;
231
- }
232
- break;
233
- default:
234
- abort();
235
- }
236
- if ((rd + stride * (nregs - 1)) > 31) {
237
- /* Attempts to write off the end of the register file
238
- * are UNPREDICTABLE; we choose to UNDEF because otherwise
239
- * the neon_load_reg() would write off the end of the array.
240
- */
241
- return 1;
242
- }
243
- tmp = tcg_temp_new_i32();
244
- addr = tcg_temp_new_i32();
245
- load_reg_var(s, addr, rn);
246
- for (reg = 0; reg < nregs; reg++) {
247
- if (load) {
248
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
249
- s->be_data | size);
250
- neon_store_element(rd, reg_idx, size, tmp);
251
- } else { /* Store */
252
- neon_load_element(tmp, rd, reg_idx, size);
253
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
254
- s->be_data | size);
255
- }
256
- rd += stride;
257
- tcg_gen_addi_i32(addr, addr, 1 << size);
258
- }
259
- tcg_temp_free_i32(addr);
260
- tcg_temp_free_i32(tmp);
261
- stride = nregs * (1 << size);
262
- }
263
- }
264
- if (rm != 15) {
265
- TCGv_i32 base;
266
-
267
- base = load_reg(s, rn);
268
- if (rm == 13) {
269
- tcg_gen_addi_i32(base, base, stride);
270
- } else {
271
- TCGv_i32 index;
272
- index = load_reg(s, rm);
273
- tcg_gen_add_i32(base, base, index);
274
- tcg_temp_free_i32(index);
275
- }
276
- store_reg(s, rn, base);
277
- }
278
- return 0;
279
-}
280
-
281
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
282
{
283
switch (size) {
284
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
285
}
286
return;
287
}
288
- if ((insn & 0x0f100000) == 0x04000000) {
289
- /* NEON load/store. */
290
- if (disas_neon_ls_insn(s, insn)) {
291
- goto illegal_op;
292
- }
293
- return;
294
- }
295
if ((insn & 0x0e000f00) == 0x0c000100) {
296
if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
297
/* iWMMXt register transfer. */
298
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
299
}
300
break;
301
case 12:
302
- if ((insn & 0x01100000) == 0x01000000) {
303
- if (disas_neon_ls_insn(s, insn)) {
304
- goto illegal_op;
305
- }
306
- break;
307
- }
308
goto illegal_op;
309
default:
310
illegal_op:
311
--
312
2.20.1
313
314
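The %imm1_5_p1 and %imm1_6_p1 fields in the single-structure patch above use decodetree's !function modifier: the named bit is extracted and then passed through plus1() before it reaches the argument struct, so the register stride comes out as 1 or 2. Roughly, it amounts to the sketch below; the real extractor is generated code and the function name here is made up for illustration:

/* What "%imm1_5_p1 5:1 !function=plus1" amounts to: extract bit [5]
 * of the insn and add one.  (Sketch only; plus1() is the helper
 * defined in the patch, the extractor itself is generated.)
 */
static int extract_imm1_5_p1_sketch(DisasContext *s, uint32_t insn)
{
    return plus1(s, extract32(insn, 5, 1));
}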
1
Factor out the "direct kernel boot" code path from arm_load_kernel()
1
Convert the Neon 3-reg-same VADD and VSUB insns to decodetree.
2
into its own function; arm_load_kernel() is getting long enough that
3
the code flow is a bit confusing.
4
2
5
This commit only moves code around; no semantic changes.
3
Note that we don't need the neon_3r_sizes[op] check here because all
4
size values are OK for VADD and VSUB; we'll add this when we convert
5
the first insn that has size restrictions.
6
6
7
We leave the "load the dtb" code in arm_load_kernel() -- this
7
For this we need one of the GVecGen*Fn typedefs currently in
8
is currently only used by the "direct kernel boot" path, but
8
translate-a64.h; move them all to translate.h as a block so they
9
this is a bug which we will fix shortly.
9
are visible to the 32-bit decoder.
10
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
13
Message-id: 20200430181003.21682-15-peter.maydell@linaro.org
14
Message-id: 20190131112240.8395-3-peter.maydell@linaro.org
15
---
14
---
16
hw/arm/boot.c | 150 +++++++++++++++++++++++++++-----------------------
15
target/arm/translate-a64.h | 9 --------
17
1 file changed, 80 insertions(+), 70 deletions(-)
16
target/arm/translate.h | 9 ++++++++
17
target/arm/neon-dp.decode | 17 +++++++++++++++
18
target/arm/translate-neon.inc.c | 38 +++++++++++++++++++++++++++++++++
19
target/arm/translate.c | 14 ++++--------
20
5 files changed, 68 insertions(+), 19 deletions(-)
18
21
19
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
22
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
20
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/boot.c
24
--- a/target/arm/translate-a64.h
22
+++ b/hw/arm/boot.c
25
+++ b/target/arm/translate-a64.h
23
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
26
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
24
return size;
27
28
bool disas_sve(DisasContext *, uint32_t);
29
30
-/* Note that the gvec expanders operate on offsets + sizes. */
31
-typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
32
-typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
33
- uint32_t, uint32_t);
34
-typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
35
- uint32_t, uint32_t, uint32_t);
36
-typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
37
- uint32_t, uint32_t, uint32_t);
38
-
39
#endif /* TARGET_ARM_TRANSLATE_A64_H */
40
diff --git a/target/arm/translate.h b/target/arm/translate.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/translate.h
43
+++ b/target/arm/translate.h
44
@@ -XXX,XX +XXX,XX @@ void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
45
#define dc_isar_feature(name, ctx) \
46
({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
47
48
+/* Note that the gvec expanders operate on offsets + sizes. */
49
+typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
50
+typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
51
+ uint32_t, uint32_t);
52
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
53
+ uint32_t, uint32_t, uint32_t);
54
+typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
55
+ uint32_t, uint32_t, uint32_t);
56
+
57
#endif /* TARGET_ARM_TRANSLATE_H */
58
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/neon-dp.decode
61
+++ b/target/arm/neon-dp.decode
62
@@ -XXX,XX +XXX,XX @@
63
#
64
# This file is processed by scripts/decodetree.py
65
#
66
+# VFP/Neon register fields; same as vfp.decode
67
+%vm_dp 5:1 0:4
68
+%vn_dp 7:1 16:4
69
+%vd_dp 22:1 12:4
70
71
# Encodings for Neon data processing instructions where the T32 encoding
72
# is a simple transformation of the A32 encoding.
73
@@ -XXX,XX +XXX,XX @@
74
# 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
75
# This file works on the A32 encoding only; calling code for T32 has to
76
# transform the insn into the A32 version first.
77
+
78
+######################################################################
79
+# 3-reg-same grouping:
80
+# 1111 001 U 0 D sz:2 Vn:4 Vd:4 opc:4 N Q M op Vm:4
81
+######################################################################
82
+
83
+&3same vm vn vd q size
84
+
85
+@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
86
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
87
+
88
+VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
89
+VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
90
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/translate-neon.inc.c
93
+++ b/target/arm/translate-neon.inc.c
94
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
95
96
return true;
25
}
97
}
26
98
+
27
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
99
+static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
28
+static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
100
+{
29
+ struct arm_boot_info *info)
101
+ int vec_size = a->q ? 16 : 8;
30
{
102
+ int rd_ofs = neon_reg_offset(a->vd, 0);
31
+ /* Set up for a direct boot of a kernel image file. */
103
+ int rn_ofs = neon_reg_offset(a->vn, 0);
32
CPUState *cs;
104
+ int rm_ofs = neon_reg_offset(a->vm, 0);
33
+ AddressSpace *as = arm_boot_address_space(cpu, info);
105
+
34
int kernel_size;
106
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
35
int initrd_size;
107
+ return false;
36
int is_linux = 0;
108
+ }
37
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
109
+
38
int elf_machine;
110
+ /* UNDEF accesses to D16-D31 if they don't exist. */
39
hwaddr entry;
111
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
40
static const ARMInsnFixup *primary_loader;
112
+ ((a->vd | a->vn | a->vm) & 0x10)) {
41
- AddressSpace *as = arm_boot_address_space(cpu, info);
113
+ return false;
42
-
114
+ }
43
- /*
115
+
44
- * CPU objects (unlike devices) are not automatically reset on system
116
+ if ((a->vn | a->vm | a->vd) & a->q) {
45
- * reset, so we must always register a handler to do so. If we're
117
+ return false;
46
- * actually loading a kernel, the handler is also responsible for
118
+ }
47
- * arranging that we start it correctly.
119
+
48
- */
120
+ if (!vfp_access_check(s)) {
49
- for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
121
+ return true;
50
- qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
122
+ }
51
- }
123
+
52
-
124
+ fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
53
- /*
125
+ return true;
54
- * The board code is not supposed to set secure_board_setup unless
55
- * running its code in secure mode is actually possible, and KVM
56
- * doesn't support secure.
57
- */
58
- assert(!(info->secure_board_setup && kvm_enabled()));
59
-
60
- info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
61
- info->dtb_limit = 0;
62
-
63
- /* Load the kernel. */
64
- if (!info->kernel_filename || info->firmware_loaded) {
65
-
66
- if (have_dtb(info)) {
67
- /*
68
- * If we have a device tree blob, but no kernel to supply it to (or
69
- * the kernel is supposed to be loaded by the bootloader), copy the
70
- * DTB to the base of RAM for the bootloader to pick up.
71
- */
72
- info->dtb_start = info->loader_start;
73
- }
74
-
75
- if (info->kernel_filename) {
76
- FWCfgState *fw_cfg;
77
- bool try_decompressing_kernel;
78
-
79
- fw_cfg = fw_cfg_find();
80
- try_decompressing_kernel = arm_feature(&cpu->env,
81
- ARM_FEATURE_AARCH64);
82
-
83
- /*
84
- * Expose the kernel, the command line, and the initrd in fw_cfg.
85
- * We don't process them here at all, it's all left to the
86
- * firmware.
87
- */
88
- load_image_to_fw_cfg(fw_cfg,
89
- FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
90
- info->kernel_filename,
91
- try_decompressing_kernel);
92
- load_image_to_fw_cfg(fw_cfg,
93
- FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
94
- info->initrd_filename, false);
95
-
96
- if (info->kernel_cmdline) {
97
- fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
98
- strlen(info->kernel_cmdline) + 1);
99
- fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
100
- info->kernel_cmdline);
101
- }
102
- }
103
-
104
- /*
105
- * We will start from address 0 (typically a boot ROM image) in the
106
- * same way as hardware.
107
- */
108
- return;
109
- }
110
111
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
112
primary_loader = bootloader_aarch64;
113
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
114
for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
115
ARM_CPU(cs)->env.boot_info = info;
116
}
117
+}
126
+}
118
+
127
+
119
+void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
128
+#define DO_3SAME(INSN, FUNC) \
120
+{
129
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
121
+ CPUState *cs;
130
+ { \
122
+ AddressSpace *as = arm_boot_address_space(cpu, info);
131
+ return do_3same(s, a, FUNC); \
123
+
124
+ /*
125
+ * CPU objects (unlike devices) are not automatically reset on system
126
+ * reset, so we must always register a handler to do so. If we're
127
+ * actually loading a kernel, the handler is also responsible for
128
+ * arranging that we start it correctly.
129
+ */
130
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
131
+ qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
132
+ }
132
+ }
133
+
133
+
134
+ /*
134
+DO_3SAME(VADD, tcg_gen_gvec_add)
135
+ * The board code is not supposed to set secure_board_setup unless
135
+DO_3SAME(VSUB, tcg_gen_gvec_sub)
136
+ * running its code in secure mode is actually possible, and KVM
136
diff --git a/target/arm/translate.c b/target/arm/translate.c
137
+ * doesn't support secure.
137
index XXXXXXX..XXXXXXX 100644
138
+ */
138
--- a/target/arm/translate.c
139
+ assert(!(info->secure_board_setup && kvm_enabled()));
139
+++ b/target/arm/translate.c
140
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
141
}
142
return 0;
143
144
- case NEON_3R_VADD_VSUB:
145
- if (u) {
146
- tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
147
- vec_size, vec_size);
148
- } else {
149
- tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
150
- vec_size, vec_size);
151
- }
152
- return 0;
153
-
154
case NEON_3R_VQADD:
155
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
156
rn_ofs, rm_ofs, vec_size, vec_size,
157
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
158
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
159
u ? &ushl_op[size] : &sshl_op[size]);
160
return 0;
140
+
161
+
141
+ info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
162
+ case NEON_3R_VADD_VSUB:
142
+ info->dtb_limit = 0;
163
+ /* Already handled by decodetree */
143
+
164
+ return 1;
144
+ /* Load the kernel. */
165
}
145
+ if (!info->kernel_filename || info->firmware_loaded) {
166
146
+
167
if (size == 3) {
147
+ if (have_dtb(info)) {
148
+ /*
149
+ * If we have a device tree blob, but no kernel to supply it to (or
150
+ * the kernel is supposed to be loaded by the bootloader), copy the
151
+ * DTB to the base of RAM for the bootloader to pick up.
152
+ */
153
+ info->dtb_start = info->loader_start;
154
+ }
155
+
156
+ if (info->kernel_filename) {
157
+ FWCfgState *fw_cfg;
158
+ bool try_decompressing_kernel;
159
+
160
+ fw_cfg = fw_cfg_find();
161
+ try_decompressing_kernel = arm_feature(&cpu->env,
162
+ ARM_FEATURE_AARCH64);
163
+
164
+ /*
165
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
166
+ * We don't process them here at all, it's all left to the
167
+ * firmware.
168
+ */
169
+ load_image_to_fw_cfg(fw_cfg,
170
+ FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
171
+ info->kernel_filename,
172
+ try_decompressing_kernel);
173
+ load_image_to_fw_cfg(fw_cfg,
174
+ FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
175
+ info->initrd_filename, false);
176
+
177
+ if (info->kernel_cmdline) {
178
+ fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
179
+ strlen(info->kernel_cmdline) + 1);
180
+ fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
181
+ info->kernel_cmdline);
182
+ }
183
+ }
184
+
185
+ /*
186
+ * We will start from address 0 (typically a boot ROM image) in the
187
+ * same way as hardware.
188
+ */
189
+ return;
190
+ } else {
191
+ arm_setup_direct_kernel_boot(cpu, info);
192
+ }
193
194
if (!info->skip_dtb_autoload && have_dtb(info)) {
195
if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
196
--
168
--
197
2.20.1
169
2.20.1
198
170
199
171
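Expanded by hand for clarity, DO_3SAME(VADD, tcg_gen_gvec_add) from the patch above is nothing more than a thin trans_ wrapper around do_3same(); the expansion below is written out only as an illustration of what the macro generates:

/* Hand expansion of DO_3SAME(VADD, tcg_gen_gvec_add) */
static bool trans_VADD_3s(DisasContext *s, arg_3same *a)
{
    return do_3same(s, a, tcg_gen_gvec_add);
}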
New patch
1
Convert the Neon logic ops in the 3-reg-same grouping to decodetree.
2
Note that for the logic ops the 'size' field forms part of their
3
decode and the actual operations are always bitwise.
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20200430181003.21682-16-peter.maydell@linaro.org
8
---
9
target/arm/neon-dp.decode | 12 +++++++++++
10
target/arm/translate-neon.inc.c | 19 +++++++++++++++++
11
target/arm/translate.c | 38 +--------------------------------
12
3 files changed, 32 insertions(+), 37 deletions(-)
13
14
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/neon-dp.decode
17
+++ b/target/arm/neon-dp.decode
18
@@ -XXX,XX +XXX,XX @@
19
@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
20
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
21
22
+@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
23
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0
24
+
25
+VAND_3s 1111 001 0 0 . 00 .... .... 0001 ... 1 .... @3same_logic
26
+VBIC_3s 1111 001 0 0 . 01 .... .... 0001 ... 1 .... @3same_logic
27
+VORR_3s 1111 001 0 0 . 10 .... .... 0001 ... 1 .... @3same_logic
28
+VORN_3s 1111 001 0 0 . 11 .... .... 0001 ... 1 .... @3same_logic
29
+VEOR_3s 1111 001 1 0 . 00 .... .... 0001 ... 1 .... @3same_logic
30
+VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
31
+VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
32
+VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
33
+
34
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
35
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
36
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-neon.inc.c
39
+++ b/target/arm/translate-neon.inc.c
40
@@ -XXX,XX +XXX,XX @@ static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
41
42
DO_3SAME(VADD, tcg_gen_gvec_add)
43
DO_3SAME(VSUB, tcg_gen_gvec_sub)
44
+DO_3SAME(VAND, tcg_gen_gvec_and)
45
+DO_3SAME(VBIC, tcg_gen_gvec_andc)
46
+DO_3SAME(VORR, tcg_gen_gvec_or)
47
+DO_3SAME(VORN, tcg_gen_gvec_orc)
48
+DO_3SAME(VEOR, tcg_gen_gvec_xor)
49
+
50
+/* These insns are all gvec_bitsel but with the inputs in various orders. */
51
+#define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
52
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
53
+ uint32_t rn_ofs, uint32_t rm_ofs, \
54
+ uint32_t oprsz, uint32_t maxsz) \
55
+ { \
56
+ tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
57
+ } \
58
+ DO_3SAME(INSN, gen_##INSN##_3s)
59
+
60
+DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
61
+DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
62
+DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
63
diff --git a/target/arm/translate.c b/target/arm/translate.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/translate.c
66
+++ b/target/arm/translate.c
67
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
68
}
69
return 1;
70
71
- case NEON_3R_LOGIC: /* Logic ops. */
72
- switch ((u << 2) | size) {
73
- case 0: /* VAND */
74
- tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
75
- vec_size, vec_size);
76
- break;
77
- case 1: /* VBIC */
78
- tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
79
- vec_size, vec_size);
80
- break;
81
- case 2: /* VORR */
82
- tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
83
- vec_size, vec_size);
84
- break;
85
- case 3: /* VORN */
86
- tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
87
- vec_size, vec_size);
88
- break;
89
- case 4: /* VEOR */
90
- tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
91
- vec_size, vec_size);
92
- break;
93
- case 5: /* VBSL */
94
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
95
- vec_size, vec_size);
96
- break;
97
- case 6: /* VBIT */
98
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
99
- vec_size, vec_size);
100
- break;
101
- case 7: /* VBIF */
102
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
103
- vec_size, vec_size);
104
- break;
105
- }
106
- return 0;
107
-
108
case NEON_3R_VQADD:
109
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
110
rn_ofs, rm_ofs, vec_size, vec_size,
111
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
112
return 0;
113
114
case NEON_3R_VADD_VSUB:
115
+ case NEON_3R_LOGIC:
116
/* Already handled by decodetree */
117
return 1;
118
}
119
--
120
2.20.1
121
122
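The DO_3SAME_BITSEL uses above differ only in the order the three vector operands are fed to tcg_gen_gvec_bitsel(), which computes d = (b & a) | (c & ~a) with 'a' as the selector. For example, DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs) expands to roughly the following (hand expansion shown for illustration):

/* Hand expansion of DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs):
 * with Vm as the selector, VBIT inserts bits from Vn where the
 * corresponding Vm bit is 1 and keeps the old Vd bit elsewhere.
 */
static void gen_VBIT_3s(unsigned vece, uint32_t rd_ofs,
                        uint32_t rn_ofs, uint32_t rm_ofs,
                        uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_bitsel(vece, rd_ofs, rm_ofs, rn_ofs, rd_ofs, oprsz, maxsz);
}

static bool trans_VBIT_3s(DisasContext *s, arg_3same *a)
{
    return do_3same(s, a, gen_VBIT_3s);
}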
New patch
1
Convert the Neon 3-reg-same VMAX and VMIN insns to decodetree.
1
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-17-peter.maydell@linaro.org
6
---
7
target/arm/neon-dp.decode | 5 +++++
8
target/arm/translate-neon.inc.c | 14 ++++++++++++++
9
target/arm/translate.c | 21 ++-------------------
10
3 files changed, 21 insertions(+), 19 deletions(-)
11
12
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-dp.decode
15
+++ b/target/arm/neon-dp.decode
16
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
17
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
18
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
19
20
+VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
21
+VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
22
+VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
23
+VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
24
+
25
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
26
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
27
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-neon.inc.c
30
+++ b/target/arm/translate-neon.inc.c
31
@@ -XXX,XX +XXX,XX @@ DO_3SAME(VEOR, tcg_gen_gvec_xor)
32
DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
33
DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
34
DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
35
+
36
+#define DO_3SAME_NO_SZ_3(INSN, FUNC) \
37
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
38
+ { \
39
+ if (a->size == 3) { \
40
+ return false; \
41
+ } \
42
+ return do_3same(s, a, FUNC); \
43
+ }
44
+
45
+DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
46
+DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
47
+DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
48
+DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
49
diff --git a/target/arm/translate.c b/target/arm/translate.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/translate.c
52
+++ b/target/arm/translate.c
53
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
54
rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
55
return 0;
56
57
- case NEON_3R_VMAX:
58
- if (u) {
59
- tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
60
- vec_size, vec_size);
61
- } else {
62
- tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
63
- vec_size, vec_size);
64
- }
65
- return 0;
66
- case NEON_3R_VMIN:
67
- if (u) {
68
- tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
69
- vec_size, vec_size);
70
- } else {
71
- tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
72
- vec_size, vec_size);
73
- }
74
- return 0;
75
-
76
case NEON_3R_VSHL:
77
/* Note the operation is vshl vd,vm,vn */
78
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
79
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
80
81
case NEON_3R_VADD_VSUB:
82
case NEON_3R_LOGIC:
83
+ case NEON_3R_VMAX:
84
+ case NEON_3R_VMIN:
85
/* Already handled by decodetree */
86
return 1;
87
}
88
--
89
2.20.1
90
91
New patch
1
Convert the Neon comparison ops in the 3-reg-same grouping
2
to decodetree.
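These compares map directly onto TCG vector compare conditions (GT/GTU/GE/GEU/EQ); VTST is the odd one out because there is no "test" condition, so it goes through the cmtst_op table instead. Per lane, a true result is all-ones and a false result is zero; a small C model of the byte case (illustrative only, not QEMU code):

#include <stdint.h>

/* Neon integer compares write all-ones into a lane when the condition
 * holds and zero otherwise.  8-bit-lane model of VCGT.S8. */
void vcgt_s8(uint8_t d[8], const int8_t n[8], const int8_t m[8])
{
    for (int i = 0; i < 8; i++) {
        d[i] = n[i] > m[i] ? 0xff : 0x00;
    }
}

/* VTST: a lane is "true" if the two operands share any set bit. */
void vtst_8(uint8_t d[8], const uint8_t n[8], const uint8_t m[8])
{
    for (int i = 0; i < 8; i++) {
        d[i] = (n[i] & m[i]) ? 0xff : 0x00;
    }
}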
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-18-peter.maydell@linaro.org
7
---
8
target/arm/neon-dp.decode | 8 ++++++++
9
target/arm/translate-neon.inc.c | 22 ++++++++++++++++++++++
10
target/arm/translate.c | 23 +++--------------------
11
3 files changed, 33 insertions(+), 20 deletions(-)
12
13
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-dp.decode
16
+++ b/target/arm/neon-dp.decode
17
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
18
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
19
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
20
21
+VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
22
+VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
23
+VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
24
+VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
25
+
26
VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
27
VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
28
VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
29
@@ -XXX,XX +XXX,XX @@ VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
30
31
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
32
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
33
+
34
+VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
35
+VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
36
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-neon.inc.c
39
+++ b/target/arm/translate-neon.inc.c
40
@@ -XXX,XX +XXX,XX @@ DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
41
DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
42
DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
43
DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
44
+
45
+#define DO_3SAME_CMP(INSN, COND) \
46
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
47
+ uint32_t rn_ofs, uint32_t rm_ofs, \
48
+ uint32_t oprsz, uint32_t maxsz) \
49
+ { \
50
+ tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
51
+ } \
52
+ DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
53
+
54
+DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
55
+DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
56
+DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
57
+DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
58
+DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
59
+
60
+static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
61
+ uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
62
+{
63
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
64
+}
65
+DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
66
diff --git a/target/arm/translate.c b/target/arm/translate.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/translate.c
69
+++ b/target/arm/translate.c
70
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
71
u ? &mls_op[size] : &mla_op[size]);
72
return 0;
73
74
- case NEON_3R_VTST_VCEQ:
75
- if (u) { /* VCEQ */
76
- tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
77
- vec_size, vec_size);
78
- } else { /* VTST */
79
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
80
- vec_size, vec_size, &cmtst_op[size]);
81
- }
82
- return 0;
83
-
84
- case NEON_3R_VCGT:
85
- tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
86
- rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
87
- return 0;
88
-
89
- case NEON_3R_VCGE:
90
- tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
91
- rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
92
- return 0;
93
-
94
case NEON_3R_VSHL:
95
/* Note the operation is vshl vd,vm,vn */
96
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
97
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
98
case NEON_3R_LOGIC:
99
case NEON_3R_VMAX:
100
case NEON_3R_VMIN:
101
+ case NEON_3R_VTST_VCEQ:
102
+ case NEON_3R_VCGT:
103
+ case NEON_3R_VCGE:
104
/* Already handled by decodetree */
105
return 1;
106
}
107
--
108
2.20.1
109
110
New patch
1
Convert the Neon VQADD/VQSUB insns in the 3-reg-same grouping
2
to decodetree.
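For context, the extra vfp.qc operand in the tcg_gen_gvec_4 expansion is the cumulative saturation flag (FPSCR.QC): these insns clamp results to the element range and record that clamping occurred. A single-lane C model of signed 8-bit VQADD (illustrative, not QEMU code):

#include <stdbool.h>
#include <stdint.h>

/* Signed saturating add for a single 8-bit lane; *qc is the sticky
 * "saturation happened" flag that FPSCR.QC accumulates. */
int8_t vqadd_s8(int8_t a, int8_t b, bool *qc)
{
    int sum = (int)a + (int)b;

    if (sum > INT8_MAX) {
        *qc = true;
        return INT8_MAX;
    }
    if (sum < INT8_MIN) {
        *qc = true;
        return INT8_MIN;
    }
    return (int8_t)sum;
}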
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-19-peter.maydell@linaro.org
7
---
8
target/arm/neon-dp.decode | 6 ++++++
9
target/arm/translate-neon.inc.c | 15 +++++++++++++++
10
target/arm/translate.c | 14 ++------------
11
3 files changed, 23 insertions(+), 12 deletions(-)
12
13
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-dp.decode
16
+++ b/target/arm/neon-dp.decode
17
@@ -XXX,XX +XXX,XX @@
18
@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
19
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
21
+VQADD_S_3s 1111 001 0 0 . .. .... .... 0000 . . . 1 .... @3same
22
+VQADD_U_3s 1111 001 1 0 . .. .... .... 0000 . . . 1 .... @3same
23
+
24
@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
25
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0
26
27
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
28
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
29
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
30
31
+VQSUB_S_3s 1111 001 0 0 . .. .... .... 0010 . . . 1 .... @3same
32
+VQSUB_U_3s 1111 001 1 0 . .. .... .... 0010 . . . 1 .... @3same
33
+
34
VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
35
VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
36
VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
37
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/translate-neon.inc.c
40
+++ b/target/arm/translate-neon.inc.c
41
@@ -XXX,XX +XXX,XX @@ static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
42
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
43
}
44
DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
45
+
46
+#define DO_3SAME_GVEC4(INSN, OPARRAY) \
47
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
48
+ uint32_t rn_ofs, uint32_t rm_ofs, \
49
+ uint32_t oprsz, uint32_t maxsz) \
50
+ { \
51
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc), \
52
+ rn_ofs, rm_ofs, oprsz, maxsz, &OPARRAY[vece]); \
53
+ } \
54
+ DO_3SAME(INSN, gen_##INSN##_3s)
55
+
56
+DO_3SAME_GVEC4(VQADD_S, sqadd_op)
57
+DO_3SAME_GVEC4(VQADD_U, uqadd_op)
58
+DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
59
+DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
60
diff --git a/target/arm/translate.c b/target/arm/translate.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/translate.c
63
+++ b/target/arm/translate.c
64
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
65
}
66
return 1;
67
68
- case NEON_3R_VQADD:
69
- tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
70
- rn_ofs, rm_ofs, vec_size, vec_size,
71
- (u ? uqadd_op : sqadd_op) + size);
72
- return 0;
73
-
74
- case NEON_3R_VQSUB:
75
- tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
76
- rn_ofs, rm_ofs, vec_size, vec_size,
77
- (u ? uqsub_op : sqsub_op) + size);
78
- return 0;
79
-
80
case NEON_3R_VMUL: /* VMUL */
81
if (u) {
82
/* Polynomial case allows only P8. */
83
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
84
case NEON_3R_VTST_VCEQ:
85
case NEON_3R_VCGT:
86
case NEON_3R_VCGE:
87
+ case NEON_3R_VQADD:
88
+ case NEON_3R_VQSUB:
89
/* Already handled by decodetree */
90
return 1;
91
}
92
--
93
2.20.1
94
95
New patch
1
Convert the Neon VMUL, VMLA, VMLS and VSHL insns in the
2
3-reg-same grouping to decodetree.
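Two details are worth keeping in mind here: VMLA/VMLS accumulate into the destination (d += n*m and d -= n*m), and VSHL shifts the lanes of Vm by signed per-lane counts taken from Vn, with negative counts shifting right, which is why the gvec expansion passes rm_ofs before rn_ofs ("vshl vd,vm,vn"). The polynomial VMUL.P8 form exists only for byte lanes, hence the size != 0 reject in trans_VMUL_p_3s. Rough per-lane C models, assuming in-range shift counts (illustrative, not QEMU code):

#include <stdint.h>

/* VMLA: d += n * m, truncated to the lane width (VMLS subtracts). */
void vmla_8(uint8_t d[8], const uint8_t n[8], const uint8_t m[8])
{
    for (int i = 0; i < 8; i++) {
        d[i] = (uint8_t)(d[i] + n[i] * m[i]);
    }
}

/* VSHL (register), unsigned byte lanes: the data being shifted is a
 * lane of Vm, the count is the corresponding lane of Vn taken as a
 * signed value, and a negative count shifts right.  Out-of-range
 * counts are omitted to keep the sketch short. */
uint8_t vshl_u8(uint8_t val, int8_t shift)
{
    return shift >= 0 ? (uint8_t)(val << shift) : (uint8_t)(val >> -shift);
}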
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-20-peter.maydell@linaro.org
7
---
8
target/arm/neon-dp.decode | 9 +++++++
9
target/arm/translate-neon.inc.c | 44 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 28 +++------------------
11
3 files changed, 56 insertions(+), 25 deletions(-)
12
13
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-dp.decode
16
+++ b/target/arm/neon-dp.decode
17
@@ -XXX,XX +XXX,XX @@ VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
18
VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
19
VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
20
21
+VSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 0 .... @3same
22
+VSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 0 .... @3same
23
+
24
VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
25
VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
26
VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
27
@@ -XXX,XX +XXX,XX @@ VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
28
29
VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
30
VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
31
+
32
+VMLA_3s 1111 001 0 0 . .. .... .... 1001 . . . 0 .... @3same
33
+VMLS_3s 1111 001 1 0 . .. .... .... 1001 . . . 0 .... @3same
34
+
35
+VMUL_3s 1111 001 0 0 . .. .... .... 1001 . . . 1 .... @3same
36
+VMUL_p_3s 1111 001 1 0 . .. .... .... 1001 . . . 1 .... @3same
37
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/translate-neon.inc.c
40
+++ b/target/arm/translate-neon.inc.c
41
@@ -XXX,XX +XXX,XX @@ DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
42
DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
43
DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
44
DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
45
+DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
46
47
#define DO_3SAME_CMP(INSN, COND) \
48
static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
49
@@ -XXX,XX +XXX,XX @@ DO_3SAME_GVEC4(VQADD_S, sqadd_op)
50
DO_3SAME_GVEC4(VQADD_U, uqadd_op)
51
DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
52
DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
53
+
54
+static void gen_VMUL_p_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
55
+ uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
56
+{
57
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz,
58
+ 0, gen_helper_gvec_pmul_b);
59
+}
60
+
61
+static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
62
+{
63
+ if (a->size != 0) {
64
+ return false;
65
+ }
66
+ return do_3same(s, a, gen_VMUL_p_3s);
67
+}
68
+
69
+#define DO_3SAME_GVEC3_NO_SZ_3(INSN, OPARRAY) \
70
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
71
+ uint32_t rn_ofs, uint32_t rm_ofs, \
72
+ uint32_t oprsz, uint32_t maxsz) \
73
+ { \
74
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, \
75
+ oprsz, maxsz, &OPARRAY[vece]); \
76
+ } \
77
+ DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
78
+
79
+
80
+DO_3SAME_GVEC3_NO_SZ_3(VMLA, mla_op)
81
+DO_3SAME_GVEC3_NO_SZ_3(VMLS, mls_op)
82
+
83
+#define DO_3SAME_GVEC3_SHIFT(INSN, OPARRAY) \
84
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
85
+ uint32_t rn_ofs, uint32_t rm_ofs, \
86
+ uint32_t oprsz, uint32_t maxsz) \
87
+ { \
88
+ /* Note the operation is vshl vd,vm,vn */ \
89
+ tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, \
90
+ oprsz, maxsz, &OPARRAY[vece]); \
91
+ } \
92
+ DO_3SAME(INSN, gen_##INSN##_3s)
93
+
94
+DO_3SAME_GVEC3_SHIFT(VSHL_S, sshl_op)
95
+DO_3SAME_GVEC3_SHIFT(VSHL_U, ushl_op)
96
diff --git a/target/arm/translate.c b/target/arm/translate.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/target/arm/translate.c
99
+++ b/target/arm/translate.c
100
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
101
}
102
return 1;
103
104
- case NEON_3R_VMUL: /* VMUL */
105
- if (u) {
106
- /* Polynomial case allows only P8. */
107
- if (size != 0) {
108
- return 1;
109
- }
110
- tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
111
- 0, gen_helper_gvec_pmul_b);
112
- } else {
113
- tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
114
- vec_size, vec_size);
115
- }
116
- return 0;
117
-
118
- case NEON_3R_VML: /* VMLA, VMLS */
119
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
120
- u ? &mls_op[size] : &mla_op[size]);
121
- return 0;
122
-
123
- case NEON_3R_VSHL:
124
- /* Note the operation is vshl vd,vm,vn */
125
- tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
126
- u ? &ushl_op[size] : &sshl_op[size]);
127
- return 0;
128
-
129
case NEON_3R_VADD_VSUB:
130
case NEON_3R_LOGIC:
131
case NEON_3R_VMAX:
132
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
133
case NEON_3R_VCGE:
134
case NEON_3R_VQADD:
135
case NEON_3R_VQSUB:
136
+ case NEON_3R_VMUL:
137
+ case NEON_3R_VML:
138
+ case NEON_3R_VSHL:
139
/* Already handled by decodetree */
140
return 1;
141
}
142
--
143
2.20.1
144
145
1
From: Richard Henderson <richard.henderson@linaro.org>
1
We're going to want at least some of the NeonGen* typedefs
2
for the refactored 32-bit Neon decoder, so move them all
3
to translate.h since it makes more sense to keep them in
4
one group.
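The reason these typedefs are worth sharing is that both decoders build tables of gen_ functions with a common signature and index them by element size or opcode bits. A self-contained sketch of that dispatch pattern, using a stand-in type in place of the real TCG value types (not QEMU code):

#include <stdio.h>

/* Stand-in for TCGv_i32 so the sketch compiles on its own; the real
 * typedefs are over TCG IR values, not plain pointers. */
typedef int *TCGv_i32;

typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);

static void gen_add_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) { *d = *n + *m; }
static void gen_sub_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) { *d = *n - *m; }

/* A decoder can index a table like this by size or by opcode bits. */
static NeonGenTwoOpFn * const fns[2] = { gen_add_i32, gen_sub_i32 };

int main(void)
{
    int d, n = 7, m = 3;

    fns[0](&d, &n, &m);     /* dispatch through the shared typedef */
    printf("%d\n", d);      /* prints: 10 */
    return 0;
}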
2
5
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190128223118.5255-4-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200430181003.21682-23-peter.maydell@linaro.org
7
---
9
---
8
target/arm/cpu.h | 2 ++
10
target/arm/translate.h | 17 +++++++++++++++++
9
target/arm/translate.h | 4 ++++
11
target/arm/translate-a64.c | 17 -----------------
10
target/arm/helper.c | 22 +++++++++++++++-------
12
2 files changed, 17 insertions(+), 17 deletions(-)
11
target/arm/translate-a64.c | 2 ++
12
4 files changed, 23 insertions(+), 7 deletions(-)
13
13
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TBII, 0, 2)
19
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
20
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
21
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
22
+FIELD(TBFLAG_A64, BT, 9, 1)
23
+FIELD(TBFLAG_A64, BTYPE, 10, 2)
24
25
static inline bool bswap_code(bool sctlr_b)
26
{
27
diff --git a/target/arm/translate.h b/target/arm/translate.h
14
diff --git a/target/arm/translate.h b/target/arm/translate.h
28
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate.h
16
--- a/target/arm/translate.h
30
+++ b/target/arm/translate.h
17
+++ b/target/arm/translate.h
31
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
18
@@ -XXX,XX +XXX,XX @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
32
bool ss_same_el;
19
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
33
/* True if v8.3-PAuth is active. */
20
uint32_t, uint32_t, uint32_t);
34
bool pauth_active;
21
35
+ /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
22
+/* Function prototype for gen_ functions for calling Neon helpers */
36
+ bool bt;
23
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
37
+ /* A copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. */
24
+typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
38
+ uint8_t btype;
25
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
39
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
26
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
40
int c15_cpar;
27
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
41
/* TCG op of the current insn_start. */
28
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
42
diff --git a/target/arm/helper.c b/target/arm/helper.c
29
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
43
index XXXXXXX..XXXXXXX 100644
30
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
44
--- a/target/arm/helper.c
31
+typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
45
+++ b/target/arm/helper.c
32
+typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
46
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
33
+typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
47
34
+typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
48
if (is_a64(env)) {
35
+typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
49
ARMCPU *cpu = arm_env_get_cpu(env);
36
+typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
50
+ uint64_t sctlr;
37
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
51
52
*pc = env->pc;
53
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
54
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
55
flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
56
}
57
58
+ if (current_el == 0) {
59
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
60
+ sctlr = env->cp15.sctlr_el[1];
61
+ } else {
62
+ sctlr = env->cp15.sctlr_el[current_el];
63
+ }
64
if (cpu_isar_feature(aa64_pauth, cpu)) {
65
/*
66
* In order to save space in flags, we record only whether
67
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
68
* a nop, or "active" when some action must be performed.
69
* The decision of which action to take is left to a helper.
70
*/
71
- uint64_t sctlr;
72
- if (current_el == 0) {
73
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
74
- sctlr = env->cp15.sctlr_el[1];
75
- } else {
76
- sctlr = env->cp15.sctlr_el[current_el];
77
- }
78
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
79
flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
80
}
81
}
82
+
38
+
83
+ if (cpu_isar_feature(aa64_bti, cpu)) {
39
#endif /* TARGET_ARM_TRANSLATE_H */
84
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
85
+ if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
86
+ flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
87
+ }
88
+ flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
89
+ }
90
} else {
91
*pc = env->regs[15];
92
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
93
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
40
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
94
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-a64.c
42
--- a/target/arm/translate-a64.c
96
+++ b/target/arm/translate-a64.c
43
+++ b/target/arm/translate-a64.c
97
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
44
@@ -XXX,XX +XXX,XX @@ typedef struct AArch64DecodeTable {
98
dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
45
AArch64DecodeFn *disas_fn;
99
dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
46
} AArch64DecodeTable;
100
dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
47
101
+ dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
48
-/* Function prototype for gen_ functions for calling Neon helpers */
102
+ dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
49
-typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
103
dc->vec_len = 0;
50
-typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
104
dc->vec_stride = 0;
51
-typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
105
dc->cp_regs = arm_cpu->cp_regs;
52
-typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
53
-typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
54
-typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
55
-typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
56
-typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
57
-typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
58
-typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
59
-typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
60
-typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
61
-typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
62
-typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
63
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
64
-
65
/* initialize TCG globals. */
66
void a64_translate_init(void)
67
{
106
--
68
--
107
2.20.1
69
2.20.1
108
70
109
71