1 | As promised, another pullreq... This one's mostly RTH's patches. | 1 | Hi; this pullreq includes FEAT_LSE2 support, the new |
---|---|---|---|
2 | bpim2u board, and some other smaller patchsets. | ||
2 | 3 | ||
3 | thanks | 4 | thanks |
4 | -- PMM | 5 | -- PMM |
5 | 6 | ||
6 | The following changes since commit 784c2e4f232adf5ef47a84a262ec72a07d068d6a: | 7 | The following changes since commit 369081c4558e7e940fa36ce59bf17b2e390f55d3: |
7 | 8 | ||
8 | Merge remote-tracking branch 'remotes/jasowang/tags/net-pull-request' into staging (2018-10-19 15:30:40 +0100) | 9 | Merge tag 'pull-tcg-20230605' of https://gitlab.com/rth7680/qemu into staging (2023-06-05 13:16:56 -0700) |
9 | 10 | ||
10 | are available in the Git repository at: | 11 | are available in the Git repository at: |
11 | 12 | ||
12 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20181019 | 13 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230606 |
13 | 14 | ||
14 | for you to fetch changes up to 88c9add25e7120e8622796c81ad3f3fb7f8d40e7: | 15 | for you to fetch changes up to f9ac778898cb28307e0f91421aba34d43c34b679: |
15 | 16 | ||
16 | target/arm: Only flush tlb if ASID changes (2018-10-19 17:38:48 +0100) | 17 | target/arm: trap DCC access in user mode emulation (2023-06-06 10:19:40 +0100) |
17 | 18 | ||
18 | ---------------------------------------------------------------- | 19 | ---------------------------------------------------------------- |
19 | target-arm queue: | 20 | target-arm queue: |
20 | * ssi-sd: Make devices picking up backends unavailable with -device | 21 | * Support gdbstub (guest debug) in HVF |
21 | Add support for VCPU event states | 22 | xlnx-versal: Support CANFD controller |
22 | * Move towards making ID registers the source of truth for | 23 | * bpim2u: New board model: Banana Pi BPI-M2 Ultra |
23 | whether a guest CPU implements a feature, rather than having | 24 | * Emulate FEAT_LSE2 |
24 | parallel ID registers and feature bit flags | 25 | * allow DC CVA[D]P in user mode emulation |
25 | * Implement various HCR hypervisor trap/config bits | 26 | * trap DCC access in user mode emulation |
26 | * Get IL bit correct for v7 syndrome values | ||
27 | * Report correct syndrome for FP/SIMD traps to Hyp mode | ||
28 | * hw/arm/boot: Increase compliance with kernel arm64 boot protocol | ||
29 | * Refactor A32 Neon to use generic vector infrastructure | ||
30 | * Fix a bug in A32 VLD2 "(multiple 2-element structures)" insn | ||
31 | * net: cadence_gem: Report features correctly in ID register | ||
32 | * Avoid some unnecessary TLB flushes on TTBR register writes | ||
33 | 27 | ||
34 | ---------------------------------------------------------------- | 28 | ---------------------------------------------------------------- |
35 | Dongjiu Geng (1): | 29 | Francesco Cagnin (4): |
36 | target/arm: Add support for VCPU event states | 30 | arm: move KVM breakpoints helpers |
31 | hvf: handle access for more registers | ||
32 | hvf: add breakpoint handlers | ||
33 | hvf: add guest debugging handlers for Apple Silicon hosts | ||
37 | 34 | ||
38 | Edgar E. Iglesias (2): | 35 | Richard Henderson (20): |
39 | net: cadence_gem: Announce availability of priority queues | 36 | target/arm: Add commentary for CPUARMState.exclusive_high |
40 | net: cadence_gem: Announce 64bit addressing support | 37 | target/arm: Add feature test for FEAT_LSE2 |
38 | target/arm: Introduce finalize_memop_{atom,pair} | ||
39 | target/arm: Use tcg_gen_qemu_ld_i128 for LDXP | ||
40 | target/arm: Use tcg_gen_qemu_{st, ld}_i128 for do_fp_{st, ld} | ||
41 | target/arm: Use tcg_gen_qemu_st_i128 for STZG, STZ2G | ||
42 | target/arm: Use tcg_gen_qemu_{ld, st}_i128 in gen_sve_{ld, st}r | ||
43 | target/arm: Sink gen_mte_check1 into load/store_exclusive | ||
44 | target/arm: Load/store integer pair with one tcg operation | ||
45 | target/arm: Hoist finalize_memop out of do_gpr_{ld, st} | ||
46 | target/arm: Hoist finalize_memop out of do_fp_{ld, st} | ||
47 | target/arm: Pass memop to gen_mte_check1* | ||
48 | target/arm: Pass single_memop to gen_mte_checkN | ||
49 | target/arm: Check alignment in helper_mte_check | ||
50 | target/arm: Add SCTLR.nAA to TBFLAG_A64 | ||
51 | target/arm: Relax ordered/atomic alignment checks for LSE2 | ||
52 | target/arm: Move mte check for store-exclusive | ||
53 | tests/tcg/aarch64: Use stz2g in mte-7.c | ||
54 | tests/tcg/multiarch: Adjust sigbus.c | ||
55 | target/arm: Enable FEAT_LSE2 for -cpu max | ||
41 | 56 | ||
42 | Markus Armbruster (1): | 57 | Vikram Garhwal (4): |
43 | ssi-sd: Make devices picking up backends unavailable with -device | 58 | hw/net/can: Introduce Xilinx Versal CANFD controller |
59 | xlnx-versal: Connect Xilinx VERSAL CANFD controllers | ||
60 | MAINTAINERS: Include canfd tests under Xilinx CAN | ||
61 | tests/qtest: Introduce tests for Xilinx VERSAL CANFD controller | ||
44 | 62 | ||
45 | Peter Maydell (10): | 63 | Zhuojia Shen (3): |
46 | target/arm: Improve debug logging of AArch32 exception return | 64 | target/arm: allow DC CVA[D]P in user mode emulation |
47 | target/arm: Make switch_mode() file-local | 65 | tests/tcg/aarch64: add DC CVA[D]P tests |
48 | target/arm: Implement HCR.FB | 66 | target/arm: trap DCC access in user mode emulation |
49 | target/arm: Implement HCR.DC | ||
50 | target/arm: ISR_EL1 bits track virtual interrupts if IMO/FMO set | ||
51 | target/arm: Implement HCR.VI and VF | ||
52 | target/arm: Implement HCR.PTW | ||
53 | target/arm: New utility function to extract EC from syndrome | ||
54 | target/arm: Get IL bit correct for v7 syndrome values | ||
55 | target/arm: Report correct syndrome for FP/SIMD traps to Hyp mode | ||
56 | 67 | ||
57 | Richard Henderson (30): | 68 | qianfan Zhao (11): |
58 | target/arm: Move some system registers into a substructure | 69 | hw: arm: Add bananapi M2-Ultra and allwinner-r40 support |
59 | target/arm: V8M should not imply V7VE | 70 | hw/arm/allwinner-r40: add Clock Control Unit |
60 | target/arm: Convert v8 extensions from feature bits to isar tests | 71 | hw: allwinner-r40: Complete uart devices |
61 | target/arm: Convert division from feature bits to isar0 tests | 72 | hw: arm: allwinner-r40: Add i2c0 device |
62 | target/arm: Convert jazelle from feature bit to isar1 test | 73 | hw/misc: Rename axp209 to axp2xx and add support AXP221 PMU |
63 | target/arm: Convert t32ee from feature bit to isar3 test | 74 | hw/arm/allwinner-r40: add SDRAM controller device |
64 | target/arm: Convert sve from feature bit to aa64pfr0 test | 75 | hw: sd: allwinner-sdhost: Add sun50i-a64 SoC support |
65 | target/arm: Convert v8.2-fp16 from feature bit to aa64pfr0 test | 76 | hw: arm: allwinner-r40: Add emac and gmac support |
66 | target/arm: Hoist address increment for vector memory ops | 77 | hw: arm: allwinner-sramc: Add SRAM Controller support for R40 |
67 | target/arm: Don't call tcg_clear_temp_count | 78 | tests: avocado: boot_linux_console: Add test case for bpim2u |
68 | target/arm: Use tcg_gen_gvec_dup_i64 for LD[1-4]R | 79 | docs: system: arm: Introduce bananapi_m2u |
69 | target/arm: Promote consecutive memory ops for aa64 | ||
70 | target/arm: Mark some arrays const | ||
71 | target/arm: Use gvec for NEON VDUP | ||
72 | target/arm: Use gvec for NEON VMOV, VMVN, VBIC & VORR (immediate) | ||
73 | target/arm: Use gvec for NEON_3R_LOGIC insns | ||
74 | target/arm: Use gvec for NEON_3R_VADD_VSUB insns | ||
75 | target/arm: Use gvec for NEON_2RM_VMN, NEON_2RM_VNEG | ||
76 | target/arm: Use gvec for NEON_3R_VMUL | ||
77 | target/arm: Use gvec for VSHR, VSHL | ||
78 | target/arm: Use gvec for VSRA | ||
79 | target/arm: Use gvec for VSRI, VSLI | ||
80 | target/arm: Use gvec for NEON_3R_VML | ||
81 | target/arm: Use gvec for NEON_3R_VTST_VCEQ, NEON_3R_VCGT, NEON_3R_VCGE | ||
82 | target/arm: Use gvec for NEON VLD all lanes | ||
83 | target/arm: Reorg NEON VLD/VST all elements | ||
84 | target/arm: Promote consecutive memory ops for aa32 | ||
85 | target/arm: Reorg NEON VLD/VST single element to one lane | ||
86 | target/arm: Remove writefn from TTBR0_EL3 | ||
87 | target/arm: Only flush tlb if ASID changes | ||
88 | 80 | ||
89 | Stewart Hildebrand (1): | 81 | MAINTAINERS | 2 +- |
90 | hw/arm/boot: Increase compliance with kernel arm64 boot protocol | 82 | docs/system/arm/bananapi_m2u.rst | 139 +++ |
91 | 83 | docs/system/arm/emulation.rst | 1 + | |
92 | target/arm/cpu.h | 227 ++++++- | 84 | docs/system/arm/xlnx-versal-virt.rst | 31 + |
93 | target/arm/internals.h | 45 +- | 85 | docs/system/target-arm.rst | 1 + |
94 | target/arm/kvm_arm.h | 24 + | 86 | include/hw/arm/allwinner-r40.h | 143 +++ |
95 | target/arm/translate.h | 21 + | 87 | include/hw/arm/xlnx-versal.h | 12 + |
96 | hw/arm/boot.c | 18 + | 88 | include/hw/misc/allwinner-r40-ccu.h | 65 + |
97 | hw/intc/armv7m_nvic.c | 12 +- | 89 | include/hw/misc/allwinner-r40-dramc.h | 108 ++ |
98 | hw/net/cadence_gem.c | 9 +- | 90 | include/hw/misc/allwinner-sramc.h | 69 ++ |
99 | hw/sd/ssi-sd.c | 2 + | 91 | include/hw/net/xlnx-versal-canfd.h | 87 ++ |
100 | linux-user/aarch64/signal.c | 4 +- | 92 | include/hw/sd/allwinner-sdhost.h | 9 + |
101 | linux-user/elfload.c | 60 +- | 93 | include/sysemu/hvf.h | 37 + |
102 | linux-user/syscall.c | 10 +- | 94 | include/sysemu/hvf_int.h | 2 + |
103 | target/arm/cpu.c | 242 ++++---- | 95 | target/arm/cpu.h | 16 +- |
104 | target/arm/cpu64.c | 148 +++-- | 96 | target/arm/hvf_arm.h | 7 + |
105 | target/arm/helper.c | 397 ++++++++---- | 97 | target/arm/internals.h | 53 +- |
106 | target/arm/kvm.c | 60 ++ | 98 | target/arm/tcg/helper-a64.h | 3 + |
107 | target/arm/kvm32.c | 13 + | 99 | target/arm/tcg/translate-a64.h | 4 +- |
108 | target/arm/kvm64.c | 15 +- | 100 | target/arm/tcg/translate.h | 65 +- |
109 | target/arm/machine.c | 28 +- | 101 | accel/hvf/hvf-accel-ops.c | 119 ++ |
110 | target/arm/op_helper.c | 2 +- | 102 | accel/hvf/hvf-all.c | 23 + |
111 | target/arm/translate-a64.c | 715 ++++----------------- | 103 | hw/arm/allwinner-r40.c | 526 ++++++++ |
112 | target/arm/translate.c | 1451 ++++++++++++++++++++++++++++--------------- | 104 | hw/arm/bananapi_m2u.c | 145 +++ |
113 | 21 files changed, 2021 insertions(+), 1482 deletions(-) | 105 | hw/arm/xlnx-versal-virt.c | 53 + |
114 | 106 | hw/arm/xlnx-versal.c | 37 + | |
107 | hw/misc/allwinner-r40-ccu.c | 209 ++++ | ||
108 | hw/misc/allwinner-r40-dramc.c | 513 ++++++++ | ||
109 | hw/misc/allwinner-sramc.c | 184 +++ | ||
110 | hw/misc/axp209.c | 238 ---- | ||
111 | hw/misc/axp2xx.c | 283 +++++ | ||
112 | hw/net/can/xlnx-versal-canfd.c | 2107 +++++++++++++++++++++++++++++++++ | ||
113 | hw/sd/allwinner-sdhost.c | 72 +- | ||
114 | target/arm/cpu.c | 2 + | ||
115 | target/arm/debug_helper.c | 5 + | ||
116 | target/arm/helper.c | 6 +- | ||
117 | target/arm/hvf/hvf.c | 750 +++++++++++- | ||
118 | target/arm/hyp_gdbstub.c | 253 ++++ | ||
119 | target/arm/kvm64.c | 276 ----- | ||
120 | target/arm/tcg/cpu64.c | 1 + | ||
121 | target/arm/tcg/helper-a64.c | 7 + | ||
122 | target/arm/tcg/hflags.c | 6 + | ||
123 | target/arm/tcg/mte_helper.c | 18 + | ||
124 | target/arm/tcg/translate-a64.c | 477 +++++--- | ||
125 | target/arm/tcg/translate-sve.c | 106 +- | ||
126 | target/arm/tcg/translate.c | 1 + | ||
127 | target/i386/hvf/hvf.c | 33 + | ||
128 | tests/qtest/xlnx-canfd-test.c | 423 +++++++ | ||
129 | tests/tcg/aarch64/dcpodp.c | 63 + | ||
130 | tests/tcg/aarch64/dcpop.c | 63 + | ||
131 | tests/tcg/aarch64/mte-7.c | 3 +- | ||
132 | tests/tcg/multiarch/sigbus.c | 13 +- | ||
133 | hw/arm/Kconfig | 14 +- | ||
134 | hw/arm/meson.build | 1 + | ||
135 | hw/misc/Kconfig | 5 +- | ||
136 | hw/misc/meson.build | 5 +- | ||
137 | hw/misc/trace-events | 26 +- | ||
138 | hw/net/can/meson.build | 1 + | ||
139 | hw/net/can/trace-events | 7 + | ||
140 | target/arm/meson.build | 3 +- | ||
141 | tests/avocado/boot_linux_console.py | 176 +++ | ||
142 | tests/qtest/meson.build | 1 + | ||
143 | tests/tcg/aarch64/Makefile.target | 11 + | ||
144 | 63 files changed, 7386 insertions(+), 733 deletions(-) | ||
145 | create mode 100644 docs/system/arm/bananapi_m2u.rst | ||
146 | create mode 100644 include/hw/arm/allwinner-r40.h | ||
147 | create mode 100644 include/hw/misc/allwinner-r40-ccu.h | ||
148 | create mode 100644 include/hw/misc/allwinner-r40-dramc.h | ||
149 | create mode 100644 include/hw/misc/allwinner-sramc.h | ||
150 | create mode 100644 include/hw/net/xlnx-versal-canfd.h | ||
151 | create mode 100644 hw/arm/allwinner-r40.c | ||
152 | create mode 100644 hw/arm/bananapi_m2u.c | ||
153 | create mode 100644 hw/misc/allwinner-r40-ccu.c | ||
154 | create mode 100644 hw/misc/allwinner-r40-dramc.c | ||
155 | create mode 100644 hw/misc/allwinner-sramc.c | ||
156 | delete mode 100644 hw/misc/axp209.c | ||
157 | create mode 100644 hw/misc/axp2xx.c | ||
158 | create mode 100644 hw/net/can/xlnx-versal-canfd.c | ||
159 | create mode 100644 target/arm/hyp_gdbstub.c | ||
160 | create mode 100644 tests/qtest/xlnx-canfd-test.c | ||
161 | create mode 100644 tests/tcg/aarch64/dcpodp.c | ||
162 | create mode 100644 tests/tcg/aarch64/dcpop.c | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Francesco Cagnin <fcagnin@quarkslab.com> |
---|---|---|---|
2 | 2 | ||
3 | Move ssra_op and usra_op expanders from translate-a64.c. | 3 | These helpers will also be used for HVF. Aside from reformatting a |
4 | couple of comments for 'checkpatch.pl' and updating meson to compile | ||
5 | 'hyp_gdbstub.c', this is just code motion. | ||
4 | 6 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Francesco Cagnin <fcagnin@quarkslab.com> |
6 | Message-id: 20181011205206.3552-14-richard.henderson@linaro.org | 8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Message-id: 20230601153107.81955-2-fcagnin@quarkslab.com | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 12 | --- |
10 | target/arm/translate.h | 2 + | 13 | target/arm/internals.h | 50 +++++++ |
11 | target/arm/translate-a64.c | 106 ---------------------------- | 14 | target/arm/hyp_gdbstub.c | 253 +++++++++++++++++++++++++++++++++++ |
12 | target/arm/translate.c | 139 ++++++++++++++++++++++++++++++++++--- | 15 | target/arm/kvm64.c | 276 --------------------------------------- |
13 | 3 files changed, 130 insertions(+), 117 deletions(-) | 16 | target/arm/meson.build | 3 +- |
17 | 4 files changed, 305 insertions(+), 277 deletions(-) | ||
18 | create mode 100644 target/arm/hyp_gdbstub.c | ||
14 | 19 | ||
15 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 20 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
16 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate.h | 22 | --- a/target/arm/internals.h |
18 | +++ b/target/arm/translate.h | 23 | +++ b/target/arm/internals.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void) | 24 | @@ -XXX,XX +XXX,XX @@ static inline bool arm_fgt_active(CPUARMState *env, int el) |
20 | extern const GVecGen3 bsl_op; | 25 | } |
21 | extern const GVecGen3 bit_op; | 26 | |
22 | extern const GVecGen3 bif_op; | 27 | void assert_hflags_rebuild_correctly(CPUARMState *env); |
23 | +extern const GVecGen2i ssra_op[4]; | 28 | + |
24 | +extern const GVecGen2i usra_op[4]; | 29 | +/* |
25 | 30 | + * Although the ARM implementation of hardware assisted debugging | |
26 | /* | 31 | + * allows for different breakpoints per-core, the current GDB |
27 | * Forward to the isar_feature_* tests given a DisasContext pointer. | 32 | + * interface treats them as a global pool of registers (which seems to |
28 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 33 | + * be the case for x86, ppc and s390). As a result we store one copy |
34 | + * of registers which is used for all active cores. | ||
35 | + * | ||
36 | + * Write access is serialised by virtue of the GDB protocol which | ||
37 | + * updates things. Read access (i.e. when the values are copied to the | ||
38 | + * vCPU) is also gated by GDB's run control. | ||
39 | + * | ||
40 | + * This is not unreasonable as most of the time debugging kernels you | ||
41 | + * never know which core will eventually execute your function. | ||
42 | + */ | ||
43 | + | ||
44 | +typedef struct { | ||
45 | + uint64_t bcr; | ||
46 | + uint64_t bvr; | ||
47 | +} HWBreakpoint; | ||
48 | + | ||
49 | +/* | ||
50 | + * The watchpoint registers can cover more area than the requested | ||
51 | + * watchpoint so we need to store the additional information | ||
52 | + * somewhere. We also need to supply a CPUWatchpoint to the GDB stub | ||
53 | + * when the watchpoint is hit. | ||
54 | + */ | ||
55 | +typedef struct { | ||
56 | + uint64_t wcr; | ||
57 | + uint64_t wvr; | ||
58 | + CPUWatchpoint details; | ||
59 | +} HWWatchpoint; | ||
60 | + | ||
61 | +/* Maximum and current break/watch point counts */ | ||
62 | +extern int max_hw_bps, max_hw_wps; | ||
63 | +extern GArray *hw_breakpoints, *hw_watchpoints; | ||
64 | + | ||
65 | +#define cur_hw_wps (hw_watchpoints->len) | ||
66 | +#define cur_hw_bps (hw_breakpoints->len) | ||
67 | +#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) | ||
68 | +#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) | ||
69 | + | ||
70 | +bool find_hw_breakpoint(CPUState *cpu, target_ulong pc); | ||
71 | +int insert_hw_breakpoint(target_ulong pc); | ||
72 | +int delete_hw_breakpoint(target_ulong pc); | ||
73 | + | ||
74 | +bool check_watchpoint_in_range(int i, target_ulong addr); | ||
75 | +CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr); | ||
76 | +int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type); | ||
77 | +int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type); | ||
78 | #endif | ||
79 | diff --git a/target/arm/hyp_gdbstub.c b/target/arm/hyp_gdbstub.c | ||
80 | new file mode 100644 | ||
81 | index XXXXXXX..XXXXXXX | ||
82 | --- /dev/null | ||
83 | +++ b/target/arm/hyp_gdbstub.c | ||
84 | @@ -XXX,XX +XXX,XX @@ | ||
85 | +/* | ||
86 | + * ARM implementation of KVM and HVF hooks, 64 bit specific code | ||
87 | + * | ||
88 | + * Copyright Mian-M. Hamayun 2013, Virtual Open Systems | ||
89 | + * Copyright Alex Bennée 2014, Linaro | ||
90 | + * | ||
91 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
92 | + * See the COPYING file in the top-level directory. | ||
93 | + * | ||
94 | + */ | ||
95 | + | ||
96 | +#include "qemu/osdep.h" | ||
97 | +#include "cpu.h" | ||
98 | +#include "internals.h" | ||
99 | +#include "exec/gdbstub.h" | ||
100 | + | ||
101 | +/* Maximum and current break/watch point counts */ | ||
102 | +int max_hw_bps, max_hw_wps; | ||
103 | +GArray *hw_breakpoints, *hw_watchpoints; | ||
104 | + | ||
105 | +/** | ||
106 | + * insert_hw_breakpoint() | ||
107 | + * @addr: address of breakpoint | ||
108 | + * | ||
109 | + * See ARM ARM D2.9.1 for details but here we are only going to create | ||
110 | + * simple un-linked breakpoints (i.e. we don't chain breakpoints | ||
111 | + * together to match address and context or vmid). The hardware is | ||
112 | + * capable of fancier matching but that will require exposing that | ||
113 | + * fanciness to GDB's interface | ||
114 | + * | ||
115 | + * DBGBCR<n>_EL1, Debug Breakpoint Control Registers | ||
116 | + * | ||
117 | + * 31 24 23 20 19 16 15 14 13 12 9 8 5 4 3 2 1 0 | ||
118 | + * +------+------+-------+-----+----+------+-----+------+-----+---+ | ||
119 | + * | RES0 | BT | LBN | SSC | HMC| RES0 | BAS | RES0 | PMC | E | | ||
120 | + * +------+------+-------+-----+----+------+-----+------+-----+---+ | ||
121 | + * | ||
122 | + * BT: Breakpoint type (0 = unlinked address match) | ||
123 | + * LBN: Linked BP number (0 = unused) | ||
124 | + * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12) | ||
125 | + * BAS: Byte Address Select (RES1 for AArch64) | ||
126 | + * E: Enable bit | ||
127 | + * | ||
128 | + * DBGBVR<n>_EL1, Debug Breakpoint Value Registers | ||
129 | + * | ||
130 | + * 63 53 52 49 48 2 1 0 | ||
131 | + * +------+-----------+----------+-----+ | ||
132 | + * | RESS | VA[52:49] | VA[48:2] | 0 0 | | ||
133 | + * +------+-----------+----------+-----+ | ||
134 | + * | ||
135 | + * Depending on the addressing mode bits the top bits of the register | ||
136 | + * are a sign extension of the highest applicable VA bit. Some | ||
137 | + * versions of GDB don't do it correctly so we ensure they are correct | ||
138 | + * here so future PC comparisons will work properly. | ||
139 | + */ | ||
140 | + | ||
141 | +int insert_hw_breakpoint(target_ulong addr) | ||
142 | +{ | ||
143 | + HWBreakpoint brk = { | ||
144 | + .bcr = 0x1, /* BCR E=1, enable */ | ||
145 | + .bvr = sextract64(addr, 0, 53) | ||
146 | + }; | ||
147 | + | ||
148 | + if (cur_hw_bps >= max_hw_bps) { | ||
149 | + return -ENOBUFS; | ||
150 | + } | ||
151 | + | ||
152 | + brk.bcr = deposit32(brk.bcr, 1, 2, 0x3); /* PMC = 11 */ | ||
153 | + brk.bcr = deposit32(brk.bcr, 5, 4, 0xf); /* BAS = RES1 */ | ||
154 | + | ||
155 | + g_array_append_val(hw_breakpoints, brk); | ||
156 | + | ||
157 | + return 0; | ||
158 | +} | ||
159 | + | ||
160 | +/** | ||
161 | + * delete_hw_breakpoint() | ||
162 | + * @pc: address of breakpoint | ||
163 | + * | ||
164 | + * Delete a breakpoint and shuffle any above down | ||
165 | + */ | ||
166 | + | ||
167 | +int delete_hw_breakpoint(target_ulong pc) | ||
168 | +{ | ||
169 | + int i; | ||
170 | + for (i = 0; i < hw_breakpoints->len; i++) { | ||
171 | + HWBreakpoint *brk = get_hw_bp(i); | ||
172 | + if (brk->bvr == pc) { | ||
173 | + g_array_remove_index(hw_breakpoints, i); | ||
174 | + return 0; | ||
175 | + } | ||
176 | + } | ||
177 | + return -ENOENT; | ||
178 | +} | ||
179 | + | ||
180 | +/** | ||
181 | + * insert_hw_watchpoint() | ||
182 | + * @addr: address of watch point | ||
183 | + * @len: size of area | ||
184 | + * @type: type of watch point | ||
185 | + * | ||
186 | + * See ARM ARM D2.10. As with the breakpoints we can do some advanced | ||
187 | + * stuff if we want to. The watch points can be linked with the break | ||
188 | + * points above to make them context aware. However for simplicity | ||
189 | + * currently we only deal with simple read/write watch points. | ||
190 | + * | ||
191 | + * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers | ||
192 | + * | ||
193 | + * 31 29 28 24 23 21 20 19 16 15 14 13 12 5 4 3 2 1 0 | ||
194 | + * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+ | ||
195 | + * | RES0 | MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E | | ||
196 | + * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+ | ||
197 | + * | ||
198 | + * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes)) | ||
199 | + * WT: 0 - unlinked, 1 - linked (not currently used) | ||
200 | + * LBN: Linked BP number (not currently used) | ||
201 | + * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11) | ||
202 | + * BAS: Byte Address Select | ||
203 | + * LSC: Load/Store control (01: load, 10: store, 11: both) | ||
204 | + * E: Enable | ||
205 | + * | ||
206 | + * The bottom 2 bits of the value register are masked. Therefore to | ||
207 | + * break on any sizes smaller than an unaligned word you need to set | ||
208 | + * MASK=0, BAS=bit per byte in question. For larger regions (^2) you | ||
209 | + * need to ensure you mask the address as required and set BAS=0xff | ||
210 | + */ | ||
211 | + | ||
212 | +int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type) | ||
213 | +{ | ||
214 | + HWWatchpoint wp = { | ||
215 | + .wcr = R_DBGWCR_E_MASK, /* E=1, enable */ | ||
216 | + .wvr = addr & (~0x7ULL), | ||
217 | + .details = { .vaddr = addr, .len = len } | ||
218 | + }; | ||
219 | + | ||
220 | + if (cur_hw_wps >= max_hw_wps) { | ||
221 | + return -ENOBUFS; | ||
222 | + } | ||
223 | + | ||
224 | + /* | ||
225 | + * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state, | ||
226 | + * valid whether EL3 is implemented or not | ||
227 | + */ | ||
228 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3); | ||
229 | + | ||
230 | + switch (type) { | ||
231 | + case GDB_WATCHPOINT_READ: | ||
232 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1); | ||
233 | + wp.details.flags = BP_MEM_READ; | ||
234 | + break; | ||
235 | + case GDB_WATCHPOINT_WRITE: | ||
236 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2); | ||
237 | + wp.details.flags = BP_MEM_WRITE; | ||
238 | + break; | ||
239 | + case GDB_WATCHPOINT_ACCESS: | ||
240 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3); | ||
241 | + wp.details.flags = BP_MEM_ACCESS; | ||
242 | + break; | ||
243 | + default: | ||
244 | + g_assert_not_reached(); | ||
245 | + break; | ||
246 | + } | ||
247 | + if (len <= 8) { | ||
248 | + /* we align the address and set the bits in BAS */ | ||
249 | + int off = addr & 0x7; | ||
250 | + int bas = (1 << len) - 1; | ||
251 | + | ||
252 | + wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas); | ||
253 | + } else { | ||
254 | + /* For ranges above 8 bytes we need to be a power of 2 */ | ||
255 | + if (is_power_of_2(len)) { | ||
256 | + int bits = ctz64(len); | ||
257 | + | ||
258 | + wp.wvr &= ~((1 << bits) - 1); | ||
259 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits); | ||
260 | + wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff); | ||
261 | + } else { | ||
262 | + return -ENOBUFS; | ||
263 | + } | ||
264 | + } | ||
265 | + | ||
266 | + g_array_append_val(hw_watchpoints, wp); | ||
267 | + return 0; | ||
268 | +} | ||
269 | + | ||
270 | +bool check_watchpoint_in_range(int i, target_ulong addr) | ||
271 | +{ | ||
272 | + HWWatchpoint *wp = get_hw_wp(i); | ||
273 | + uint64_t addr_top, addr_bottom = wp->wvr; | ||
274 | + int bas = extract32(wp->wcr, 5, 8); | ||
275 | + int mask = extract32(wp->wcr, 24, 4); | ||
276 | + | ||
277 | + if (mask) { | ||
278 | + addr_top = addr_bottom + (1 << mask); | ||
279 | + } else { | ||
280 | + /* | ||
281 | + * BAS must be contiguous but can offset against the base | ||
282 | + * address in DBGWVR | ||
283 | + */ | ||
284 | + addr_bottom = addr_bottom + ctz32(bas); | ||
285 | + addr_top = addr_bottom + clo32(bas); | ||
286 | + } | ||
287 | + | ||
288 | + if (addr >= addr_bottom && addr <= addr_top) { | ||
289 | + return true; | ||
290 | + } | ||
291 | + | ||
292 | + return false; | ||
293 | +} | ||
294 | + | ||
295 | +/** | ||
296 | + * delete_hw_watchpoint() | ||
297 | + * @addr: address of breakpoint | ||
298 | + * | ||
299 | + * Delete a breakpoint and shuffle any above down | ||
300 | + */ | ||
301 | + | ||
302 | +int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type) | ||
303 | +{ | ||
304 | + int i; | ||
305 | + for (i = 0; i < cur_hw_wps; i++) { | ||
306 | + if (check_watchpoint_in_range(i, addr)) { | ||
307 | + g_array_remove_index(hw_watchpoints, i); | ||
308 | + return 0; | ||
309 | + } | ||
310 | + } | ||
311 | + return -ENOENT; | ||
312 | +} | ||
313 | + | ||
314 | +bool find_hw_breakpoint(CPUState *cpu, target_ulong pc) | ||
315 | +{ | ||
316 | + int i; | ||
317 | + | ||
318 | + for (i = 0; i < cur_hw_bps; i++) { | ||
319 | + HWBreakpoint *bp = get_hw_bp(i); | ||
320 | + if (bp->bvr == pc) { | ||
321 | + return true; | ||
322 | + } | ||
323 | + } | ||
324 | + return false; | ||
325 | +} | ||
326 | + | ||
327 | +CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr) | ||
328 | +{ | ||
329 | + int i; | ||
330 | + | ||
331 | + for (i = 0; i < cur_hw_wps; i++) { | ||
332 | + if (check_watchpoint_in_range(i, addr)) { | ||
333 | + return &get_hw_wp(i)->details; | ||
334 | + } | ||
335 | + } | ||
336 | + return NULL; | ||
337 | +} | ||
338 | diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | 339 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/translate-a64.c | 340 | --- a/target/arm/kvm64.c |
31 | +++ b/target/arm/translate-a64.c | 341 | +++ b/target/arm/kvm64.c |
32 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) | 342 | @@ -XXX,XX +XXX,XX @@ |
33 | } | 343 | |
344 | static bool have_guest_debug; | ||
345 | |||
346 | -/* | ||
347 | - * Although the ARM implementation of hardware assisted debugging | ||
348 | - * allows for different breakpoints per-core, the current GDB | ||
349 | - * interface treats them as a global pool of registers (which seems to | ||
350 | - * be the case for x86, ppc and s390). As a result we store one copy | ||
351 | - * of registers which is used for all active cores. | ||
352 | - * | ||
353 | - * Write access is serialised by virtue of the GDB protocol which | ||
354 | - * updates things. Read access (i.e. when the values are copied to the | ||
355 | - * vCPU) is also gated by GDB's run control. | ||
356 | - * | ||
357 | - * This is not unreasonable as most of the time debugging kernels you | ||
358 | - * never know which core will eventually execute your function. | ||
359 | - */ | ||
360 | - | ||
361 | -typedef struct { | ||
362 | - uint64_t bcr; | ||
363 | - uint64_t bvr; | ||
364 | -} HWBreakpoint; | ||
365 | - | ||
366 | -/* The watchpoint registers can cover more area than the requested | ||
367 | - * watchpoint so we need to store the additional information | ||
368 | - * somewhere. We also need to supply a CPUWatchpoint to the GDB stub | ||
369 | - * when the watchpoint is hit. | ||
370 | - */ | ||
371 | -typedef struct { | ||
372 | - uint64_t wcr; | ||
373 | - uint64_t wvr; | ||
374 | - CPUWatchpoint details; | ||
375 | -} HWWatchpoint; | ||
376 | - | ||
377 | -/* Maximum and current break/watch point counts */ | ||
378 | -int max_hw_bps, max_hw_wps; | ||
379 | -GArray *hw_breakpoints, *hw_watchpoints; | ||
380 | - | ||
381 | -#define cur_hw_wps (hw_watchpoints->len) | ||
382 | -#define cur_hw_bps (hw_breakpoints->len) | ||
383 | -#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) | ||
384 | -#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) | ||
385 | - | ||
386 | void kvm_arm_init_debug(KVMState *s) | ||
387 | { | ||
388 | have_guest_debug = kvm_check_extension(s, | ||
389 | @@ -XXX,XX +XXX,XX @@ void kvm_arm_init_debug(KVMState *s) | ||
390 | return; | ||
34 | } | 391 | } |
35 | 392 | ||
36 | -static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 393 | -/** |
394 | - * insert_hw_breakpoint() | ||
395 | - * @addr: address of breakpoint | ||
396 | - * | ||
397 | - * See ARM ARM D2.9.1 for details but here we are only going to create | ||
398 | - * simple un-linked breakpoints (i.e. we don't chain breakpoints | ||
399 | - * together to match address and context or vmid). The hardware is | ||
400 | - * capable of fancier matching but that will require exposing that | ||
401 | - * fanciness to GDB's interface | ||
402 | - * | ||
403 | - * DBGBCR<n>_EL1, Debug Breakpoint Control Registers | ||
404 | - * | ||
405 | - * 31 24 23 20 19 16 15 14 13 12 9 8 5 4 3 2 1 0 | ||
406 | - * +------+------+-------+-----+----+------+-----+------+-----+---+ | ||
407 | - * | RES0 | BT | LBN | SSC | HMC| RES0 | BAS | RES0 | PMC | E | | ||
408 | - * +------+------+-------+-----+----+------+-----+------+-----+---+ | ||
409 | - * | ||
410 | - * BT: Breakpoint type (0 = unlinked address match) | ||
411 | - * LBN: Linked BP number (0 = unused) | ||
412 | - * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12) | ||
413 | - * BAS: Byte Address Select (RES1 for AArch64) | ||
414 | - * E: Enable bit | ||
415 | - * | ||
416 | - * DBGBVR<n>_EL1, Debug Breakpoint Value Registers | ||
417 | - * | ||
418 | - * 63 53 52 49 48 2 1 0 | ||
419 | - * +------+-----------+----------+-----+ | ||
420 | - * | RESS | VA[52:49] | VA[48:2] | 0 0 | | ||
421 | - * +------+-----------+----------+-----+ | ||
422 | - * | ||
423 | - * Depending on the addressing mode bits the top bits of the register | ||
424 | - * are a sign extension of the highest applicable VA bit. Some | ||
425 | - * versions of GDB don't do it correctly so we ensure they are correct | ||
426 | - * here so future PC comparisons will work properly. | ||
427 | - */ | ||
428 | - | ||
429 | -static int insert_hw_breakpoint(target_ulong addr) | ||
37 | -{ | 430 | -{ |
38 | - tcg_gen_vec_sar8i_i64(a, a, shift); | 431 | - HWBreakpoint brk = { |
39 | - tcg_gen_vec_add8_i64(d, d, a); | 432 | - .bcr = 0x1, /* BCR E=1, enable */ |
433 | - .bvr = sextract64(addr, 0, 53) | ||
434 | - }; | ||
435 | - | ||
436 | - if (cur_hw_bps >= max_hw_bps) { | ||
437 | - return -ENOBUFS; | ||
438 | - } | ||
439 | - | ||
440 | - brk.bcr = deposit32(brk.bcr, 1, 2, 0x3); /* PMC = 11 */ | ||
441 | - brk.bcr = deposit32(brk.bcr, 5, 4, 0xf); /* BAS = RES1 */ | ||
442 | - | ||
443 | - g_array_append_val(hw_breakpoints, brk); | ||
444 | - | ||
445 | - return 0; | ||
40 | -} | 446 | -} |
41 | - | 447 | - |
42 | -static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 448 | -/** |
449 | - * delete_hw_breakpoint() | ||
450 | - * @pc: address of breakpoint | ||
451 | - * | ||
452 | - * Delete a breakpoint and shuffle any above down | ||
453 | - */ | ||
454 | - | ||
455 | -static int delete_hw_breakpoint(target_ulong pc) | ||
43 | -{ | 456 | -{ |
44 | - tcg_gen_vec_sar16i_i64(a, a, shift); | 457 | - int i; |
45 | - tcg_gen_vec_add16_i64(d, d, a); | 458 | - for (i = 0; i < hw_breakpoints->len; i++) { |
459 | - HWBreakpoint *brk = get_hw_bp(i); | ||
460 | - if (brk->bvr == pc) { | ||
461 | - g_array_remove_index(hw_breakpoints, i); | ||
462 | - return 0; | ||
463 | - } | ||
464 | - } | ||
465 | - return -ENOENT; | ||
46 | -} | 466 | -} |
47 | - | 467 | - |
48 | -static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | 468 | -/** |
469 | - * insert_hw_watchpoint() | ||
470 | - * @addr: address of watch point | ||
471 | - * @len: size of area | ||
472 | - * @type: type of watch point | ||
473 | - * | ||
474 | - * See ARM ARM D2.10. As with the breakpoints we can do some advanced | ||
475 | - * stuff if we want to. The watch points can be linked with the break | ||
476 | - * points above to make them context aware. However for simplicity | ||
477 | - * currently we only deal with simple read/write watch points. | ||
478 | - * | ||
479 | - * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers | ||
480 | - * | ||
481 | - * 31 29 28 24 23 21 20 19 16 15 14 13 12 5 4 3 2 1 0 | ||
482 | - * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+ | ||
483 | - * | RES0 | MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E | | ||
484 | - * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+ | ||
485 | - * | ||
486 | - * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes)) | ||
487 | - * WT: 0 - unlinked, 1 - linked (not currently used) | ||
488 | - * LBN: Linked BP number (not currently used) | ||
489 | - * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11) | ||
490 | - * BAS: Byte Address Select | ||
491 | - * LSC: Load/Store control (01: load, 10: store, 11: both) | ||
492 | - * E: Enable | ||
493 | - * | ||
494 | - * The bottom 2 bits of the value register are masked. Therefore to | ||
495 | - * break on any sizes smaller than an unaligned word you need to set | ||
496 | - * MASK=0, BAS=bit per byte in question. For larger regions (^2) you | ||
497 | - * need to ensure you mask the address as required and set BAS=0xff | ||
498 | - */ | ||
499 | - | ||
500 | -static int insert_hw_watchpoint(target_ulong addr, | ||
501 | - target_ulong len, int type) | ||
49 | -{ | 502 | -{ |
50 | - tcg_gen_sari_i32(a, a, shift); | 503 | - HWWatchpoint wp = { |
51 | - tcg_gen_add_i32(d, d, a); | 504 | - .wcr = R_DBGWCR_E_MASK, /* E=1, enable */ |
505 | - .wvr = addr & (~0x7ULL), | ||
506 | - .details = { .vaddr = addr, .len = len } | ||
507 | - }; | ||
508 | - | ||
509 | - if (cur_hw_wps >= max_hw_wps) { | ||
510 | - return -ENOBUFS; | ||
511 | - } | ||
512 | - | ||
513 | - /* | ||
514 | - * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state, | ||
515 | - * valid whether EL3 is implemented or not | ||
516 | - */ | ||
517 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3); | ||
518 | - | ||
519 | - switch (type) { | ||
520 | - case GDB_WATCHPOINT_READ: | ||
521 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1); | ||
522 | - wp.details.flags = BP_MEM_READ; | ||
523 | - break; | ||
524 | - case GDB_WATCHPOINT_WRITE: | ||
525 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2); | ||
526 | - wp.details.flags = BP_MEM_WRITE; | ||
527 | - break; | ||
528 | - case GDB_WATCHPOINT_ACCESS: | ||
529 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3); | ||
530 | - wp.details.flags = BP_MEM_ACCESS; | ||
531 | - break; | ||
532 | - default: | ||
533 | - g_assert_not_reached(); | ||
534 | - break; | ||
535 | - } | ||
536 | - if (len <= 8) { | ||
537 | - /* we align the address and set the bits in BAS */ | ||
538 | - int off = addr & 0x7; | ||
539 | - int bas = (1 << len) - 1; | ||
540 | - | ||
541 | - wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas); | ||
542 | - } else { | ||
543 | - /* For ranges above 8 bytes we need to be a power of 2 */ | ||
544 | - if (is_power_of_2(len)) { | ||
545 | - int bits = ctz64(len); | ||
546 | - | ||
547 | - wp.wvr &= ~((1 << bits) - 1); | ||
548 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits); | ||
549 | - wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff); | ||
550 | - } else { | ||
551 | - return -ENOBUFS; | ||
552 | - } | ||
553 | - } | ||
554 | - | ||
555 | - g_array_append_val(hw_watchpoints, wp); | ||
556 | - return 0; | ||
52 | -} | 557 | -} |
53 | - | 558 | - |
54 | -static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 559 | - |
560 | -static bool check_watchpoint_in_range(int i, target_ulong addr) | ||
55 | -{ | 561 | -{ |
56 | - tcg_gen_sari_i64(a, a, shift); | 562 | - HWWatchpoint *wp = get_hw_wp(i); |
57 | - tcg_gen_add_i64(d, d, a); | 563 | - uint64_t addr_top, addr_bottom = wp->wvr; |
564 | - int bas = extract32(wp->wcr, 5, 8); | ||
565 | - int mask = extract32(wp->wcr, 24, 4); | ||
566 | - | ||
567 | - if (mask) { | ||
568 | - addr_top = addr_bottom + (1 << mask); | ||
569 | - } else { | ||
570 | - /* BAS must be contiguous but can offset against the base | ||
571 | - * address in DBGWVR */ | ||
572 | - addr_bottom = addr_bottom + ctz32(bas); | ||
573 | - addr_top = addr_bottom + clo32(bas); | ||
574 | - } | ||
575 | - | ||
576 | - if (addr >= addr_bottom && addr <= addr_top) { | ||
577 | - return true; | ||
578 | - } | ||
579 | - | ||
580 | - return false; | ||
58 | -} | 581 | -} |
59 | - | 582 | - |
60 | -static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | 583 | -/** |
584 | - * delete_hw_watchpoint() | ||
585 | - * @addr: address of breakpoint | ||
586 | - * | ||
587 | - * Delete a breakpoint and shuffle any above down | ||
588 | - */ | ||
589 | - | ||
590 | -static int delete_hw_watchpoint(target_ulong addr, | ||
591 | - target_ulong len, int type) | ||
61 | -{ | 592 | -{ |
62 | - tcg_gen_sari_vec(vece, a, a, sh); | 593 | - int i; |
63 | - tcg_gen_add_vec(vece, d, d, a); | 594 | - for (i = 0; i < cur_hw_wps; i++) { |
595 | - if (check_watchpoint_in_range(i, addr)) { | ||
596 | - g_array_remove_index(hw_watchpoints, i); | ||
597 | - return 0; | ||
598 | - } | ||
599 | - } | ||
600 | - return -ENOENT; | ||
64 | -} | 601 | -} |
65 | - | 602 | - |
66 | -static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 603 | - |
604 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, | ||
605 | target_ulong len, int type) | ||
606 | { | ||
607 | @@ -XXX,XX +XXX,XX @@ bool kvm_arm_hw_debug_active(CPUState *cs) | ||
608 | return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); | ||
609 | } | ||
610 | |||
611 | -static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc) | ||
67 | -{ | 612 | -{ |
68 | - tcg_gen_vec_shr8i_i64(a, a, shift); | 613 | - int i; |
69 | - tcg_gen_vec_add8_i64(d, d, a); | 614 | - |
615 | - for (i = 0; i < cur_hw_bps; i++) { | ||
616 | - HWBreakpoint *bp = get_hw_bp(i); | ||
617 | - if (bp->bvr == pc) { | ||
618 | - return true; | ||
619 | - } | ||
620 | - } | ||
621 | - return false; | ||
70 | -} | 622 | -} |
71 | - | 623 | - |
72 | -static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 624 | -static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr) |
73 | -{ | 625 | -{ |
74 | - tcg_gen_vec_shr16i_i64(a, a, shift); | 626 | - int i; |
75 | - tcg_gen_vec_add16_i64(d, d, a); | 627 | - |
628 | - for (i = 0; i < cur_hw_wps; i++) { | ||
629 | - if (check_watchpoint_in_range(i, addr)) { | ||
630 | - return &get_hw_wp(i)->details; | ||
631 | - } | ||
632 | - } | ||
633 | - return NULL; | ||
76 | -} | 634 | -} |
77 | - | 635 | - |
78 | -static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | 636 | static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr, |
79 | -{ | 637 | const char *name) |
80 | - tcg_gen_shri_i32(a, a, shift); | ||
81 | - tcg_gen_add_i32(d, d, a); | ||
82 | -} | ||
83 | - | ||
84 | -static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
85 | -{ | ||
86 | - tcg_gen_shri_i64(a, a, shift); | ||
87 | - tcg_gen_add_i64(d, d, a); | ||
88 | -} | ||
89 | - | ||
90 | -static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
91 | -{ | ||
92 | - tcg_gen_shri_vec(vece, a, a, sh); | ||
93 | - tcg_gen_add_vec(vece, d, d, a); | ||
94 | -} | ||
95 | - | ||
96 | static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
97 | { | 638 | { |
98 | uint64_t mask = dup_const(MO_8, 0xff >> shift); | 639 | diff --git a/target/arm/meson.build b/target/arm/meson.build |
99 | @@ -XXX,XX +XXX,XX @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
100 | static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, | ||
101 | int immh, int immb, int opcode, int rn, int rd) | ||
102 | { | ||
103 | - static const GVecGen2i ssra_op[4] = { | ||
104 | - { .fni8 = gen_ssra8_i64, | ||
105 | - .fniv = gen_ssra_vec, | ||
106 | - .load_dest = true, | ||
107 | - .opc = INDEX_op_sari_vec, | ||
108 | - .vece = MO_8 }, | ||
109 | - { .fni8 = gen_ssra16_i64, | ||
110 | - .fniv = gen_ssra_vec, | ||
111 | - .load_dest = true, | ||
112 | - .opc = INDEX_op_sari_vec, | ||
113 | - .vece = MO_16 }, | ||
114 | - { .fni4 = gen_ssra32_i32, | ||
115 | - .fniv = gen_ssra_vec, | ||
116 | - .load_dest = true, | ||
117 | - .opc = INDEX_op_sari_vec, | ||
118 | - .vece = MO_32 }, | ||
119 | - { .fni8 = gen_ssra64_i64, | ||
120 | - .fniv = gen_ssra_vec, | ||
121 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
122 | - .load_dest = true, | ||
123 | - .opc = INDEX_op_sari_vec, | ||
124 | - .vece = MO_64 }, | ||
125 | - }; | ||
126 | - static const GVecGen2i usra_op[4] = { | ||
127 | - { .fni8 = gen_usra8_i64, | ||
128 | - .fniv = gen_usra_vec, | ||
129 | - .load_dest = true, | ||
130 | - .opc = INDEX_op_shri_vec, | ||
131 | - .vece = MO_8, }, | ||
132 | - { .fni8 = gen_usra16_i64, | ||
133 | - .fniv = gen_usra_vec, | ||
134 | - .load_dest = true, | ||
135 | - .opc = INDEX_op_shri_vec, | ||
136 | - .vece = MO_16, }, | ||
137 | - { .fni4 = gen_usra32_i32, | ||
138 | - .fniv = gen_usra_vec, | ||
139 | - .load_dest = true, | ||
140 | - .opc = INDEX_op_shri_vec, | ||
141 | - .vece = MO_32, }, | ||
142 | - { .fni8 = gen_usra64_i64, | ||
143 | - .fniv = gen_usra_vec, | ||
144 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
145 | - .load_dest = true, | ||
146 | - .opc = INDEX_op_shri_vec, | ||
147 | - .vece = MO_64, }, | ||
148 | - }; | ||
149 | static const GVecGen2i sri_op[4] = { | ||
150 | { .fni8 = gen_shr8_ins_i64, | ||
151 | .fniv = gen_shr_ins_vec, | ||
152 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
153 | index XXXXXXX..XXXXXXX 100644 | 640 | index XXXXXXX..XXXXXXX 100644 |
154 | --- a/target/arm/translate.c | 641 | --- a/target/arm/meson.build |
155 | +++ b/target/arm/translate.c | 642 | +++ b/target/arm/meson.build |
156 | @@ -XXX,XX +XXX,XX @@ const GVecGen3 bif_op = { | 643 | @@ -XXX,XX +XXX,XX @@ arm_ss.add(files( |
157 | .load_dest = true | 644 | )) |
158 | }; | 645 | arm_ss.add(zlib) |
159 | 646 | ||
160 | +static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 647 | -arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) |
161 | +{ | 648 | +arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) |
162 | + tcg_gen_vec_sar8i_i64(a, a, shift); | 649 | +arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) |
163 | + tcg_gen_vec_add8_i64(d, d, a); | 650 | |
164 | +} | 651 | arm_ss.add(when: 'TARGET_AARCH64', if_true: files( |
165 | + | 652 | 'cpu64.c', |
166 | +static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
167 | +{ | ||
168 | + tcg_gen_vec_sar16i_i64(a, a, shift); | ||
169 | + tcg_gen_vec_add16_i64(d, d, a); | ||
170 | +} | ||
171 | + | ||
172 | +static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | ||
173 | +{ | ||
174 | + tcg_gen_sari_i32(a, a, shift); | ||
175 | + tcg_gen_add_i32(d, d, a); | ||
176 | +} | ||
177 | + | ||
178 | +static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
179 | +{ | ||
180 | + tcg_gen_sari_i64(a, a, shift); | ||
181 | + tcg_gen_add_i64(d, d, a); | ||
182 | +} | ||
183 | + | ||
184 | +static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
185 | +{ | ||
186 | + tcg_gen_sari_vec(vece, a, a, sh); | ||
187 | + tcg_gen_add_vec(vece, d, d, a); | ||
188 | +} | ||
189 | + | ||
190 | +const GVecGen2i ssra_op[4] = { | ||
191 | + { .fni8 = gen_ssra8_i64, | ||
192 | + .fniv = gen_ssra_vec, | ||
193 | + .load_dest = true, | ||
194 | + .opc = INDEX_op_sari_vec, | ||
195 | + .vece = MO_8 }, | ||
196 | + { .fni8 = gen_ssra16_i64, | ||
197 | + .fniv = gen_ssra_vec, | ||
198 | + .load_dest = true, | ||
199 | + .opc = INDEX_op_sari_vec, | ||
200 | + .vece = MO_16 }, | ||
201 | + { .fni4 = gen_ssra32_i32, | ||
202 | + .fniv = gen_ssra_vec, | ||
203 | + .load_dest = true, | ||
204 | + .opc = INDEX_op_sari_vec, | ||
205 | + .vece = MO_32 }, | ||
206 | + { .fni8 = gen_ssra64_i64, | ||
207 | + .fniv = gen_ssra_vec, | ||
208 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
209 | + .load_dest = true, | ||
210 | + .opc = INDEX_op_sari_vec, | ||
211 | + .vece = MO_64 }, | ||
212 | +}; | ||
213 | + | ||
214 | +static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
215 | +{ | ||
216 | + tcg_gen_vec_shr8i_i64(a, a, shift); | ||
217 | + tcg_gen_vec_add8_i64(d, d, a); | ||
218 | +} | ||
219 | + | ||
220 | +static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
221 | +{ | ||
222 | + tcg_gen_vec_shr16i_i64(a, a, shift); | ||
223 | + tcg_gen_vec_add16_i64(d, d, a); | ||
224 | +} | ||
225 | + | ||
226 | +static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | ||
227 | +{ | ||
228 | + tcg_gen_shri_i32(a, a, shift); | ||
229 | + tcg_gen_add_i32(d, d, a); | ||
230 | +} | ||
231 | + | ||
232 | +static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
233 | +{ | ||
234 | + tcg_gen_shri_i64(a, a, shift); | ||
235 | + tcg_gen_add_i64(d, d, a); | ||
236 | +} | ||
237 | + | ||
238 | +static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
239 | +{ | ||
240 | + tcg_gen_shri_vec(vece, a, a, sh); | ||
241 | + tcg_gen_add_vec(vece, d, d, a); | ||
242 | +} | ||
243 | + | ||
244 | +const GVecGen2i usra_op[4] = { | ||
245 | + { .fni8 = gen_usra8_i64, | ||
246 | + .fniv = gen_usra_vec, | ||
247 | + .load_dest = true, | ||
248 | + .opc = INDEX_op_shri_vec, | ||
249 | + .vece = MO_8, }, | ||
250 | + { .fni8 = gen_usra16_i64, | ||
251 | + .fniv = gen_usra_vec, | ||
252 | + .load_dest = true, | ||
253 | + .opc = INDEX_op_shri_vec, | ||
254 | + .vece = MO_16, }, | ||
255 | + { .fni4 = gen_usra32_i32, | ||
256 | + .fniv = gen_usra_vec, | ||
257 | + .load_dest = true, | ||
258 | + .opc = INDEX_op_shri_vec, | ||
259 | + .vece = MO_32, }, | ||
260 | + { .fni8 = gen_usra64_i64, | ||
261 | + .fniv = gen_usra_vec, | ||
262 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
263 | + .load_dest = true, | ||
264 | + .opc = INDEX_op_shri_vec, | ||
265 | + .vece = MO_64, }, | ||
266 | +}; | ||
267 | |||
268 | /* Translate a NEON data processing instruction. Return nonzero if the | ||
269 | instruction is invalid. | ||
270 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
271 | } | ||
272 | return 0; | ||
273 | |||
274 | + case 1: /* VSRA */ | ||
275 | + /* Right shift comes here negative. */ | ||
276 | + shift = -shift; | ||
277 | + /* Shifts larger than the element size are architecturally | ||
278 | + * valid. Unsigned results in all zeros; signed results | ||
279 | + * in all sign bits. | ||
280 | + */ | ||
281 | + if (!u) { | ||
282 | + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, | ||
283 | + MIN(shift, (8 << size) - 1), | ||
284 | + &ssra_op[size]); | ||
285 | + } else if (shift >= 8 << size) { | ||
286 | + /* rd += 0 */ | ||
287 | + } else { | ||
288 | + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, | ||
289 | + shift, &usra_op[size]); | ||
290 | + } | ||
291 | + return 0; | ||
292 | + | ||
293 | case 5: /* VSHL, VSLI */ | ||
294 | if (!u) { /* VSHL */ | ||
295 | /* Shifts larger than the element size are | ||
296 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
297 | neon_load_reg64(cpu_V0, rm + pass); | ||
298 | tcg_gen_movi_i64(cpu_V1, imm); | ||
299 | switch (op) { | ||
300 | - case 1: /* VSRA */ | ||
301 | - if (u) | ||
302 | - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); | ||
303 | - else | ||
304 | - gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); | ||
305 | - break; | ||
306 | case 2: /* VRSHR */ | ||
307 | case 3: /* VRSRA */ | ||
308 | if (u) | ||
309 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
310 | default: | ||
311 | g_assert_not_reached(); | ||
312 | } | ||
313 | - if (op == 1 || op == 3) { | ||
314 | + if (op == 3) { | ||
315 | /* Accumulate. */ | ||
316 | neon_load_reg64(cpu_V1, rd + pass); | ||
317 | tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1); | ||
318 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
319 | tmp2 = tcg_temp_new_i32(); | ||
320 | tcg_gen_movi_i32(tmp2, imm); | ||
321 | switch (op) { | ||
322 | - case 1: /* VSRA */ | ||
323 | - GEN_NEON_INTEGER_OP(shl); | ||
324 | - break; | ||
325 | case 2: /* VRSHR */ | ||
326 | case 3: /* VRSRA */ | ||
327 | GEN_NEON_INTEGER_OP(rshl); | ||
328 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
329 | } | ||
330 | tcg_temp_free_i32(tmp2); | ||
331 | |||
332 | - if (op == 1 || op == 3) { | ||
333 | + if (op == 3) { | ||
334 | /* Accumulate. */ | ||
335 | tmp2 = neon_load_reg(rd, pass); | ||
336 | gen_neon_add(size, tmp, tmp2); | ||
337 | -- | 653 | -- |
338 | 2.19.1 | 654 | 2.34.1 |
339 | 655 | ||
340 | 656 | diff view generated by jsdifflib |
1 | From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com> | 1 | From: Francesco Cagnin <fcagnin@quarkslab.com> |
---|---|---|---|
2 | 2 | ||
3 | Announce 64bit addressing support. | 3 | Required for guest debugging. |
4 | 4 | ||
5 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | 5 | Signed-off-by: Francesco Cagnin <fcagnin@quarkslab.com> |
6 | Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 6 | Message-id: 20230601153107.81955-3-fcagnin@quarkslab.com |
7 | Message-id: 20181017213932.19973-3-edgar.iglesias@gmail.com | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 9 | --- |
11 | hw/net/cadence_gem.c | 3 ++- | 10 | target/arm/hvf/hvf.c | 213 +++++++++++++++++++++++++++++++++++++++++++ |
12 | 1 file changed, 2 insertions(+), 1 deletion(-) | 11 | 1 file changed, 213 insertions(+) |
13 | 12 | ||
14 | diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c | 13 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c |
15 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/net/cadence_gem.c | 15 | --- a/target/arm/hvf/hvf.c |
17 | +++ b/hw/net/cadence_gem.c | 16 | +++ b/target/arm/hvf/hvf.c |
18 | @@ -XXX,XX +XXX,XX @@ | 17 | @@ -XXX,XX +XXX,XX @@ |
19 | #define GEM_DESCONF4 (0x0000028C/4) | 18 | #define SYSREG_ICC_SGI1R_EL1 SYSREG(3, 0, 12, 11, 5) |
20 | #define GEM_DESCONF5 (0x00000290/4) | 19 | #define SYSREG_ICC_SRE_EL1 SYSREG(3, 0, 12, 12, 5) |
21 | #define GEM_DESCONF6 (0x00000294/4) | 20 | |
22 | +#define GEM_DESCONF6_64B_MASK (1U << 23) | 21 | +#define SYSREG_MDSCR_EL1 SYSREG(2, 0, 0, 2, 2) |
23 | #define GEM_DESCONF7 (0x00000298/4) | 22 | +#define SYSREG_DBGBVR0_EL1 SYSREG(2, 0, 0, 0, 4) |
24 | 23 | +#define SYSREG_DBGBCR0_EL1 SYSREG(2, 0, 0, 0, 5) | |
25 | #define GEM_INT_Q1_STATUS (0x00000400 / 4) | 24 | +#define SYSREG_DBGWVR0_EL1 SYSREG(2, 0, 0, 0, 6) |
26 | @@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d) | 25 | +#define SYSREG_DBGWCR0_EL1 SYSREG(2, 0, 0, 0, 7) |
27 | s->regs[GEM_DESCONF] = 0x02500111; | 26 | +#define SYSREG_DBGBVR1_EL1 SYSREG(2, 0, 0, 1, 4) |
28 | s->regs[GEM_DESCONF2] = 0x2ab13fff; | 27 | +#define SYSREG_DBGBCR1_EL1 SYSREG(2, 0, 0, 1, 5) |
29 | s->regs[GEM_DESCONF5] = 0x002f2045; | 28 | +#define SYSREG_DBGWVR1_EL1 SYSREG(2, 0, 0, 1, 6) |
30 | - s->regs[GEM_DESCONF6] = 0x0; | 29 | +#define SYSREG_DBGWCR1_EL1 SYSREG(2, 0, 0, 1, 7) |
31 | + s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK; | 30 | +#define SYSREG_DBGBVR2_EL1 SYSREG(2, 0, 0, 2, 4) |
32 | 31 | +#define SYSREG_DBGBCR2_EL1 SYSREG(2, 0, 0, 2, 5) | |
33 | if (s->num_priority_queues > 1) { | 32 | +#define SYSREG_DBGWVR2_EL1 SYSREG(2, 0, 0, 2, 6) |
34 | queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1); | 33 | +#define SYSREG_DBGWCR2_EL1 SYSREG(2, 0, 0, 2, 7) |
34 | +#define SYSREG_DBGBVR3_EL1 SYSREG(2, 0, 0, 3, 4) | ||
35 | +#define SYSREG_DBGBCR3_EL1 SYSREG(2, 0, 0, 3, 5) | ||
36 | +#define SYSREG_DBGWVR3_EL1 SYSREG(2, 0, 0, 3, 6) | ||
37 | +#define SYSREG_DBGWCR3_EL1 SYSREG(2, 0, 0, 3, 7) | ||
38 | +#define SYSREG_DBGBVR4_EL1 SYSREG(2, 0, 0, 4, 4) | ||
39 | +#define SYSREG_DBGBCR4_EL1 SYSREG(2, 0, 0, 4, 5) | ||
40 | +#define SYSREG_DBGWVR4_EL1 SYSREG(2, 0, 0, 4, 6) | ||
41 | +#define SYSREG_DBGWCR4_EL1 SYSREG(2, 0, 0, 4, 7) | ||
42 | +#define SYSREG_DBGBVR5_EL1 SYSREG(2, 0, 0, 5, 4) | ||
43 | +#define SYSREG_DBGBCR5_EL1 SYSREG(2, 0, 0, 5, 5) | ||
44 | +#define SYSREG_DBGWVR5_EL1 SYSREG(2, 0, 0, 5, 6) | ||
45 | +#define SYSREG_DBGWCR5_EL1 SYSREG(2, 0, 0, 5, 7) | ||
46 | +#define SYSREG_DBGBVR6_EL1 SYSREG(2, 0, 0, 6, 4) | ||
47 | +#define SYSREG_DBGBCR6_EL1 SYSREG(2, 0, 0, 6, 5) | ||
48 | +#define SYSREG_DBGWVR6_EL1 SYSREG(2, 0, 0, 6, 6) | ||
49 | +#define SYSREG_DBGWCR6_EL1 SYSREG(2, 0, 0, 6, 7) | ||
50 | +#define SYSREG_DBGBVR7_EL1 SYSREG(2, 0, 0, 7, 4) | ||
51 | +#define SYSREG_DBGBCR7_EL1 SYSREG(2, 0, 0, 7, 5) | ||
52 | +#define SYSREG_DBGWVR7_EL1 SYSREG(2, 0, 0, 7, 6) | ||
53 | +#define SYSREG_DBGWCR7_EL1 SYSREG(2, 0, 0, 7, 7) | ||
54 | +#define SYSREG_DBGBVR8_EL1 SYSREG(2, 0, 0, 8, 4) | ||
55 | +#define SYSREG_DBGBCR8_EL1 SYSREG(2, 0, 0, 8, 5) | ||
56 | +#define SYSREG_DBGWVR8_EL1 SYSREG(2, 0, 0, 8, 6) | ||
57 | +#define SYSREG_DBGWCR8_EL1 SYSREG(2, 0, 0, 8, 7) | ||
58 | +#define SYSREG_DBGBVR9_EL1 SYSREG(2, 0, 0, 9, 4) | ||
59 | +#define SYSREG_DBGBCR9_EL1 SYSREG(2, 0, 0, 9, 5) | ||
60 | +#define SYSREG_DBGWVR9_EL1 SYSREG(2, 0, 0, 9, 6) | ||
61 | +#define SYSREG_DBGWCR9_EL1 SYSREG(2, 0, 0, 9, 7) | ||
62 | +#define SYSREG_DBGBVR10_EL1 SYSREG(2, 0, 0, 10, 4) | ||
63 | +#define SYSREG_DBGBCR10_EL1 SYSREG(2, 0, 0, 10, 5) | ||
64 | +#define SYSREG_DBGWVR10_EL1 SYSREG(2, 0, 0, 10, 6) | ||
65 | +#define SYSREG_DBGWCR10_EL1 SYSREG(2, 0, 0, 10, 7) | ||
66 | +#define SYSREG_DBGBVR11_EL1 SYSREG(2, 0, 0, 11, 4) | ||
67 | +#define SYSREG_DBGBCR11_EL1 SYSREG(2, 0, 0, 11, 5) | ||
68 | +#define SYSREG_DBGWVR11_EL1 SYSREG(2, 0, 0, 11, 6) | ||
69 | +#define SYSREG_DBGWCR11_EL1 SYSREG(2, 0, 0, 11, 7) | ||
70 | +#define SYSREG_DBGBVR12_EL1 SYSREG(2, 0, 0, 12, 4) | ||
71 | +#define SYSREG_DBGBCR12_EL1 SYSREG(2, 0, 0, 12, 5) | ||
72 | +#define SYSREG_DBGWVR12_EL1 SYSREG(2, 0, 0, 12, 6) | ||
73 | +#define SYSREG_DBGWCR12_EL1 SYSREG(2, 0, 0, 12, 7) | ||
74 | +#define SYSREG_DBGBVR13_EL1 SYSREG(2, 0, 0, 13, 4) | ||
75 | +#define SYSREG_DBGBCR13_EL1 SYSREG(2, 0, 0, 13, 5) | ||
76 | +#define SYSREG_DBGWVR13_EL1 SYSREG(2, 0, 0, 13, 6) | ||
77 | +#define SYSREG_DBGWCR13_EL1 SYSREG(2, 0, 0, 13, 7) | ||
78 | +#define SYSREG_DBGBVR14_EL1 SYSREG(2, 0, 0, 14, 4) | ||
79 | +#define SYSREG_DBGBCR14_EL1 SYSREG(2, 0, 0, 14, 5) | ||
80 | +#define SYSREG_DBGWVR14_EL1 SYSREG(2, 0, 0, 14, 6) | ||
81 | +#define SYSREG_DBGWCR14_EL1 SYSREG(2, 0, 0, 14, 7) | ||
82 | +#define SYSREG_DBGBVR15_EL1 SYSREG(2, 0, 0, 15, 4) | ||
83 | +#define SYSREG_DBGBCR15_EL1 SYSREG(2, 0, 0, 15, 5) | ||
84 | +#define SYSREG_DBGWVR15_EL1 SYSREG(2, 0, 0, 15, 6) | ||
85 | +#define SYSREG_DBGWCR15_EL1 SYSREG(2, 0, 0, 15, 7) | ||
86 | + | ||
87 | #define WFX_IS_WFE (1 << 0) | ||
88 | |||
89 | #define TMR_CTL_ENABLE (1 << 0) | ||
90 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) | ||
91 | hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
92 | } | ||
93 | break; | ||
94 | + case SYSREG_DBGBVR0_EL1: | ||
95 | + case SYSREG_DBGBVR1_EL1: | ||
96 | + case SYSREG_DBGBVR2_EL1: | ||
97 | + case SYSREG_DBGBVR3_EL1: | ||
98 | + case SYSREG_DBGBVR4_EL1: | ||
99 | + case SYSREG_DBGBVR5_EL1: | ||
100 | + case SYSREG_DBGBVR6_EL1: | ||
101 | + case SYSREG_DBGBVR7_EL1: | ||
102 | + case SYSREG_DBGBVR8_EL1: | ||
103 | + case SYSREG_DBGBVR9_EL1: | ||
104 | + case SYSREG_DBGBVR10_EL1: | ||
105 | + case SYSREG_DBGBVR11_EL1: | ||
106 | + case SYSREG_DBGBVR12_EL1: | ||
107 | + case SYSREG_DBGBVR13_EL1: | ||
108 | + case SYSREG_DBGBVR14_EL1: | ||
109 | + case SYSREG_DBGBVR15_EL1: | ||
110 | + val = env->cp15.dbgbvr[SYSREG_CRM(reg)]; | ||
111 | + break; | ||
112 | + case SYSREG_DBGBCR0_EL1: | ||
113 | + case SYSREG_DBGBCR1_EL1: | ||
114 | + case SYSREG_DBGBCR2_EL1: | ||
115 | + case SYSREG_DBGBCR3_EL1: | ||
116 | + case SYSREG_DBGBCR4_EL1: | ||
117 | + case SYSREG_DBGBCR5_EL1: | ||
118 | + case SYSREG_DBGBCR6_EL1: | ||
119 | + case SYSREG_DBGBCR7_EL1: | ||
120 | + case SYSREG_DBGBCR8_EL1: | ||
121 | + case SYSREG_DBGBCR9_EL1: | ||
122 | + case SYSREG_DBGBCR10_EL1: | ||
123 | + case SYSREG_DBGBCR11_EL1: | ||
124 | + case SYSREG_DBGBCR12_EL1: | ||
125 | + case SYSREG_DBGBCR13_EL1: | ||
126 | + case SYSREG_DBGBCR14_EL1: | ||
127 | + case SYSREG_DBGBCR15_EL1: | ||
128 | + val = env->cp15.dbgbcr[SYSREG_CRM(reg)]; | ||
129 | + break; | ||
130 | + case SYSREG_DBGWVR0_EL1: | ||
131 | + case SYSREG_DBGWVR1_EL1: | ||
132 | + case SYSREG_DBGWVR2_EL1: | ||
133 | + case SYSREG_DBGWVR3_EL1: | ||
134 | + case SYSREG_DBGWVR4_EL1: | ||
135 | + case SYSREG_DBGWVR5_EL1: | ||
136 | + case SYSREG_DBGWVR6_EL1: | ||
137 | + case SYSREG_DBGWVR7_EL1: | ||
138 | + case SYSREG_DBGWVR8_EL1: | ||
139 | + case SYSREG_DBGWVR9_EL1: | ||
140 | + case SYSREG_DBGWVR10_EL1: | ||
141 | + case SYSREG_DBGWVR11_EL1: | ||
142 | + case SYSREG_DBGWVR12_EL1: | ||
143 | + case SYSREG_DBGWVR13_EL1: | ||
144 | + case SYSREG_DBGWVR14_EL1: | ||
145 | + case SYSREG_DBGWVR15_EL1: | ||
146 | + val = env->cp15.dbgwvr[SYSREG_CRM(reg)]; | ||
147 | + break; | ||
148 | + case SYSREG_DBGWCR0_EL1: | ||
149 | + case SYSREG_DBGWCR1_EL1: | ||
150 | + case SYSREG_DBGWCR2_EL1: | ||
151 | + case SYSREG_DBGWCR3_EL1: | ||
152 | + case SYSREG_DBGWCR4_EL1: | ||
153 | + case SYSREG_DBGWCR5_EL1: | ||
154 | + case SYSREG_DBGWCR6_EL1: | ||
155 | + case SYSREG_DBGWCR7_EL1: | ||
156 | + case SYSREG_DBGWCR8_EL1: | ||
157 | + case SYSREG_DBGWCR9_EL1: | ||
158 | + case SYSREG_DBGWCR10_EL1: | ||
159 | + case SYSREG_DBGWCR11_EL1: | ||
160 | + case SYSREG_DBGWCR12_EL1: | ||
161 | + case SYSREG_DBGWCR13_EL1: | ||
162 | + case SYSREG_DBGWCR14_EL1: | ||
163 | + case SYSREG_DBGWCR15_EL1: | ||
164 | + val = env->cp15.dbgwcr[SYSREG_CRM(reg)]; | ||
165 | + break; | ||
166 | default: | ||
167 | if (is_id_sysreg(reg)) { | ||
168 | /* ID system registers read as RES0 */ | ||
169 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | ||
170 | hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
171 | } | ||
172 | break; | ||
173 | + case SYSREG_MDSCR_EL1: | ||
174 | + env->cp15.mdscr_el1 = val; | ||
175 | + break; | ||
176 | + case SYSREG_DBGBVR0_EL1: | ||
177 | + case SYSREG_DBGBVR1_EL1: | ||
178 | + case SYSREG_DBGBVR2_EL1: | ||
179 | + case SYSREG_DBGBVR3_EL1: | ||
180 | + case SYSREG_DBGBVR4_EL1: | ||
181 | + case SYSREG_DBGBVR5_EL1: | ||
182 | + case SYSREG_DBGBVR6_EL1: | ||
183 | + case SYSREG_DBGBVR7_EL1: | ||
184 | + case SYSREG_DBGBVR8_EL1: | ||
185 | + case SYSREG_DBGBVR9_EL1: | ||
186 | + case SYSREG_DBGBVR10_EL1: | ||
187 | + case SYSREG_DBGBVR11_EL1: | ||
188 | + case SYSREG_DBGBVR12_EL1: | ||
189 | + case SYSREG_DBGBVR13_EL1: | ||
190 | + case SYSREG_DBGBVR14_EL1: | ||
191 | + case SYSREG_DBGBVR15_EL1: | ||
192 | + env->cp15.dbgbvr[SYSREG_CRM(reg)] = val; | ||
193 | + break; | ||
194 | + case SYSREG_DBGBCR0_EL1: | ||
195 | + case SYSREG_DBGBCR1_EL1: | ||
196 | + case SYSREG_DBGBCR2_EL1: | ||
197 | + case SYSREG_DBGBCR3_EL1: | ||
198 | + case SYSREG_DBGBCR4_EL1: | ||
199 | + case SYSREG_DBGBCR5_EL1: | ||
200 | + case SYSREG_DBGBCR6_EL1: | ||
201 | + case SYSREG_DBGBCR7_EL1: | ||
202 | + case SYSREG_DBGBCR8_EL1: | ||
203 | + case SYSREG_DBGBCR9_EL1: | ||
204 | + case SYSREG_DBGBCR10_EL1: | ||
205 | + case SYSREG_DBGBCR11_EL1: | ||
206 | + case SYSREG_DBGBCR12_EL1: | ||
207 | + case SYSREG_DBGBCR13_EL1: | ||
208 | + case SYSREG_DBGBCR14_EL1: | ||
209 | + case SYSREG_DBGBCR15_EL1: | ||
210 | + env->cp15.dbgbcr[SYSREG_CRM(reg)] = val; | ||
211 | + break; | ||
212 | + case SYSREG_DBGWVR0_EL1: | ||
213 | + case SYSREG_DBGWVR1_EL1: | ||
214 | + case SYSREG_DBGWVR2_EL1: | ||
215 | + case SYSREG_DBGWVR3_EL1: | ||
216 | + case SYSREG_DBGWVR4_EL1: | ||
217 | + case SYSREG_DBGWVR5_EL1: | ||
218 | + case SYSREG_DBGWVR6_EL1: | ||
219 | + case SYSREG_DBGWVR7_EL1: | ||
220 | + case SYSREG_DBGWVR8_EL1: | ||
221 | + case SYSREG_DBGWVR9_EL1: | ||
222 | + case SYSREG_DBGWVR10_EL1: | ||
223 | + case SYSREG_DBGWVR11_EL1: | ||
224 | + case SYSREG_DBGWVR12_EL1: | ||
225 | + case SYSREG_DBGWVR13_EL1: | ||
226 | + case SYSREG_DBGWVR14_EL1: | ||
227 | + case SYSREG_DBGWVR15_EL1: | ||
228 | + env->cp15.dbgwvr[SYSREG_CRM(reg)] = val; | ||
229 | + break; | ||
230 | + case SYSREG_DBGWCR0_EL1: | ||
231 | + case SYSREG_DBGWCR1_EL1: | ||
232 | + case SYSREG_DBGWCR2_EL1: | ||
233 | + case SYSREG_DBGWCR3_EL1: | ||
234 | + case SYSREG_DBGWCR4_EL1: | ||
235 | + case SYSREG_DBGWCR5_EL1: | ||
236 | + case SYSREG_DBGWCR6_EL1: | ||
237 | + case SYSREG_DBGWCR7_EL1: | ||
238 | + case SYSREG_DBGWCR8_EL1: | ||
239 | + case SYSREG_DBGWCR9_EL1: | ||
240 | + case SYSREG_DBGWCR10_EL1: | ||
241 | + case SYSREG_DBGWCR11_EL1: | ||
242 | + case SYSREG_DBGWCR12_EL1: | ||
243 | + case SYSREG_DBGWCR13_EL1: | ||
244 | + case SYSREG_DBGWCR14_EL1: | ||
245 | + case SYSREG_DBGWCR15_EL1: | ||
246 | + env->cp15.dbgwcr[SYSREG_CRM(reg)] = val; | ||
247 | + break; | ||
248 | default: | ||
249 | cpu_synchronize_state(cpu); | ||
250 | trace_hvf_unhandled_sysreg_write(env->pc, reg, | ||
35 | -- | 251 | -- |
36 | 2.19.1 | 252 | 2.34.1 |
37 | |||
38 | diff view generated by jsdifflib |
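A note on the DBG* handling in the hunk above: every DBG{B,W}{V,C}Rn_EL1 case arm indexes the cp15 array with SYSREG_CRM(reg). That works because the AArch64 encodings of these registers carry the breakpoint/watchpoint number n in the CRm field (DBGBVRn_EL1 is SYSREG(2, 0, 0, n, 4) above), so no per-register lookup table is needed. A minimal, self-contained sketch of the idea; the SYSREG packing and SYSREG_CRM definition below follow the MSR/MRS trap-syndrome layout and are assumptions for illustration, not code taken from the patch:

    /*
     * Illustrative sketch only: pack a system-register encoding with CRm in
     * bits [4:1] (as an MSR/MRS trap syndrome does) and recover the
     * breakpoint index from CRm.
     */
    #include <assert.h>
    #include <stdint.h>

    #define SYSREG(op0, op1, crn, crm, op2) \
        (((op0) << 20) | ((op2) << 17) | ((op1) << 14) | ((crn) << 10) | ((crm) << 1))
    #define SYSREG_CRM(sysreg) (((sysreg) >> 1) & 0xf)

    int main(void)
    {
        uint32_t dbgbvr5_el1 = SYSREG(2, 0, 0, 5, 4);

        /* CRm carries the register number, so it can index env->cp15.dbgbvr[] */
        assert(SYSREG_CRM(dbgbvr5_el1) == 5);
        return 0;
    }
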
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Francesco Cagnin <fcagnin@quarkslab.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Required for guest debugging. The code has been structured like the KVM |
4 | Message-id: 20181011205206.3552-18-richard.henderson@linaro.org | 4 | counterpart. |
5 | [PMM: added parens in ?: expression] | 5 | |
6 | Signed-off-by: Francesco Cagnin <fcagnin@quarkslab.com> | ||
7 | Message-id: 20230601153107.81955-4-fcagnin@quarkslab.com | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | --- | 10 | --- |
9 | target/arm/translate.c | 81 ++++++++++++++---------------------------- | 11 | include/sysemu/hvf.h | 22 ++++++++ |
10 | 1 file changed, 26 insertions(+), 55 deletions(-) | 12 | include/sysemu/hvf_int.h | 1 + |
13 | accel/hvf/hvf-accel-ops.c | 109 ++++++++++++++++++++++++++++++++++++++ | ||
14 | accel/hvf/hvf-all.c | 17 ++++++ | ||
15 | target/arm/hvf/hvf.c | 63 ++++++++++++++++++++++ | ||
16 | target/i386/hvf/hvf.c | 24 +++++++++ | ||
17 | 6 files changed, 236 insertions(+) | ||
11 | 18 | ||
12 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 19 | diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h |
13 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/translate.c | 21 | --- a/include/sysemu/hvf.h |
15 | +++ b/target/arm/translate.c | 22 | +++ b/include/sysemu/hvf.h |
16 | @@ -XXX,XX +XXX,XX @@ static void gen_vfp_msr(TCGv_i32 tmp) | 23 | @@ -XXX,XX +XXX,XX @@ |
17 | tcg_temp_free_i32(tmp); | 24 | #include "qom/object.h" |
25 | |||
26 | #ifdef NEED_CPU_H | ||
27 | +#include "cpu.h" | ||
28 | |||
29 | #ifdef CONFIG_HVF | ||
30 | uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, | ||
31 | @@ -XXX,XX +XXX,XX @@ typedef struct HVFState HVFState; | ||
32 | DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE, | ||
33 | TYPE_HVF_ACCEL) | ||
34 | |||
35 | +#ifdef NEED_CPU_H | ||
36 | +struct hvf_sw_breakpoint { | ||
37 | + target_ulong pc; | ||
38 | + target_ulong saved_insn; | ||
39 | + int use_count; | ||
40 | + QTAILQ_ENTRY(hvf_sw_breakpoint) entry; | ||
41 | +}; | ||
42 | + | ||
43 | +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, | ||
44 | + target_ulong pc); | ||
45 | +int hvf_sw_breakpoints_active(CPUState *cpu); | ||
46 | + | ||
47 | +int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); | ||
48 | +int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); | ||
49 | +int hvf_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, | ||
50 | + int type); | ||
51 | +int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, | ||
52 | + int type); | ||
53 | +void hvf_arch_remove_all_hw_breakpoints(void); | ||
54 | +#endif /* NEED_CPU_H */ | ||
55 | + | ||
56 | #endif | ||
57 | diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/include/sysemu/hvf_int.h | ||
60 | +++ b/include/sysemu/hvf_int.h | ||
61 | @@ -XXX,XX +XXX,XX @@ struct HVFState { | ||
62 | |||
63 | hvf_vcpu_caps *hvf_caps; | ||
64 | uint64_t vtimer_offset; | ||
65 | + QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints; | ||
66 | }; | ||
67 | extern HVFState *hvf_state; | ||
68 | |||
69 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | ||
71 | --- a/accel/hvf/hvf-accel-ops.c | ||
72 | +++ b/accel/hvf/hvf-accel-ops.c | ||
73 | @@ -XXX,XX +XXX,XX @@ | ||
74 | #include "qemu/main-loop.h" | ||
75 | #include "exec/address-spaces.h" | ||
76 | #include "exec/exec-all.h" | ||
77 | +#include "exec/gdbstub.h" | ||
78 | #include "sysemu/cpus.h" | ||
79 | #include "sysemu/hvf.h" | ||
80 | #include "sysemu/hvf_int.h" | ||
81 | @@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms) | ||
82 | s->slots[x].slot_id = x; | ||
83 | } | ||
84 | |||
85 | + QTAILQ_INIT(&s->hvf_sw_breakpoints); | ||
86 | + | ||
87 | hvf_state = s; | ||
88 | memory_listener_register(&hvf_memory_listener, &address_space_memory); | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ static void hvf_start_vcpu_thread(CPUState *cpu) | ||
91 | cpu, QEMU_THREAD_JOINABLE); | ||
18 | } | 92 | } |
19 | 93 | ||
20 | -static void gen_neon_dup_u8(TCGv_i32 var, int shift) | 94 | +static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len) |
21 | -{ | 95 | +{ |
22 | - TCGv_i32 tmp = tcg_temp_new_i32(); | 96 | + struct hvf_sw_breakpoint *bp; |
23 | - if (shift) | 97 | + int err; |
24 | - tcg_gen_shri_i32(var, var, shift); | 98 | + |
25 | - tcg_gen_ext8u_i32(var, var); | 99 | + if (type == GDB_BREAKPOINT_SW) { |
26 | - tcg_gen_shli_i32(tmp, var, 8); | 100 | + bp = hvf_find_sw_breakpoint(cpu, addr); |
27 | - tcg_gen_or_i32(var, var, tmp); | 101 | + if (bp) { |
28 | - tcg_gen_shli_i32(tmp, var, 16); | 102 | + bp->use_count++; |
29 | - tcg_gen_or_i32(var, var, tmp); | 103 | + return 0; |
30 | - tcg_temp_free_i32(tmp); | 104 | + } |
31 | -} | 105 | + |
32 | - | 106 | + bp = g_new(struct hvf_sw_breakpoint, 1); |
33 | static void gen_neon_dup_low16(TCGv_i32 var) | 107 | + bp->pc = addr; |
108 | + bp->use_count = 1; | ||
109 | + err = hvf_arch_insert_sw_breakpoint(cpu, bp); | ||
110 | + if (err) { | ||
111 | + g_free(bp); | ||
112 | + return err; | ||
113 | + } | ||
114 | + | ||
115 | + QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||
116 | + } else { | ||
117 | + err = hvf_arch_insert_hw_breakpoint(addr, len, type); | ||
118 | + if (err) { | ||
119 | + return err; | ||
120 | + } | ||
121 | + } | ||
122 | + | ||
123 | + CPU_FOREACH(cpu) { | ||
124 | + err = hvf_update_guest_debug(cpu); | ||
125 | + if (err) { | ||
126 | + return err; | ||
127 | + } | ||
128 | + } | ||
129 | + return 0; | ||
130 | +} | ||
131 | + | ||
132 | +static int hvf_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len) | ||
133 | +{ | ||
134 | + struct hvf_sw_breakpoint *bp; | ||
135 | + int err; | ||
136 | + | ||
137 | + if (type == GDB_BREAKPOINT_SW) { | ||
138 | + bp = hvf_find_sw_breakpoint(cpu, addr); | ||
139 | + if (!bp) { | ||
140 | + return -ENOENT; | ||
141 | + } | ||
142 | + | ||
143 | + if (bp->use_count > 1) { | ||
144 | + bp->use_count--; | ||
145 | + return 0; | ||
146 | + } | ||
147 | + | ||
148 | + err = hvf_arch_remove_sw_breakpoint(cpu, bp); | ||
149 | + if (err) { | ||
150 | + return err; | ||
151 | + } | ||
152 | + | ||
153 | + QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||
154 | + g_free(bp); | ||
155 | + } else { | ||
156 | + err = hvf_arch_remove_hw_breakpoint(addr, len, type); | ||
157 | + if (err) { | ||
158 | + return err; | ||
159 | + } | ||
160 | + } | ||
161 | + | ||
162 | + CPU_FOREACH(cpu) { | ||
163 | + err = hvf_update_guest_debug(cpu); | ||
164 | + if (err) { | ||
165 | + return err; | ||
166 | + } | ||
167 | + } | ||
168 | + return 0; | ||
169 | +} | ||
170 | + | ||
171 | +static void hvf_remove_all_breakpoints(CPUState *cpu) | ||
172 | +{ | ||
173 | + struct hvf_sw_breakpoint *bp, *next; | ||
174 | + CPUState *tmpcpu; | ||
175 | + | ||
176 | + QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) { | ||
177 | + if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) { | ||
178 | + /* Try harder to find a CPU that currently sees the breakpoint. */ | ||
179 | + CPU_FOREACH(tmpcpu) | ||
180 | + { | ||
181 | + if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) { | ||
182 | + break; | ||
183 | + } | ||
184 | + } | ||
185 | + } | ||
186 | + QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||
187 | + g_free(bp); | ||
188 | + } | ||
189 | + hvf_arch_remove_all_hw_breakpoints(); | ||
190 | + | ||
191 | + CPU_FOREACH(cpu) { | ||
192 | + hvf_update_guest_debug(cpu); | ||
193 | + } | ||
194 | +} | ||
195 | + | ||
196 | static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) | ||
34 | { | 197 | { |
35 | TCGv_i32 tmp = tcg_temp_new_i32(); | 198 | AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); |
36 | @@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var) | 199 | @@ -XXX,XX +XXX,XX @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) |
37 | tcg_temp_free_i32(tmp); | 200 | ops->synchronize_post_init = hvf_cpu_synchronize_post_init; |
201 | ops->synchronize_state = hvf_cpu_synchronize_state; | ||
202 | ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm; | ||
203 | + | ||
204 | + ops->insert_breakpoint = hvf_insert_breakpoint; | ||
205 | + ops->remove_breakpoint = hvf_remove_breakpoint; | ||
206 | + ops->remove_all_breakpoints = hvf_remove_all_breakpoints; | ||
207 | }; | ||
208 | static const TypeInfo hvf_accel_ops_type = { | ||
209 | .name = ACCEL_OPS_NAME("hvf"), | ||
210 | diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c | ||
211 | index XXXXXXX..XXXXXXX 100644 | ||
212 | --- a/accel/hvf/hvf-all.c | ||
213 | +++ b/accel/hvf/hvf-all.c | ||
214 | @@ -XXX,XX +XXX,XX @@ void assert_hvf_ok(hv_return_t ret) | ||
215 | |||
216 | abort(); | ||
38 | } | 217 | } |
39 | 218 | + | |
40 | -static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size) | 219 | +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, target_ulong pc) |
41 | -{ | 220 | +{ |
42 | - /* Load a single Neon element and replicate into a 32 bit TCG reg */ | 221 | + struct hvf_sw_breakpoint *bp; |
43 | - TCGv_i32 tmp = tcg_temp_new_i32(); | 222 | + |
44 | - switch (size) { | 223 | + QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { |
45 | - case 0: | 224 | + if (bp->pc == pc) { |
46 | - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); | 225 | + return bp; |
47 | - gen_neon_dup_u8(tmp, 0); | 226 | + } |
48 | - break; | 227 | + } |
49 | - case 1: | 228 | + return NULL; |
50 | - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); | 229 | +} |
51 | - gen_neon_dup_low16(tmp); | 230 | + |
52 | - break; | 231 | +int hvf_sw_breakpoints_active(CPUState *cpu) |
53 | - case 2: | 232 | +{ |
54 | - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); | 233 | + return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); |
55 | - break; | 234 | +} |
56 | - default: /* Avoid compiler warnings. */ | 235 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c |
57 | - abort(); | 236 | index XXXXXXX..XXXXXXX 100644 |
58 | - } | 237 | --- a/target/arm/hvf/hvf.c |
59 | - return tmp; | 238 | +++ b/target/arm/hvf/hvf.c |
60 | -} | 239 | @@ -XXX,XX +XXX,XX @@ |
61 | - | 240 | #include "trace/trace-target_arm_hvf.h" |
62 | static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, | 241 | #include "migration/vmstate.h" |
63 | uint32_t dp) | 242 | |
64 | { | 243 | +#include "exec/gdbstub.h" |
65 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 244 | + |
66 | int load; | 245 | #define HVF_SYSREG(crn, crm, op0, op1, op2) \ |
67 | int shift; | 246 | ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) |
68 | int n; | 247 | #define PL1_WRITE_MASK 0x4 |
69 | + int vec_size; | 248 | @@ -XXX,XX +XXX,XX @@ int hvf_arch_init(void) |
70 | TCGv_i32 addr; | 249 | qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer); |
71 | TCGv_i32 tmp; | 250 | return 0; |
72 | TCGv_i32 tmp2; | 251 | } |
73 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 252 | + |
74 | } | 253 | +static const uint32_t brk_insn = 0xd4200000; |
75 | addr = tcg_temp_new_i32(); | 254 | + |
76 | load_reg_var(s, addr, rn); | 255 | +int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp) |
77 | - if (nregs == 1) { | 256 | +{ |
78 | - /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */ | 257 | + if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || |
79 | - tmp = gen_load_and_replicate(s, addr, size); | 258 | + cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { |
80 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); | 259 | + return -EINVAL; |
81 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); | 260 | + } |
82 | - if (insn & (1 << 5)) { | 261 | + return 0; |
83 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0)); | 262 | +} |
84 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1)); | 263 | + |
85 | - } | 264 | +int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp) |
86 | - tcg_temp_free_i32(tmp); | 265 | +{ |
87 | - } else { | 266 | + static uint32_t brk; |
88 | - /* VLD2/3/4 to all lanes: bit 5 indicates register stride */ | 267 | + |
89 | - stride = (insn & (1 << 5)) ? 2 : 1; | 268 | + if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) || |
90 | - for (reg = 0; reg < nregs; reg++) { | 269 | + brk != brk_insn || |
91 | - tmp = gen_load_and_replicate(s, addr, size); | 270 | + cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { |
92 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); | 271 | + return -EINVAL; |
93 | - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); | 272 | + } |
94 | - tcg_temp_free_i32(tmp); | 273 | + return 0; |
95 | - tcg_gen_addi_i32(addr, addr, 1 << size); | 274 | +} |
96 | - rd += stride; | 275 | + |
97 | + | 276 | +int hvf_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type) |
98 | + /* VLD1 to all lanes: bit 5 indicates how many Dregs to write. | 277 | +{ |
99 | + * VLD2/3/4 to all lanes: bit 5 indicates register stride. | 278 | + switch (type) { |
100 | + */ | 279 | + case GDB_BREAKPOINT_HW: |
101 | + stride = (insn & (1 << 5)) ? 2 : 1; | 280 | + return insert_hw_breakpoint(addr); |
102 | + vec_size = nregs == 1 ? stride * 8 : 8; | 281 | + case GDB_WATCHPOINT_READ: |
103 | + | 282 | + case GDB_WATCHPOINT_WRITE: |
104 | + tmp = tcg_temp_new_i32(); | 283 | + case GDB_WATCHPOINT_ACCESS: |
105 | + for (reg = 0; reg < nregs; reg++) { | 284 | + return insert_hw_watchpoint(addr, len, type); |
106 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), | 285 | + default: |
107 | + s->be_data | size); | 286 | + return -ENOSYS; |
108 | + if ((rd & 1) && vec_size == 16) { | 287 | + } |
109 | + /* We cannot write 16 bytes at once because the | 288 | +} |
110 | + * destination is unaligned. | 289 | + |
111 | + */ | 290 | +int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type) |
112 | + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0), | 291 | +{ |
113 | + 8, 8, tmp); | 292 | + switch (type) { |
114 | + tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0), | 293 | + case GDB_BREAKPOINT_HW: |
115 | + neon_reg_offset(rd, 0), 8, 8); | 294 | + return delete_hw_breakpoint(addr); |
116 | + } else { | 295 | + case GDB_WATCHPOINT_READ: |
117 | + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0), | 296 | + case GDB_WATCHPOINT_WRITE: |
118 | + vec_size, vec_size, tmp); | 297 | + case GDB_WATCHPOINT_ACCESS: |
119 | } | 298 | + return delete_hw_watchpoint(addr, len, type); |
120 | + tcg_gen_addi_i32(addr, addr, 1 << size); | 299 | + default: |
121 | + rd += stride; | 300 | + return -ENOSYS; |
122 | } | 301 | + } |
123 | + tcg_temp_free_i32(tmp); | 302 | +} |
124 | tcg_temp_free_i32(addr); | 303 | + |
125 | stride = (1 << size) * nregs; | 304 | +void hvf_arch_remove_all_hw_breakpoints(void) |
126 | } else { | 305 | +{ |
306 | + if (cur_hw_wps > 0) { | ||
307 | + g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); | ||
308 | + } | ||
309 | + if (cur_hw_bps > 0) { | ||
310 | + g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); | ||
311 | + } | ||
312 | +} | ||
313 | diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c | ||
314 | index XXXXXXX..XXXXXXX 100644 | ||
315 | --- a/target/i386/hvf/hvf.c | ||
316 | +++ b/target/i386/hvf/hvf.c | ||
317 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
318 | |||
319 | return ret; | ||
320 | } | ||
321 | + | ||
322 | +int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp) | ||
323 | +{ | ||
324 | + return -ENOSYS; | ||
325 | +} | ||
326 | + | ||
327 | +int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp) | ||
328 | +{ | ||
329 | + return -ENOSYS; | ||
330 | +} | ||
331 | + | ||
332 | +int hvf_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type) | ||
333 | +{ | ||
334 | + return -ENOSYS; | ||
335 | +} | ||
336 | + | ||
337 | +int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type) | ||
338 | +{ | ||
339 | + return -ENOSYS; | ||
340 | +} | ||
341 | + | ||
342 | +void hvf_arch_remove_all_hw_breakpoints(void) | ||
343 | +{ | ||
344 | +} | ||
127 | -- | 345 | -- |
128 | 2.19.1 | 346 | 2.34.1 |
129 | |||
130 | diff view generated by jsdifflib |
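The software-breakpoint handlers in the patch above save the original guest instruction word and write a BRK #0 (brk_insn = 0xd4200000) in its place, restoring the saved word on removal. A small stand-alone sketch of the A64 BRK encoding this relies on; the helper names here are made up for illustration and are not part of the patch:

    /*
     * Sketch: A64 BRK #imm16 is 1101 0100 001 | imm16 | 00000, so 0xd4200000
     * is BRK #0 -- the instruction planted over the saved guest word.
     */
    #include <assert.h>
    #include <stdint.h>

    static int is_a64_brk(uint32_t insn)
    {
        return (insn & 0xffe0001fu) == 0xd4200000u;
    }

    static uint16_t a64_brk_imm16(uint32_t insn)
    {
        return (insn >> 5) & 0xffff;    /* immediate reported in the syndrome */
    }

    int main(void)
    {
        assert(is_a64_brk(0xd4200000u));
        assert(a64_brk_imm16(0xd4200000u) == 0);
        return 0;
    }
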
1 | The switch_mode() function is defined in target/arm/helper.c and used | 1 | From: Francesco Cagnin <fcagnin@quarkslab.com> |
---|---|---|---|
2 | only in that file and nowhere else, so we can make it file-local | ||
3 | rather than global. | ||
4 | 2 | ||
3 | Guests can now be debugged through the gdbstub. Support is added for | ||
4 | single-stepping, software breakpoints, hardware breakpoints and | ||
5 | watchpoints. The code has been structured like the KVM counterpart. | ||
6 | |||
7 | While guest debugging is enabled, the guest can still read and write the | ||
8 | DBG*_EL1 registers but they don't have any effect. | ||
9 | |||
10 | Signed-off-by: Francesco Cagnin <fcagnin@quarkslab.com> | ||
11 | Message-id: 20230601153107.81955-5-fcagnin@quarkslab.com | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 20181012144235.19646-3-peter.maydell@linaro.org | ||
8 | --- | 14 | --- |
9 | target/arm/internals.h | 1 - | 15 | include/sysemu/hvf.h | 15 ++ |
10 | target/arm/helper.c | 6 ++++-- | 16 | include/sysemu/hvf_int.h | 1 + |
11 | 2 files changed, 4 insertions(+), 3 deletions(-) | 17 | target/arm/hvf_arm.h | 7 + |
18 | accel/hvf/hvf-accel-ops.c | 10 + | ||
19 | accel/hvf/hvf-all.c | 6 + | ||
20 | target/arm/hvf/hvf.c | 474 +++++++++++++++++++++++++++++++++++++- | ||
21 | target/i386/hvf/hvf.c | 9 + | ||
22 | 7 files changed, 520 insertions(+), 2 deletions(-) | ||
12 | 23 | ||
13 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 24 | diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h |
14 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/internals.h | 26 | --- a/include/sysemu/hvf.h |
16 | +++ b/target/arm/internals.h | 27 | +++ b/include/sysemu/hvf.h |
17 | @@ -XXX,XX +XXX,XX @@ static inline int bank_number(int mode) | 28 | @@ -XXX,XX +XXX,XX @@ int hvf_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, |
18 | g_assert_not_reached(); | 29 | int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, |
19 | } | 30 | int type); |
20 | 31 | void hvf_arch_remove_all_hw_breakpoints(void); | |
21 | -void switch_mode(CPUARMState *, int); | 32 | + |
22 | void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); | 33 | +/* |
23 | void arm_translate_init(void); | 34 | + * hvf_update_guest_debug: |
24 | 35 | + * @cs: CPUState for the CPU to update | |
25 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 36 | + * |
37 | + * Update guest to enable or disable debugging. Per-arch specifics will be | ||
38 | + * handled by calling down to hvf_arch_update_guest_debug. | ||
39 | + */ | ||
40 | +int hvf_update_guest_debug(CPUState *cpu); | ||
41 | +void hvf_arch_update_guest_debug(CPUState *cpu); | ||
42 | + | ||
43 | +/* | ||
44 | + * Return whether the guest supports debugging. | ||
45 | + */ | ||
46 | +bool hvf_arch_supports_guest_debug(void); | ||
47 | #endif /* NEED_CPU_H */ | ||
48 | |||
49 | #endif | ||
50 | diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h | ||
26 | index XXXXXXX..XXXXXXX 100644 | 51 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/target/arm/helper.c | 52 | --- a/include/sysemu/hvf_int.h |
28 | +++ b/target/arm/helper.c | 53 | +++ b/include/sysemu/hvf_int.h |
29 | @@ -XXX,XX +XXX,XX @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, | 54 | @@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state { |
30 | V8M_SAttributes *sattrs); | 55 | void *exit; |
56 | bool vtimer_masked; | ||
57 | sigset_t unblock_ipi_mask; | ||
58 | + bool guest_debug_enabled; | ||
59 | }; | ||
60 | |||
61 | void assert_hvf_ok(hv_return_t ret); | ||
62 | diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h | ||
63 | index XXXXXXX..XXXXXXX 100644 | ||
64 | --- a/target/arm/hvf_arm.h | ||
65 | +++ b/target/arm/hvf_arm.h | ||
66 | @@ -XXX,XX +XXX,XX @@ | ||
67 | |||
68 | #include "cpu.h" | ||
69 | |||
70 | +/** | ||
71 | + * hvf_arm_init_debug() - initialize guest debug capabilities | ||
72 | + * | ||
73 | + * Should be called only once before using guest debug capabilities. | ||
74 | + */ | ||
75 | +void hvf_arm_init_debug(void); | ||
76 | + | ||
77 | void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu); | ||
78 | |||
31 | #endif | 79 | #endif |
32 | 80 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c | |
33 | +static void switch_mode(CPUARMState *env, int mode); | 81 | index XXXXXXX..XXXXXXX 100644 |
34 | + | 82 | --- a/accel/hvf/hvf-accel-ops.c |
35 | static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) | 83 | +++ b/accel/hvf/hvf-accel-ops.c |
84 | @@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms) | ||
85 | return hvf_arch_init(); | ||
86 | } | ||
87 | |||
88 | +static inline int hvf_gdbstub_sstep_flags(void) | ||
89 | +{ | ||
90 | + return SSTEP_ENABLE | SSTEP_NOIRQ; | ||
91 | +} | ||
92 | + | ||
93 | static void hvf_accel_class_init(ObjectClass *oc, void *data) | ||
36 | { | 94 | { |
37 | int nregs; | 95 | AccelClass *ac = ACCEL_CLASS(oc); |
38 | @@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) | 96 | ac->name = "HVF"; |
97 | ac->init_machine = hvf_accel_init; | ||
98 | ac->allowed = &hvf_allowed; | ||
99 | + ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; | ||
100 | } | ||
101 | |||
102 | static const TypeInfo hvf_accel_type = { | ||
103 | @@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu) | ||
104 | cpu->vcpu_dirty = 1; | ||
105 | assert_hvf_ok(r); | ||
106 | |||
107 | + cpu->hvf->guest_debug_enabled = false; | ||
108 | + | ||
109 | return hvf_arch_init_vcpu(cpu); | ||
110 | } | ||
111 | |||
112 | @@ -XXX,XX +XXX,XX @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) | ||
113 | ops->insert_breakpoint = hvf_insert_breakpoint; | ||
114 | ops->remove_breakpoint = hvf_remove_breakpoint; | ||
115 | ops->remove_all_breakpoints = hvf_remove_all_breakpoints; | ||
116 | + ops->update_guest_debug = hvf_update_guest_debug; | ||
117 | + ops->supports_guest_debug = hvf_arch_supports_guest_debug; | ||
118 | }; | ||
119 | static const TypeInfo hvf_accel_ops_type = { | ||
120 | .name = ACCEL_OPS_NAME("hvf"), | ||
121 | diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c | ||
122 | index XXXXXXX..XXXXXXX 100644 | ||
123 | --- a/accel/hvf/hvf-all.c | ||
124 | +++ b/accel/hvf/hvf-all.c | ||
125 | @@ -XXX,XX +XXX,XX @@ int hvf_sw_breakpoints_active(CPUState *cpu) | ||
126 | { | ||
127 | return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); | ||
128 | } | ||
129 | + | ||
130 | +int hvf_update_guest_debug(CPUState *cpu) | ||
131 | +{ | ||
132 | + hvf_arch_update_guest_debug(cpu); | ||
133 | + return 0; | ||
134 | +} | ||
135 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
136 | index XXXXXXX..XXXXXXX 100644 | ||
137 | --- a/target/arm/hvf/hvf.c | ||
138 | +++ b/target/arm/hvf/hvf.c | ||
139 | @@ -XXX,XX +XXX,XX @@ | ||
140 | |||
141 | #include "exec/gdbstub.h" | ||
142 | |||
143 | +#define MDSCR_EL1_SS_SHIFT 0 | ||
144 | +#define MDSCR_EL1_MDE_SHIFT 15 | ||
145 | + | ||
146 | +static uint16_t dbgbcr_regs[] = { | ||
147 | + HV_SYS_REG_DBGBCR0_EL1, | ||
148 | + HV_SYS_REG_DBGBCR1_EL1, | ||
149 | + HV_SYS_REG_DBGBCR2_EL1, | ||
150 | + HV_SYS_REG_DBGBCR3_EL1, | ||
151 | + HV_SYS_REG_DBGBCR4_EL1, | ||
152 | + HV_SYS_REG_DBGBCR5_EL1, | ||
153 | + HV_SYS_REG_DBGBCR6_EL1, | ||
154 | + HV_SYS_REG_DBGBCR7_EL1, | ||
155 | + HV_SYS_REG_DBGBCR8_EL1, | ||
156 | + HV_SYS_REG_DBGBCR9_EL1, | ||
157 | + HV_SYS_REG_DBGBCR10_EL1, | ||
158 | + HV_SYS_REG_DBGBCR11_EL1, | ||
159 | + HV_SYS_REG_DBGBCR12_EL1, | ||
160 | + HV_SYS_REG_DBGBCR13_EL1, | ||
161 | + HV_SYS_REG_DBGBCR14_EL1, | ||
162 | + HV_SYS_REG_DBGBCR15_EL1, | ||
163 | +}; | ||
164 | +static uint16_t dbgbvr_regs[] = { | ||
165 | + HV_SYS_REG_DBGBVR0_EL1, | ||
166 | + HV_SYS_REG_DBGBVR1_EL1, | ||
167 | + HV_SYS_REG_DBGBVR2_EL1, | ||
168 | + HV_SYS_REG_DBGBVR3_EL1, | ||
169 | + HV_SYS_REG_DBGBVR4_EL1, | ||
170 | + HV_SYS_REG_DBGBVR5_EL1, | ||
171 | + HV_SYS_REG_DBGBVR6_EL1, | ||
172 | + HV_SYS_REG_DBGBVR7_EL1, | ||
173 | + HV_SYS_REG_DBGBVR8_EL1, | ||
174 | + HV_SYS_REG_DBGBVR9_EL1, | ||
175 | + HV_SYS_REG_DBGBVR10_EL1, | ||
176 | + HV_SYS_REG_DBGBVR11_EL1, | ||
177 | + HV_SYS_REG_DBGBVR12_EL1, | ||
178 | + HV_SYS_REG_DBGBVR13_EL1, | ||
179 | + HV_SYS_REG_DBGBVR14_EL1, | ||
180 | + HV_SYS_REG_DBGBVR15_EL1, | ||
181 | +}; | ||
182 | +static uint16_t dbgwcr_regs[] = { | ||
183 | + HV_SYS_REG_DBGWCR0_EL1, | ||
184 | + HV_SYS_REG_DBGWCR1_EL1, | ||
185 | + HV_SYS_REG_DBGWCR2_EL1, | ||
186 | + HV_SYS_REG_DBGWCR3_EL1, | ||
187 | + HV_SYS_REG_DBGWCR4_EL1, | ||
188 | + HV_SYS_REG_DBGWCR5_EL1, | ||
189 | + HV_SYS_REG_DBGWCR6_EL1, | ||
190 | + HV_SYS_REG_DBGWCR7_EL1, | ||
191 | + HV_SYS_REG_DBGWCR8_EL1, | ||
192 | + HV_SYS_REG_DBGWCR9_EL1, | ||
193 | + HV_SYS_REG_DBGWCR10_EL1, | ||
194 | + HV_SYS_REG_DBGWCR11_EL1, | ||
195 | + HV_SYS_REG_DBGWCR12_EL1, | ||
196 | + HV_SYS_REG_DBGWCR13_EL1, | ||
197 | + HV_SYS_REG_DBGWCR14_EL1, | ||
198 | + HV_SYS_REG_DBGWCR15_EL1, | ||
199 | +}; | ||
200 | +static uint16_t dbgwvr_regs[] = { | ||
201 | + HV_SYS_REG_DBGWVR0_EL1, | ||
202 | + HV_SYS_REG_DBGWVR1_EL1, | ||
203 | + HV_SYS_REG_DBGWVR2_EL1, | ||
204 | + HV_SYS_REG_DBGWVR3_EL1, | ||
205 | + HV_SYS_REG_DBGWVR4_EL1, | ||
206 | + HV_SYS_REG_DBGWVR5_EL1, | ||
207 | + HV_SYS_REG_DBGWVR6_EL1, | ||
208 | + HV_SYS_REG_DBGWVR7_EL1, | ||
209 | + HV_SYS_REG_DBGWVR8_EL1, | ||
210 | + HV_SYS_REG_DBGWVR9_EL1, | ||
211 | + HV_SYS_REG_DBGWVR10_EL1, | ||
212 | + HV_SYS_REG_DBGWVR11_EL1, | ||
213 | + HV_SYS_REG_DBGWVR12_EL1, | ||
214 | + HV_SYS_REG_DBGWVR13_EL1, | ||
215 | + HV_SYS_REG_DBGWVR14_EL1, | ||
216 | + HV_SYS_REG_DBGWVR15_EL1, | ||
217 | +}; | ||
218 | + | ||
219 | +static inline int hvf_arm_num_brps(hv_vcpu_config_t config) | ||
220 | +{ | ||
221 | + uint64_t val; | ||
222 | + hv_return_t ret; | ||
223 | + ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1, | ||
224 | + &val); | ||
225 | + assert_hvf_ok(ret); | ||
226 | + return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1; | ||
227 | +} | ||
228 | + | ||
229 | +static inline int hvf_arm_num_wrps(hv_vcpu_config_t config) | ||
230 | +{ | ||
231 | + uint64_t val; | ||
232 | + hv_return_t ret; | ||
233 | + ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1, | ||
234 | + &val); | ||
235 | + assert_hvf_ok(ret); | ||
236 | + return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1; | ||
237 | +} | ||
238 | + | ||
239 | +void hvf_arm_init_debug(void) | ||
240 | +{ | ||
241 | + hv_vcpu_config_t config; | ||
242 | + config = hv_vcpu_config_create(); | ||
243 | + | ||
244 | + max_hw_bps = hvf_arm_num_brps(config); | ||
245 | + hw_breakpoints = | ||
246 | + g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps); | ||
247 | + | ||
248 | + max_hw_wps = hvf_arm_num_wrps(config); | ||
249 | + hw_watchpoints = | ||
250 | + g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps); | ||
251 | +} | ||
252 | + | ||
253 | #define HVF_SYSREG(crn, crm, op0, op1, op2) \ | ||
254 | ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) | ||
255 | #define PL1_WRITE_MASK 0x4 | ||
256 | @@ -XXX,XX +XXX,XX @@ int hvf_get_registers(CPUState *cpu) | ||
257 | continue; | ||
258 | } | ||
259 | |||
260 | + if (cpu->hvf->guest_debug_enabled) { | ||
261 | + /* Handle debug registers */ | ||
262 | + switch (hvf_sreg_match[i].reg) { | ||
263 | + case HV_SYS_REG_DBGBVR0_EL1: | ||
264 | + case HV_SYS_REG_DBGBCR0_EL1: | ||
265 | + case HV_SYS_REG_DBGWVR0_EL1: | ||
266 | + case HV_SYS_REG_DBGWCR0_EL1: | ||
267 | + case HV_SYS_REG_DBGBVR1_EL1: | ||
268 | + case HV_SYS_REG_DBGBCR1_EL1: | ||
269 | + case HV_SYS_REG_DBGWVR1_EL1: | ||
270 | + case HV_SYS_REG_DBGWCR1_EL1: | ||
271 | + case HV_SYS_REG_DBGBVR2_EL1: | ||
272 | + case HV_SYS_REG_DBGBCR2_EL1: | ||
273 | + case HV_SYS_REG_DBGWVR2_EL1: | ||
274 | + case HV_SYS_REG_DBGWCR2_EL1: | ||
275 | + case HV_SYS_REG_DBGBVR3_EL1: | ||
276 | + case HV_SYS_REG_DBGBCR3_EL1: | ||
277 | + case HV_SYS_REG_DBGWVR3_EL1: | ||
278 | + case HV_SYS_REG_DBGWCR3_EL1: | ||
279 | + case HV_SYS_REG_DBGBVR4_EL1: | ||
280 | + case HV_SYS_REG_DBGBCR4_EL1: | ||
281 | + case HV_SYS_REG_DBGWVR4_EL1: | ||
282 | + case HV_SYS_REG_DBGWCR4_EL1: | ||
283 | + case HV_SYS_REG_DBGBVR5_EL1: | ||
284 | + case HV_SYS_REG_DBGBCR5_EL1: | ||
285 | + case HV_SYS_REG_DBGWVR5_EL1: | ||
286 | + case HV_SYS_REG_DBGWCR5_EL1: | ||
287 | + case HV_SYS_REG_DBGBVR6_EL1: | ||
288 | + case HV_SYS_REG_DBGBCR6_EL1: | ||
289 | + case HV_SYS_REG_DBGWVR6_EL1: | ||
290 | + case HV_SYS_REG_DBGWCR6_EL1: | ||
291 | + case HV_SYS_REG_DBGBVR7_EL1: | ||
292 | + case HV_SYS_REG_DBGBCR7_EL1: | ||
293 | + case HV_SYS_REG_DBGWVR7_EL1: | ||
294 | + case HV_SYS_REG_DBGWCR7_EL1: | ||
295 | + case HV_SYS_REG_DBGBVR8_EL1: | ||
296 | + case HV_SYS_REG_DBGBCR8_EL1: | ||
297 | + case HV_SYS_REG_DBGWVR8_EL1: | ||
298 | + case HV_SYS_REG_DBGWCR8_EL1: | ||
299 | + case HV_SYS_REG_DBGBVR9_EL1: | ||
300 | + case HV_SYS_REG_DBGBCR9_EL1: | ||
301 | + case HV_SYS_REG_DBGWVR9_EL1: | ||
302 | + case HV_SYS_REG_DBGWCR9_EL1: | ||
303 | + case HV_SYS_REG_DBGBVR10_EL1: | ||
304 | + case HV_SYS_REG_DBGBCR10_EL1: | ||
305 | + case HV_SYS_REG_DBGWVR10_EL1: | ||
306 | + case HV_SYS_REG_DBGWCR10_EL1: | ||
307 | + case HV_SYS_REG_DBGBVR11_EL1: | ||
308 | + case HV_SYS_REG_DBGBCR11_EL1: | ||
309 | + case HV_SYS_REG_DBGWVR11_EL1: | ||
310 | + case HV_SYS_REG_DBGWCR11_EL1: | ||
311 | + case HV_SYS_REG_DBGBVR12_EL1: | ||
312 | + case HV_SYS_REG_DBGBCR12_EL1: | ||
313 | + case HV_SYS_REG_DBGWVR12_EL1: | ||
314 | + case HV_SYS_REG_DBGWCR12_EL1: | ||
315 | + case HV_SYS_REG_DBGBVR13_EL1: | ||
316 | + case HV_SYS_REG_DBGBCR13_EL1: | ||
317 | + case HV_SYS_REG_DBGWVR13_EL1: | ||
318 | + case HV_SYS_REG_DBGWCR13_EL1: | ||
319 | + case HV_SYS_REG_DBGBVR14_EL1: | ||
320 | + case HV_SYS_REG_DBGBCR14_EL1: | ||
321 | + case HV_SYS_REG_DBGWVR14_EL1: | ||
322 | + case HV_SYS_REG_DBGWCR14_EL1: | ||
323 | + case HV_SYS_REG_DBGBVR15_EL1: | ||
324 | + case HV_SYS_REG_DBGBCR15_EL1: | ||
325 | + case HV_SYS_REG_DBGWVR15_EL1: | ||
326 | + case HV_SYS_REG_DBGWCR15_EL1: { | ||
327 | + /* | ||
328 | + * If the guest is being debugged, the vCPU's debug registers | ||
329 | + * are holding the gdbstub's view of the registers (set in | ||
330 | + * hvf_arch_update_guest_debug()). | ||
331 | + * Since the environment is used to store only the guest's view | ||
332 | + * of the registers, don't update it with the values from the | ||
333 | + * vCPU but simply keep the values from the previous | ||
334 | + * environment. | ||
335 | + */ | ||
336 | + const ARMCPRegInfo *ri; | ||
337 | + ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key); | ||
338 | + val = read_raw_cp_reg(env, ri); | ||
339 | + | ||
340 | + arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; | ||
341 | + continue; | ||
342 | + } | ||
343 | + } | ||
344 | + } | ||
345 | + | ||
346 | ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); | ||
347 | assert_hvf_ok(ret); | ||
348 | |||
349 | @@ -XXX,XX +XXX,XX @@ int hvf_put_registers(CPUState *cpu) | ||
350 | continue; | ||
351 | } | ||
352 | |||
353 | + if (cpu->hvf->guest_debug_enabled) { | ||
354 | + /* Handle debug registers */ | ||
355 | + switch (hvf_sreg_match[i].reg) { | ||
356 | + case HV_SYS_REG_DBGBVR0_EL1: | ||
357 | + case HV_SYS_REG_DBGBCR0_EL1: | ||
358 | + case HV_SYS_REG_DBGWVR0_EL1: | ||
359 | + case HV_SYS_REG_DBGWCR0_EL1: | ||
360 | + case HV_SYS_REG_DBGBVR1_EL1: | ||
361 | + case HV_SYS_REG_DBGBCR1_EL1: | ||
362 | + case HV_SYS_REG_DBGWVR1_EL1: | ||
363 | + case HV_SYS_REG_DBGWCR1_EL1: | ||
364 | + case HV_SYS_REG_DBGBVR2_EL1: | ||
365 | + case HV_SYS_REG_DBGBCR2_EL1: | ||
366 | + case HV_SYS_REG_DBGWVR2_EL1: | ||
367 | + case HV_SYS_REG_DBGWCR2_EL1: | ||
368 | + case HV_SYS_REG_DBGBVR3_EL1: | ||
369 | + case HV_SYS_REG_DBGBCR3_EL1: | ||
370 | + case HV_SYS_REG_DBGWVR3_EL1: | ||
371 | + case HV_SYS_REG_DBGWCR3_EL1: | ||
372 | + case HV_SYS_REG_DBGBVR4_EL1: | ||
373 | + case HV_SYS_REG_DBGBCR4_EL1: | ||
374 | + case HV_SYS_REG_DBGWVR4_EL1: | ||
375 | + case HV_SYS_REG_DBGWCR4_EL1: | ||
376 | + case HV_SYS_REG_DBGBVR5_EL1: | ||
377 | + case HV_SYS_REG_DBGBCR5_EL1: | ||
378 | + case HV_SYS_REG_DBGWVR5_EL1: | ||
379 | + case HV_SYS_REG_DBGWCR5_EL1: | ||
380 | + case HV_SYS_REG_DBGBVR6_EL1: | ||
381 | + case HV_SYS_REG_DBGBCR6_EL1: | ||
382 | + case HV_SYS_REG_DBGWVR6_EL1: | ||
383 | + case HV_SYS_REG_DBGWCR6_EL1: | ||
384 | + case HV_SYS_REG_DBGBVR7_EL1: | ||
385 | + case HV_SYS_REG_DBGBCR7_EL1: | ||
386 | + case HV_SYS_REG_DBGWVR7_EL1: | ||
387 | + case HV_SYS_REG_DBGWCR7_EL1: | ||
388 | + case HV_SYS_REG_DBGBVR8_EL1: | ||
389 | + case HV_SYS_REG_DBGBCR8_EL1: | ||
390 | + case HV_SYS_REG_DBGWVR8_EL1: | ||
391 | + case HV_SYS_REG_DBGWCR8_EL1: | ||
392 | + case HV_SYS_REG_DBGBVR9_EL1: | ||
393 | + case HV_SYS_REG_DBGBCR9_EL1: | ||
394 | + case HV_SYS_REG_DBGWVR9_EL1: | ||
395 | + case HV_SYS_REG_DBGWCR9_EL1: | ||
396 | + case HV_SYS_REG_DBGBVR10_EL1: | ||
397 | + case HV_SYS_REG_DBGBCR10_EL1: | ||
398 | + case HV_SYS_REG_DBGWVR10_EL1: | ||
399 | + case HV_SYS_REG_DBGWCR10_EL1: | ||
400 | + case HV_SYS_REG_DBGBVR11_EL1: | ||
401 | + case HV_SYS_REG_DBGBCR11_EL1: | ||
402 | + case HV_SYS_REG_DBGWVR11_EL1: | ||
403 | + case HV_SYS_REG_DBGWCR11_EL1: | ||
404 | + case HV_SYS_REG_DBGBVR12_EL1: | ||
405 | + case HV_SYS_REG_DBGBCR12_EL1: | ||
406 | + case HV_SYS_REG_DBGWVR12_EL1: | ||
407 | + case HV_SYS_REG_DBGWCR12_EL1: | ||
408 | + case HV_SYS_REG_DBGBVR13_EL1: | ||
409 | + case HV_SYS_REG_DBGBCR13_EL1: | ||
410 | + case HV_SYS_REG_DBGWVR13_EL1: | ||
411 | + case HV_SYS_REG_DBGWCR13_EL1: | ||
412 | + case HV_SYS_REG_DBGBVR14_EL1: | ||
413 | + case HV_SYS_REG_DBGBCR14_EL1: | ||
414 | + case HV_SYS_REG_DBGWVR14_EL1: | ||
415 | + case HV_SYS_REG_DBGWCR14_EL1: | ||
416 | + case HV_SYS_REG_DBGBVR15_EL1: | ||
417 | + case HV_SYS_REG_DBGBCR15_EL1: | ||
418 | + case HV_SYS_REG_DBGWVR15_EL1: | ||
419 | + case HV_SYS_REG_DBGWCR15_EL1: | ||
420 | + /* | ||
421 | + * If the guest is being debugged, the vCPU's debug registers | ||
422 | + * are already holding the gdbstub's view of the registers (set | ||
423 | + * in hvf_arch_update_guest_debug()). | ||
424 | + */ | ||
425 | + continue; | ||
426 | + } | ||
427 | + } | ||
428 | + | ||
429 | val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; | ||
430 | ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); | ||
431 | assert_hvf_ok(ret); | ||
432 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
433 | { | ||
434 | ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
435 | CPUARMState *env = &arm_cpu->env; | ||
436 | + int ret; | ||
437 | hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; | ||
438 | hv_return_t r; | ||
439 | bool advance_pc = false; | ||
440 | |||
441 | - if (hvf_inject_interrupts(cpu)) { | ||
442 | + if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) && | ||
443 | + hvf_inject_interrupts(cpu)) { | ||
444 | return EXCP_INTERRUPT; | ||
445 | } | ||
446 | |||
447 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
448 | uint64_t syndrome = hvf_exit->exception.syndrome; | ||
449 | uint32_t ec = syn_get_ec(syndrome); | ||
450 | |||
451 | + ret = 0; | ||
452 | qemu_mutex_lock_iothread(); | ||
453 | switch (exit_reason) { | ||
454 | case HV_EXIT_REASON_EXCEPTION: | ||
455 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
456 | hvf_sync_vtimer(cpu); | ||
457 | |||
458 | switch (ec) { | ||
459 | + case EC_SOFTWARESTEP: { | ||
460 | + ret = EXCP_DEBUG; | ||
461 | + | ||
462 | + if (!cpu->singlestep_enabled) { | ||
463 | + error_report("EC_SOFTWARESTEP but single-stepping not enabled"); | ||
464 | + } | ||
465 | + break; | ||
466 | + } | ||
467 | + case EC_AA64_BKPT: { | ||
468 | + ret = EXCP_DEBUG; | ||
469 | + | ||
470 | + cpu_synchronize_state(cpu); | ||
471 | + | ||
472 | + if (!hvf_find_sw_breakpoint(cpu, env->pc)) { | ||
473 | + /* Re-inject into the guest */ | ||
474 | + ret = 0; | ||
475 | + hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0)); | ||
476 | + } | ||
477 | + break; | ||
478 | + } | ||
479 | + case EC_BREAKPOINT: { | ||
480 | + ret = EXCP_DEBUG; | ||
481 | + | ||
482 | + cpu_synchronize_state(cpu); | ||
483 | + | ||
484 | + if (!find_hw_breakpoint(cpu, env->pc)) { | ||
485 | + error_report("EC_BREAKPOINT but unknown hw breakpoint"); | ||
486 | + } | ||
487 | + break; | ||
488 | + } | ||
489 | + case EC_WATCHPOINT: { | ||
490 | + ret = EXCP_DEBUG; | ||
491 | + | ||
492 | + cpu_synchronize_state(cpu); | ||
493 | + | ||
494 | + CPUWatchpoint *wp = | ||
495 | + find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address); | ||
496 | + if (!wp) { | ||
497 | + error_report("EXCP_DEBUG but unknown hw watchpoint"); | ||
498 | + } | ||
499 | + cpu->watchpoint_hit = wp; | ||
500 | + break; | ||
501 | + } | ||
502 | case EC_DATAABORT: { | ||
503 | bool isv = syndrome & ARM_EL_ISV; | ||
504 | bool iswrite = (syndrome >> 6) & 1; | ||
505 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
506 | pc += 4; | ||
507 | r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); | ||
508 | assert_hvf_ok(r); | ||
509 | + | ||
510 | + /* Handle single-stepping over instructions which trigger a VM exit */ | ||
511 | + if (cpu->singlestep_enabled) { | ||
512 | + ret = EXCP_DEBUG; | ||
513 | + } | ||
514 | } | ||
515 | |||
516 | - return 0; | ||
517 | + return ret; | ||
518 | } | ||
519 | |||
520 | static const VMStateDescription vmstate_hvf_vtimer = { | ||
521 | @@ -XXX,XX +XXX,XX @@ int hvf_arch_init(void) | ||
522 | hvf_state->vtimer_offset = mach_absolute_time(); | ||
523 | vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer); | ||
524 | qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer); | ||
525 | + | ||
526 | + hvf_arm_init_debug(); | ||
527 | + | ||
39 | return 0; | 528 | return 0; |
40 | } | 529 | } |
41 | 530 | ||
42 | -void switch_mode(CPUARMState *env, int mode) | 531 | @@ -XXX,XX +XXX,XX @@ void hvf_arch_remove_all_hw_breakpoints(void) |
43 | +static void switch_mode(CPUARMState *env, int mode) | 532 | g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); |
533 | } | ||
534 | } | ||
535 | + | ||
536 | +/* | ||
537 | + * Update the vCPU with the gdbstub's view of debug registers. This view | ||
538 | + * consists of all hardware breakpoints and watchpoints inserted so far while | ||
539 | + * debugging the guest. | ||
540 | + */ | ||
541 | +static void hvf_put_gdbstub_debug_registers(CPUState *cpu) | ||
542 | +{ | ||
543 | + hv_return_t r = HV_SUCCESS; | ||
544 | + int i; | ||
545 | + | ||
546 | + for (i = 0; i < cur_hw_bps; i++) { | ||
547 | + HWBreakpoint *bp = get_hw_bp(i); | ||
548 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], bp->bcr); | ||
549 | + assert_hvf_ok(r); | ||
550 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], bp->bvr); | ||
551 | + assert_hvf_ok(r); | ||
552 | + } | ||
553 | + for (i = cur_hw_bps; i < max_hw_bps; i++) { | ||
554 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], 0); | ||
555 | + assert_hvf_ok(r); | ||
556 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], 0); | ||
557 | + assert_hvf_ok(r); | ||
558 | + } | ||
559 | + | ||
560 | + for (i = 0; i < cur_hw_wps; i++) { | ||
561 | + HWWatchpoint *wp = get_hw_wp(i); | ||
562 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], wp->wcr); | ||
563 | + assert_hvf_ok(r); | ||
564 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], wp->wvr); | ||
565 | + assert_hvf_ok(r); | ||
566 | + } | ||
567 | + for (i = cur_hw_wps; i < max_hw_wps; i++) { | ||
568 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], 0); | ||
569 | + assert_hvf_ok(r); | ||
570 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], 0); | ||
571 | + assert_hvf_ok(r); | ||
572 | + } | ||
573 | +} | ||
574 | + | ||
575 | +/* | ||
576 | + * Update the vCPU with the guest's view of debug registers. This view is kept | ||
577 | + * in the environment at all times. | ||
578 | + */ | ||
579 | +static void hvf_put_guest_debug_registers(CPUState *cpu) | ||
580 | +{ | ||
581 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
582 | + CPUARMState *env = &arm_cpu->env; | ||
583 | + hv_return_t r = HV_SUCCESS; | ||
584 | + int i; | ||
585 | + | ||
586 | + for (i = 0; i < max_hw_bps; i++) { | ||
587 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], | ||
588 | + env->cp15.dbgbcr[i]); | ||
589 | + assert_hvf_ok(r); | ||
590 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], | ||
591 | + env->cp15.dbgbvr[i]); | ||
592 | + assert_hvf_ok(r); | ||
593 | + } | ||
594 | + | ||
595 | + for (i = 0; i < max_hw_wps; i++) { | ||
596 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], | ||
597 | + env->cp15.dbgwcr[i]); | ||
598 | + assert_hvf_ok(r); | ||
599 | + r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], | ||
600 | + env->cp15.dbgwvr[i]); | ||
601 | + assert_hvf_ok(r); | ||
602 | + } | ||
603 | +} | ||
604 | + | ||
605 | +static inline bool hvf_arm_hw_debug_active(CPUState *cpu) | ||
606 | +{ | ||
607 | + return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); | ||
608 | +} | ||
609 | + | ||
610 | +static void hvf_arch_set_traps(void) | ||
611 | +{ | ||
612 | + CPUState *cpu; | ||
613 | + bool should_enable_traps = false; | ||
614 | + hv_return_t r = HV_SUCCESS; | ||
615 | + | ||
616 | + /* Check whether guest debugging is enabled for at least one vCPU; if it | ||
617 | + * is, enable exiting the guest on all vCPUs */ | ||
618 | + CPU_FOREACH(cpu) { | ||
619 | + should_enable_traps |= cpu->hvf->guest_debug_enabled; | ||
620 | + } | ||
621 | + CPU_FOREACH(cpu) { | ||
622 | + /* Set whether debug exceptions exit the guest */ | ||
623 | + r = hv_vcpu_set_trap_debug_exceptions(cpu->hvf->fd, | ||
624 | + should_enable_traps); | ||
625 | + assert_hvf_ok(r); | ||
626 | + | ||
627 | + /* Set whether accesses to debug registers exit the guest */ | ||
628 | + r = hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf->fd, | ||
629 | + should_enable_traps); | ||
630 | + assert_hvf_ok(r); | ||
631 | + } | ||
632 | +} | ||
633 | + | ||
634 | +void hvf_arch_update_guest_debug(CPUState *cpu) | ||
635 | +{ | ||
636 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
637 | + CPUARMState *env = &arm_cpu->env; | ||
638 | + | ||
639 | + /* Check whether guest debugging is enabled */ | ||
640 | + cpu->hvf->guest_debug_enabled = cpu->singlestep_enabled || | ||
641 | + hvf_sw_breakpoints_active(cpu) || | ||
642 | + hvf_arm_hw_debug_active(cpu); | ||
643 | + | ||
644 | + /* Update debug registers */ | ||
645 | + if (cpu->hvf->guest_debug_enabled) { | ||
646 | + hvf_put_gdbstub_debug_registers(cpu); | ||
647 | + } else { | ||
648 | + hvf_put_guest_debug_registers(cpu); | ||
649 | + } | ||
650 | + | ||
651 | + cpu_synchronize_state(cpu); | ||
652 | + | ||
653 | + /* Enable/disable single-stepping */ | ||
654 | + if (cpu->singlestep_enabled) { | ||
655 | + env->cp15.mdscr_el1 = | ||
656 | + deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1); | ||
657 | + pstate_write(env, pstate_read(env) | PSTATE_SS); | ||
658 | + } else { | ||
659 | + env->cp15.mdscr_el1 = | ||
660 | + deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0); | ||
661 | + } | ||
662 | + | ||
663 | + /* Enable/disable Breakpoint exceptions */ | ||
664 | + if (hvf_arm_hw_debug_active(cpu)) { | ||
665 | + env->cp15.mdscr_el1 = | ||
666 | + deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1); | ||
667 | + } else { | ||
668 | + env->cp15.mdscr_el1 = | ||
669 | + deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0); | ||
670 | + } | ||
671 | + | ||
672 | + hvf_arch_set_traps(); | ||
673 | +} | ||
674 | + | ||
675 | +inline bool hvf_arch_supports_guest_debug(void) | ||
676 | +{ | ||
677 | + return true; | ||
678 | +} | ||
679 | diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c | ||
680 | index XXXXXXX..XXXXXXX 100644 | ||
681 | --- a/target/i386/hvf/hvf.c | ||
682 | +++ b/target/i386/hvf/hvf.c | ||
683 | @@ -XXX,XX +XXX,XX @@ int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type) | ||
684 | void hvf_arch_remove_all_hw_breakpoints(void) | ||
44 | { | 685 | { |
45 | ARMCPU *cpu = arm_env_get_cpu(env); | 686 | } |
46 | 687 | + | |
47 | @@ -XXX,XX +XXX,XX @@ void aarch64_sync_64_to_32(CPUARMState *env) | 688 | +void hvf_arch_update_guest_debug(CPUState *cpu) |
48 | 689 | +{ | |
49 | #else | 690 | +} |
50 | 691 | + | |
51 | -void switch_mode(CPUARMState *env, int mode) | 692 | +inline bool hvf_arch_supports_guest_debug(void) |
52 | +static void switch_mode(CPUARMState *env, int mode) | 693 | +{ |
53 | { | 694 | + return false; |
54 | int old_mode; | 695 | +} |
55 | int i; | ||
56 | -- | 696 | -- |
57 | 2.19.1 | 697 | 2.34.1 |
58 | |||
59 | diff view generated by jsdifflib |
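To make the single-step handling in hvf_arch_update_guest_debug() above a little more concrete: software-step requires both MDSCR_EL1.SS (bit 0) and PSTATE.SS to be set, so the next instruction retires and the step exception is taken after it. A minimal sketch of that arming step, with deposit64() modelled locally rather than taken from QEMU:

    /* Sketch only: arm AArch64 software-step the way the patch does. */
    #include <assert.h>
    #include <stdint.h>

    #define MDSCR_EL1_SS_SHIFT 0
    #define PSTATE_SS (1ULL << 21)

    /* local stand-in for QEMU's deposit64(value, start, length, field) */
    static uint64_t deposit64(uint64_t value, int start, int length, uint64_t field)
    {
        uint64_t mask = ((~0ULL) >> (64 - length)) << start;
        return (value & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint64_t mdscr_el1 = 0, pstate = 0;

        mdscr_el1 = deposit64(mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1); /* MDSCR_EL1.SS = 1 */
        pstate |= PSTATE_SS;                            /* step over the *next* insn */

        assert((mdscr_el1 & 1) && (pstate & PSTATE_SS));
        return 0;
    }
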
1 | For the v7 version of the Arm architecture, the IL bit in | 1 | From: Vikram Garhwal <vikram.garhwal@amd.com> |
---|---|---|---|
2 | syndrome register values where the field is not valid was | ||
3 | defined to be UNK/SBZP. In v8 this is RES1, which is what | ||
4 | QEMU currently implements. Handle the desired v7 behaviour | 3 | The Xilinx Versal CANFD controller model is based on the SocketCAN and QEMU CAN
5 | by squashing the IL bit for the affected cases: | 4 | bus implementations. The bus connection and SocketCAN connection for each CAN
6 | * EC == EC_UNCATEGORIZED | 5 | module can be configured on the command line.
7 | * prefetch aborts | ||
8 | * data aborts where ISV is 0 | ||
9 | 2 | ||
10 | (The fourth case listed in the v8 Arm ARM DDI 0487C.a in | 3 | The Xilinx Versal CANFD controller is developed based on SocketCAN, QEMU CAN bus |
11 | section G7.2.70, "illegal state exception", can't happen | 4 | implementation. Bus connection and socketCAN connection for each CAN module |
12 | on a v7 CPU.) | 5 | can be set through command lines. |
13 | 6 | ||
14 | This deals with a corner case noted in a comment. | 7 | Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com> |
8 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | include/hw/net/xlnx-versal-canfd.h | 87 ++ | ||
12 | hw/net/can/xlnx-versal-canfd.c | 2107 ++++++++++++++++++++++++++++ | ||
13 | hw/net/can/meson.build | 1 + | ||
14 | hw/net/can/trace-events | 7 + | ||
15 | 4 files changed, 2202 insertions(+) | ||
16 | create mode 100644 include/hw/net/xlnx-versal-canfd.h | ||
17 | create mode 100644 hw/net/can/xlnx-versal-canfd.c | ||
15 | 18 | ||
16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 19 | diff --git a/include/hw/net/xlnx-versal-canfd.h b/include/hw/net/xlnx-versal-canfd.h |
17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 20 | new file mode 100644 |
18 | Message-id: 20181012144235.19646-10-peter.maydell@linaro.org | 21 | index XXXXXXX..XXXXXXX |
19 | --- | 22 | --- /dev/null |
20 | target/arm/internals.h | 7 ++----- | 23 | +++ b/include/hw/net/xlnx-versal-canfd.h |
21 | target/arm/helper.c | 13 +++++++++++++ | 24 | @@ -XXX,XX +XXX,XX @@ |
22 | 2 files changed, 15 insertions(+), 5 deletions(-) | 25 | +/* |
23 | 26 | + * QEMU model of the Xilinx Versal CANFD Controller. | |
24 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 27 | + * |
25 | index XXXXXXX..XXXXXXX 100644 | 28 | + * Copyright (c) 2023 Advanced Micro Devices, Inc. |
26 | --- a/target/arm/internals.h | 29 | + * |
27 | +++ b/target/arm/internals.h | 30 | + * Written-by: Vikram Garhwal<vikram.garhwal@amd.com> |
28 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_get_ec(uint32_t syn) | 31 | + * Based on QEMU CANFD Device emulation implemented by Jin Yang, Deniz Eren and |
29 | /* Utility functions for constructing various kinds of syndrome value. | 32 | + * Pavel Pisa. |
30 | * Note that in general we follow the AArch64 syndrome values; in a | 33 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
31 | * few cases the value in HSR for exceptions taken to AArch32 Hyp | 34 | + * of this software and associated documentation files (the "Software"), to deal |
32 | - * mode differs slightly, so if we ever implemented Hyp mode then the | 35 | + * in the Software without restriction, including without limitation the rights |
33 | - * syndrome value would need some massaging on exception entry. | 36 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
34 | - * (One example of this is that AArch64 defaults to IL bit set for | 37 | + * copies of the Software, and to permit persons to whom the Software is |
35 | - * exceptions which don't specifically indicate information about the | 38 | + * furnished to do so, subject to the following conditions: |
36 | - * trapping instruction, whereas AArch32 defaults to IL bit clear.) | 39 | + * |
37 | + * mode differs slightly, and we fix this up when populating HSR in | 40 | + * The above copyright notice and this permission notice shall be included in |
38 | + * arm_cpu_do_interrupt_aarch32_hyp(). | 41 | + * all copies or substantial portions of the Software. |
39 | */ | 42 | + * |
40 | static inline uint32_t syn_uncategorized(void) | 43 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
41 | { | 44 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
42 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 45 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
43 | index XXXXXXX..XXXXXXX 100644 | 46 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
44 | --- a/target/arm/helper.c | 47 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
45 | +++ b/target/arm/helper.c | 48 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
46 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) | 49 | + * THE SOFTWARE. |
47 | } | 50 | + */ |
48 | 51 | + | |
49 | if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { | 52 | +#ifndef HW_CANFD_XILINX_H |
50 | + if (!arm_feature(env, ARM_FEATURE_V8)) { | 53 | +#define HW_CANFD_XILINX_H |
51 | + /* | 54 | + |
52 | + * QEMU syndrome values are v8-style. v7 has the IL bit | 55 | +#include "hw/register.h" |
53 | + * UNK/SBZP for "field not valid" cases, where v8 uses RES1. | 56 | +#include "hw/ptimer.h" |
54 | + * If this is a v7 CPU, squash the IL bit in those cases. | 57 | +#include "net/can_emu.h" |
55 | + */ | 58 | +#include "hw/qdev-clock.h" |
56 | + if (cs->exception_index == EXCP_PREFETCH_ABORT || | 59 | + |
57 | + (cs->exception_index == EXCP_DATA_ABORT && | 60 | +#define TYPE_XILINX_CANFD "xlnx.versal-canfd" |
58 | + !(env->exception.syndrome & ARM_EL_ISV)) || | 61 | + |
59 | + syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { | 62 | +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCANFDState, XILINX_CANFD) |
60 | + env->exception.syndrome &= ~ARM_EL_IL; | 63 | + |
64 | +#define NUM_REGS_PER_MSG_SPACE 18 /* 1 ID + 1 DLC + 16 Data(DW0 - DW15) regs. */ | ||
65 | +#define MAX_NUM_RX 64 | ||
66 | +#define OFFSET_RX1_DW15 (0x4144 / 4) | ||
67 | +#define CANFD_TIMER_MAX 0xFFFFUL | ||
68 | +#define CANFD_DEFAULT_CLOCK (25 * 1000 * 1000) | ||
69 | + | ||
70 | +#define XLNX_VERSAL_CANFD_R_MAX (OFFSET_RX1_DW15 + \ | ||
71 | + ((MAX_NUM_RX - 1) * NUM_REGS_PER_MSG_SPACE) + 1) | ||
72 | + | ||
73 | +typedef struct XlnxVersalCANFDState { | ||
74 | + SysBusDevice parent_obj; | ||
75 | + MemoryRegion iomem; | ||
76 | + | ||
77 | + qemu_irq irq_canfd_int; | ||
78 | + qemu_irq irq_addr_err; | ||
79 | + | ||
80 | + RegisterInfo reg_info[XLNX_VERSAL_CANFD_R_MAX]; | ||
81 | + RegisterAccessInfo *tx_regs; | ||
82 | + RegisterAccessInfo *rx0_regs; | ||
83 | + RegisterAccessInfo *rx1_regs; | ||
84 | + RegisterAccessInfo *af_regs; | ||
85 | + RegisterAccessInfo *txe_regs; | ||
86 | + RegisterAccessInfo *rx_mailbox_regs; | ||
87 | + RegisterAccessInfo *af_mask_regs_mailbox; | ||
88 | + | ||
89 | + uint32_t regs[XLNX_VERSAL_CANFD_R_MAX]; | ||
90 | + | ||
91 | + ptimer_state *canfd_timer; | ||
92 | + | ||
93 | + CanBusClientState bus_client; | ||
94 | + CanBusState *canfdbus; | ||
95 | + | ||
96 | + struct { | ||
97 | + uint8_t rx0_fifo; | ||
98 | + uint8_t rx1_fifo; | ||
99 | + uint8_t tx_fifo; | ||
100 | + bool enable_rx_fifo1; | ||
101 | + uint32_t ext_clk_freq; | ||
102 | + } cfg; | ||
103 | + | ||
104 | +} XlnxVersalCANFDState; | ||
105 | + | ||
106 | +typedef struct tx_ready_reg_info { | ||
107 | + uint32_t can_id; | ||
108 | + uint32_t reg_num; | ||
109 | +} tx_ready_reg_info; | ||
110 | + | ||
111 | +#endif | ||
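For orientation, a quick standalone check (not part of the patch) of the
register-array size the macros above imply; the numbers are copied from the
definitions in this header:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* Word index of the first RX1 message's last data word (0x4144 / 4). */
        const int offset_rx1_dw15 = 0x4144 / 4;     /* 4177 */
        const int num_regs_per_msg = 18;            /* ID + DLC + 16 data words */
        const int max_num_rx = 64;
        /* Same expression as XLNX_VERSAL_CANFD_R_MAX. */
        int r_max = offset_rx1_dw15 + (max_num_rx - 1) * num_regs_per_msg + 1;

        assert(r_max == 5312);                      /* regs[] holds 5312 words */
        printf("regs[] spans %d words (%d bytes)\n", r_max, r_max * 4);
        return 0;
    }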
112 | diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c | ||
113 | new file mode 100644 | ||
114 | index XXXXXXX..XXXXXXX | ||
115 | --- /dev/null | ||
116 | +++ b/hw/net/can/xlnx-versal-canfd.c | ||
117 | @@ -XXX,XX +XXX,XX @@ | ||
118 | +/* | ||
119 | + * QEMU model of the Xilinx Versal CANFD device. | ||
120 | + * | ||
121 | + * This implementation is based on the following datasheet: | ||
122 | + * https://docs.xilinx.com/v/u/2.0-English/pg223-canfd | ||
123 | + * | ||
124 | + * Copyright (c) 2023 Advanced Micro Devices, Inc. | ||
125 | + * | ||
126 | + * Written-by: Vikram Garhwal <vikram.garhwal@amd.com> | ||
127 | + * | ||
128 | + * Based on QEMU CANFD Device emulation implemented by Jin Yang, Deniz Eren and | ||
129 | + * Pavel Pisa | ||
130 | + * | ||
131 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
132 | + * of this software and associated documentation files (the "Software"), to deal | ||
133 | + * in the Software without restriction, including without limitation the rights | ||
134 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
135 | + * copies of the Software, and to permit persons to whom the Software is | ||
136 | + * furnished to do so, subject to the following conditions: | ||
137 | + * | ||
138 | + * The above copyright notice and this permission notice shall be included in | ||
139 | + * all copies or substantial portions of the Software. | ||
140 | + * | ||
141 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
142 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
143 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
144 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
145 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
146 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
147 | + * THE SOFTWARE. | ||
148 | + */ | ||
149 | + | ||
150 | +#include "qemu/osdep.h" | ||
151 | +#include "hw/sysbus.h" | ||
152 | +#include "hw/irq.h" | ||
153 | +#include "hw/register.h" | ||
154 | +#include "qapi/error.h" | ||
155 | +#include "qemu/bitops.h" | ||
156 | +#include "qemu/log.h" | ||
157 | +#include "qemu/cutils.h" | ||
158 | +#include "qemu/event_notifier.h" | ||
159 | +#include "hw/qdev-properties.h" | ||
160 | +#include "qom/object_interfaces.h" | ||
161 | +#include "migration/vmstate.h" | ||
162 | +#include "hw/net/xlnx-versal-canfd.h" | ||
163 | +#include "trace.h" | ||
164 | + | ||
165 | +REG32(SOFTWARE_RESET_REGISTER, 0x0) | ||
166 | + FIELD(SOFTWARE_RESET_REGISTER, CEN, 1, 1) | ||
167 | + FIELD(SOFTWARE_RESET_REGISTER, SRST, 0, 1) | ||
168 | +REG32(MODE_SELECT_REGISTER, 0x4) | ||
169 | + FIELD(MODE_SELECT_REGISTER, ITO, 8, 8) | ||
170 | + FIELD(MODE_SELECT_REGISTER, ABR, 7, 1) | ||
171 | + FIELD(MODE_SELECT_REGISTER, SBR, 6, 1) | ||
172 | + FIELD(MODE_SELECT_REGISTER, DPEE, 5, 1) | ||
173 | + FIELD(MODE_SELECT_REGISTER, DAR, 4, 1) | ||
174 | + FIELD(MODE_SELECT_REGISTER, BRSD, 3, 1) | ||
175 | + FIELD(MODE_SELECT_REGISTER, SNOOP, 2, 1) | ||
176 | + FIELD(MODE_SELECT_REGISTER, LBACK, 1, 1) | ||
177 | + FIELD(MODE_SELECT_REGISTER, SLEEP, 0, 1) | ||
178 | +REG32(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x8) | ||
179 | + FIELD(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, BRP, 0, 8) | ||
180 | +REG32(ARBITRATION_PHASE_BIT_TIMING_REGISTER, 0xc) | ||
181 | + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, SJW, 16, 7) | ||
182 | + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS2, 8, 7) | ||
183 | + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS1, 0, 8) | ||
184 | +REG32(ERROR_COUNTER_REGISTER, 0x10) | ||
185 | + FIELD(ERROR_COUNTER_REGISTER, REC, 8, 8) | ||
186 | + FIELD(ERROR_COUNTER_REGISTER, TEC, 0, 8) | ||
187 | +REG32(ERROR_STATUS_REGISTER, 0x14) | ||
188 | + FIELD(ERROR_STATUS_REGISTER, F_BERR, 11, 1) | ||
189 | + FIELD(ERROR_STATUS_REGISTER, F_STER, 10, 1) | ||
190 | + FIELD(ERROR_STATUS_REGISTER, F_FMER, 9, 1) | ||
191 | + FIELD(ERROR_STATUS_REGISTER, F_CRCER, 8, 1) | ||
192 | + FIELD(ERROR_STATUS_REGISTER, ACKER, 4, 1) | ||
193 | + FIELD(ERROR_STATUS_REGISTER, BERR, 3, 1) | ||
194 | + FIELD(ERROR_STATUS_REGISTER, STER, 2, 1) | ||
195 | + FIELD(ERROR_STATUS_REGISTER, FMER, 1, 1) | ||
196 | + FIELD(ERROR_STATUS_REGISTER, CRCER, 0, 1) | ||
197 | +REG32(STATUS_REGISTER, 0x18) | ||
198 | + FIELD(STATUS_REGISTER, TDCV, 16, 7) | ||
199 | + FIELD(STATUS_REGISTER, SNOOP, 12, 1) | ||
200 | + FIELD(STATUS_REGISTER, BSFR_CONFIG, 10, 1) | ||
201 | + FIELD(STATUS_REGISTER, PEE_CONFIG, 9, 1) | ||
202 | + FIELD(STATUS_REGISTER, ESTAT, 7, 2) | ||
203 | + FIELD(STATUS_REGISTER, ERRWRN, 6, 1) | ||
204 | + FIELD(STATUS_REGISTER, BBSY, 5, 1) | ||
205 | + FIELD(STATUS_REGISTER, BIDLE, 4, 1) | ||
206 | + FIELD(STATUS_REGISTER, NORMAL, 3, 1) | ||
207 | + FIELD(STATUS_REGISTER, SLEEP, 2, 1) | ||
208 | + FIELD(STATUS_REGISTER, LBACK, 1, 1) | ||
209 | + FIELD(STATUS_REGISTER, CONFIG, 0, 1) | ||
210 | +REG32(INTERRUPT_STATUS_REGISTER, 0x1c) | ||
211 | + FIELD(INTERRUPT_STATUS_REGISTER, TXEWMFLL, 31, 1) | ||
212 | + FIELD(INTERRUPT_STATUS_REGISTER, TXEOFLW, 30, 1) | ||
213 | + FIELD(INTERRUPT_STATUS_REGISTER, RXBOFLW_BI, 24, 6) | ||
214 | + FIELD(INTERRUPT_STATUS_REGISTER, RXLRM_BI, 18, 6) | ||
215 | + FIELD(INTERRUPT_STATUS_REGISTER, RXMNF, 17, 1) | ||
216 | + FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL_1, 16, 1) | ||
217 | + FIELD(INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 15, 1) | ||
218 | + FIELD(INTERRUPT_STATUS_REGISTER, TXCRS, 14, 1) | ||
219 | + FIELD(INTERRUPT_STATUS_REGISTER, TXRRS, 13, 1) | ||
220 | + FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL, 12, 1) | ||
221 | + FIELD(INTERRUPT_STATUS_REGISTER, WKUP, 11, 1) | ||
222 | + FIELD(INTERRUPT_STATUS_REGISTER, SLP, 10, 1) | ||
223 | + FIELD(INTERRUPT_STATUS_REGISTER, BSOFF, 9, 1) | ||
224 | + /* | ||
225 | + * In the original HW description the bit below is named ERROR, but a | ||
226 | + * field named ERROR collides with a macro in the Windows build. To avoid | ||
227 | + * Windows build failures, the bit is renamed to ERROR_BIT. | ||
228 | + */ | ||
229 | + FIELD(INTERRUPT_STATUS_REGISTER, ERROR_BIT, 8, 1) | ||
230 | + FIELD(INTERRUPT_STATUS_REGISTER, RXFOFLW, 6, 1) | ||
231 | + FIELD(INTERRUPT_STATUS_REGISTER, TSCNT_OFLW, 5, 1) | ||
232 | + FIELD(INTERRUPT_STATUS_REGISTER, RXOK, 4, 1) | ||
233 | + FIELD(INTERRUPT_STATUS_REGISTER, BSFRD, 3, 1) | ||
234 | + FIELD(INTERRUPT_STATUS_REGISTER, PEE, 2, 1) | ||
235 | + FIELD(INTERRUPT_STATUS_REGISTER, TXOK, 1, 1) | ||
236 | + FIELD(INTERRUPT_STATUS_REGISTER, ARBLST, 0, 1) | ||
237 | +REG32(INTERRUPT_ENABLE_REGISTER, 0x20) | ||
238 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETXEWMFLL, 31, 1) | ||
239 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETXEOFLW, 30, 1) | ||
240 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERXMNF, 17, 1) | ||
241 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL_1, 16, 1) | ||
242 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFOFLW_1, 15, 1) | ||
243 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETXCRS, 14, 1) | ||
244 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETXRRS, 13, 1) | ||
245 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL, 12, 1) | ||
246 | + FIELD(INTERRUPT_ENABLE_REGISTER, EWKUP, 11, 1) | ||
247 | + FIELD(INTERRUPT_ENABLE_REGISTER, ESLP, 10, 1) | ||
248 | + FIELD(INTERRUPT_ENABLE_REGISTER, EBSOFF, 9, 1) | ||
249 | + FIELD(INTERRUPT_ENABLE_REGISTER, EERROR, 8, 1) | ||
250 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERFXOFLW, 6, 1) | ||
251 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETSCNT_OFLW, 5, 1) | ||
252 | + FIELD(INTERRUPT_ENABLE_REGISTER, ERXOK, 4, 1) | ||
253 | + FIELD(INTERRUPT_ENABLE_REGISTER, EBSFRD, 3, 1) | ||
254 | + FIELD(INTERRUPT_ENABLE_REGISTER, EPEE, 2, 1) | ||
255 | + FIELD(INTERRUPT_ENABLE_REGISTER, ETXOK, 1, 1) | ||
256 | + FIELD(INTERRUPT_ENABLE_REGISTER, EARBLOST, 0, 1) | ||
257 | +REG32(INTERRUPT_CLEAR_REGISTER, 0x24) | ||
258 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTXEWMFLL, 31, 1) | ||
259 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTXEOFLW, 30, 1) | ||
260 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRXMNF, 17, 1) | ||
261 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL_1, 16, 1) | ||
262 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFOFLW_1, 15, 1) | ||
263 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTXCRS, 14, 1) | ||
264 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTXRRS, 13, 1) | ||
265 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL, 12, 1) | ||
266 | + FIELD(INTERRUPT_CLEAR_REGISTER, CWKUP, 11, 1) | ||
267 | + FIELD(INTERRUPT_CLEAR_REGISTER, CSLP, 10, 1) | ||
268 | + FIELD(INTERRUPT_CLEAR_REGISTER, CBSOFF, 9, 1) | ||
269 | + FIELD(INTERRUPT_CLEAR_REGISTER, CERROR, 8, 1) | ||
270 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRFXOFLW, 6, 1) | ||
271 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTSCNT_OFLW, 5, 1) | ||
272 | + FIELD(INTERRUPT_CLEAR_REGISTER, CRXOK, 4, 1) | ||
273 | + FIELD(INTERRUPT_CLEAR_REGISTER, CBSFRD, 3, 1) | ||
274 | + FIELD(INTERRUPT_CLEAR_REGISTER, CPEE, 2, 1) | ||
275 | + FIELD(INTERRUPT_CLEAR_REGISTER, CTXOK, 1, 1) | ||
276 | + FIELD(INTERRUPT_CLEAR_REGISTER, CARBLOST, 0, 1) | ||
277 | +REG32(TIMESTAMP_REGISTER, 0x28) | ||
278 | + FIELD(TIMESTAMP_REGISTER, TIMESTAMP_CNT, 16, 16) | ||
279 | + FIELD(TIMESTAMP_REGISTER, CTS, 0, 1) | ||
280 | +REG32(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x88) | ||
281 | + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, TDC, 16, 1) | ||
282 | + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, TDCOFF, 8, 6) | ||
283 | + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, DP_BRP, 0, 8) | ||
284 | +REG32(DATA_PHASE_BIT_TIMING_REGISTER, 0x8c) | ||
285 | + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_SJW, 16, 4) | ||
286 | + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_TS2, 8, 4) | ||
287 | + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_TS1, 0, 5) | ||
288 | +REG32(TX_BUFFER_READY_REQUEST_REGISTER, 0x90) | ||
289 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR31, 31, 1) | ||
290 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR30, 30, 1) | ||
291 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR29, 29, 1) | ||
292 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR28, 28, 1) | ||
293 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR27, 27, 1) | ||
294 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR26, 26, 1) | ||
295 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR25, 25, 1) | ||
296 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR24, 24, 1) | ||
297 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR23, 23, 1) | ||
298 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR22, 22, 1) | ||
299 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR21, 21, 1) | ||
300 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR20, 20, 1) | ||
301 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR19, 19, 1) | ||
302 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR18, 18, 1) | ||
303 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR17, 17, 1) | ||
304 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR16, 16, 1) | ||
305 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR15, 15, 1) | ||
306 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR14, 14, 1) | ||
307 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR13, 13, 1) | ||
308 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR12, 12, 1) | ||
309 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR11, 11, 1) | ||
310 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR10, 10, 1) | ||
311 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR9, 9, 1) | ||
312 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR8, 8, 1) | ||
313 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR7, 7, 1) | ||
314 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR6, 6, 1) | ||
315 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR5, 5, 1) | ||
316 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR4, 4, 1) | ||
317 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR3, 3, 1) | ||
318 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR2, 2, 1) | ||
319 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR1, 1, 1) | ||
320 | + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR0, 0, 1) | ||
321 | +REG32(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, 0x94) | ||
322 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS31, 31, 1) | ||
323 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS30, 30, 1) | ||
324 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS29, 29, 1) | ||
325 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS28, 28, 1) | ||
326 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS27, 27, 1) | ||
327 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS26, 26, 1) | ||
328 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS25, 25, 1) | ||
329 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS24, 24, 1) | ||
330 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS23, 23, 1) | ||
331 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS22, 22, 1) | ||
332 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS21, 21, 1) | ||
333 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS20, 20, 1) | ||
334 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS19, 19, 1) | ||
335 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS18, 18, 1) | ||
336 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS17, 17, 1) | ||
337 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS16, 16, 1) | ||
338 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS15, 15, 1) | ||
339 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS14, 14, 1) | ||
340 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS13, 13, 1) | ||
341 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS12, 12, 1) | ||
342 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS11, 11, 1) | ||
343 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS10, 10, 1) | ||
344 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS9, 9, 1) | ||
345 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS8, 8, 1) | ||
346 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS7, 7, 1) | ||
347 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS6, 6, 1) | ||
348 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS5, 5, 1) | ||
349 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS4, 4, 1) | ||
350 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS3, 3, 1) | ||
351 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS2, 2, 1) | ||
352 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS1, 1, 1) | ||
353 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS0, 0, 1) | ||
354 | +REG32(TX_BUFFER_CANCEL_REQUEST_REGISTER, 0x98) | ||
355 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR31, 31, 1) | ||
356 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR30, 30, 1) | ||
357 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR29, 29, 1) | ||
358 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR28, 28, 1) | ||
359 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR27, 27, 1) | ||
360 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR26, 26, 1) | ||
361 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR25, 25, 1) | ||
362 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR24, 24, 1) | ||
363 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR23, 23, 1) | ||
364 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR22, 22, 1) | ||
365 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR21, 21, 1) | ||
366 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR20, 20, 1) | ||
367 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR19, 19, 1) | ||
368 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR18, 18, 1) | ||
369 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR17, 17, 1) | ||
370 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR16, 16, 1) | ||
371 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR15, 15, 1) | ||
372 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR14, 14, 1) | ||
373 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR13, 13, 1) | ||
374 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR12, 12, 1) | ||
375 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR11, 11, 1) | ||
376 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR10, 10, 1) | ||
377 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR9, 9, 1) | ||
378 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR8, 8, 1) | ||
379 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR7, 7, 1) | ||
380 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR6, 6, 1) | ||
381 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR5, 5, 1) | ||
382 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR4, 4, 1) | ||
383 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR3, 3, 1) | ||
384 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR2, 2, 1) | ||
385 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR1, 1, 1) | ||
386 | + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR0, 0, 1) | ||
387 | +REG32(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, 0x9c) | ||
388 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS31, 31, | ||
389 | + 1) | ||
390 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS30, 30, | ||
391 | + 1) | ||
392 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS29, 29, | ||
393 | + 1) | ||
394 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS28, 28, | ||
395 | + 1) | ||
396 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS27, 27, | ||
397 | + 1) | ||
398 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS26, 26, | ||
399 | + 1) | ||
400 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS25, 25, | ||
401 | + 1) | ||
402 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS24, 24, | ||
403 | + 1) | ||
404 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS23, 23, | ||
405 | + 1) | ||
406 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS22, 22, | ||
407 | + 1) | ||
408 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS21, 21, | ||
409 | + 1) | ||
410 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS20, 20, | ||
411 | + 1) | ||
412 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS19, 19, | ||
413 | + 1) | ||
414 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS18, 18, | ||
415 | + 1) | ||
416 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS17, 17, | ||
417 | + 1) | ||
418 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS16, 16, | ||
419 | + 1) | ||
420 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS15, 15, | ||
421 | + 1) | ||
422 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS14, 14, | ||
423 | + 1) | ||
424 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS13, 13, | ||
425 | + 1) | ||
426 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS12, 12, | ||
427 | + 1) | ||
428 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS11, 11, | ||
429 | + 1) | ||
430 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS10, 10, | ||
431 | + 1) | ||
432 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS9, 9, 1) | ||
433 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS8, 8, 1) | ||
434 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS7, 7, 1) | ||
435 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS6, 6, 1) | ||
436 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS5, 5, 1) | ||
437 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS4, 4, 1) | ||
438 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS3, 3, 1) | ||
439 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS2, 2, 1) | ||
440 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS1, 1, 1) | ||
441 | + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS0, 0, 1) | ||
442 | +REG32(TX_EVENT_FIFO_STATUS_REGISTER, 0xa0) | ||
443 | + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, 8, 6) | ||
444 | + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_IRI, 7, 1) | ||
445 | + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, 0, 5) | ||
446 | +REG32(TX_EVENT_FIFO_WATERMARK_REGISTER, 0xa4) | ||
447 | + FIELD(TX_EVENT_FIFO_WATERMARK_REGISTER, TXE_FWM, 0, 5) | ||
448 | +REG32(ACCEPTANCE_FILTER_CONTROL_REGISTER, 0xe0) | ||
449 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF31, 31, 1) | ||
450 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF30, 30, 1) | ||
451 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF29, 29, 1) | ||
452 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF28, 28, 1) | ||
453 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF27, 27, 1) | ||
454 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF26, 26, 1) | ||
455 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF25, 25, 1) | ||
456 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF24, 24, 1) | ||
457 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF23, 23, 1) | ||
458 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF22, 22, 1) | ||
459 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF21, 21, 1) | ||
460 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF20, 20, 1) | ||
461 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF19, 19, 1) | ||
462 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF18, 18, 1) | ||
463 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF17, 17, 1) | ||
464 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF16, 16, 1) | ||
465 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF15, 15, 1) | ||
466 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF14, 14, 1) | ||
467 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF13, 13, 1) | ||
468 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF12, 12, 1) | ||
469 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF11, 11, 1) | ||
470 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF10, 10, 1) | ||
471 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF9, 9, 1) | ||
472 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF8, 8, 1) | ||
473 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF7, 7, 1) | ||
474 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF6, 6, 1) | ||
475 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF5, 5, 1) | ||
476 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF4, 4, 1) | ||
477 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF3, 3, 1) | ||
478 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF2, 2, 1) | ||
479 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF1, 1, 1) | ||
480 | + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF0, 0, 1) | ||
481 | +REG32(RX_FIFO_STATUS_REGISTER, 0xe8) | ||
482 | + FIELD(RX_FIFO_STATUS_REGISTER, FL_1, 24, 7) | ||
483 | + FIELD(RX_FIFO_STATUS_REGISTER, IRI_1, 23, 1) | ||
484 | + FIELD(RX_FIFO_STATUS_REGISTER, RI_1, 16, 6) | ||
485 | + FIELD(RX_FIFO_STATUS_REGISTER, FL, 8, 7) | ||
486 | + FIELD(RX_FIFO_STATUS_REGISTER, IRI, 7, 1) | ||
487 | + FIELD(RX_FIFO_STATUS_REGISTER, RI, 0, 6) | ||
488 | +REG32(RX_FIFO_WATERMARK_REGISTER, 0xec) | ||
489 | + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFP, 16, 5) | ||
490 | + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFWM_1, 8, 6) | ||
491 | + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFWM, 0, 6) | ||
492 | +REG32(TB_ID_REGISTER, 0x100) | ||
493 | + FIELD(TB_ID_REGISTER, ID, 21, 11) | ||
494 | + FIELD(TB_ID_REGISTER, SRR_RTR_RRS, 20, 1) | ||
495 | + FIELD(TB_ID_REGISTER, IDE, 19, 1) | ||
496 | + FIELD(TB_ID_REGISTER, ID_EXT, 1, 18) | ||
497 | + FIELD(TB_ID_REGISTER, RTR_RRS, 0, 1) | ||
498 | +REG32(TB0_DLC_REGISTER, 0x104) | ||
499 | + FIELD(TB0_DLC_REGISTER, DLC, 28, 4) | ||
500 | + FIELD(TB0_DLC_REGISTER, FDF, 27, 1) | ||
501 | + FIELD(TB0_DLC_REGISTER, BRS, 26, 1) | ||
502 | + FIELD(TB0_DLC_REGISTER, RSVD2, 25, 1) | ||
503 | + FIELD(TB0_DLC_REGISTER, EFC, 24, 1) | ||
504 | + FIELD(TB0_DLC_REGISTER, MM, 16, 8) | ||
505 | + FIELD(TB0_DLC_REGISTER, RSVD1, 0, 16) | ||
506 | +REG32(TB_DW0_REGISTER, 0x108) | ||
507 | + FIELD(TB_DW0_REGISTER, DATA_BYTES0, 24, 8) | ||
508 | + FIELD(TB_DW0_REGISTER, DATA_BYTES1, 16, 8) | ||
509 | + FIELD(TB_DW0_REGISTER, DATA_BYTES2, 8, 8) | ||
510 | + FIELD(TB_DW0_REGISTER, DATA_BYTES3, 0, 8) | ||
511 | +REG32(TB_DW1_REGISTER, 0x10c) | ||
512 | + FIELD(TB_DW1_REGISTER, DATA_BYTES4, 24, 8) | ||
513 | + FIELD(TB_DW1_REGISTER, DATA_BYTES5, 16, 8) | ||
514 | + FIELD(TB_DW1_REGISTER, DATA_BYTES6, 8, 8) | ||
515 | + FIELD(TB_DW1_REGISTER, DATA_BYTES7, 0, 8) | ||
516 | +REG32(TB_DW2_REGISTER, 0x110) | ||
517 | + FIELD(TB_DW2_REGISTER, DATA_BYTES8, 24, 8) | ||
518 | + FIELD(TB_DW2_REGISTER, DATA_BYTES9, 16, 8) | ||
519 | + FIELD(TB_DW2_REGISTER, DATA_BYTES10, 8, 8) | ||
520 | + FIELD(TB_DW2_REGISTER, DATA_BYTES11, 0, 8) | ||
521 | +REG32(TB_DW3_REGISTER, 0x114) | ||
522 | + FIELD(TB_DW3_REGISTER, DATA_BYTES12, 24, 8) | ||
523 | + FIELD(TB_DW3_REGISTER, DATA_BYTES13, 16, 8) | ||
524 | + FIELD(TB_DW3_REGISTER, DATA_BYTES14, 8, 8) | ||
525 | + FIELD(TB_DW3_REGISTER, DATA_BYTES15, 0, 8) | ||
526 | +REG32(TB_DW4_REGISTER, 0x118) | ||
527 | + FIELD(TB_DW4_REGISTER, DATA_BYTES16, 24, 8) | ||
528 | + FIELD(TB_DW4_REGISTER, DATA_BYTES17, 16, 8) | ||
529 | + FIELD(TB_DW4_REGISTER, DATA_BYTES18, 8, 8) | ||
530 | + FIELD(TB_DW4_REGISTER, DATA_BYTES19, 0, 8) | ||
531 | +REG32(TB_DW5_REGISTER, 0x11c) | ||
532 | + FIELD(TB_DW5_REGISTER, DATA_BYTES20, 24, 8) | ||
533 | + FIELD(TB_DW5_REGISTER, DATA_BYTES21, 16, 8) | ||
534 | + FIELD(TB_DW5_REGISTER, DATA_BYTES22, 8, 8) | ||
535 | + FIELD(TB_DW5_REGISTER, DATA_BYTES23, 0, 8) | ||
536 | +REG32(TB_DW6_REGISTER, 0x120) | ||
537 | + FIELD(TB_DW6_REGISTER, DATA_BYTES24, 24, 8) | ||
538 | + FIELD(TB_DW6_REGISTER, DATA_BYTES25, 16, 8) | ||
539 | + FIELD(TB_DW6_REGISTER, DATA_BYTES26, 8, 8) | ||
540 | + FIELD(TB_DW6_REGISTER, DATA_BYTES27, 0, 8) | ||
541 | +REG32(TB_DW7_REGISTER, 0x124) | ||
542 | + FIELD(TB_DW7_REGISTER, DATA_BYTES28, 24, 8) | ||
543 | + FIELD(TB_DW7_REGISTER, DATA_BYTES29, 16, 8) | ||
544 | + FIELD(TB_DW7_REGISTER, DATA_BYTES30, 8, 8) | ||
545 | + FIELD(TB_DW7_REGISTER, DATA_BYTES31, 0, 8) | ||
546 | +REG32(TB_DW8_REGISTER, 0x128) | ||
547 | + FIELD(TB_DW8_REGISTER, DATA_BYTES32, 24, 8) | ||
548 | + FIELD(TB_DW8_REGISTER, DATA_BYTES33, 16, 8) | ||
549 | + FIELD(TB_DW8_REGISTER, DATA_BYTES34, 8, 8) | ||
550 | + FIELD(TB_DW8_REGISTER, DATA_BYTES35, 0, 8) | ||
551 | +REG32(TB_DW9_REGISTER, 0x12c) | ||
552 | + FIELD(TB_DW9_REGISTER, DATA_BYTES36, 24, 8) | ||
553 | + FIELD(TB_DW9_REGISTER, DATA_BYTES37, 16, 8) | ||
554 | + FIELD(TB_DW9_REGISTER, DATA_BYTES38, 8, 8) | ||
555 | + FIELD(TB_DW9_REGISTER, DATA_BYTES39, 0, 8) | ||
556 | +REG32(TB_DW10_REGISTER, 0x130) | ||
557 | + FIELD(TB_DW10_REGISTER, DATA_BYTES40, 24, 8) | ||
558 | + FIELD(TB_DW10_REGISTER, DATA_BYTES41, 16, 8) | ||
559 | + FIELD(TB_DW10_REGISTER, DATA_BYTES42, 8, 8) | ||
560 | + FIELD(TB_DW10_REGISTER, DATA_BYTES43, 0, 8) | ||
561 | +REG32(TB_DW11_REGISTER, 0x134) | ||
562 | + FIELD(TB_DW11_REGISTER, DATA_BYTES44, 24, 8) | ||
563 | + FIELD(TB_DW11_REGISTER, DATA_BYTES45, 16, 8) | ||
564 | + FIELD(TB_DW11_REGISTER, DATA_BYTES46, 8, 8) | ||
565 | + FIELD(TB_DW11_REGISTER, DATA_BYTES47, 0, 8) | ||
566 | +REG32(TB_DW12_REGISTER, 0x138) | ||
567 | + FIELD(TB_DW12_REGISTER, DATA_BYTES48, 24, 8) | ||
568 | + FIELD(TB_DW12_REGISTER, DATA_BYTES49, 16, 8) | ||
569 | + FIELD(TB_DW12_REGISTER, DATA_BYTES50, 8, 8) | ||
570 | + FIELD(TB_DW12_REGISTER, DATA_BYTES51, 0, 8) | ||
571 | +REG32(TB_DW13_REGISTER, 0x13c) | ||
572 | + FIELD(TB_DW13_REGISTER, DATA_BYTES52, 24, 8) | ||
573 | + FIELD(TB_DW13_REGISTER, DATA_BYTES53, 16, 8) | ||
574 | + FIELD(TB_DW13_REGISTER, DATA_BYTES54, 8, 8) | ||
575 | + FIELD(TB_DW13_REGISTER, DATA_BYTES55, 0, 8) | ||
576 | +REG32(TB_DW14_REGISTER, 0x140) | ||
577 | + FIELD(TB_DW14_REGISTER, DATA_BYTES56, 24, 8) | ||
578 | + FIELD(TB_DW14_REGISTER, DATA_BYTES57, 16, 8) | ||
579 | + FIELD(TB_DW14_REGISTER, DATA_BYTES58, 8, 8) | ||
580 | + FIELD(TB_DW14_REGISTER, DATA_BYTES59, 0, 8) | ||
581 | +REG32(TB_DW15_REGISTER, 0x144) | ||
582 | + FIELD(TB_DW15_REGISTER, DATA_BYTES60, 24, 8) | ||
583 | + FIELD(TB_DW15_REGISTER, DATA_BYTES61, 16, 8) | ||
584 | + FIELD(TB_DW15_REGISTER, DATA_BYTES62, 8, 8) | ||
585 | + FIELD(TB_DW15_REGISTER, DATA_BYTES63, 0, 8) | ||
586 | +REG32(AFMR_REGISTER, 0xa00) | ||
587 | + FIELD(AFMR_REGISTER, AMID, 21, 11) | ||
588 | + FIELD(AFMR_REGISTER, AMSRR, 20, 1) | ||
589 | + FIELD(AFMR_REGISTER, AMIDE, 19, 1) | ||
590 | + FIELD(AFMR_REGISTER, AMID_EXT, 1, 18) | ||
591 | + FIELD(AFMR_REGISTER, AMRTR, 0, 1) | ||
592 | +REG32(AFIR_REGISTER, 0xa04) | ||
593 | + FIELD(AFIR_REGISTER, AIID, 21, 11) | ||
594 | + FIELD(AFIR_REGISTER, AISRR, 20, 1) | ||
595 | + FIELD(AFIR_REGISTER, AIIDE, 19, 1) | ||
596 | + FIELD(AFIR_REGISTER, AIID_EXT, 1, 18) | ||
597 | + FIELD(AFIR_REGISTER, AIRTR, 0, 1) | ||
598 | +REG32(TXE_FIFO_TB_ID_REGISTER, 0x2000) | ||
599 | + FIELD(TXE_FIFO_TB_ID_REGISTER, ID, 21, 11) | ||
600 | + FIELD(TXE_FIFO_TB_ID_REGISTER, SRR_RTR_RRS, 20, 1) | ||
601 | + FIELD(TXE_FIFO_TB_ID_REGISTER, IDE, 19, 1) | ||
602 | + FIELD(TXE_FIFO_TB_ID_REGISTER, ID_EXT, 1, 18) | ||
603 | + FIELD(TXE_FIFO_TB_ID_REGISTER, RTR_RRS, 0, 1) | ||
604 | +REG32(TXE_FIFO_TB_DLC_REGISTER, 0x2004) | ||
605 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, DLC, 28, 4) | ||
606 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, FDF, 27, 1) | ||
607 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, BRS, 26, 1) | ||
608 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, ET, 24, 2) | ||
609 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, MM, 16, 8) | ||
610 | + FIELD(TXE_FIFO_TB_DLC_REGISTER, TIMESTAMP, 0, 16) | ||
611 | +REG32(RB_ID_REGISTER, 0x2100) | ||
612 | + FIELD(RB_ID_REGISTER, ID, 21, 11) | ||
613 | + FIELD(RB_ID_REGISTER, SRR_RTR_RRS, 20, 1) | ||
614 | + FIELD(RB_ID_REGISTER, IDE, 19, 1) | ||
615 | + FIELD(RB_ID_REGISTER, ID_EXT, 1, 18) | ||
616 | + FIELD(RB_ID_REGISTER, RTR_RRS, 0, 1) | ||
617 | +REG32(RB_DLC_REGISTER, 0x2104) | ||
618 | + FIELD(RB_DLC_REGISTER, DLC, 28, 4) | ||
619 | + FIELD(RB_DLC_REGISTER, FDF, 27, 1) | ||
620 | + FIELD(RB_DLC_REGISTER, BRS, 26, 1) | ||
621 | + FIELD(RB_DLC_REGISTER, ESI, 25, 1) | ||
622 | + FIELD(RB_DLC_REGISTER, MATCHED_FILTER_INDEX, 16, 5) | ||
623 | + FIELD(RB_DLC_REGISTER, TIMESTAMP, 0, 16) | ||
624 | +REG32(RB_DW0_REGISTER, 0x2108) | ||
625 | + FIELD(RB_DW0_REGISTER, DATA_BYTES0, 24, 8) | ||
626 | + FIELD(RB_DW0_REGISTER, DATA_BYTES1, 16, 8) | ||
627 | + FIELD(RB_DW0_REGISTER, DATA_BYTES2, 8, 8) | ||
628 | + FIELD(RB_DW0_REGISTER, DATA_BYTES3, 0, 8) | ||
629 | +REG32(RB_DW1_REGISTER, 0x210c) | ||
630 | + FIELD(RB_DW1_REGISTER, DATA_BYTES4, 24, 8) | ||
631 | + FIELD(RB_DW1_REGISTER, DATA_BYTES5, 16, 8) | ||
632 | + FIELD(RB_DW1_REGISTER, DATA_BYTES6, 8, 8) | ||
633 | + FIELD(RB_DW1_REGISTER, DATA_BYTES7, 0, 8) | ||
634 | +REG32(RB_DW2_REGISTER, 0x2110) | ||
635 | + FIELD(RB_DW2_REGISTER, DATA_BYTES8, 24, 8) | ||
636 | + FIELD(RB_DW2_REGISTER, DATA_BYTES9, 16, 8) | ||
637 | + FIELD(RB_DW2_REGISTER, DATA_BYTES10, 8, 8) | ||
638 | + FIELD(RB_DW2_REGISTER, DATA_BYTES11, 0, 8) | ||
639 | +REG32(RB_DW3_REGISTER, 0x2114) | ||
640 | + FIELD(RB_DW3_REGISTER, DATA_BYTES12, 24, 8) | ||
641 | + FIELD(RB_DW3_REGISTER, DATA_BYTES13, 16, 8) | ||
642 | + FIELD(RB_DW3_REGISTER, DATA_BYTES14, 8, 8) | ||
643 | + FIELD(RB_DW3_REGISTER, DATA_BYTES15, 0, 8) | ||
644 | +REG32(RB_DW4_REGISTER, 0x2118) | ||
645 | + FIELD(RB_DW4_REGISTER, DATA_BYTES16, 24, 8) | ||
646 | + FIELD(RB_DW4_REGISTER, DATA_BYTES17, 16, 8) | ||
647 | + FIELD(RB_DW4_REGISTER, DATA_BYTES18, 8, 8) | ||
648 | + FIELD(RB_DW4_REGISTER, DATA_BYTES19, 0, 8) | ||
649 | +REG32(RB_DW5_REGISTER, 0x211c) | ||
650 | + FIELD(RB_DW5_REGISTER, DATA_BYTES20, 24, 8) | ||
651 | + FIELD(RB_DW5_REGISTER, DATA_BYTES21, 16, 8) | ||
652 | + FIELD(RB_DW5_REGISTER, DATA_BYTES22, 8, 8) | ||
653 | + FIELD(RB_DW5_REGISTER, DATA_BYTES23, 0, 8) | ||
654 | +REG32(RB_DW6_REGISTER, 0x2120) | ||
655 | + FIELD(RB_DW6_REGISTER, DATA_BYTES24, 24, 8) | ||
656 | + FIELD(RB_DW6_REGISTER, DATA_BYTES25, 16, 8) | ||
657 | + FIELD(RB_DW6_REGISTER, DATA_BYTES26, 8, 8) | ||
658 | + FIELD(RB_DW6_REGISTER, DATA_BYTES27, 0, 8) | ||
659 | +REG32(RB_DW7_REGISTER, 0x2124) | ||
660 | + FIELD(RB_DW7_REGISTER, DATA_BYTES28, 24, 8) | ||
661 | + FIELD(RB_DW7_REGISTER, DATA_BYTES29, 16, 8) | ||
662 | + FIELD(RB_DW7_REGISTER, DATA_BYTES30, 8, 8) | ||
663 | + FIELD(RB_DW7_REGISTER, DATA_BYTES31, 0, 8) | ||
664 | +REG32(RB_DW8_REGISTER, 0x2128) | ||
665 | + FIELD(RB_DW8_REGISTER, DATA_BYTES32, 24, 8) | ||
666 | + FIELD(RB_DW8_REGISTER, DATA_BYTES33, 16, 8) | ||
667 | + FIELD(RB_DW8_REGISTER, DATA_BYTES34, 8, 8) | ||
668 | + FIELD(RB_DW8_REGISTER, DATA_BYTES35, 0, 8) | ||
669 | +REG32(RB_DW9_REGISTER, 0x212c) | ||
670 | + FIELD(RB_DW9_REGISTER, DATA_BYTES36, 24, 8) | ||
671 | + FIELD(RB_DW9_REGISTER, DATA_BYTES37, 16, 8) | ||
672 | + FIELD(RB_DW9_REGISTER, DATA_BYTES38, 8, 8) | ||
673 | + FIELD(RB_DW9_REGISTER, DATA_BYTES39, 0, 8) | ||
674 | +REG32(RB_DW10_REGISTER, 0x2130) | ||
675 | + FIELD(RB_DW10_REGISTER, DATA_BYTES40, 24, 8) | ||
676 | + FIELD(RB_DW10_REGISTER, DATA_BYTES41, 16, 8) | ||
677 | + FIELD(RB_DW10_REGISTER, DATA_BYTES42, 8, 8) | ||
678 | + FIELD(RB_DW10_REGISTER, DATA_BYTES43, 0, 8) | ||
679 | +REG32(RB_DW11_REGISTER, 0x2134) | ||
680 | + FIELD(RB_DW11_REGISTER, DATA_BYTES44, 24, 8) | ||
681 | + FIELD(RB_DW11_REGISTER, DATA_BYTES45, 16, 8) | ||
682 | + FIELD(RB_DW11_REGISTER, DATA_BYTES46, 8, 8) | ||
683 | + FIELD(RB_DW11_REGISTER, DATA_BYTES47, 0, 8) | ||
684 | +REG32(RB_DW12_REGISTER, 0x2138) | ||
685 | + FIELD(RB_DW12_REGISTER, DATA_BYTES48, 24, 8) | ||
686 | + FIELD(RB_DW12_REGISTER, DATA_BYTES49, 16, 8) | ||
687 | + FIELD(RB_DW12_REGISTER, DATA_BYTES50, 8, 8) | ||
688 | + FIELD(RB_DW12_REGISTER, DATA_BYTES51, 0, 8) | ||
689 | +REG32(RB_DW13_REGISTER, 0x213c) | ||
690 | + FIELD(RB_DW13_REGISTER, DATA_BYTES52, 24, 8) | ||
691 | + FIELD(RB_DW13_REGISTER, DATA_BYTES53, 16, 8) | ||
692 | + FIELD(RB_DW13_REGISTER, DATA_BYTES54, 8, 8) | ||
693 | + FIELD(RB_DW13_REGISTER, DATA_BYTES55, 0, 8) | ||
694 | +REG32(RB_DW14_REGISTER, 0x2140) | ||
695 | + FIELD(RB_DW14_REGISTER, DATA_BYTES56, 24, 8) | ||
696 | + FIELD(RB_DW14_REGISTER, DATA_BYTES57, 16, 8) | ||
697 | + FIELD(RB_DW14_REGISTER, DATA_BYTES58, 8, 8) | ||
698 | + FIELD(RB_DW14_REGISTER, DATA_BYTES59, 0, 8) | ||
699 | +REG32(RB_DW15_REGISTER, 0x2144) | ||
700 | + FIELD(RB_DW15_REGISTER, DATA_BYTES60, 24, 8) | ||
701 | + FIELD(RB_DW15_REGISTER, DATA_BYTES61, 16, 8) | ||
702 | + FIELD(RB_DW15_REGISTER, DATA_BYTES62, 8, 8) | ||
703 | + FIELD(RB_DW15_REGISTER, DATA_BYTES63, 0, 8) | ||
704 | +REG32(RB_ID_REGISTER_1, 0x4100) | ||
705 | + FIELD(RB_ID_REGISTER_1, ID, 21, 11) | ||
706 | + FIELD(RB_ID_REGISTER_1, SRR_RTR_RRS, 20, 1) | ||
707 | + FIELD(RB_ID_REGISTER_1, IDE, 19, 1) | ||
708 | + FIELD(RB_ID_REGISTER_1, ID_EXT, 1, 18) | ||
709 | + FIELD(RB_ID_REGISTER_1, RTR_RRS, 0, 1) | ||
710 | +REG32(RB_DLC_REGISTER_1, 0x4104) | ||
711 | + FIELD(RB_DLC_REGISTER_1, DLC, 28, 4) | ||
712 | + FIELD(RB_DLC_REGISTER_1, FDF, 27, 1) | ||
713 | + FIELD(RB_DLC_REGISTER_1, BRS, 26, 1) | ||
714 | + FIELD(RB_DLC_REGISTER_1, ESI, 25, 1) | ||
715 | + FIELD(RB_DLC_REGISTER_1, MATCHED_FILTER_INDEX, 16, 5) | ||
716 | + FIELD(RB_DLC_REGISTER_1, TIMESTAMP, 0, 16) | ||
717 | +REG32(RB0_DW0_REGISTER_1, 0x4108) | ||
718 | + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES0, 24, 8) | ||
719 | + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES1, 16, 8) | ||
720 | + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES2, 8, 8) | ||
721 | + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES3, 0, 8) | ||
722 | +REG32(RB_DW1_REGISTER_1, 0x410c) | ||
723 | + FIELD(RB_DW1_REGISTER_1, DATA_BYTES4, 24, 8) | ||
724 | + FIELD(RB_DW1_REGISTER_1, DATA_BYTES5, 16, 8) | ||
725 | + FIELD(RB_DW1_REGISTER_1, DATA_BYTES6, 8, 8) | ||
726 | + FIELD(RB_DW1_REGISTER_1, DATA_BYTES7, 0, 8) | ||
727 | +REG32(RB_DW2_REGISTER_1, 0x4110) | ||
728 | + FIELD(RB_DW2_REGISTER_1, DATA_BYTES8, 24, 8) | ||
729 | + FIELD(RB_DW2_REGISTER_1, DATA_BYTES9, 16, 8) | ||
730 | + FIELD(RB_DW2_REGISTER_1, DATA_BYTES10, 8, 8) | ||
731 | + FIELD(RB_DW2_REGISTER_1, DATA_BYTES11, 0, 8) | ||
732 | +REG32(RB_DW3_REGISTER_1, 0x4114) | ||
733 | + FIELD(RB_DW3_REGISTER_1, DATA_BYTES12, 24, 8) | ||
734 | + FIELD(RB_DW3_REGISTER_1, DATA_BYTES13, 16, 8) | ||
735 | + FIELD(RB_DW3_REGISTER_1, DATA_BYTES14, 8, 8) | ||
736 | + FIELD(RB_DW3_REGISTER_1, DATA_BYTES15, 0, 8) | ||
737 | +REG32(RB_DW4_REGISTER_1, 0x4118) | ||
738 | + FIELD(RB_DW4_REGISTER_1, DATA_BYTES16, 24, 8) | ||
739 | + FIELD(RB_DW4_REGISTER_1, DATA_BYTES17, 16, 8) | ||
740 | + FIELD(RB_DW4_REGISTER_1, DATA_BYTES18, 8, 8) | ||
741 | + FIELD(RB_DW4_REGISTER_1, DATA_BYTES19, 0, 8) | ||
742 | +REG32(RB_DW5_REGISTER_1, 0x411c) | ||
743 | + FIELD(RB_DW5_REGISTER_1, DATA_BYTES20, 24, 8) | ||
744 | + FIELD(RB_DW5_REGISTER_1, DATA_BYTES21, 16, 8) | ||
745 | + FIELD(RB_DW5_REGISTER_1, DATA_BYTES22, 8, 8) | ||
746 | + FIELD(RB_DW5_REGISTER_1, DATA_BYTES23, 0, 8) | ||
747 | +REG32(RB_DW6_REGISTER_1, 0x4120) | ||
748 | + FIELD(RB_DW6_REGISTER_1, DATA_BYTES24, 24, 8) | ||
749 | + FIELD(RB_DW6_REGISTER_1, DATA_BYTES25, 16, 8) | ||
750 | + FIELD(RB_DW6_REGISTER_1, DATA_BYTES26, 8, 8) | ||
751 | + FIELD(RB_DW6_REGISTER_1, DATA_BYTES27, 0, 8) | ||
752 | +REG32(RB_DW7_REGISTER_1, 0x4124) | ||
753 | + FIELD(RB_DW7_REGISTER_1, DATA_BYTES28, 24, 8) | ||
754 | + FIELD(RB_DW7_REGISTER_1, DATA_BYTES29, 16, 8) | ||
755 | + FIELD(RB_DW7_REGISTER_1, DATA_BYTES30, 8, 8) | ||
756 | + FIELD(RB_DW7_REGISTER_1, DATA_BYTES31, 0, 8) | ||
757 | +REG32(RB_DW8_REGISTER_1, 0x4128) | ||
758 | + FIELD(RB_DW8_REGISTER_1, DATA_BYTES32, 24, 8) | ||
759 | + FIELD(RB_DW8_REGISTER_1, DATA_BYTES33, 16, 8) | ||
760 | + FIELD(RB_DW8_REGISTER_1, DATA_BYTES34, 8, 8) | ||
761 | + FIELD(RB_DW8_REGISTER_1, DATA_BYTES35, 0, 8) | ||
762 | +REG32(RB_DW9_REGISTER_1, 0x412c) | ||
763 | + FIELD(RB_DW9_REGISTER_1, DATA_BYTES36, 24, 8) | ||
764 | + FIELD(RB_DW9_REGISTER_1, DATA_BYTES37, 16, 8) | ||
765 | + FIELD(RB_DW9_REGISTER_1, DATA_BYTES38, 8, 8) | ||
766 | + FIELD(RB_DW9_REGISTER_1, DATA_BYTES39, 0, 8) | ||
767 | +REG32(RB_DW10_REGISTER_1, 0x4130) | ||
768 | + FIELD(RB_DW10_REGISTER_1, DATA_BYTES40, 24, 8) | ||
769 | + FIELD(RB_DW10_REGISTER_1, DATA_BYTES41, 16, 8) | ||
770 | + FIELD(RB_DW10_REGISTER_1, DATA_BYTES42, 8, 8) | ||
771 | + FIELD(RB_DW10_REGISTER_1, DATA_BYTES43, 0, 8) | ||
772 | +REG32(RB_DW11_REGISTER_1, 0x4134) | ||
773 | + FIELD(RB_DW11_REGISTER_1, DATA_BYTES44, 24, 8) | ||
774 | + FIELD(RB_DW11_REGISTER_1, DATA_BYTES45, 16, 8) | ||
775 | + FIELD(RB_DW11_REGISTER_1, DATA_BYTES46, 8, 8) | ||
776 | + FIELD(RB_DW11_REGISTER_1, DATA_BYTES47, 0, 8) | ||
777 | +REG32(RB_DW12_REGISTER_1, 0x4138) | ||
778 | + FIELD(RB_DW12_REGISTER_1, DATA_BYTES48, 24, 8) | ||
779 | + FIELD(RB_DW12_REGISTER_1, DATA_BYTES49, 16, 8) | ||
780 | + FIELD(RB_DW12_REGISTER_1, DATA_BYTES50, 8, 8) | ||
781 | + FIELD(RB_DW12_REGISTER_1, DATA_BYTES51, 0, 8) | ||
782 | +REG32(RB_DW13_REGISTER_1, 0x413c) | ||
783 | + FIELD(RB_DW13_REGISTER_1, DATA_BYTES52, 24, 8) | ||
784 | + FIELD(RB_DW13_REGISTER_1, DATA_BYTES53, 16, 8) | ||
785 | + FIELD(RB_DW13_REGISTER_1, DATA_BYTES54, 8, 8) | ||
786 | + FIELD(RB_DW13_REGISTER_1, DATA_BYTES55, 0, 8) | ||
787 | +REG32(RB_DW14_REGISTER_1, 0x4140) | ||
788 | + FIELD(RB_DW14_REGISTER_1, DATA_BYTES56, 24, 8) | ||
789 | + FIELD(RB_DW14_REGISTER_1, DATA_BYTES57, 16, 8) | ||
790 | + FIELD(RB_DW14_REGISTER_1, DATA_BYTES58, 8, 8) | ||
791 | + FIELD(RB_DW14_REGISTER_1, DATA_BYTES59, 0, 8) | ||
792 | +REG32(RB_DW15_REGISTER_1, 0x4144) | ||
793 | + FIELD(RB_DW15_REGISTER_1, DATA_BYTES60, 24, 8) | ||
794 | + FIELD(RB_DW15_REGISTER_1, DATA_BYTES61, 16, 8) | ||
795 | + FIELD(RB_DW15_REGISTER_1, DATA_BYTES62, 8, 8) | ||
796 | + FIELD(RB_DW15_REGISTER_1, DATA_BYTES63, 0, 8) | ||
797 | + | ||
798 | +static uint8_t canfd_dlc_array[8] = {8, 12, 16, 20, 24, 32, 48, 64}; | ||
799 | + | ||
800 | +static void canfd_update_irq(XlnxVersalCANFDState *s) | ||
801 | +{ | ||
802 | + unsigned int irq = s->regs[R_INTERRUPT_STATUS_REGISTER] & | ||
803 | + s->regs[R_INTERRUPT_ENABLE_REGISTER]; | ||
804 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
805 | + | ||
806 | + /* RX watermark interrupts. */ | ||
807 | + if (ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL) > | ||
808 | + ARRAY_FIELD_EX32(s->regs, RX_FIFO_WATERMARK_REGISTER, RXFWM)) { | ||
809 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL, 1); | ||
810 | + } | ||
811 | + | ||
812 | + if (ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1) > | ||
813 | + ARRAY_FIELD_EX32(s->regs, RX_FIFO_WATERMARK_REGISTER, RXFWM_1)) { | ||
814 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL_1, 1); | ||
815 | + } | ||
816 | + | ||
817 | + /* TX watermark interrupt. */ | ||
818 | + if (ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL) > | ||
819 | + ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_WATERMARK_REGISTER, TXE_FWM)) { | ||
820 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEWMFLL, 1); | ||
821 | + } | ||
822 | + | ||
823 | + trace_xlnx_canfd_update_irq(path, s->regs[R_INTERRUPT_STATUS_REGISTER], | ||
824 | + s->regs[R_INTERRUPT_ENABLE_REGISTER], irq); | ||
825 | + | ||
826 | + qemu_set_irq(s->irq_canfd_int, irq); | ||
827 | +} | ||
828 | + | ||
829 | +static void canfd_ier_post_write(RegisterInfo *reg, uint64_t val64) | ||
830 | +{ | ||
831 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
832 | + | ||
833 | + canfd_update_irq(s); | ||
834 | +} | ||
835 | + | ||
836 | +static uint64_t canfd_icr_pre_write(RegisterInfo *reg, uint64_t val64) | ||
837 | +{ | ||
838 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
839 | + uint32_t val = val64; | ||
840 | + | ||
841 | + s->regs[R_INTERRUPT_STATUS_REGISTER] &= ~val; | ||
842 | + | ||
843 | + /* | ||
844 | + * RXBOFLW_BI field is automatically cleared to default if RXBOFLW bit is | ||
845 | + * cleared in ISR. | ||
846 | + */ | ||
847 | + if (ARRAY_FIELD_EX32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL_1)) { | ||
848 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXBOFLW_BI, 0); | ||
849 | + } | ||
850 | + | ||
851 | + canfd_update_irq(s); | ||
852 | + | ||
853 | + return 0; | ||
854 | +} | ||
855 | + | ||
856 | +static void canfd_config_reset(XlnxVersalCANFDState *s) | ||
857 | +{ | ||
858 | + | ||
859 | + unsigned int i; | ||
860 | + | ||
861 | + /* Reset all the configuration registers. */ | ||
862 | + for (i = 0; i < R_RX_FIFO_WATERMARK_REGISTER; ++i) { | ||
863 | + register_reset(&s->reg_info[i]); | ||
864 | + } | ||
865 | + | ||
866 | + canfd_update_irq(s); | ||
867 | +} | ||
868 | + | ||
869 | +static void canfd_config_mode(XlnxVersalCANFDState *s) | ||
870 | +{ | ||
871 | + register_reset(&s->reg_info[R_ERROR_COUNTER_REGISTER]); | ||
872 | + register_reset(&s->reg_info[R_ERROR_STATUS_REGISTER]); | ||
873 | + register_reset(&s->reg_info[R_STATUS_REGISTER]); | ||
874 | + | ||
875 | + /* Put XlnxVersalCANFDState in configuration mode. */ | ||
876 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 1); | ||
877 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, 0); | ||
878 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, 0); | ||
879 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, BSOFF, 0); | ||
880 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ERROR_BIT, 0); | ||
881 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW, 0); | ||
882 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 0); | ||
883 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 0); | ||
884 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 0); | ||
885 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ARBLST, 0); | ||
886 | + | ||
887 | + /* Clear the time stamp. */ | ||
888 | + ptimer_transaction_begin(s->canfd_timer); | ||
889 | + ptimer_set_count(s->canfd_timer, 0); | ||
890 | + ptimer_transaction_commit(s->canfd_timer); | ||
891 | + | ||
892 | + canfd_update_irq(s); | ||
893 | +} | ||
894 | + | ||
895 | +static void update_status_register_mode_bits(XlnxVersalCANFDState *s) | ||
896 | +{ | ||
897 | + bool sleep_status = ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP); | ||
898 | + bool sleep_mode = ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP); | ||
899 | + /* Wake up interrupt bit. */ | ||
900 | + bool wakeup_irq_val = !sleep_mode && sleep_status; | ||
901 | + /* Sleep interrupt bit. */ | ||
902 | + bool sleep_irq_val = sleep_mode && !sleep_status; | ||
903 | + | ||
904 | + /* Clear previous core mode status bits. */ | ||
905 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 0); | ||
906 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 0); | ||
907 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 0); | ||
908 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 0); | ||
909 | + | ||
910 | + /* set current mode bit and generate irqs accordingly. */ | ||
911 | + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, LBACK)) { | ||
912 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 1); | ||
913 | + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP)) { | ||
914 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 1); | ||
915 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, | ||
916 | + sleep_irq_val); | ||
917 | + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { | ||
918 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 1); | ||
919 | + } else { | ||
920 | + /* If all bits are zero, XlnxVersalCANFDState is set in normal mode. */ | ||
921 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 1); | ||
922 | + /* Set wakeup interrupt bit. */ | ||
923 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, | ||
924 | + wakeup_irq_val); | ||
925 | + } | ||
926 | + | ||
927 | + /* Put the CANFD in error active state. */ | ||
928 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ESTAT, 1); | ||
929 | + | ||
930 | + canfd_update_irq(s); | ||
931 | +} | ||
932 | + | ||
933 | +static uint64_t canfd_msr_pre_write(RegisterInfo *reg, uint64_t val64) | ||
934 | +{ | ||
935 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
936 | + uint32_t val = val64; | ||
937 | + uint8_t multi_mode = 0; | ||
938 | + | ||
939 | + /* | ||
940 | + * Multiple mode set check, to make sure the user does not enable | ||
941 | + * more than one mode at a time. | ||
942 | + */ | ||
943 | + multi_mode = FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK) + | ||
944 | + FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP) + | ||
945 | + FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP); | ||
946 | + | ||
947 | + if (multi_mode > 1) { | ||
948 | + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to configure several modes" | ||
949 | + " simultaneously. One mode will be selected according to" | ||
950 | + " their priority: LBACK > SLEEP > SNOOP.\n"); | ||
951 | + } | ||
952 | + | ||
953 | + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { | ||
954 | + /* In configuration mode, any mode can be selected. */ | ||
955 | + s->regs[R_MODE_SELECT_REGISTER] = val; | ||
956 | + } else { | ||
957 | + bool sleep_mode_bit = FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP); | ||
958 | + | ||
959 | + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, sleep_mode_bit); | ||
960 | + | ||
961 | + if (FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK)) { | ||
962 | + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to set LBACK mode" | ||
963 | + " without setting CEN bit as 0\n"); | ||
964 | + } else if (FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP)) { | ||
965 | + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to set SNOOP mode" | ||
966 | + " without setting CEN bit as 0\n"); | ||
967 | + } | ||
968 | + | ||
969 | + update_status_register_mode_bits(s); | ||
970 | + } | ||
971 | + | ||
972 | + return s->regs[R_MODE_SELECT_REGISTER]; | ||
973 | +} | ||
974 | + | ||
975 | +static void canfd_exit_sleep_mode(XlnxVersalCANFDState *s) | ||
976 | +{ | ||
977 | + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, 0); | ||
978 | + update_status_register_mode_bits(s); | ||
979 | +} | ||
980 | + | ||
981 | +static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame, | ||
982 | + uint32_t reg_num) | ||
983 | +{ | ||
984 | + uint32_t i = 0; | ||
985 | + uint32_t j = 0; | ||
986 | + uint32_t val = 0; | ||
987 | + uint32_t dlc_reg_val = 0; | ||
988 | + uint32_t dlc_value = 0; | ||
989 | + | ||
990 | + /* Check that reg_num is within the TX register space. */ | ||
991 | + assert(reg_num <= R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE * | ||
992 | + s->cfg.tx_fifo)); | ||
993 | + | ||
994 | + dlc_reg_val = s->regs[reg_num + 1]; | ||
995 | + dlc_value = FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, DLC); | ||
996 | + | ||
997 | + frame->can_id = s->regs[reg_num]; | ||
998 | + | ||
999 | + if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, FDF)) { | ||
1000 | + /* | ||
1001 | + * CANFD frame. | ||
1002 | + * Convert the 4-bit DLC value (0 to 15) to a plain byte length | ||
1003 | + * (0 to 64) so the frame works with SocketCAN. | ||
1004 | + * In an actual CANFD frame the DLC field can't be more than 0xF. | ||
1005 | + * Conversion table for DLC to plain length: | ||
1006 | + * | ||
1007 | + * DLC Plain Length | ||
1008 | + * 0 - 8 0 - 8 | ||
1009 | + * 9 9 - 12 | ||
1010 | + * 10 13 - 16 | ||
1011 | + * 11 17 - 20 | ||
1012 | + * 12 21 - 24 | ||
1013 | + * 13 25 - 32 | ||
1014 | + * 14 33 - 48 | ||
1015 | + * 15 49 - 64 | ||
1016 | + */ | ||
1017 | + | ||
1018 | + frame->flags = QEMU_CAN_FRMF_TYPE_FD; | ||
1019 | + | ||
1020 | + if (dlc_value < 8) { | ||
1021 | + frame->can_dlc = dlc_value; | ||
1022 | + } else { | ||
1023 | + assert((dlc_value - 8) < ARRAY_SIZE(canfd_dlc_array)); | ||
1024 | + frame->can_dlc = canfd_dlc_array[dlc_value - 8]; | ||
1025 | + } | ||
1026 | + } else { | ||
1027 | + /* | ||
1028 | + * FD Format bit not set, which means this is a classic CAN frame. | ||
1029 | + * Conversion table for classic CAN: | ||
1030 | + * | ||
1031 | + * DLC Plain Length | ||
1032 | + * 0 - 7 0 - 7 | ||
1033 | + * 8 - 15 8 | ||
1034 | + */ | ||
1035 | + | ||
1036 | + if (dlc_value > 8) { | ||
1037 | + frame->can_dlc = 8; | ||
1038 | + qemu_log_mask(LOG_GUEST_ERROR, "Maximum DLC value for Classic CAN" | ||
1039 | + " frame is 8. Only 8 byte data will be sent.\n"); | ||
1040 | + } else { | ||
1041 | + frame->can_dlc = dlc_value; | ||
1042 | + } | ||
1043 | + } | ||
1044 | + | ||
1045 | + for (j = 0; j < frame->can_dlc; j++) { | ||
1046 | + val = 8 * i; | ||
1047 | + | ||
1048 | + frame->data[j] = extract32(s->regs[reg_num + 2 + (j / 4)], val, 8); | ||
1049 | + i++; | ||
1050 | + | ||
1051 | + if (i % 4 == 0) { | ||
1052 | + i = 0; | ||
1053 | + } | ||
1054 | + } | ||
1055 | +} | ||
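The two conversion tables in the comments above boil down to a small lookup;
the helper below is a standalone sketch for illustration only (canfd_dlc_to_len
is a hypothetical name, not part of the patch), reusing the same table as
canfd_dlc_array:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static const uint8_t fd_dlc_to_len[8] = {8, 12, 16, 20, 24, 32, 48, 64};

    /* Convert a 4-bit DLC field to a plain byte length, as SocketCAN expects. */
    static uint8_t canfd_dlc_to_len(uint8_t dlc, bool fd_frame)
    {
        if (!fd_frame) {
            return dlc > 8 ? 8 : dlc;       /* classic CAN: DLC 8..15 all mean 8 bytes */
        }
        return dlc < 8 ? dlc : fd_dlc_to_len[dlc - 8];  /* FD: DLC 8..15 -> 8..64 bytes */
    }

    int main(void)
    {
        assert(canfd_dlc_to_len(9, true) == 12);    /* FD DLC 9 -> 12 bytes  */
        assert(canfd_dlc_to_len(15, true) == 64);   /* FD DLC 15 -> 64 bytes */
        assert(canfd_dlc_to_len(12, false) == 8);   /* classic CAN caps at 8 */
        return 0;
    }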
1056 | + | ||
1057 | +static void process_cancellation_requests(XlnxVersalCANFDState *s) | ||
1058 | +{ | ||
1059 | + uint32_t clear_mask = s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] & | ||
1060 | + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER]; | ||
1061 | + | ||
1062 | + s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] &= ~clear_mask; | ||
1063 | + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER] &= ~clear_mask; | ||
1064 | + | ||
1065 | + canfd_update_irq(s); | ||
1066 | +} | ||
1067 | + | ||
1068 | +static void store_rx_sequential(XlnxVersalCANFDState *s, | ||
1069 | + const qemu_can_frame *frame, | ||
1070 | + uint32_t fill_level, uint32_t read_index, | ||
1071 | + uint32_t store_location, uint8_t rx_fifo, | ||
1072 | + bool rx_fifo_id, uint8_t filter_index) | ||
1073 | +{ | ||
1074 | + int i; | ||
1075 | + bool is_canfd_frame; | ||
1076 | + uint8_t dlc = frame->can_dlc; | ||
1077 | + uint8_t rx_reg_num = 0; | ||
1078 | + uint32_t dlc_reg_val = 0; | ||
1079 | + uint32_t data_reg_val = 0; | ||
1080 | + | ||
1081 | + /* Getting RX0/1 fill level */ | ||
1082 | + if ((fill_level) > rx_fifo - 1) { | ||
1083 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1084 | + | ||
1085 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: RX%d Buffer is full. Discarding the" | ||
1086 | + " message\n", path, rx_fifo_id); | ||
1087 | + | ||
1088 | + /* Set the corresponding RF buffer overflow interrupt. */ | ||
1089 | + if (rx_fifo_id == 0) { | ||
1090 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW, 1); | ||
1091 | + } else { | ||
1092 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 1); | ||
1093 | + } | ||
1094 | + } else { | ||
1095 | + uint16_t rx_timestamp = CANFD_TIMER_MAX - | ||
1096 | + ptimer_get_count(s->canfd_timer); | ||
1097 | + | ||
1098 | + if (rx_timestamp == 0xFFFF) { | ||
1099 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TSCNT_OFLW, 1); | ||
1100 | + } else { | ||
1101 | + ARRAY_FIELD_DP32(s->regs, TIMESTAMP_REGISTER, TIMESTAMP_CNT, | ||
1102 | + rx_timestamp); | ||
1103 | + } | ||
1104 | + | ||
1105 | + if (rx_fifo_id == 0) { | ||
1106 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL, | ||
1107 | + fill_level + 1); | ||
1108 | + assert(store_location <= | ||
1109 | + R_RB_ID_REGISTER + (s->cfg.rx0_fifo * | ||
1110 | + NUM_REGS_PER_MSG_SPACE)); | ||
1111 | + } else { | ||
1112 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1, | ||
1113 | + fill_level + 1); | ||
1114 | + assert(store_location <= | ||
1115 | + R_RB_ID_REGISTER_1 + (s->cfg.rx1_fifo * | ||
1116 | + NUM_REGS_PER_MSG_SPACE)); | ||
1117 | + } | ||
1118 | + | ||
1119 | + s->regs[store_location] = frame->can_id; | ||
1120 | + | ||
1121 | + dlc = frame->can_dlc; | ||
1122 | + | ||
1123 | + if (frame->flags == QEMU_CAN_FRMF_TYPE_FD) { | ||
1124 | + is_canfd_frame = true; | ||
1125 | + | ||
1126 | + /* Store dlc value in Xilinx specific format. */ | ||
1127 | + for (i = 0; i < ARRAY_SIZE(canfd_dlc_array); i++) { | ||
1128 | + if (canfd_dlc_array[i] == frame->can_dlc) { | ||
1129 | + dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, 8 + i); | ||
1130 | + } | ||
1131 | + } | ||
1132 | + } else { | ||
1133 | + is_canfd_frame = false; | ||
1134 | + | ||
1135 | + if (frame->can_dlc > 8) { | ||
1136 | + dlc = 8; | ||
1137 | + } | ||
1138 | + | ||
1139 | + dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, dlc); | ||
1140 | + } | ||
1141 | + | ||
1142 | + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, is_canfd_frame); | ||
1143 | + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, TIMESTAMP, rx_timestamp); | ||
1144 | + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, MATCHED_FILTER_INDEX, | ||
1145 | + filter_index); | ||
1146 | + s->regs[store_location + 1] = dlc_reg_val; | ||
1147 | + | ||
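| + /* | ||
| + * Pack the payload into the RX buffer data words: frame byte 0 is | ||
| + * placed in the DATA_BYTES3 field and byte 3 in DATA_BYTES0, and a | ||
| + * partially filled word is flushed after the loop. | ||
| + */ | ||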
1148 | + for (i = 0; i < dlc; i++) { | ||
1149 | + /* Registers are 4 bytes wide but each frame->data element is 1 byte. */ | ||
1150 | + switch (i % 4) { | ||
1151 | + case 0: | ||
1152 | + rx_reg_num = i / 4; | ||
1153 | + | ||
1154 | + data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3, | ||
1155 | + frame->data[i]); | ||
1156 | + break; | ||
1157 | + case 1: | ||
1158 | + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2, | ||
1159 | + frame->data[i]); | ||
1160 | + break; | ||
1161 | + case 2: | ||
1162 | + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1, | ||
1163 | + frame->data[i]); | ||
1164 | + break; | ||
1165 | + case 3: | ||
1166 | + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0, | ||
1167 | + frame->data[i]); | ||
1168 | + /* | ||
1169 | + * Last byte of the word, which means all 4 bytes are ready to | ||
1170 | + * be stored in one RX register. | ||
1171 | + */ | ||
1172 | + s->regs[store_location + rx_reg_num + 2] = data_reg_val; | ||
1173 | + break; | ||
61 | + } | 1174 | + } |
62 | + } | 1175 | + } |
63 | env->cp15.esr_el[2] = env->exception.syndrome; | 1176 | + |
64 | } | 1177 | + if (i % 4) { |
65 | 1178 | + /* | |
1179 | + * If the DLC is not a multiple of 4, the last partial word is not | ||
1180 | + * stored to the RX FIFO in the switch above. Store the remaining bytes here. | ||
1181 | + */ | ||
1182 | + s->regs[store_location + rx_reg_num + 2] = data_reg_val; | ||
1183 | + } | ||
1184 | + | ||
1185 | + /* Set the RXOK interrupt. */ | ||
1186 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); | ||
1187 | + } | ||
1188 | +} | ||
1189 | + | ||
1190 | +static void update_rx_sequential(XlnxVersalCANFDState *s, | ||
1191 | + const qemu_can_frame *frame) | ||
1192 | +{ | ||
1193 | + bool filter_pass = false; | ||
1194 | + uint8_t filter_index = 0; | ||
1195 | + int i; | ||
1196 | + int filter_partition = ARRAY_FIELD_EX32(s->regs, | ||
1197 | + RX_FIFO_WATERMARK_REGISTER, RXFP); | ||
1198 | + uint32_t store_location; | ||
1199 | + uint32_t fill_level; | ||
1200 | + uint32_t read_index; | ||
1201 | + uint8_t store_index = 0; | ||
1202 | + g_autofree char *path = NULL; | ||
1203 | + /* | ||
1204 | + * If all UAF bits are set to 0, then received messages are not stored | ||
1205 | + * in the RX buffers. | ||
1206 | + */ | ||
1207 | + if (s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]) { | ||
1208 | + uint32_t acceptance_filter_status = | ||
1209 | + s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]; | ||
1210 | + | ||
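| + /* | ||
| + * Each of the 32 acceptance filters pairs an AFMR mask with an AFIR | ||
| + * ID: a frame matches when the masked incoming ID equals the masked | ||
| + * AFIR value for the standard ID bits and, when AMIDE is set, for | ||
| + * the extended ID bits as well. | ||
| + */ | ||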
1211 | + for (i = 0; i < 32; i++) { | ||
1212 | + if (acceptance_filter_status & 0x1) { | ||
1213 | + uint32_t msg_id_masked = s->regs[R_AFMR_REGISTER + 2 * i] & | ||
1214 | + frame->can_id; | ||
1215 | + uint32_t afir_id_masked = s->regs[R_AFIR_REGISTER + 2 * i] & | ||
1216 | + s->regs[R_AFMR_REGISTER + 2 * i]; | ||
1217 | + uint16_t std_msg_id_masked = FIELD_EX32(msg_id_masked, | ||
1218 | + AFIR_REGISTER, AIID); | ||
1219 | + uint16_t std_afir_id_masked = FIELD_EX32(afir_id_masked, | ||
1220 | + AFIR_REGISTER, AIID); | ||
1221 | + uint32_t ext_msg_id_masked = FIELD_EX32(msg_id_masked, | ||
1222 | + AFIR_REGISTER, | ||
1223 | + AIID_EXT); | ||
1224 | + uint32_t ext_afir_id_masked = FIELD_EX32(afir_id_masked, | ||
1225 | + AFIR_REGISTER, | ||
1226 | + AIID_EXT); | ||
1227 | + bool ext_ide = FIELD_EX32(s->regs[R_AFMR_REGISTER + 2 * i], | ||
1228 | + AFMR_REGISTER, AMIDE); | ||
1229 | + | ||
1230 | + if (std_msg_id_masked == std_afir_id_masked) { | ||
1231 | + if (ext_ide) { | ||
1232 | + /* Extended message ID. */ | ||
1233 | + if (ext_msg_id_masked == ext_afir_id_masked) { | ||
1234 | + filter_pass = true; | ||
1235 | + filter_index = i; | ||
1236 | + | ||
1237 | + break; | ||
1238 | + } | ||
1239 | + } else { | ||
1240 | + /* Standard message ID. */ | ||
1241 | + filter_pass = true; | ||
1242 | + filter_index = i; | ||
1243 | + | ||
1244 | + break; | ||
1245 | + } | ||
1246 | + } | ||
1247 | + } | ||
1248 | + acceptance_filter_status >>= 1; | ||
1249 | + } | ||
1250 | + } | ||
1251 | + | ||
1252 | + if (!filter_pass) { | ||
1253 | + path = object_get_canonical_path(OBJECT(s)); | ||
1254 | + | ||
1255 | + trace_xlnx_canfd_rx_fifo_filter_reject(path, frame->can_id, | ||
1256 | + frame->can_dlc); | ||
1257 | + } else { | ||
1258 | + if (filter_index <= filter_partition) { | ||
1259 | + fill_level = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL); | ||
1260 | + read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, RI); | ||
1261 | + store_index = read_index + fill_level; | ||
1262 | + | ||
1263 | + if (read_index == s->cfg.rx0_fifo - 1) { | ||
1264 | + /* | ||
1265 | + * When ri reaches its maximum (s->cfg.rx0_fifo - 1) the FIFO wraps | ||
1266 | + * around, so reset ri to 0x0. | ||
1267 | + */ | ||
1268 | + read_index = 0; | ||
1269 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI, | ||
1270 | + read_index); | ||
1271 | + } | ||
1272 | + | ||
1273 | + if (store_index > s->cfg.rx0_fifo - 1) { | ||
1274 | + store_index -= s->cfg.rx0_fifo - 1; | ||
1275 | + } | ||
1276 | + | ||
1277 | + store_location = R_RB_ID_REGISTER + | ||
1278 | + (store_index * NUM_REGS_PER_MSG_SPACE); | ||
1279 | + | ||
1280 | + store_rx_sequential(s, frame, fill_level, read_index, | ||
1281 | + store_location, s->cfg.rx0_fifo, 0, | ||
1282 | + filter_index); | ||
1283 | + } else { | ||
1284 | + /* The filter partition directs this message to RX FIFO 1. */ | ||
1285 | + fill_level = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, | ||
1286 | + FL_1); | ||
1287 | + read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, | ||
1288 | + RI_1); | ||
1289 | + store_index = read_index + fill_level; | ||
1290 | + | ||
1291 | + if (read_index == s->cfg.rx1_fifo - 1) { | ||
1292 | + /* | ||
1293 | + * When ri reaches its maximum (s->cfg.rx1_fifo - 1) the FIFO wraps | ||
1294 | + * around, so reset ri to 0x0. | ||
1295 | + */ | ||
1296 | + read_index = 0; | ||
1297 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1, | ||
1298 | + read_index); | ||
1299 | + } | ||
1300 | + | ||
1301 | + if (store_index > s->cfg.rx1_fifo - 1) { | ||
1302 | + store_index -= s->cfg.rx1_fifo - 1; | ||
1303 | + } | ||
1304 | + | ||
1305 | + store_location = R_RB_ID_REGISTER_1 + | ||
1306 | + (store_index * NUM_REGS_PER_MSG_SPACE); | ||
1307 | + | ||
1308 | + store_rx_sequential(s, frame, fill_level, read_index, | ||
1309 | + store_location, s->cfg.rx1_fifo, 1, | ||
1310 | + filter_index); | ||
1311 | + } | ||
1312 | + | ||
1313 | + path = object_get_canonical_path(OBJECT(s)); | ||
1314 | + | ||
1315 | + trace_xlnx_canfd_rx_data(path, frame->can_id, frame->can_dlc, | ||
1316 | + frame->flags); | ||
1317 | + canfd_update_irq(s); | ||
1318 | + } | ||
1319 | +} | ||
1320 | + | ||
1321 | +static bool tx_ready_check(XlnxVersalCANFDState *s) | ||
1322 | +{ | ||
1323 | + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) { | ||
1324 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1325 | + | ||
1326 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" | ||
1327 | + " XlnxVersalCANFDState is in reset mode\n", path); | ||
1328 | + | ||
1329 | + return false; | ||
1330 | + } | ||
1331 | + | ||
1332 | + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { | ||
1333 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1334 | + | ||
1335 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" | ||
1336 | + " XlnxVersalCANFDState is in configuration mode." | ||
1337 | + " Reset the core so operations can start fresh\n", | ||
1338 | + path); | ||
1339 | + return false; | ||
1340 | + } | ||
1341 | + | ||
1342 | + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { | ||
1343 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1344 | + | ||
1345 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" | ||
1346 | + " XlnxVersalCANFDState is in SNOOP MODE\n", | ||
1347 | + path); | ||
1348 | + return false; | ||
1349 | + } | ||
1350 | + | ||
1351 | + return true; | ||
1352 | +} | ||
1353 | + | ||
1354 | +static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid) | ||
1355 | +{ | ||
1356 | + /* | ||
1357 | + * If the EFC bit in the DLC register is set, store an event for this | ||
1358 | + * transmitted message, with a timestamp, in the TX event FIFO. | ||
1359 | + */ | ||
1360 | + uint32_t dlc_reg_val = 0; | ||
1361 | + | ||
1362 | + if (FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, EFC)) { | ||
1363 | + uint8_t dlc_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, | ||
1364 | + DLC); | ||
1365 | + bool fdf_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, | ||
1366 | + FDF); | ||
1367 | + bool brs_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, | ||
1368 | + BRS); | ||
1369 | + uint8_t mm_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, | ||
1370 | + MM); | ||
1371 | + uint8_t fill_level = ARRAY_FIELD_EX32(s->regs, | ||
1372 | + TX_EVENT_FIFO_STATUS_REGISTER, | ||
1373 | + TXE_FL); | ||
1374 | + uint8_t read_index = ARRAY_FIELD_EX32(s->regs, | ||
1375 | + TX_EVENT_FIFO_STATUS_REGISTER, | ||
1376 | + TXE_RI); | ||
1377 | + uint8_t store_index = fill_level + read_index; | ||
1378 | + | ||
1379 | + if ((fill_level) > s->cfg.tx_fifo - 1) { | ||
1380 | + qemu_log_mask(LOG_GUEST_ERROR, "TX Event Buffer is full." | ||
1381 | + " Discarding the message\n"); | ||
1382 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEOFLW, 1); | ||
1383 | + } else { | ||
1384 | + if (read_index == s->cfg.tx_fifo - 1) { | ||
1385 | + /* | ||
1386 | + * When ri reaches its maximum (s->cfg.tx_fifo - 1) the FIFO wraps | ||
1387 | + * around, so reset ri to 0x0. | ||
1388 | + */ | ||
1389 | + read_index = 0; | ||
1390 | + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, | ||
1391 | + read_index); | ||
1392 | + } | ||
1393 | + | ||
1394 | + if (store_index > s->cfg.tx_fifo - 1) { | ||
1395 | + store_index -= s->cfg.tx_fifo - 1; | ||
1396 | + } | ||
1397 | + | ||
1398 | + assert(store_index < s->cfg.tx_fifo); | ||
1399 | + | ||
1400 | + uint32_t tx_event_reg0_id = R_TXE_FIFO_TB_ID_REGISTER + | ||
1401 | + (store_index * 2); | ||
1402 | + | ||
1403 | + /* Store message ID in TX event register. */ | ||
1404 | + s->regs[tx_event_reg0_id] = s->regs[tb0_regid]; | ||
1405 | + | ||
1406 | + uint16_t tx_timestamp = CANFD_TIMER_MAX - | ||
1407 | + ptimer_get_count(s->canfd_timer); | ||
1408 | + | ||
1409 | + /* Store DLC with time stamp in DLC regs. */ | ||
1410 | + dlc_reg_val = FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, DLC, dlc_val); | ||
1411 | + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, FDF, | ||
1412 | + fdf_val); | ||
1413 | + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, BRS, | ||
1414 | + brs_val); | ||
1415 | + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, ET, 0x3); | ||
1416 | + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, MM, mm_val); | ||
1417 | + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, TIMESTAMP, | ||
1418 | + tx_timestamp); | ||
1419 | + s->regs[tx_event_reg0_id + 1] = dlc_reg_val; | ||
1420 | + | ||
1421 | + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, | ||
1422 | + fill_level + 1); | ||
1423 | + } | ||
1424 | + } | ||
1425 | +} | ||
1426 | + | ||
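| + /* | ||
| + * Comparator used to sort pending TX buffers by their TB ID register | ||
| + * value, so that numerically lower IDs are transmitted first, | ||
| + * mirroring CAN bus arbitration priority. | ||
| + */ | ||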
1427 | +static gint g_cmp_ids(gconstpointer data1, gconstpointer data2) | ||
1428 | +{ | ||
1429 | + tx_ready_reg_info *tx_reg_1 = (tx_ready_reg_info *) data1; | ||
1430 | + tx_ready_reg_info *tx_reg_2 = (tx_ready_reg_info *) data2; | ||
1431 | + | ||
1432 | + return tx_reg_1->can_id - tx_reg_2->can_id; | ||
1433 | +} | ||
1434 | + | ||
1435 | +static void free_list(GSList *list) | ||
1436 | +{ | ||
1437 | + GSList *iterator = NULL; | ||
1438 | + | ||
1439 | + for (iterator = list; iterator != NULL; iterator = iterator->next) { | ||
1440 | + g_free((tx_ready_reg_info *)iterator->data); | ||
1441 | + } | ||
1442 | + | ||
1443 | + g_slist_free(list); | ||
1444 | + | ||
1445 | + return; | ||
1446 | +} | ||
1447 | + | ||
1448 | +static GSList *prepare_tx_data(XlnxVersalCANFDState *s) | ||
1449 | +{ | ||
1450 | + uint8_t i = 0; | ||
1451 | + GSList *list = NULL; | ||
1452 | + uint32_t reg_num = 0; | ||
1453 | + uint32_t reg_ready = s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER]; | ||
1454 | + | ||
1455 | + /* First find the messages which are ready for transmission. */ | ||
1456 | + for (i = 0; i < s->cfg.tx_fifo; i++) { | ||
1457 | + if (reg_ready & 1) { | ||
1458 | + reg_num = R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE * i); | ||
1459 | + tx_ready_reg_info *temp = g_new(tx_ready_reg_info, 1); | ||
1460 | + | ||
1461 | + temp->can_id = s->regs[reg_num]; | ||
1462 | + temp->reg_num = reg_num; | ||
1463 | + list = g_slist_prepend(list, temp); | ||
1464 | + list = g_slist_sort(list, g_cmp_ids); | ||
1465 | + } | ||
1466 | + | ||
1467 | + reg_ready >>= 1; | ||
1468 | + } | ||
1469 | + | ||
1470 | + s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] = 0; | ||
1471 | + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER] = 0; | ||
1472 | + | ||
1473 | + return list; | ||
1474 | +} | ||
1475 | + | ||
1476 | +static void transfer_data(XlnxVersalCANFDState *s) | ||
1477 | +{ | ||
1478 | + bool canfd_tx = tx_ready_check(s); | ||
1479 | + GSList *list, *iterator = NULL; | ||
1480 | + qemu_can_frame frame; | ||
1481 | + | ||
1482 | + if (!canfd_tx) { | ||
1483 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1484 | + | ||
1485 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller not enabled for data" | ||
1486 | + " transfer\n", path); | ||
1487 | + return; | ||
1488 | + } | ||
1489 | + | ||
1490 | + list = prepare_tx_data(s); | ||
1491 | + if (list == NULL) { | ||
1492 | + return; | ||
1493 | + } | ||
1494 | + | ||
1495 | + for (iterator = list; iterator != NULL; iterator = iterator->next) { | ||
1496 | + regs2frame(s, &frame, | ||
1497 | + ((tx_ready_reg_info *)iterator->data)->reg_num); | ||
1498 | + | ||
1499 | + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) { | ||
1500 | + update_rx_sequential(s, &frame); | ||
1501 | + tx_fifo_stamp(s, ((tx_ready_reg_info *)iterator->data)->reg_num); | ||
1502 | + | ||
1503 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); | ||
1504 | + } else { | ||
1505 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1506 | + | ||
1507 | + trace_xlnx_canfd_tx_data(path, frame.can_id, frame.can_dlc, | ||
1508 | + frame.flags); | ||
1509 | + can_bus_client_send(&s->bus_client, &frame, 1); | ||
1510 | + tx_fifo_stamp(s, | ||
1511 | + ((tx_ready_reg_info *)iterator->data)->reg_num); | ||
1512 | + | ||
1513 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXRRS, 1); | ||
1514 | + | ||
1515 | + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) { | ||
1516 | + canfd_exit_sleep_mode(s); | ||
1517 | + } | ||
1518 | + } | ||
1519 | + } | ||
1520 | + | ||
1521 | + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 1); | ||
1522 | + free_list(list); | ||
1523 | + | ||
1524 | + canfd_update_irq(s); | ||
1525 | +} | ||
1526 | + | ||
1527 | +static uint64_t canfd_srr_pre_write(RegisterInfo *reg, uint64_t val64) | ||
1528 | +{ | ||
1529 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1530 | + uint32_t val = val64; | ||
1531 | + | ||
1532 | + ARRAY_FIELD_DP32(s->regs, SOFTWARE_RESET_REGISTER, CEN, | ||
1533 | + FIELD_EX32(val, SOFTWARE_RESET_REGISTER, CEN)); | ||
1534 | + | ||
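| + /* | ||
| + * SRST set: perform a software reset and drop back into config mode. | ||
| + * CEN clear: stay in config mode. | ||
| + * Otherwise: leave config mode and start normal operation. | ||
| + */ | ||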
1535 | + if (FIELD_EX32(val, SOFTWARE_RESET_REGISTER, SRST)) { | ||
1536 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1537 | + | ||
1538 | + trace_xlnx_canfd_reset(path, val64); | ||
1539 | + | ||
1540 | + /* First the core performs a software reset, then it enters config mode. */ | ||
1541 | + canfd_config_reset(s); | ||
1542 | + } else if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { | ||
1543 | + canfd_config_mode(s); | ||
1544 | + } else { | ||
1545 | + /* | ||
1546 | + * Leave config mode. The XlnxVersalCANFD core will now enter Normal, | ||
1547 | + * Sleep, Snoop or Loopback mode, depending on the LBACK, SLEEP and | ||
1548 | + * SNOOP register states. | ||
1549 | + */ | ||
1550 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 0); | ||
1551 | + | ||
1552 | + ptimer_transaction_begin(s->canfd_timer); | ||
1553 | + ptimer_set_count(s->canfd_timer, 0); | ||
1554 | + ptimer_transaction_commit(s->canfd_timer); | ||
1555 | + update_status_register_mode_bits(s); | ||
1556 | + transfer_data(s); | ||
1557 | + } | ||
1558 | + | ||
1559 | + return s->regs[R_SOFTWARE_RESET_REGISTER]; | ||
1560 | +} | ||
1561 | + | ||
1562 | +static uint64_t filter_mask(RegisterInfo *reg, uint64_t val64) | ||
1563 | +{ | ||
1564 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1565 | + uint32_t reg_idx = (reg->access->addr) / 4; | ||
1566 | + uint32_t val = val64; | ||
1567 | + uint32_t filter_offset = (reg_idx - R_AFMR_REGISTER) / 2; | ||
1568 | + | ||
1569 | + if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & | ||
1570 | + (1 << filter_offset))) { | ||
1571 | + s->regs[reg_idx] = val; | ||
1572 | + } else { | ||
1573 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1574 | + | ||
1575 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", | ||
1576 | + path, filter_offset + 1); | ||
1577 | + } | ||
1578 | + | ||
1579 | + return s->regs[reg_idx]; | ||
1580 | +} | ||
1581 | + | ||
1582 | +static uint64_t filter_id(RegisterInfo *reg, uint64_t val64) | ||
1583 | +{ | ||
1584 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1585 | + hwaddr reg_idx = (reg->access->addr) / 4; | ||
1586 | + uint32_t val = val64; | ||
1587 | + uint32_t filter_offset = (reg_idx - R_AFIR_REGISTER) / 2; | ||
1588 | + | ||
1589 | + if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & | ||
1590 | + (1 << filter_offset))) { | ||
1591 | + s->regs[reg_idx] = val; | ||
1592 | + } else { | ||
1593 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1594 | + | ||
1595 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", | ||
1596 | + path, filter_offset + 1); | ||
1597 | + } | ||
1598 | + | ||
1599 | + return s->regs[reg_idx]; | ||
1600 | +} | ||
1601 | + | ||
1602 | +static uint64_t canfd_tx_fifo_status_prew(RegisterInfo *reg, uint64_t val64) | ||
1603 | +{ | ||
1604 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1605 | + uint32_t val = val64; | ||
1606 | + uint8_t read_ind = 0; | ||
1607 | + uint8_t fill_ind = ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, | ||
1608 | + TXE_FL); | ||
1609 | + | ||
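| + /* | ||
| + * Writing 1 to TXE_IRI pops one entry from the TX event FIFO: the | ||
| + * read index advances (wrapping at the FIFO size) and the fill level | ||
| + * drops by one. | ||
| + */ | ||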
1610 | + if (FIELD_EX32(val, TX_EVENT_FIFO_STATUS_REGISTER, TXE_IRI) && fill_ind) { | ||
1611 | + read_ind = ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, | ||
1612 | + TXE_RI) + 1; | ||
1613 | + | ||
1614 | + if (read_ind > s->cfg.tx_fifo - 1) { | ||
1615 | + read_ind = 0; | ||
1616 | + } | ||
1617 | + | ||
1618 | + /* | ||
1619 | + * Increase the read index by 1 and decrease the fill level by 1. | ||
1620 | + */ | ||
1621 | + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, | ||
1622 | + read_ind); | ||
1623 | + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, | ||
1624 | + fill_ind - 1); | ||
1625 | + } | ||
1626 | + | ||
1627 | + return s->regs[R_TX_EVENT_FIFO_STATUS_REGISTER]; | ||
1628 | +} | ||
1629 | + | ||
1630 | +static uint64_t canfd_rx_fifo_status_prew(RegisterInfo *reg, uint64_t val64) | ||
1631 | +{ | ||
1632 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1633 | + uint32_t val = val64; | ||
1634 | + uint8_t read_ind = 0; | ||
1635 | + uint8_t fill_ind = 0; | ||
1636 | + | ||
1637 | + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, IRI)) { | ||
1638 | + /* If the FL index is zero, setting the IRI bit has no effect. */ | ||
1639 | + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL) != 0) { | ||
1640 | + read_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, RI) + 1; | ||
1641 | + | ||
1642 | + if (read_ind > s->cfg.rx0_fifo - 1) { | ||
1643 | + read_ind = 0; | ||
1644 | + } | ||
1645 | + | ||
1646 | + fill_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL) - 1; | ||
1647 | + | ||
1648 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI, read_ind); | ||
1649 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL, fill_ind); | ||
1650 | + } | ||
1651 | + } | ||
1652 | + | ||
1653 | + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, IRI_1)) { | ||
1654 | + /* If the FL_1 index is zero, setting the IRI_1 bit has no effect. */ | ||
1655 | + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL_1) != 0) { | ||
1656 | + read_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, RI_1) + 1; | ||
1657 | + | ||
1658 | + if (read_ind > s->cfg.rx1_fifo - 1) { | ||
1659 | + read_ind = 0; | ||
1660 | + } | ||
1661 | + | ||
1662 | + fill_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL_1) - 1; | ||
1663 | + | ||
1664 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1, read_ind); | ||
1665 | + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1, fill_ind); | ||
1666 | + } | ||
1667 | + } | ||
1668 | + | ||
1669 | + return s->regs[R_RX_FIFO_STATUS_REGISTER]; | ||
1670 | +} | ||
1671 | + | ||
1672 | +static uint64_t canfd_tsr_pre_write(RegisterInfo *reg, uint64_t val64) | ||
1673 | +{ | ||
1674 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1675 | + uint32_t val = val64; | ||
1676 | + | ||
1677 | + if (FIELD_EX32(val, TIMESTAMP_REGISTER, CTS)) { | ||
1678 | + ARRAY_FIELD_DP32(s->regs, TIMESTAMP_REGISTER, TIMESTAMP_CNT, 0); | ||
1679 | + ptimer_transaction_begin(s->canfd_timer); | ||
1680 | + ptimer_set_count(s->canfd_timer, 0); | ||
1681 | + ptimer_transaction_commit(s->canfd_timer); | ||
1682 | + } | ||
1683 | + | ||
1684 | + return 0; | ||
1685 | +} | ||
1686 | + | ||
1687 | +static uint64_t canfd_trr_reg_prew(RegisterInfo *reg, uint64_t val64) | ||
1688 | +{ | ||
1689 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1690 | + | ||
1691 | + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { | ||
1692 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
1693 | + | ||
1694 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is in SNOOP mode." | ||
1695 | + " tx_ready_register will stay in reset mode\n", path); | ||
1696 | + return 0; | ||
1697 | + } else { | ||
1698 | + return val64; | ||
1699 | + } | ||
1700 | +} | ||
1701 | + | ||
1702 | +static void canfd_trr_reg_postw(RegisterInfo *reg, uint64_t val64) | ||
1703 | +{ | ||
1704 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1705 | + | ||
1706 | + transfer_data(s); | ||
1707 | +} | ||
1708 | + | ||
1709 | +static void canfd_cancel_reg_postw(RegisterInfo *reg, uint64_t val64) | ||
1710 | +{ | ||
1711 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1712 | + | ||
1713 | + process_cancellation_requests(s); | ||
1714 | +} | ||
1715 | + | ||
1716 | +static uint64_t canfd_write_check_prew(RegisterInfo *reg, uint64_t val64) | ||
1717 | +{ | ||
1718 | + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); | ||
1719 | + uint32_t val = val64; | ||
1720 | + | ||
1721 | + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { | ||
1722 | + return val; | ||
1723 | + } | ||
1724 | + return 0; | ||
1725 | +} | ||
1726 | + | ||
1727 | +static const RegisterAccessInfo canfd_tx_regs[] = { | ||
1728 | + { .name = "TB_ID_REGISTER", .addr = A_TB_ID_REGISTER, | ||
1729 | + },{ .name = "TB0_DLC_REGISTER", .addr = A_TB0_DLC_REGISTER, | ||
1730 | + },{ .name = "TB_DW0_REGISTER", .addr = A_TB_DW0_REGISTER, | ||
1731 | + },{ .name = "TB_DW1_REGISTER", .addr = A_TB_DW1_REGISTER, | ||
1732 | + },{ .name = "TB_DW2_REGISTER", .addr = A_TB_DW2_REGISTER, | ||
1733 | + },{ .name = "TB_DW3_REGISTER", .addr = A_TB_DW3_REGISTER, | ||
1734 | + },{ .name = "TB_DW4_REGISTER", .addr = A_TB_DW4_REGISTER, | ||
1735 | + },{ .name = "TB_DW5_REGISTER", .addr = A_TB_DW5_REGISTER, | ||
1736 | + },{ .name = "TB_DW6_REGISTER", .addr = A_TB_DW6_REGISTER, | ||
1737 | + },{ .name = "TB_DW7_REGISTER", .addr = A_TB_DW7_REGISTER, | ||
1738 | + },{ .name = "TB_DW8_REGISTER", .addr = A_TB_DW8_REGISTER, | ||
1739 | + },{ .name = "TB_DW9_REGISTER", .addr = A_TB_DW9_REGISTER, | ||
1740 | + },{ .name = "TB_DW10_REGISTER", .addr = A_TB_DW10_REGISTER, | ||
1741 | + },{ .name = "TB_DW11_REGISTER", .addr = A_TB_DW11_REGISTER, | ||
1742 | + },{ .name = "TB_DW12_REGISTER", .addr = A_TB_DW12_REGISTER, | ||
1743 | + },{ .name = "TB_DW13_REGISTER", .addr = A_TB_DW13_REGISTER, | ||
1744 | + },{ .name = "TB_DW14_REGISTER", .addr = A_TB_DW14_REGISTER, | ||
1745 | + },{ .name = "TB_DW15_REGISTER", .addr = A_TB_DW15_REGISTER, | ||
1746 | + } | ||
1747 | +}; | ||
1748 | + | ||
1749 | +static const RegisterAccessInfo canfd_rx0_regs[] = { | ||
1750 | + { .name = "RB_ID_REGISTER", .addr = A_RB_ID_REGISTER, | ||
1751 | + .ro = 0xffffffff, | ||
1752 | + },{ .name = "RB_DLC_REGISTER", .addr = A_RB_DLC_REGISTER, | ||
1753 | + .ro = 0xfe1fffff, | ||
1754 | + },{ .name = "RB_DW0_REGISTER", .addr = A_RB_DW0_REGISTER, | ||
1755 | + .ro = 0xffffffff, | ||
1756 | + },{ .name = "RB_DW1_REGISTER", .addr = A_RB_DW1_REGISTER, | ||
1757 | + .ro = 0xffffffff, | ||
1758 | + },{ .name = "RB_DW2_REGISTER", .addr = A_RB_DW2_REGISTER, | ||
1759 | + .ro = 0xffffffff, | ||
1760 | + },{ .name = "RB_DW3_REGISTER", .addr = A_RB_DW3_REGISTER, | ||
1761 | + .ro = 0xffffffff, | ||
1762 | + },{ .name = "RB_DW4_REGISTER", .addr = A_RB_DW4_REGISTER, | ||
1763 | + .ro = 0xffffffff, | ||
1764 | + },{ .name = "RB_DW5_REGISTER", .addr = A_RB_DW5_REGISTER, | ||
1765 | + .ro = 0xffffffff, | ||
1766 | + },{ .name = "RB_DW6_REGISTER", .addr = A_RB_DW6_REGISTER, | ||
1767 | + .ro = 0xffffffff, | ||
1768 | + },{ .name = "RB_DW7_REGISTER", .addr = A_RB_DW7_REGISTER, | ||
1769 | + .ro = 0xffffffff, | ||
1770 | + },{ .name = "RB_DW8_REGISTER", .addr = A_RB_DW8_REGISTER, | ||
1771 | + .ro = 0xffffffff, | ||
1772 | + },{ .name = "RB_DW9_REGISTER", .addr = A_RB_DW9_REGISTER, | ||
1773 | + .ro = 0xffffffff, | ||
1774 | + },{ .name = "RB_DW10_REGISTER", .addr = A_RB_DW10_REGISTER, | ||
1775 | + .ro = 0xffffffff, | ||
1776 | + },{ .name = "RB_DW11_REGISTER", .addr = A_RB_DW11_REGISTER, | ||
1777 | + .ro = 0xffffffff, | ||
1778 | + },{ .name = "RB_DW12_REGISTER", .addr = A_RB_DW12_REGISTER, | ||
1779 | + .ro = 0xffffffff, | ||
1780 | + },{ .name = "RB_DW13_REGISTER", .addr = A_RB_DW13_REGISTER, | ||
1781 | + .ro = 0xffffffff, | ||
1782 | + },{ .name = "RB_DW14_REGISTER", .addr = A_RB_DW14_REGISTER, | ||
1783 | + .ro = 0xffffffff, | ||
1784 | + },{ .name = "RB_DW15_REGISTER", .addr = A_RB_DW15_REGISTER, | ||
1785 | + .ro = 0xffffffff, | ||
1786 | + } | ||
1787 | +}; | ||
1788 | + | ||
1789 | +static const RegisterAccessInfo canfd_rx1_regs[] = { | ||
1790 | + { .name = "RB_ID_REGISTER_1", .addr = A_RB_ID_REGISTER_1, | ||
1791 | + .ro = 0xffffffff, | ||
1792 | + },{ .name = "RB_DLC_REGISTER_1", .addr = A_RB_DLC_REGISTER_1, | ||
1793 | + .ro = 0xfe1fffff, | ||
1794 | + },{ .name = "RB0_DW0_REGISTER_1", .addr = A_RB0_DW0_REGISTER_1, | ||
1795 | + .ro = 0xffffffff, | ||
1796 | + },{ .name = "RB_DW1_REGISTER_1", .addr = A_RB_DW1_REGISTER_1, | ||
1797 | + .ro = 0xffffffff, | ||
1798 | + },{ .name = "RB_DW2_REGISTER_1", .addr = A_RB_DW2_REGISTER_1, | ||
1799 | + .ro = 0xffffffff, | ||
1800 | + },{ .name = "RB_DW3_REGISTER_1", .addr = A_RB_DW3_REGISTER_1, | ||
1801 | + .ro = 0xffffffff, | ||
1802 | + },{ .name = "RB_DW4_REGISTER_1", .addr = A_RB_DW4_REGISTER_1, | ||
1803 | + .ro = 0xffffffff, | ||
1804 | + },{ .name = "RB_DW5_REGISTER_1", .addr = A_RB_DW5_REGISTER_1, | ||
1805 | + .ro = 0xffffffff, | ||
1806 | + },{ .name = "RB_DW6_REGISTER_1", .addr = A_RB_DW6_REGISTER_1, | ||
1807 | + .ro = 0xffffffff, | ||
1808 | + },{ .name = "RB_DW7_REGISTER_1", .addr = A_RB_DW7_REGISTER_1, | ||
1809 | + .ro = 0xffffffff, | ||
1810 | + },{ .name = "RB_DW8_REGISTER_1", .addr = A_RB_DW8_REGISTER_1, | ||
1811 | + .ro = 0xffffffff, | ||
1812 | + },{ .name = "RB_DW9_REGISTER_1", .addr = A_RB_DW9_REGISTER_1, | ||
1813 | + .ro = 0xffffffff, | ||
1814 | + },{ .name = "RB_DW10_REGISTER_1", .addr = A_RB_DW10_REGISTER_1, | ||
1815 | + .ro = 0xffffffff, | ||
1816 | + },{ .name = "RB_DW11_REGISTER_1", .addr = A_RB_DW11_REGISTER_1, | ||
1817 | + .ro = 0xffffffff, | ||
1818 | + },{ .name = "RB_DW12_REGISTER_1", .addr = A_RB_DW12_REGISTER_1, | ||
1819 | + .ro = 0xffffffff, | ||
1820 | + },{ .name = "RB_DW13_REGISTER_1", .addr = A_RB_DW13_REGISTER_1, | ||
1821 | + .ro = 0xffffffff, | ||
1822 | + },{ .name = "RB_DW14_REGISTER_1", .addr = A_RB_DW14_REGISTER_1, | ||
1823 | + .ro = 0xffffffff, | ||
1824 | + },{ .name = "RB_DW15_REGISTER_1", .addr = A_RB_DW15_REGISTER_1, | ||
1825 | + .ro = 0xffffffff, | ||
1826 | + } | ||
1827 | +}; | ||
1828 | + | ||
1829 | +/* Acceptance filter registers. */ | ||
1830 | +static const RegisterAccessInfo canfd_af_regs[] = { | ||
1831 | + { .name = "AFMR_REGISTER", .addr = A_AFMR_REGISTER, | ||
1832 | + .pre_write = filter_mask, | ||
1833 | + },{ .name = "AFIR_REGISTER", .addr = A_AFIR_REGISTER, | ||
1834 | + .pre_write = filter_id, | ||
1835 | + } | ||
1836 | +}; | ||
1837 | + | ||
1838 | +static const RegisterAccessInfo canfd_txe_regs[] = { | ||
1839 | + { .name = "TXE_FIFO_TB_ID_REGISTER", .addr = A_TXE_FIFO_TB_ID_REGISTER, | ||
1840 | + .ro = 0xffffffff, | ||
1841 | + },{ .name = "TXE_FIFO_TB_DLC_REGISTER", .addr = A_TXE_FIFO_TB_DLC_REGISTER, | ||
1842 | + .ro = 0xffffffff, | ||
1843 | + } | ||
1844 | +}; | ||
1845 | + | ||
1846 | +static const RegisterAccessInfo canfd_regs_info[] = { | ||
1847 | + { .name = "SOFTWARE_RESET_REGISTER", .addr = A_SOFTWARE_RESET_REGISTER, | ||
1848 | + .pre_write = canfd_srr_pre_write, | ||
1849 | + },{ .name = "MODE_SELECT_REGISTER", .addr = A_MODE_SELECT_REGISTER, | ||
1850 | + .pre_write = canfd_msr_pre_write, | ||
1851 | + },{ .name = "ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER", | ||
1852 | + .addr = A_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, | ||
1853 | + .pre_write = canfd_write_check_prew, | ||
1854 | + },{ .name = "ARBITRATION_PHASE_BIT_TIMING_REGISTER", | ||
1855 | + .addr = A_ARBITRATION_PHASE_BIT_TIMING_REGISTER, | ||
1856 | + .pre_write = canfd_write_check_prew, | ||
1857 | + },{ .name = "ERROR_COUNTER_REGISTER", .addr = A_ERROR_COUNTER_REGISTER, | ||
1858 | + .ro = 0xffff, | ||
1859 | + },{ .name = "ERROR_STATUS_REGISTER", .addr = A_ERROR_STATUS_REGISTER, | ||
1860 | + .w1c = 0xf1f, | ||
1861 | + },{ .name = "STATUS_REGISTER", .addr = A_STATUS_REGISTER, | ||
1862 | + .reset = 0x1, | ||
1863 | + .ro = 0x7f17ff, | ||
1864 | + },{ .name = "INTERRUPT_STATUS_REGISTER", | ||
1865 | + .addr = A_INTERRUPT_STATUS_REGISTER, | ||
1866 | + .ro = 0xffffff7f, | ||
1867 | + },{ .name = "INTERRUPT_ENABLE_REGISTER", | ||
1868 | + .addr = A_INTERRUPT_ENABLE_REGISTER, | ||
1869 | + .post_write = canfd_ier_post_write, | ||
1870 | + },{ .name = "INTERRUPT_CLEAR_REGISTER", | ||
1871 | + .addr = A_INTERRUPT_CLEAR_REGISTER, .pre_write = canfd_icr_pre_write, | ||
1872 | + },{ .name = "TIMESTAMP_REGISTER", .addr = A_TIMESTAMP_REGISTER, | ||
1873 | + .ro = 0xffff0000, | ||
1874 | + .pre_write = canfd_tsr_pre_write, | ||
1875 | + },{ .name = "DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER", | ||
1876 | + .addr = A_DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, | ||
1877 | + .pre_write = canfd_write_check_prew, | ||
1878 | + },{ .name = "DATA_PHASE_BIT_TIMING_REGISTER", | ||
1879 | + .addr = A_DATA_PHASE_BIT_TIMING_REGISTER, | ||
1880 | + .pre_write = canfd_write_check_prew, | ||
1881 | + },{ .name = "TX_BUFFER_READY_REQUEST_REGISTER", | ||
1882 | + .addr = A_TX_BUFFER_READY_REQUEST_REGISTER, | ||
1883 | + .pre_write = canfd_trr_reg_prew, | ||
1884 | + .post_write = canfd_trr_reg_postw, | ||
1885 | + },{ .name = "INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER", | ||
1886 | + .addr = A_INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, | ||
1887 | + },{ .name = "TX_BUFFER_CANCEL_REQUEST_REGISTER", | ||
1888 | + .addr = A_TX_BUFFER_CANCEL_REQUEST_REGISTER, | ||
1889 | + .post_write = canfd_cancel_reg_postw, | ||
1890 | + },{ .name = "INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER", | ||
1891 | + .addr = A_INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, | ||
1892 | + },{ .name = "TX_EVENT_FIFO_STATUS_REGISTER", | ||
1893 | + .addr = A_TX_EVENT_FIFO_STATUS_REGISTER, | ||
1894 | + .ro = 0x3f1f, .pre_write = canfd_tx_fifo_status_prew, | ||
1895 | + },{ .name = "TX_EVENT_FIFO_WATERMARK_REGISTER", | ||
1896 | + .addr = A_TX_EVENT_FIFO_WATERMARK_REGISTER, | ||
1897 | + .reset = 0xf, | ||
1898 | + .pre_write = canfd_write_check_prew, | ||
1899 | + },{ .name = "ACCEPTANCE_FILTER_CONTROL_REGISTER", | ||
1900 | + .addr = A_ACCEPTANCE_FILTER_CONTROL_REGISTER, | ||
1901 | + },{ .name = "RX_FIFO_STATUS_REGISTER", .addr = A_RX_FIFO_STATUS_REGISTER, | ||
1902 | + .ro = 0x7f3f7f3f, .pre_write = canfd_rx_fifo_status_prew, | ||
1903 | + },{ .name = "RX_FIFO_WATERMARK_REGISTER", | ||
1904 | + .addr = A_RX_FIFO_WATERMARK_REGISTER, | ||
1905 | + .reset = 0x1f0f0f, | ||
1906 | + .pre_write = canfd_write_check_prew, | ||
1907 | + } | ||
1908 | +}; | ||
1909 | + | ||
1910 | +static void xlnx_versal_canfd_ptimer_cb(void *opaque) | ||
1911 | +{ | ||
1912 | + /* No action required on the timer rollover. */ | ||
1913 | +} | ||
1914 | + | ||
1915 | +static const MemoryRegionOps canfd_ops = { | ||
1916 | + .read = register_read_memory, | ||
1917 | + .write = register_write_memory, | ||
1918 | + .endianness = DEVICE_LITTLE_ENDIAN, | ||
1919 | + .valid = { | ||
1920 | + .min_access_size = 4, | ||
1921 | + .max_access_size = 4, | ||
1922 | + }, | ||
1923 | +}; | ||
1924 | + | ||
1925 | +static void canfd_reset(DeviceState *dev) | ||
1926 | +{ | ||
1927 | + XlnxVersalCANFDState *s = XILINX_CANFD(dev); | ||
1928 | + unsigned int i; | ||
1929 | + | ||
1930 | + for (i = 0; i < ARRAY_SIZE(s->reg_info); ++i) { | ||
1931 | + register_reset(&s->reg_info[i]); | ||
1932 | + } | ||
1933 | + | ||
1934 | + ptimer_transaction_begin(s->canfd_timer); | ||
1935 | + ptimer_set_count(s->canfd_timer, 0); | ||
1936 | + ptimer_transaction_commit(s->canfd_timer); | ||
1937 | +} | ||
1938 | + | ||
1939 | +static bool can_xilinx_canfd_receive(CanBusClientState *client) | ||
1940 | +{ | ||
1941 | + XlnxVersalCANFDState *s = container_of(client, XlnxVersalCANFDState, | ||
1942 | + bus_client); | ||
1943 | + | ||
1944 | + bool reset_state = ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST); | ||
1945 | + bool can_enabled = ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN); | ||
1946 | + | ||
1947 | + return !reset_state && can_enabled; | ||
1948 | +} | ||
1949 | + | ||
1950 | +static ssize_t canfd_xilinx_receive(CanBusClientState *client, | ||
1951 | + const qemu_can_frame *buf, | ||
1952 | + size_t buf_size) | ||
1953 | +{ | ||
1954 | + XlnxVersalCANFDState *s = container_of(client, XlnxVersalCANFDState, | ||
1955 | + bus_client); | ||
1956 | + const qemu_can_frame *frame = buf; | ||
1957 | + | ||
1958 | + assert(buf_size > 0); | ||
1959 | + | ||
1960 | + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) { | ||
1961 | + /* | ||
1962 | + * XlnxVersalCANFDState will not participate in normal bus communication | ||
1963 | + * and does not receive any messages transmitted by other CAN nodes. | ||
1964 | + */ | ||
1965 | + return 1; | ||
1966 | + } | ||
1967 | + | ||
1968 | + /* Update the status register to show that a message is being received. */ | ||
1969 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, BBSY, 1); | ||
1970 | + | ||
1971 | + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) { | ||
1972 | + /* Snoop mode: just store the data; no response is sent back. */ | ||
1973 | + update_rx_sequential(s, frame); | ||
1974 | + } else { | ||
1975 | + if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP))) { | ||
1976 | + /* | ||
1977 | + * XlnxVersalCANFDState is in sleep mode. Any data on the bus will | ||
1978 | + * bring it to the wake-up state. | ||
1979 | + */ | ||
1980 | + canfd_exit_sleep_mode(s); | ||
1981 | + } | ||
1982 | + | ||
1983 | + update_rx_sequential(s, frame); | ||
1984 | + } | ||
1985 | + | ||
1986 | + /* Message processing done. Update the status back to !busy. */ | ||
1987 | + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, BBSY, 0); | ||
1988 | + return 1; | ||
1989 | +} | ||
1990 | + | ||
1991 | +static CanBusClientInfo canfd_xilinx_bus_client_info = { | ||
1992 | + .can_receive = can_xilinx_canfd_receive, | ||
1993 | + .receive = canfd_xilinx_receive, | ||
1994 | +}; | ||
1995 | + | ||
1996 | +static int xlnx_canfd_connect_to_bus(XlnxVersalCANFDState *s, | ||
1997 | + CanBusState *bus) | ||
1998 | +{ | ||
1999 | + s->bus_client.info = &canfd_xilinx_bus_client_info; | ||
2000 | + | ||
2001 | + return can_bus_insert_client(bus, &s->bus_client); | ||
2002 | +} | ||
2003 | + | ||
2004 | +#define NUM_REG_PER_AF ARRAY_SIZE(canfd_af_regs) | ||
2005 | +#define NUM_AF 32 | ||
2006 | +#define NUM_REG_PER_TXE ARRAY_SIZE(canfd_txe_regs) | ||
2007 | +#define NUM_TXE 32 | ||
2008 | + | ||
2009 | +static int canfd_populate_regarray(XlnxVersalCANFDState *s, | ||
2010 | + RegisterInfoArray *r_array, int pos, | ||
2011 | + const RegisterAccessInfo *rae, | ||
2012 | + int num_rae) | ||
2013 | +{ | ||
2014 | + int i; | ||
2015 | + | ||
2016 | + for (i = 0; i < num_rae; i++) { | ||
2017 | + int index = rae[i].addr / 4; | ||
2018 | + RegisterInfo *r = &s->reg_info[index]; | ||
2019 | + | ||
2020 | + object_initialize(r, sizeof(*r), TYPE_REGISTER); | ||
2021 | + | ||
2022 | + *r = (RegisterInfo) { | ||
2023 | + .data = &s->regs[index], | ||
2024 | + .data_size = sizeof(uint32_t), | ||
2025 | + .access = &rae[i], | ||
2026 | + .opaque = OBJECT(s), | ||
2027 | + }; | ||
2028 | + | ||
2029 | + r_array->r[i + pos] = r; | ||
2030 | + } | ||
2031 | + return i + pos; | ||
2032 | +} | ||
2033 | + | ||
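| + /* | ||
| + * Replicate a template register description 'num_template_to_copy' | ||
| + * times, appending the copy index to each register name and advancing | ||
| + * each address by the size of one template block per copy. | ||
| + */ | ||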
2034 | +static void canfd_create_rai(RegisterAccessInfo *rai_array, | ||
2035 | + const RegisterAccessInfo *canfd_regs, | ||
2036 | + int template_rai_array_sz, | ||
2037 | + int num_template_to_copy) | ||
2038 | +{ | ||
2039 | + int i; | ||
2040 | + int reg_num; | ||
2041 | + | ||
2042 | + for (reg_num = 0; reg_num < num_template_to_copy; reg_num++) { | ||
2043 | + int pos = reg_num * template_rai_array_sz; | ||
2044 | + | ||
2045 | + memcpy(rai_array + pos, canfd_regs, | ||
2046 | + template_rai_array_sz * sizeof(RegisterAccessInfo)); | ||
2047 | + | ||
2048 | + for (i = 0; i < template_rai_array_sz; i++) { | ||
2049 | + const char *name = canfd_regs[i].name; | ||
2050 | + uint64_t addr = canfd_regs[i].addr; | ||
2051 | + rai_array[i + pos].name = g_strdup_printf("%s%d", name, reg_num); | ||
2052 | + rai_array[i + pos].addr = addr + pos * 4; | ||
2053 | + } | ||
2054 | + } | ||
2055 | +} | ||
2056 | + | ||
2057 | +static RegisterInfoArray *canfd_create_regarray(XlnxVersalCANFDState *s) | ||
2058 | +{ | ||
2059 | + const char *device_prefix = object_get_typename(OBJECT(s)); | ||
2060 | + uint64_t memory_size = XLNX_VERSAL_CANFD_R_MAX * 4; | ||
2061 | + int num_regs; | ||
2062 | + int pos = 0; | ||
2063 | + RegisterInfoArray *r_array; | ||
2064 | + | ||
2065 | + num_regs = ARRAY_SIZE(canfd_regs_info) + | ||
2066 | + s->cfg.tx_fifo * NUM_REGS_PER_MSG_SPACE + | ||
2067 | + s->cfg.rx0_fifo * NUM_REGS_PER_MSG_SPACE + | ||
2068 | + NUM_AF * NUM_REG_PER_AF + | ||
2069 | + NUM_TXE * NUM_REG_PER_TXE; | ||
2070 | + | ||
2071 | + s->tx_regs = g_new0(RegisterAccessInfo, | ||
2072 | + s->cfg.tx_fifo * ARRAY_SIZE(canfd_tx_regs)); | ||
2073 | + | ||
2074 | + canfd_create_rai(s->tx_regs, canfd_tx_regs, | ||
2075 | + ARRAY_SIZE(canfd_tx_regs), s->cfg.tx_fifo); | ||
2076 | + | ||
2077 | + s->rx0_regs = g_new0(RegisterAccessInfo, | ||
2078 | + s->cfg.rx0_fifo * ARRAY_SIZE(canfd_rx0_regs)); | ||
2079 | + | ||
2080 | + canfd_create_rai(s->rx0_regs, canfd_rx0_regs, | ||
2081 | + ARRAY_SIZE(canfd_rx0_regs), s->cfg.rx0_fifo); | ||
2082 | + | ||
2083 | + s->af_regs = g_new0(RegisterAccessInfo, | ||
2084 | + NUM_AF * ARRAY_SIZE(canfd_af_regs)); | ||
2085 | + | ||
2086 | + canfd_create_rai(s->af_regs, canfd_af_regs, | ||
2087 | + ARRAY_SIZE(canfd_af_regs), NUM_AF); | ||
2088 | + | ||
2089 | + s->txe_regs = g_new0(RegisterAccessInfo, | ||
2090 | + NUM_TXE * ARRAY_SIZE(canfd_txe_regs)); | ||
2091 | + | ||
2092 | + canfd_create_rai(s->txe_regs, canfd_txe_regs, | ||
2093 | + ARRAY_SIZE(canfd_txe_regs), NUM_TXE); | ||
2094 | + | ||
2095 | + if (s->cfg.enable_rx_fifo1) { | ||
2096 | + num_regs += s->cfg.rx1_fifo * NUM_REGS_PER_MSG_SPACE; | ||
2097 | + | ||
2098 | + s->rx1_regs = g_new0(RegisterAccessInfo, | ||
2099 | + s->cfg.rx1_fifo * ARRAY_SIZE(canfd_rx1_regs)); | ||
2100 | + | ||
2101 | + canfd_create_rai(s->rx1_regs, canfd_rx1_regs, | ||
2102 | + ARRAY_SIZE(canfd_rx1_regs), s->cfg.rx1_fifo); | ||
2103 | + } | ||
2104 | + | ||
2105 | + r_array = g_new0(RegisterInfoArray, 1); | ||
2106 | + r_array->r = g_new0(RegisterInfo * , num_regs); | ||
2107 | + r_array->num_elements = num_regs; | ||
2108 | + r_array->prefix = device_prefix; | ||
2109 | + | ||
2110 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2111 | + canfd_regs_info, | ||
2112 | + ARRAY_SIZE(canfd_regs_info)); | ||
2113 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2114 | + s->tx_regs, s->cfg.tx_fifo * | ||
2115 | + NUM_REGS_PER_MSG_SPACE); | ||
2116 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2117 | + s->rx0_regs, s->cfg.rx0_fifo * | ||
2118 | + NUM_REGS_PER_MSG_SPACE); | ||
2119 | + if (s->cfg.enable_rx_fifo1) { | ||
2120 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2121 | + s->rx1_regs, s->cfg.rx1_fifo * | ||
2122 | + NUM_REGS_PER_MSG_SPACE); | ||
2123 | + } | ||
2124 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2125 | + s->af_regs, NUM_AF * NUM_REG_PER_AF); | ||
2126 | + pos = canfd_populate_regarray(s, r_array, pos, | ||
2127 | + s->txe_regs, NUM_TXE * NUM_REG_PER_TXE); | ||
2128 | + | ||
2129 | + memory_region_init_io(&r_array->mem, OBJECT(s), &canfd_ops, r_array, | ||
2130 | + device_prefix, memory_size); | ||
2131 | + return r_array; | ||
2132 | +} | ||
2133 | + | ||
2134 | +static void canfd_realize(DeviceState *dev, Error **errp) | ||
2135 | +{ | ||
2136 | + XlnxVersalCANFDState *s = XILINX_CANFD(dev); | ||
2137 | + RegisterInfoArray *reg_array; | ||
2138 | + | ||
2139 | + reg_array = canfd_create_regarray(s); | ||
2140 | + memory_region_add_subregion(&s->iomem, 0x00, ®_array->mem); | ||
2141 | + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); | ||
2142 | + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_canfd_int); | ||
2143 | + | ||
2144 | + if (s->canfdbus) { | ||
2145 | + if (xlnx_canfd_connect_to_bus(s, s->canfdbus) < 0) { | ||
2146 | + g_autofree char *path = object_get_canonical_path(OBJECT(s)); | ||
2147 | + | ||
2148 | + error_setg(errp, "%s: xlnx_canfd_connect_to_bus failed", path); | ||
2149 | + return; | ||
2150 | + } | ||
2151 | + | ||
2152 | + } | ||
2153 | + | ||
2154 | + /* Allocate a new timer. */ | ||
2155 | + s->canfd_timer = ptimer_init(xlnx_versal_canfd_ptimer_cb, s, | ||
2156 | + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | | ||
2157 | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | | ||
2158 | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD); | ||
2159 | + | ||
2160 | + ptimer_transaction_begin(s->canfd_timer); | ||
2161 | + | ||
2162 | + ptimer_set_freq(s->canfd_timer, s->cfg.ext_clk_freq); | ||
2163 | + ptimer_set_limit(s->canfd_timer, CANFD_TIMER_MAX, 1); | ||
2164 | + ptimer_run(s->canfd_timer, 0); | ||
2165 | + ptimer_transaction_commit(s->canfd_timer); | ||
2166 | +} | ||
2167 | + | ||
2168 | +static void canfd_init(Object *obj) | ||
2169 | +{ | ||
2170 | + XlnxVersalCANFDState *s = XILINX_CANFD(obj); | ||
2171 | + | ||
2172 | + memory_region_init(&s->iomem, obj, TYPE_XILINX_CANFD, | ||
2173 | + XLNX_VERSAL_CANFD_R_MAX * 4); | ||
2174 | +} | ||
2175 | + | ||
2176 | +static const VMStateDescription vmstate_canfd = { | ||
2177 | + .name = TYPE_XILINX_CANFD, | ||
2178 | + .version_id = 1, | ||
2179 | + .minimum_version_id = 1, | ||
2180 | + .fields = (VMStateField[]) { | ||
2181 | + VMSTATE_UINT32_ARRAY(regs, XlnxVersalCANFDState, | ||
2182 | + XLNX_VERSAL_CANFD_R_MAX), | ||
2183 | + VMSTATE_PTIMER(canfd_timer, XlnxVersalCANFDState), | ||
2184 | + VMSTATE_END_OF_LIST(), | ||
2185 | + } | ||
2186 | +}; | ||
2187 | + | ||
2188 | +static Property canfd_core_properties[] = { | ||
2189 | + DEFINE_PROP_UINT8("rx-fifo0", XlnxVersalCANFDState, cfg.rx0_fifo, 0x40), | ||
2190 | + DEFINE_PROP_UINT8("rx-fifo1", XlnxVersalCANFDState, cfg.rx1_fifo, 0x40), | ||
2191 | + DEFINE_PROP_UINT8("tx-fifo", XlnxVersalCANFDState, cfg.tx_fifo, 0x20), | ||
2192 | + DEFINE_PROP_BOOL("enable-rx-fifo1", XlnxVersalCANFDState, | ||
2193 | + cfg.enable_rx_fifo1, true), | ||
2194 | + DEFINE_PROP_UINT32("ext_clk_freq", XlnxVersalCANFDState, cfg.ext_clk_freq, | ||
2195 | + CANFD_DEFAULT_CLOCK), | ||
2196 | + DEFINE_PROP_LINK("canfdbus", XlnxVersalCANFDState, canfdbus, TYPE_CAN_BUS, | ||
2197 | + CanBusState *), | ||
2198 | + DEFINE_PROP_END_OF_LIST(), | ||
2199 | +}; | ||
2200 | + | ||
2201 | +static void canfd_class_init(ObjectClass *klass, void *data) | ||
2202 | +{ | ||
2203 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
2204 | + | ||
2205 | + dc->reset = canfd_reset; | ||
2206 | + dc->realize = canfd_realize; | ||
2207 | + device_class_set_props(dc, canfd_core_properties); | ||
2208 | + dc->vmsd = &vmstate_canfd; | ||
2209 | +} | ||
2210 | + | ||
2211 | +static const TypeInfo canfd_info = { | ||
2212 | + .name = TYPE_XILINX_CANFD, | ||
2213 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
2214 | + .instance_size = sizeof(XlnxVersalCANFDState), | ||
2215 | + .class_init = canfd_class_init, | ||
2216 | + .instance_init = canfd_init, | ||
2217 | +}; | ||
2218 | + | ||
2219 | +static void canfd_register_types(void) | ||
2220 | +{ | ||
2221 | + type_register_static(&canfd_info); | ||
2222 | +} | ||
2223 | + | ||
2224 | +type_init(canfd_register_types) | ||
2225 | diff --git a/hw/net/can/meson.build b/hw/net/can/meson.build | ||
2226 | index XXXXXXX..XXXXXXX 100644 | ||
2227 | --- a/hw/net/can/meson.build | ||
2228 | +++ b/hw/net/can/meson.build | ||
2229 | @@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c')) | ||
2230 | softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c')) | ||
2231 | softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c')) | ||
2232 | softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c')) | ||
2233 | +softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-canfd.c')) | ||
2234 | diff --git a/hw/net/can/trace-events b/hw/net/can/trace-events | ||
2235 | index XXXXXXX..XXXXXXX 100644 | ||
2236 | --- a/hw/net/can/trace-events | ||
2237 | +++ b/hw/net/can/trace-events | ||
2238 | @@ -XXX,XX +XXX,XX @@ xlnx_can_filter_mask_pre_write(uint8_t filter_num, uint32_t value) "Filter%d MAS | ||
2239 | xlnx_can_tx_data(uint32_t id, uint8_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" | ||
2240 | xlnx_can_rx_data(uint32_t id, uint32_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" | ||
2241 | xlnx_can_rx_discard(uint32_t status) "Controller is not enabled for bus communication. Status Register: 0x%08x" | ||
2242 | + | ||
2243 | +# xlnx-versal-canfd.c | ||
2244 | +xlnx_canfd_update_irq(char *path, uint32_t isr, uint32_t ier, uint32_t irq) "%s: ISR: 0x%08x IER: 0x%08x IRQ: 0x%08x" | ||
2245 | +xlnx_canfd_rx_fifo_filter_reject(char *path, uint32_t id, uint8_t dlc) "%s: Frame: ID: 0x%08x DLC: 0x%02x" | ||
2246 | +xlnx_canfd_rx_data(char *path, uint32_t id, uint8_t dlc, uint8_t flags) "%s: Frame: ID: 0x%08x DLC: 0x%02x CANFD Flag: 0x%02x" | ||
2247 | +xlnx_canfd_tx_data(char *path, uint32_t id, uint8_t dlc, uint8_t flags) "%s: Frame: ID: 0x%08x DLC: 0x%02x CANFD Flag: 0x%02x" | ||
2248 | +xlnx_canfd_reset(char *path, uint32_t val) "%s: Resetting controller with value = 0x%08x" | ||
66 | -- | 2249 | -- |
67 | 2.19.1 | 2250 | 2.34.1 |
68 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Vikram Garhwal <vikram.garhwal@amd.com> |
---|---|---|---|
2 | 2 | ||
3 | Instead of shifts and masks, use direct loads and stores from the neon | 3 | Connect CANFD0 and CANFD1 on the Versal-virt machine and update xlnx-versal-virt |
4 | register file. Mirror the iteration structure of the ARM pseudocode | 4 | document with CANFD command line examples. |
5 | more closely. Correct the parameters of the VLD2 A2 insn. | ||
6 | 5 | ||
7 | Note that this includes a bugfix for handling of the insn | 6 | Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com> |
8 | "VLD2 (multiple 2-element structures)" -- we were using an | ||
9 | incorrect stride value. | ||
10 | |||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 20181011205206.3552-19-richard.henderson@linaro.org | ||
13 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 10 | --- |
16 | target/arm/translate.c | 170 ++++++++++++++++++----------------------- | 11 | docs/system/arm/xlnx-versal-virt.rst | 31 ++++++++++++++++ |
17 | 1 file changed, 74 insertions(+), 96 deletions(-) | 12 | include/hw/arm/xlnx-versal.h | 12 +++++++ |
13 | hw/arm/xlnx-versal-virt.c | 53 ++++++++++++++++++++++++++++ | ||
14 | hw/arm/xlnx-versal.c | 37 +++++++++++++++++++ | ||
15 | 4 files changed, 133 insertions(+) | ||
18 | 16 | ||
19 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 17 | diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst |
20 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/translate.c | 19 | --- a/docs/system/arm/xlnx-versal-virt.rst |
22 | +++ b/target/arm/translate.c | 20 | +++ b/docs/system/arm/xlnx-versal-virt.rst |
23 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass) | 21 | @@ -XXX,XX +XXX,XX @@ Implemented devices: |
24 | return tmp; | 22 | - DDR memory |
23 | - BBRAM (36 bytes of Battery-backed RAM) | ||
24 | - eFUSE (3072 bytes of one-time field-programmable bit array) | ||
25 | +- 2 CANFDs | ||
26 | |||
27 | QEMU does not yet model any other devices, including the PL and the AI Engine. | ||
28 | |||
29 | @@ -XXX,XX +XXX,XX @@ To use a different index value, N, from default of 1, add: | ||
30 | |||
31 | Better yet, do not use actual product data when running guest image | ||
32 | on this Xilinx Versal Virt board. | ||
33 | + | ||
34 | +Using CANFDs for Versal Virt | ||
35 | +"""""""""""""""""""""""""""" | ||
36 | +Versal CANFD controller is developed based on SocketCAN and QEMU CAN bus | ||
37 | +implementation. Bus connection and socketCAN connection for each CAN module | ||
38 | +can be set through command lines. | ||
39 | + | ||
40 | +To connect both CANFD0 and CANFD1 on the same bus: | ||
41 | + | ||
42 | +.. code-block:: bash | ||
43 | + | ||
44 | + -object can-bus,id=canbus -machine canbus0=canbus -machine canbus1=canbus | ||
45 | + | ||
46 | +To connect CANFD0 and CANFD1 to separate buses: | ||
47 | + | ||
48 | +.. code-block:: bash | ||
49 | + | ||
50 | + -object can-bus,id=canbus0 -object can-bus,id=canbus1 \ | ||
51 | + -machine canbus0=canbus0 -machine canbus1=canbus1 | ||
52 | + | ||
53 | +The SocketCAN interface can connect to a Physical or a Virtual CAN interfaces on | ||
54 | +the host machine. Please check this document to learn about CAN interface on | ||
55 | +Linux: docs/system/devices/can.rst | ||
56 | + | ||
57 | +To connect CANFD0 and CANFD1 to host machine's CAN interface can0: | ||
58 | + | ||
59 | +.. code-block:: bash | ||
60 | + | ||
61 | + -object can-bus,id=canbus -machine canbus0=canbus -machine canbus1=canbus | ||
62 | + -object can-host-socketcan,id=canhost0,if=can0,canbus=canbus | ||
63 | diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/include/hw/arm/xlnx-versal.h | ||
66 | +++ b/include/hw/arm/xlnx-versal.h | ||
67 | @@ -XXX,XX +XXX,XX @@ | ||
68 | #include "hw/dma/xlnx_csu_dma.h" | ||
69 | #include "hw/misc/xlnx-versal-crl.h" | ||
70 | #include "hw/misc/xlnx-versal-pmc-iou-slcr.h" | ||
71 | +#include "hw/net/xlnx-versal-canfd.h" | ||
72 | |||
73 | #define TYPE_XLNX_VERSAL "xlnx-versal" | ||
74 | OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) | ||
75 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) | ||
76 | #define XLNX_VERSAL_NR_SDS 2 | ||
77 | #define XLNX_VERSAL_NR_XRAM 4 | ||
78 | #define XLNX_VERSAL_NR_IRQS 192 | ||
79 | +#define XLNX_VERSAL_NR_CANFD 2 | ||
80 | +#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000) | ||
81 | |||
82 | struct Versal { | ||
83 | /*< private >*/ | ||
84 | @@ -XXX,XX +XXX,XX @@ struct Versal { | ||
85 | CadenceGEMState gem[XLNX_VERSAL_NR_GEMS]; | ||
86 | XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS]; | ||
87 | VersalUsb2 usb; | ||
88 | + CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; | ||
89 | + XlnxVersalCANFDState canfd[XLNX_VERSAL_NR_CANFD]; | ||
90 | } iou; | ||
91 | |||
92 | /* Real-time Processing Unit. */ | ||
93 | @@ -XXX,XX +XXX,XX @@ struct Versal { | ||
94 | #define VERSAL_CRL_IRQ 10 | ||
95 | #define VERSAL_UART0_IRQ_0 18 | ||
96 | #define VERSAL_UART1_IRQ_0 19 | ||
97 | +#define VERSAL_CANFD0_IRQ_0 20 | ||
98 | +#define VERSAL_CANFD1_IRQ_0 21 | ||
99 | #define VERSAL_USB0_IRQ_0 22 | ||
100 | #define VERSAL_GEM0_IRQ_0 56 | ||
101 | #define VERSAL_GEM0_WAKE_IRQ_0 57 | ||
102 | @@ -XXX,XX +XXX,XX @@ struct Versal { | ||
103 | #define MM_UART1 0xff010000U | ||
104 | #define MM_UART1_SIZE 0x10000 | ||
105 | |||
106 | +#define MM_CANFD0 0xff060000U | ||
107 | +#define MM_CANFD0_SIZE 0x10000 | ||
108 | +#define MM_CANFD1 0xff070000U | ||
109 | +#define MM_CANFD1_SIZE 0x10000 | ||
110 | + | ||
111 | #define MM_GEM0 0xff0c0000U | ||
112 | #define MM_GEM0_SIZE 0x10000 | ||
113 | #define MM_GEM1 0xff0d0000U | ||
114 | diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c | ||
115 | index XXXXXXX..XXXXXXX 100644 | ||
116 | --- a/hw/arm/xlnx-versal-virt.c | ||
117 | +++ b/hw/arm/xlnx-versal-virt.c | ||
118 | @@ -XXX,XX +XXX,XX @@ struct VersalVirt { | ||
119 | uint32_t clk_25Mhz; | ||
120 | uint32_t usb; | ||
121 | uint32_t dwc; | ||
122 | + uint32_t canfd[2]; | ||
123 | } phandle; | ||
124 | struct arm_boot_info binfo; | ||
125 | |||
126 | + CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; | ||
127 | struct { | ||
128 | bool secure; | ||
129 | } cfg; | ||
130 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_uart_nodes(VersalVirt *s) | ||
131 | } | ||
25 | } | 132 | } |
26 | 133 | ||
27 | +static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop) | 134 | +static void fdt_add_canfd_nodes(VersalVirt *s) |
28 | +{ | 135 | +{ |
29 | + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); | 136 | + uint64_t addrs[] = { MM_CANFD1, MM_CANFD0 }; |
30 | + | 137 | + uint32_t size[] = { MM_CANFD1_SIZE, MM_CANFD0_SIZE }; |
31 | + switch (mop) { | 138 | + unsigned int irqs[] = { VERSAL_CANFD1_IRQ_0, VERSAL_CANFD0_IRQ_0 }; |
32 | + case MO_UB: | 139 | + const char clocknames[] = "can_clk\0s_axi_aclk"; |
33 | + tcg_gen_ld8u_i64(var, cpu_env, offset); | 140 | + int i; |
34 | + break; | 141 | + |
35 | + case MO_UW: | 142 | + /* Create and connect CANFD0 and CANFD1 nodes to canbus0. */ |
36 | + tcg_gen_ld16u_i64(var, cpu_env, offset); | 143 | + for (i = 0; i < ARRAY_SIZE(addrs); i++) { |
37 | + break; | 144 | + char *name = g_strdup_printf("/canfd@%" PRIx64, addrs[i]); |
38 | + case MO_UL: | 145 | + qemu_fdt_add_subnode(s->fdt, name); |
39 | + tcg_gen_ld32u_i64(var, cpu_env, offset); | 146 | + |
40 | + break; | 147 | + qemu_fdt_setprop_cell(s->fdt, name, "rx-fifo-depth", 0x40); |
41 | + case MO_Q: | 148 | + qemu_fdt_setprop_cell(s->fdt, name, "tx-mailbox-count", 0x20); |
42 | + tcg_gen_ld_i64(var, cpu_env, offset); | 149 | + |
43 | + break; | 150 | + qemu_fdt_setprop_cells(s->fdt, name, "clocks", |
44 | + default: | 151 | + s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); |
45 | + g_assert_not_reached(); | 152 | + qemu_fdt_setprop(s->fdt, name, "clock-names", |
153 | + clocknames, sizeof(clocknames)); | ||
154 | + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", | ||
155 | + GIC_FDT_IRQ_TYPE_SPI, irqs[i], | ||
156 | + GIC_FDT_IRQ_FLAGS_LEVEL_HI); | ||
157 | + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", | ||
158 | + 2, addrs[i], 2, size[i]); | ||
159 | + qemu_fdt_setprop_string(s->fdt, name, "compatible", | ||
160 | + "xlnx,canfd-2.0"); | ||
161 | + | ||
162 | + g_free(name); | ||
46 | + } | 163 | + } |
47 | +} | 164 | +} |
48 | + | 165 | + |
49 | static void neon_store_reg(int reg, int pass, TCGv_i32 var) | 166 | static void fdt_add_fixed_link_nodes(VersalVirt *s, char *gemname, |
167 | uint32_t phandle) | ||
50 | { | 168 | { |
51 | tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass)); | 169 | @@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine) |
52 | tcg_temp_free_i32(var); | 170 | TYPE_XLNX_VERSAL); |
171 | object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram), | ||
172 | &error_abort); | ||
173 | + object_property_set_link(OBJECT(&s->soc), "canbus0", OBJECT(s->canbus[0]), | ||
174 | + &error_abort); | ||
175 | + object_property_set_link(OBJECT(&s->soc), "canbus1", OBJECT(s->canbus[1]), | ||
176 | + &error_abort); | ||
177 | sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); | ||
178 | |||
179 | fdt_create(s); | ||
180 | create_virtio_regions(s); | ||
181 | fdt_add_gem_nodes(s); | ||
182 | fdt_add_uart_nodes(s); | ||
183 | + fdt_add_canfd_nodes(s); | ||
184 | fdt_add_gic_nodes(s); | ||
185 | fdt_add_timer_nodes(s); | ||
186 | fdt_add_zdma_nodes(s); | ||
187 | @@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine) | ||
188 | |||
189 | static void versal_virt_machine_instance_init(Object *obj) | ||
190 | { | ||
191 | + VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); | ||
192 | + | ||
193 | + /* | ||
194 | + * Users can set the canbus0 and canbus1 properties to a can-bus object and | ||
195 | + * optionally connect it to a host SocketCAN interface via the command line. | ||
196 | + */ | ||
197 | + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, | ||
198 | + (Object **)&s->canbus[0], | ||
199 | + object_property_allow_set_link, | ||
200 | + 0); | ||
201 | + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, | ||
202 | + (Object **)&s->canbus[1], | ||
203 | + object_property_allow_set_link, | ||
204 | + 0); | ||
53 | } | 205 | } |
54 | 206 | ||
55 | +static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var) | 207 | static void versal_virt_machine_class_init(ObjectClass *oc, void *data) |
208 | diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c | ||
209 | index XXXXXXX..XXXXXXX 100644 | ||
210 | --- a/hw/arm/xlnx-versal.c | ||
211 | +++ b/hw/arm/xlnx-versal.c | ||
212 | @@ -XXX,XX +XXX,XX @@ static void versal_create_uarts(Versal *s, qemu_irq *pic) | ||
213 | } | ||
214 | } | ||
215 | |||
216 | +static void versal_create_canfds(Versal *s, qemu_irq *pic) | ||
56 | +{ | 217 | +{ |
57 | + long offset = neon_element_offset(reg, ele, size); | 218 | + int i; |
58 | + | 219 | + uint32_t irqs[] = { VERSAL_CANFD0_IRQ_0, VERSAL_CANFD1_IRQ_0}; |
59 | + switch (size) { | 220 | + uint64_t addrs[] = { MM_CANFD0, MM_CANFD1 }; |
60 | + case MO_8: | 221 | + |
61 | + tcg_gen_st8_i64(var, cpu_env, offset); | 222 | + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.canfd); i++) { |
62 | + break; | 223 | + char *name = g_strdup_printf("canfd%d", i); |
63 | + case MO_16: | 224 | + SysBusDevice *sbd; |
64 | + tcg_gen_st16_i64(var, cpu_env, offset); | 225 | + MemoryRegion *mr; |
65 | + break; | 226 | + |
66 | + case MO_32: | 227 | + object_initialize_child(OBJECT(s), name, &s->lpd.iou.canfd[i], |
67 | + tcg_gen_st32_i64(var, cpu_env, offset); | 228 | + TYPE_XILINX_CANFD); |
68 | + break; | 229 | + sbd = SYS_BUS_DEVICE(&s->lpd.iou.canfd[i]); |
69 | + case MO_64: | 230 | + |
70 | + tcg_gen_st_i64(var, cpu_env, offset); | 231 | + object_property_set_int(OBJECT(&s->lpd.iou.canfd[i]), "ext_clk_freq", |
71 | + break; | 232 | + XLNX_VERSAL_CANFD_REF_CLK , &error_abort); |
72 | + default: | 233 | + |
73 | + g_assert_not_reached(); | 234 | + object_property_set_link(OBJECT(&s->lpd.iou.canfd[i]), "canfdbus", |
235 | + OBJECT(s->lpd.iou.canbus[i]), | ||
236 | + &error_abort); | ||
237 | + | ||
238 | + sysbus_realize(sbd, &error_fatal); | ||
239 | + | ||
240 | + mr = sysbus_mmio_get_region(sbd, 0); | ||
241 | + memory_region_add_subregion(&s->mr_ps, addrs[i], mr); | ||
242 | + | ||
243 | + sysbus_connect_irq(sbd, 0, pic[irqs[i]]); | ||
244 | + g_free(name); | ||
74 | + } | 245 | + } |
75 | +} | 246 | +} |
76 | + | 247 | + |
77 | static inline void neon_load_reg64(TCGv_i64 var, int reg) | 248 | static void versal_create_usbs(Versal *s, qemu_irq *pic) |
78 | { | 249 | { |
79 | tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg)); | 250 | DeviceState *dev; |
80 | @@ -XXX,XX +XXX,XX @@ static struct { | 251 | @@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp) |
81 | int interleave; | 252 | versal_create_apu_gic(s, pic); |
82 | int spacing; | 253 | versal_create_rpu_cpus(s); |
83 | } const neon_ls_element_type[11] = { | 254 | versal_create_uarts(s, pic); |
84 | - {4, 4, 1}, | 255 | + versal_create_canfds(s, pic); |
85 | - {4, 4, 2}, | 256 | versal_create_usbs(s, pic); |
86 | + {1, 4, 1}, | 257 | versal_create_gems(s, pic); |
87 | + {1, 4, 2}, | 258 | versal_create_admas(s, pic); |
88 | {4, 1, 1}, | 259 | @@ -XXX,XX +XXX,XX @@ static void versal_init(Object *obj) |
89 | - {4, 2, 1}, | 260 | static Property versal_properties[] = { |
90 | - {3, 3, 1}, | 261 | DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, |
91 | - {3, 3, 2}, | 262 | MemoryRegion *), |
92 | + {2, 2, 2}, | 263 | + DEFINE_PROP_LINK("canbus0", Versal, lpd.iou.canbus[0], |
93 | + {1, 3, 1}, | 264 | + TYPE_CAN_BUS, CanBusState *), |
94 | + {1, 3, 2}, | 265 | + DEFINE_PROP_LINK("canbus1", Versal, lpd.iou.canbus[1], |
95 | {3, 1, 1}, | 266 | + TYPE_CAN_BUS, CanBusState *), |
96 | {1, 1, 1}, | 267 | DEFINE_PROP_END_OF_LIST() |
97 | - {2, 2, 1}, | ||
98 | - {2, 2, 2}, | ||
99 | + {1, 2, 1}, | ||
100 | + {1, 2, 2}, | ||
101 | {2, 1, 1} | ||
102 | }; | 268 | }; |
103 | 269 | ||
104 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | ||
105 | int shift; | ||
106 | int n; | ||
107 | int vec_size; | ||
108 | + int mmu_idx; | ||
109 | + TCGMemOp endian; | ||
110 | TCGv_i32 addr; | ||
111 | TCGv_i32 tmp; | ||
112 | TCGv_i32 tmp2; | ||
113 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | ||
114 | rn = (insn >> 16) & 0xf; | ||
115 | rm = insn & 0xf; | ||
116 | load = (insn & (1 << 21)) != 0; | ||
117 | + endian = s->be_data; | ||
118 | + mmu_idx = get_mem_index(s); | ||
119 | if ((insn & (1 << 23)) == 0) { | ||
120 | /* Load store all elements. */ | ||
121 | op = (insn >> 8) & 0xf; | ||
122 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | ||
123 | nregs = neon_ls_element_type[op].nregs; | ||
124 | interleave = neon_ls_element_type[op].interleave; | ||
125 | spacing = neon_ls_element_type[op].spacing; | ||
126 | - if (size == 3 && (interleave | spacing) != 1) | ||
127 | + if (size == 3 && (interleave | spacing) != 1) { | ||
128 | return 1; | ||
129 | + } | ||
130 | + tmp64 = tcg_temp_new_i64(); | ||
131 | addr = tcg_temp_new_i32(); | ||
132 | + tmp2 = tcg_const_i32(1 << size); | ||
133 | load_reg_var(s, addr, rn); | ||
134 | - stride = (1 << size) * interleave; | ||
135 | for (reg = 0; reg < nregs; reg++) { | ||
136 | - if (interleave > 2 || (interleave == 2 && nregs == 2)) { | ||
137 | - load_reg_var(s, addr, rn); | ||
138 | - tcg_gen_addi_i32(addr, addr, (1 << size) * reg); | ||
139 | - } else if (interleave == 2 && nregs == 4 && reg == 2) { | ||
140 | - load_reg_var(s, addr, rn); | ||
141 | - tcg_gen_addi_i32(addr, addr, 1 << size); | ||
142 | - } | ||
143 | - if (size == 3) { | ||
144 | - tmp64 = tcg_temp_new_i64(); | ||
145 | - if (load) { | ||
146 | - gen_aa32_ld64(s, tmp64, addr, get_mem_index(s)); | ||
147 | - neon_store_reg64(tmp64, rd); | ||
148 | - } else { | ||
149 | - neon_load_reg64(tmp64, rd); | ||
150 | - gen_aa32_st64(s, tmp64, addr, get_mem_index(s)); | ||
151 | - } | ||
152 | - tcg_temp_free_i64(tmp64); | ||
153 | - tcg_gen_addi_i32(addr, addr, stride); | ||
154 | - } else { | ||
155 | - for (pass = 0; pass < 2; pass++) { | ||
156 | - if (size == 2) { | ||
157 | - if (load) { | ||
158 | - tmp = tcg_temp_new_i32(); | ||
159 | - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); | ||
160 | - neon_store_reg(rd, pass, tmp); | ||
161 | - } else { | ||
162 | - tmp = neon_load_reg(rd, pass); | ||
163 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | ||
164 | - tcg_temp_free_i32(tmp); | ||
165 | - } | ||
166 | - tcg_gen_addi_i32(addr, addr, stride); | ||
167 | - } else if (size == 1) { | ||
168 | - if (load) { | ||
169 | - tmp = tcg_temp_new_i32(); | ||
170 | - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); | ||
171 | - tcg_gen_addi_i32(addr, addr, stride); | ||
172 | - tmp2 = tcg_temp_new_i32(); | ||
173 | - gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s)); | ||
174 | - tcg_gen_addi_i32(addr, addr, stride); | ||
175 | - tcg_gen_shli_i32(tmp2, tmp2, 16); | ||
176 | - tcg_gen_or_i32(tmp, tmp, tmp2); | ||
177 | - tcg_temp_free_i32(tmp2); | ||
178 | - neon_store_reg(rd, pass, tmp); | ||
179 | - } else { | ||
180 | - tmp = neon_load_reg(rd, pass); | ||
181 | - tmp2 = tcg_temp_new_i32(); | ||
182 | - tcg_gen_shri_i32(tmp2, tmp, 16); | ||
183 | - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); | ||
184 | - tcg_temp_free_i32(tmp); | ||
185 | - tcg_gen_addi_i32(addr, addr, stride); | ||
186 | - gen_aa32_st16(s, tmp2, addr, get_mem_index(s)); | ||
187 | - tcg_temp_free_i32(tmp2); | ||
188 | - tcg_gen_addi_i32(addr, addr, stride); | ||
189 | - } | ||
190 | - } else /* size == 0 */ { | ||
191 | - if (load) { | ||
192 | - tmp2 = NULL; | ||
193 | - for (n = 0; n < 4; n++) { | ||
194 | - tmp = tcg_temp_new_i32(); | ||
195 | - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); | ||
196 | - tcg_gen_addi_i32(addr, addr, stride); | ||
197 | - if (n == 0) { | ||
198 | - tmp2 = tmp; | ||
199 | - } else { | ||
200 | - tcg_gen_shli_i32(tmp, tmp, n * 8); | ||
201 | - tcg_gen_or_i32(tmp2, tmp2, tmp); | ||
202 | - tcg_temp_free_i32(tmp); | ||
203 | - } | ||
204 | - } | ||
205 | - neon_store_reg(rd, pass, tmp2); | ||
206 | - } else { | ||
207 | - tmp2 = neon_load_reg(rd, pass); | ||
208 | - for (n = 0; n < 4; n++) { | ||
209 | - tmp = tcg_temp_new_i32(); | ||
210 | - if (n == 0) { | ||
211 | - tcg_gen_mov_i32(tmp, tmp2); | ||
212 | - } else { | ||
213 | - tcg_gen_shri_i32(tmp, tmp2, n * 8); | ||
214 | - } | ||
215 | - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); | ||
216 | - tcg_temp_free_i32(tmp); | ||
217 | - tcg_gen_addi_i32(addr, addr, stride); | ||
218 | - } | ||
219 | - tcg_temp_free_i32(tmp2); | ||
220 | - } | ||
221 | + for (n = 0; n < 8 >> size; n++) { | ||
222 | + int xs; | ||
223 | + for (xs = 0; xs < interleave; xs++) { | ||
224 | + int tt = rd + reg + spacing * xs; | ||
225 | + | ||
226 | + if (load) { | ||
227 | + gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size); | ||
228 | + neon_store_element64(tt, n, size, tmp64); | ||
229 | + } else { | ||
230 | + neon_load_element64(tmp64, tt, n, size); | ||
231 | + gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size); | ||
232 | } | ||
233 | + tcg_gen_add_i32(addr, addr, tmp2); | ||
234 | } | ||
235 | } | ||
236 | - rd += spacing; | ||
237 | } | ||
238 | tcg_temp_free_i32(addr); | ||
239 | - stride = nregs * 8; | ||
240 | + tcg_temp_free_i32(tmp2); | ||
241 | + tcg_temp_free_i64(tmp64); | ||
242 | + stride = nregs * interleave * 8; | ||
243 | } else { | ||
244 | size = (insn >> 10) & 3; | ||
245 | if (size == 3) { | ||
246 | -- | 270 | -- |
247 | 2.19.1 | 271 | 2.34.1 |
248 | |||
249 | diff view generated by jsdifflib |
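
For anyone who wants to try the new CANFD wiring on the xlnx-versal-virt machine, a complete command line might look like the sketch below. It only combines options that already appear in this series (the can-bus object, the canbus0/canbus1 machine links and the can-host-socketcan example from the documentation); the object ids and the host interface name can0 are placeholders to adapt to the local setup:

    qemu-system-aarch64 -M xlnx-versal-virt \
        -object can-bus,id=canbus \
        -machine canbus0=canbus -machine canbus1=canbus \
        -object can-host-socketcan,id=canhost0,if=can0,canbus=canbus

Dropping the can-host-socketcan object keeps the two controllers connected to each other on the purely virtual bus.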
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Vikram Garhwal <vikram.garhwal@amd.com> |
---|---|---|---|
2 | 2 | ||
3 | The EL3 version of this register does not include an ASID, | 3 | Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com> |
4 | and so the tlb_flush performed by vmsa_ttbr_write is not needed. | ||
5 | |||
6 | Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Message-id: 20181019015617.22583-2-richard.henderson@linaro.org | 5 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> |
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 7 | --- |
12 | target/arm/helper.c | 2 +- | 8 | MAINTAINERS | 2 +- |
13 | 1 file changed, 1 insertion(+), 1 deletion(-) | 9 | 1 file changed, 1 insertion(+), 1 deletion(-) |
14 | 10 | ||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 11 | diff --git a/MAINTAINERS b/MAINTAINERS |
16 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/helper.c | 13 | --- a/MAINTAINERS |
18 | +++ b/target/arm/helper.c | 14 | +++ b/MAINTAINERS |
19 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = { | 15 | @@ -XXX,XX +XXX,XX @@ M: Francisco Iglesias <francisco.iglesias@amd.com> |
20 | .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, | 16 | S: Maintained |
21 | { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, | 17 | F: hw/net/can/xlnx-* |
22 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, | 18 | F: include/hw/net/xlnx-* |
23 | - .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, | 19 | -F: tests/qtest/xlnx-can-test* |
24 | + .access = PL3_RW, .resetvalue = 0, | 20 | +F: tests/qtest/xlnx-can*-test* |
25 | .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, | 21 | |
26 | { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, | 22 | EDU |
27 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, | 23 | M: Jiri Slaby <jslaby@suse.cz> |
28 | -- | 24 | -- |
29 | 2.19.1 | 25 | 2.34.1 |
30 | |||
31 | diff view generated by jsdifflib |
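
The widened glob above matters because the next patch adds a Versal CANFD qtest whose name puts "canfd" where the existing ZynqMP test has plain "can". Assuming the file names stay as they are, the new pattern covers both:

    tests/qtest/xlnx-can-test.c       (existing ZynqMP CAN tests)
    tests/qtest/xlnx-canfd-test.c     (Versal CANFD tests added by the next patch)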
1 | The A/I/F bits in ISR_EL1 should track the virtual interrupt | 1 | From: Vikram Garhwal <vikram.garhwal@amd.com> |
---|---|---|---|
2 | status, not the physical interrupt status, if the associated | ||
3 | HCR_EL2.AMO/IMO/FMO bit is set. Implement this, rather than | ||
4 | always showing the physical interrupt status. | ||
5 | 2 | ||
6 | We don't currently implement anything to do with external | 3 | The QTests perform three tests on the Xilinx VERSAL CANFD controller: |
7 | aborts, so this applies only to the I and F bits (though it | 4 | Tests the CANFD controllers in loopback. |
8 | ought to be possible for the outer guest to present a virtual | 5 | Tests the CANFD controllers in normal mode with CAN frame. |
9 | external abort to the inner guest, even if QEMU doesn't | 6 | Tests the CANFD controllers in normal mode with CANFD frame. |
10 | emulate physical external aborts, so there is missing | ||
11 | functionality in this area). | ||
12 | 7 | ||
8 | Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com> | ||
9 | Acked-by: Thomas Huth <thuth@redhat.com> | ||
10 | Reviewed-by: Francisco Iglesias <francisco.iglesias@amd.com> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
15 | Message-id: 20181012144235.19646-6-peter.maydell@linaro.org | ||
16 | --- | 13 | --- |
17 | target/arm/helper.c | 22 ++++++++++++++++++---- | 14 | tests/qtest/xlnx-canfd-test.c | 423 ++++++++++++++++++++++++++++++++++ |
18 | 1 file changed, 18 insertions(+), 4 deletions(-) | 15 | tests/qtest/meson.build | 1 + |
16 | 2 files changed, 424 insertions(+) | ||
17 | create mode 100644 tests/qtest/xlnx-canfd-test.c | ||
19 | 18 | ||
20 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 19 | diff --git a/tests/qtest/xlnx-canfd-test.c b/tests/qtest/xlnx-canfd-test.c |
21 | index XXXXXXX..XXXXXXX 100644 | 20 | new file mode 100644 |
22 | --- a/target/arm/helper.c | 21 | index XXXXXXX..XXXXXXX |
23 | +++ b/target/arm/helper.c | 22 | --- /dev/null |
24 | @@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) | 23 | +++ b/tests/qtest/xlnx-canfd-test.c |
25 | CPUState *cs = ENV_GET_CPU(env); | 24 | @@ -XXX,XX +XXX,XX @@ |
26 | uint64_t ret = 0; | 25 | +/* |
27 | 26 | + * SPDX-License-Identifier: MIT | |
28 | - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { | 27 | + * |
29 | - ret |= CPSR_I; | 28 | + * QTests for the Xilinx Versal CANFD controller. |
30 | + if (arm_hcr_el2_imo(env)) { | 29 | + * |
31 | + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { | 30 | + * Copyright (c) 2022 AMD Inc. |
32 | + ret |= CPSR_I; | 31 | + * |
32 | + * Written-by: Vikram Garhwal<vikram.garhwal@amd.com> | ||
33 | + * | ||
34 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
35 | + * of this software and associated documentation files (the "Software"), to deal | ||
36 | + * in the Software without restriction, including without limitation the rights | ||
37 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
38 | + * copies of the Software, and to permit persons to whom the Software is | ||
39 | + * furnished to do so, subject to the following conditions: | ||
40 | + * | ||
41 | + * The above copyright notice and this permission notice shall be included in | ||
42 | + * all copies or substantial portions of the Software. | ||
43 | + * | ||
44 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
45 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
46 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
47 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
48 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
49 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
50 | + * THE SOFTWARE. | ||
51 | + */ | ||
52 | + | ||
53 | +#include "qemu/osdep.h" | ||
54 | +#include "libqtest.h" | ||
55 | + | ||
56 | +/* Base address. */ | ||
57 | +#define CANFD0_BASE_ADDR 0xff060000 | ||
58 | +#define CANFD1_BASE_ADDR 0xff070000 | ||
59 | + | ||
60 | +/* Register addresses. */ | ||
61 | +#define R_SRR_OFFSET 0x00 | ||
62 | +#define R_MSR_OFFSET 0x04 | ||
63 | +#define R_FILTER_CONTROL_REGISTER 0xe0 | ||
64 | +#define R_SR_OFFSET 0x18 | ||
65 | +#define R_ISR_OFFSET 0x1c | ||
66 | +#define R_IER_OFFSET 0x20 | ||
67 | +#define R_ICR_OFFSET 0x24 | ||
68 | +#define R_TX_READY_REQ_REGISTER 0x90 | ||
69 | +#define RX_FIFO_STATUS_REGISTER 0xe8 | ||
70 | +#define R_TXID_OFFSET 0x100 | ||
71 | +#define R_TXDLC_OFFSET 0x104 | ||
72 | +#define R_TXDATA1_OFFSET 0x108 | ||
73 | +#define R_TXDATA2_OFFSET 0x10c | ||
74 | +#define R_AFMR_REGISTER0 0xa00 | ||
75 | +#define R_AFIR_REGISTER0 0xa04 | ||
76 | +#define R_RX0_ID_OFFSET 0x2100 | ||
77 | +#define R_RX0_DLC_OFFSET 0x2104 | ||
78 | +#define R_RX0_DATA1_OFFSET 0x2108 | ||
79 | +#define R_RX0_DATA2_OFFSET 0x210c | ||
80 | + | ||
81 | +/* CANFD modes. */ | ||
82 | +#define SRR_CONFIG_MODE 0x00 | ||
83 | +#define MSR_NORMAL_MODE 0x00 | ||
84 | +#define MSR_LOOPBACK_MODE (1 << 1) | ||
85 | +#define ENABLE_CANFD (1 << 1) | ||
86 | + | ||
87 | +/* CANFD status. */ | ||
88 | +#define STATUS_CONFIG_MODE (1 << 0) | ||
89 | +#define STATUS_NORMAL_MODE (1 << 3) | ||
90 | +#define STATUS_LOOPBACK_MODE (1 << 1) | ||
91 | +#define ISR_TXOK (1 << 1) | ||
92 | +#define ISR_RXOK (1 << 4) | ||
93 | + | ||
94 | +#define ENABLE_ALL_FILTERS 0xffffffff | ||
95 | +#define ENABLE_ALL_INTERRUPTS 0xffffffff | ||
96 | + | ||
97 | +/* We are sending one canfd message. */ | ||
98 | +#define TX_READY_REG_VAL 0x1 | ||
99 | + | ||
100 | +#define FIRST_RX_STORE_INDEX 0x1 | ||
101 | +#define STATUS_REG_MASK 0xf | ||
102 | +#define DLC_FD_BIT_SHIFT 0x1b | ||
103 | +#define DLC_FD_BIT_MASK 0xf8000000 | ||
104 | +#define FIFO_STATUS_READ_INDEX_MASK 0x3f | ||
105 | +#define FIFO_STATUS_FILL_LEVEL_MASK 0x7f00 | ||
106 | +#define FILL_LEVEL_SHIFT 0x8 | ||
107 | + | ||
108 | +/* CANFD frame size ID, DLC and 16 DATA word. */ | ||
109 | +#define CANFD_FRAME_SIZE 18 | ||
110 | +/* CAN frame size ID, DLC and 2 DATA word. */ | ||
111 | +#define CAN_FRAME_SIZE 4 | ||
112 | + | ||
113 | +/* Set the filters for CANFD controller. */ | ||
114 | +static void enable_filters(QTestState *qts) | ||
115 | +{ | ||
116 | + const uint32_t arr_afmr[32] = { 0xb423deaa, 0xa2a40bdc, 0x1b64f486, | ||
117 | + 0x95c0d4ee, 0xe0c44528, 0x4b407904, | ||
118 | + 0xd2673f46, 0x9fc638d6, 0x8844f3d8, | ||
119 | + 0xa607d1e8, 0x67871bf4, 0xc2557dc, | ||
120 | + 0x9ea5b53e, 0x3643c0cc, 0x5a05ea8e, | ||
121 | + 0x83a46d84, 0x4a25c2b8, 0x93a66008, | ||
122 | + 0x2e467470, 0xedc66118, 0x9086f9f2, | ||
123 | + 0xfa23dd36, 0xb6654b90, 0xb221b8ca, | ||
124 | + 0x3467d1e2, 0xa3a55542, 0x5b26a012, | ||
125 | + 0x2281ea7e, 0xcea0ece8, 0xdc61e588, | ||
126 | + 0x2e5676a, 0x16821320 }; | ||
127 | + | ||
128 | + const uint32_t arr_afir[32] = { 0xa833dfa1, 0x255a477e, 0x3a4bb1c5, | ||
129 | + 0x8f560a6c, 0x27f38903, 0x2fecec4d, | ||
130 | + 0xa014c66d, 0xec289b8, 0x7e52dead, | ||
131 | + 0x82e94f3c, 0xcf3e3c5c, 0x66059871, | ||
132 | + 0x3f213df4, 0x25ac3959, 0xa12e9bef, | ||
133 | + 0xa3ad3af, 0xbafd7fe, 0xb3cb40fd, | ||
134 | + 0x5d9caa81, 0x2ed61902, 0x7cd64a0, | ||
135 | + 0x4b1fa538, 0x9b5ced8c, 0x150de059, | ||
136 | + 0xd2794227, 0x635e820a, 0xbb6b02cf, | ||
137 | + 0xbb58176, 0x570025bb, 0xa78d9658, | ||
138 | + 0x49d735df, 0xe5399d2f }; | ||
139 | + | ||
140 | + /* Passing the respective array values to all the AFMR and AFIR pairs. */ | ||
141 | + for (int i = 0; i < 32; i++) { | ||
142 | + /* For CANFD0. */ | ||
143 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_AFMR_REGISTER0 + 8 * i, | ||
144 | + arr_afmr[i]); | ||
145 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_AFIR_REGISTER0 + 8 * i, | ||
146 | + arr_afir[i]); | ||
147 | + | ||
148 | + /* For CANFD1. */ | ||
149 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_AFMR_REGISTER0 + 8 * i, | ||
150 | + arr_afmr[i]); | ||
151 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_AFIR_REGISTER0 + 8 * i, | ||
152 | + arr_afir[i]); | ||
153 | + } | ||
154 | + | ||
155 | + /* Enable all the pairs from AFR register. */ | ||
156 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_FILTER_CONTROL_REGISTER, | ||
157 | + ENABLE_ALL_FILTERS); | ||
158 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_FILTER_CONTROL_REGISTER, | ||
159 | + ENABLE_ALL_FILTERS); | ||
160 | +} | ||
161 | + | ||
162 | +static void configure_canfd(QTestState *qts, uint8_t mode) | ||
163 | +{ | ||
164 | + uint32_t status = 0; | ||
165 | + | ||
166 | + /* Put CANFD0 and CANFD1 in config mode. */ | ||
167 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_SRR_OFFSET, SRR_CONFIG_MODE); | ||
168 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_SRR_OFFSET, SRR_CONFIG_MODE); | ||
169 | + | ||
170 | + /* Write mode of operation in Mode select register. */ | ||
171 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_MSR_OFFSET, mode); | ||
172 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_MSR_OFFSET, mode); | ||
173 | + | ||
174 | + enable_filters(qts); | ||
175 | + | ||
176 | + /* Check here if CANFD0 and CANFD1 are in config mode. */ | ||
177 | + status = qtest_readl(qts, CANFD0_BASE_ADDR + R_SR_OFFSET); | ||
178 | + status = status & STATUS_REG_MASK; | ||
179 | + g_assert_cmpint(status, ==, STATUS_CONFIG_MODE); | ||
180 | + | ||
181 | + status = qtest_readl(qts, CANFD1_BASE_ADDR + R_SR_OFFSET); | ||
182 | + status = status & STATUS_REG_MASK; | ||
183 | + g_assert_cmpint(status, ==, STATUS_CONFIG_MODE); | ||
184 | + | ||
185 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_IER_OFFSET, ENABLE_ALL_INTERRUPTS); | ||
186 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_IER_OFFSET, ENABLE_ALL_INTERRUPTS); | ||
187 | + | ||
188 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CANFD); | ||
189 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CANFD); | ||
190 | +} | ||
191 | + | ||
192 | +static void generate_random_data(uint32_t *buf_tx, bool is_canfd_frame) | ||
193 | +{ | ||
194 | + /* Generate random TX data for CANFD frame. */ | ||
195 | + if (is_canfd_frame) { | ||
196 | + for (int i = 0; i < CANFD_FRAME_SIZE - 2; i++) { | ||
197 | + buf_tx[2 + i] = rand(); | ||
33 | + } | 198 | + } |
34 | + } else { | 199 | + } else { |
35 | + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { | 200 | + /* Generate random TX data for CAN frame. */ |
36 | + ret |= CPSR_I; | 201 | + for (int i = 0; i < CAN_FRAME_SIZE - 2; i++) { |
202 | + buf_tx[2 + i] = rand(); | ||
37 | + } | 203 | + } |
38 | } | 204 | + } |
39 | - if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { | 205 | +} |
40 | - ret |= CPSR_F; | 206 | + |
41 | + | 207 | +static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx) |
42 | + if (arm_hcr_el2_fmo(env)) { | 208 | +{ |
43 | + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { | 209 | + uint32_t int_status; |
44 | + ret |= CPSR_F; | 210 | + uint32_t fifo_status_reg_value; |
211 | + /* At which RX FIFO the received data is stored. */ | ||
212 | + uint8_t store_ind = 0; | ||
213 | + bool is_canfd_frame = false; | ||
214 | + | ||
215 | + /* Read the interrupt on CANFD rx. */ | ||
216 | + int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_RXOK; | ||
217 | + | ||
218 | + g_assert_cmpint(int_status, ==, ISR_RXOK); | ||
219 | + | ||
220 | + /* Find the fill level and read index. */ | ||
221 | + fifo_status_reg_value = qtest_readl(qts, can_base_addr + | ||
222 | + RX_FIFO_STATUS_REGISTER); | ||
223 | + | ||
224 | + store_ind = (fifo_status_reg_value & FIFO_STATUS_READ_INDEX_MASK) + | ||
225 | + ((fifo_status_reg_value & FIFO_STATUS_FILL_LEVEL_MASK) >> | ||
226 | + FILL_LEVEL_SHIFT); | ||
227 | + | ||
228 | + g_assert_cmpint(store_ind, ==, FIRST_RX_STORE_INDEX); | ||
229 | + | ||
230 | + /* Read the RX register data for CANFD. */ | ||
231 | + buf_rx[0] = qtest_readl(qts, can_base_addr + R_RX0_ID_OFFSET); | ||
232 | + buf_rx[1] = qtest_readl(qts, can_base_addr + R_RX0_DLC_OFFSET); | ||
233 | + | ||
234 | + is_canfd_frame = (buf_rx[1] >> DLC_FD_BIT_SHIFT) & 1; | ||
235 | + | ||
236 | + if (is_canfd_frame) { | ||
237 | + for (int i = 0; i < CANFD_FRAME_SIZE - 2; i++) { | ||
238 | + buf_rx[i + 2] = qtest_readl(qts, | ||
239 | + can_base_addr + R_RX0_DATA1_OFFSET + 4 * i); | ||
45 | + } | 240 | + } |
46 | + } else { | 241 | + } else { |
47 | + if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { | 242 | + buf_rx[2] = qtest_readl(qts, can_base_addr + R_RX0_DATA1_OFFSET); |
48 | + ret |= CPSR_F; | 243 | + buf_rx[3] = qtest_readl(qts, can_base_addr + R_RX0_DATA2_OFFSET); |
244 | + } | ||
245 | + | ||
246 | + /* Clear the RX interrupt. */ | ||
247 | + qtest_writel(qts, CANFD1_BASE_ADDR + R_ICR_OFFSET, ISR_RXOK); | ||
248 | +} | ||
249 | + | ||
250 | +static void write_data(QTestState *qts, uint64_t can_base_addr, | ||
251 | + const uint32_t *buf_tx, bool is_canfd_frame) | ||
252 | +{ | ||
253 | + /* Write the TX register data for CANFD. */ | ||
254 | + qtest_writel(qts, can_base_addr + R_TXID_OFFSET, buf_tx[0]); | ||
255 | + qtest_writel(qts, can_base_addr + R_TXDLC_OFFSET, buf_tx[1]); | ||
256 | + | ||
257 | + if (is_canfd_frame) { | ||
258 | + for (int i = 0; i < CANFD_FRAME_SIZE - 2; i++) { | ||
259 | + qtest_writel(qts, can_base_addr + R_TXDATA1_OFFSET + 4 * i, | ||
260 | + buf_tx[2 + i]); | ||
49 | + } | 261 | + } |
50 | } | 262 | + } else { |
51 | + | 263 | + qtest_writel(qts, can_base_addr + R_TXDATA1_OFFSET, buf_tx[2]); |
52 | /* External aborts are not possible in QEMU so A bit is always clear */ | 264 | + qtest_writel(qts, can_base_addr + R_TXDATA2_OFFSET, buf_tx[3]); |
53 | return ret; | 265 | + } |
54 | } | 266 | +} |
267 | + | ||
268 | +static void send_data(QTestState *qts, uint64_t can_base_addr) | ||
269 | +{ | ||
270 | + uint32_t int_status; | ||
271 | + | ||
272 | + qtest_writel(qts, can_base_addr + R_TX_READY_REQ_REGISTER, | ||
273 | + TX_READY_REG_VAL); | ||
274 | + | ||
275 | + /* Read the interrupt on CANFD for tx. */ | ||
276 | + int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_TXOK; | ||
277 | + | ||
278 | + g_assert_cmpint(int_status, ==, ISR_TXOK); | ||
279 | + | ||
280 | + /* Clear the interrupt for tx. */ | ||
281 | + qtest_writel(qts, CANFD0_BASE_ADDR + R_ICR_OFFSET, ISR_TXOK); | ||
282 | +} | ||
283 | + | ||
284 | +static void match_rx_tx_data(const uint32_t *buf_tx, const uint32_t *buf_rx, | ||
285 | + bool is_canfd_frame) | ||
286 | +{ | ||
287 | + uint16_t size = 0; | ||
288 | + uint8_t len = CAN_FRAME_SIZE; | ||
289 | + | ||
290 | + if (is_canfd_frame) { | ||
291 | + len = CANFD_FRAME_SIZE; | ||
292 | + } | ||
293 | + | ||
294 | + while (size < len) { | ||
295 | + if (R_RX0_ID_OFFSET + 4 * size == R_RX0_DLC_OFFSET) { | ||
296 | + g_assert_cmpint((buf_rx[size] & DLC_FD_BIT_MASK), ==, | ||
297 | + (buf_tx[size] & DLC_FD_BIT_MASK)); | ||
298 | + } else { | ||
299 | + if (!is_canfd_frame && size == 4) { | ||
300 | + break; | ||
301 | + } | ||
302 | + | ||
303 | + g_assert_cmpint(buf_rx[size], ==, buf_tx[size]); | ||
304 | + } | ||
305 | + | ||
306 | + size++; | ||
307 | + } | ||
308 | +} | ||
309 | +/* | ||
310 | + * Xilinx CANFD supports both CAN and CANFD frames. This test transfers a | ||
311 | + * CAN frame, i.e. 8 bytes of data, from CANFD0 to CANFD1 through the | ||
312 | + * canbus. CANFD0 initiates the data transfer on the bus and CANFD1 receives | ||
313 | + * the data. The test compares the CAN frame data sent from CANFD0 with the | ||
314 | + * data received on CANFD1. | ||
315 | + */ | ||
316 | +static void test_can_data_transfer(void) | ||
317 | +{ | ||
318 | + uint32_t buf_tx[CAN_FRAME_SIZE] = { 0x5a5bb9a4, 0x80000000, | ||
319 | + 0x12345678, 0x87654321 }; | ||
320 | + uint32_t buf_rx[CAN_FRAME_SIZE] = { 0x00, 0x00, 0x00, 0x00 }; | ||
321 | + uint32_t status = 0; | ||
322 | + | ||
323 | + generate_random_data(buf_tx, false); | ||
324 | + | ||
325 | + QTestState *qts = qtest_init("-machine xlnx-versal-virt" | ||
326 | + " -object can-bus,id=canbus" | ||
327 | + " -machine canbus0=canbus" | ||
328 | + " -machine canbus1=canbus" | ||
329 | + ); | ||
330 | + | ||
331 | + configure_canfd(qts, MSR_NORMAL_MODE); | ||
332 | + | ||
333 | + /* Check if CANFD0 and CANFD1 are in Normal mode. */ | ||
334 | + status = qtest_readl(qts, CANFD0_BASE_ADDR + R_SR_OFFSET); | ||
335 | + status = status & STATUS_REG_MASK; | ||
336 | + g_assert_cmpint(status, ==, STATUS_NORMAL_MODE); | ||
337 | + | ||
338 | + status = qtest_readl(qts, CANFD1_BASE_ADDR + R_SR_OFFSET); | ||
339 | + status = status & STATUS_REG_MASK; | ||
340 | + g_assert_cmpint(status, ==, STATUS_NORMAL_MODE); | ||
341 | + | ||
342 | + write_data(qts, CANFD0_BASE_ADDR, buf_tx, false); | ||
343 | + | ||
344 | + send_data(qts, CANFD0_BASE_ADDR); | ||
345 | + read_data(qts, CANFD1_BASE_ADDR, buf_rx); | ||
346 | + match_rx_tx_data(buf_tx, buf_rx, false); | ||
347 | + | ||
348 | + qtest_quit(qts); | ||
349 | +} | ||
350 | + | ||
351 | +/* | ||
352 | + * This test transfers a CANFD frame, i.e. 64 bytes of data, from CANFD0 | ||
353 | + * to CANFD1 through the canbus. CANFD0 initiates the data transfer on the | ||
354 | + * bus and CANFD1 receives the data. The test compares the CANFD frame data | ||
355 | + * sent from CANFD0 with the data received on CANFD1. | ||
356 | + */ | ||
357 | +static void test_canfd_data_transfer(void) | ||
358 | +{ | ||
359 | + uint32_t buf_tx[CANFD_FRAME_SIZE] = { 0x5a5bb9a4, 0xf8000000 }; | ||
360 | + uint32_t buf_rx[CANFD_FRAME_SIZE] = { 0x00, 0x00, 0x00, 0x00 }; | ||
361 | + uint32_t status = 0; | ||
362 | + | ||
363 | + generate_random_data(buf_tx, true); | ||
364 | + | ||
365 | + QTestState *qts = qtest_init("-machine xlnx-versal-virt" | ||
366 | + " -object can-bus,id=canbus" | ||
367 | + " -machine canbus0=canbus" | ||
368 | + " -machine canbus1=canbus" | ||
369 | + ); | ||
370 | + | ||
371 | + configure_canfd(qts, MSR_NORMAL_MODE); | ||
372 | + | ||
373 | + /* Check if CANFD0 and CANFD1 are in Normal mode. */ | ||
374 | + status = qtest_readl(qts, CANFD0_BASE_ADDR + R_SR_OFFSET); | ||
375 | + status = status & STATUS_REG_MASK; | ||
376 | + g_assert_cmpint(status, ==, STATUS_NORMAL_MODE); | ||
377 | + | ||
378 | + status = qtest_readl(qts, CANFD1_BASE_ADDR + R_SR_OFFSET); | ||
379 | + status = status & STATUS_REG_MASK; | ||
380 | + g_assert_cmpint(status, ==, STATUS_NORMAL_MODE); | ||
381 | + | ||
382 | + write_data(qts, CANFD0_BASE_ADDR, buf_tx, true); | ||
383 | + | ||
384 | + send_data(qts, CANFD0_BASE_ADDR); | ||
385 | + read_data(qts, CANFD1_BASE_ADDR, buf_rx); | ||
386 | + match_rx_tx_data(buf_tx, buf_rx, true); | ||
387 | + | ||
388 | + qtest_quit(qts); | ||
389 | +} | ||
390 | + | ||
391 | +/* | ||
392 | + * This test exercises loopback mode on CANFD0 and CANFD1. Data sent from | ||
393 | + * the TX registers of CANFD0 and CANFD1 is compared with the RX register | ||
394 | + * data of the respective CANFD controller. | ||
395 | + */ | ||
396 | +static void test_can_loopback(void) | ||
397 | +{ | ||
398 | + uint32_t buf_tx[CANFD_FRAME_SIZE] = { 0x5a5bb9a4, 0xf8000000 }; | ||
399 | + uint32_t buf_rx[CANFD_FRAME_SIZE] = { 0x00, 0x00, 0x00, 0x00 }; | ||
400 | + uint32_t status = 0; | ||
401 | + | ||
402 | + generate_random_data(buf_tx, true); | ||
403 | + | ||
404 | + QTestState *qts = qtest_init("-machine xlnx-versal-virt" | ||
405 | + " -object can-bus,id=canbus" | ||
406 | + " -machine canbus0=canbus" | ||
407 | + " -machine canbus1=canbus" | ||
408 | + ); | ||
409 | + | ||
410 | + configure_canfd(qts, MSR_LOOPBACK_MODE); | ||
411 | + | ||
412 | + /* Check if CANFD0 and CANFD1 are set in correct loopback mode. */ | ||
413 | + status = qtest_readl(qts, CANFD0_BASE_ADDR + R_SR_OFFSET); | ||
414 | + status = status & STATUS_REG_MASK; | ||
415 | + g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE); | ||
416 | + | ||
417 | + status = qtest_readl(qts, CANFD1_BASE_ADDR + R_SR_OFFSET); | ||
418 | + status = status & STATUS_REG_MASK; | ||
419 | + g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE); | ||
420 | + | ||
421 | + write_data(qts, CANFD0_BASE_ADDR, buf_tx, true); | ||
422 | + | ||
423 | + send_data(qts, CANFD0_BASE_ADDR); | ||
424 | + read_data(qts, CANFD0_BASE_ADDR, buf_rx); | ||
425 | + match_rx_tx_data(buf_tx, buf_rx, true); | ||
426 | + | ||
427 | + generate_random_data(buf_tx, true); | ||
428 | + | ||
429 | + write_data(qts, CANFD1_BASE_ADDR, buf_tx, true); | ||
430 | + | ||
431 | + send_data(qts, CANFD1_BASE_ADDR); | ||
432 | + read_data(qts, CANFD1_BASE_ADDR, buf_rx); | ||
433 | + match_rx_tx_data(buf_tx, buf_rx, true); | ||
434 | + | ||
435 | + qtest_quit(qts); | ||
436 | +} | ||
437 | + | ||
438 | +int main(int argc, char **argv) | ||
439 | +{ | ||
440 | + g_test_init(&argc, &argv, NULL); | ||
441 | + | ||
442 | + qtest_add_func("/net/canfd/can_data_transfer", test_can_data_transfer); | ||
443 | + qtest_add_func("/net/canfd/canfd_data_transfer", test_canfd_data_transfer); | ||
444 | + qtest_add_func("/net/canfd/can_loopback", test_can_loopback); | ||
445 | + | ||
446 | + return g_test_run(); | ||
447 | +} | ||
448 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build | ||
449 | index XXXXXXX..XXXXXXX 100644 | ||
450 | --- a/tests/qtest/meson.build | ||
451 | +++ b/tests/qtest/meson.build | ||
452 | @@ -XXX,XX +XXX,XX @@ qtests_aarch64 = \ | ||
453 | (config_all.has_key('CONFIG_TCG') and config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? \ | ||
454 | ['tpm-tis-device-test', 'tpm-tis-device-swtpm-test'] : []) + \ | ||
455 | (config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \ | ||
456 | + (config_all_devices.has_key('CONFIG_XLNX_VERSAL') ? ['xlnx-canfd-test'] : []) + \ | ||
457 | (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test'] : []) + \ | ||
458 | (config_all.has_key('CONFIG_TCG') and \ | ||
459 | config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ | ||
55 | -- | 460 | -- |
56 | 2.19.1 | 461 | 2.34.1 |
57 | |||
58 | diff view generated by jsdifflib |
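
To run the new test on its own from a configured build tree, something along these lines should work; the exact paths are an assumption and depend on the local build layout:

    cd build
    QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/xlnx-canfd-test

The meson.build hunk above also adds it to the aarch64 qtest set, so a normal 'make check-qtest-aarch64' run should pick it up as well.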
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Since QEMU does not implement ASIDs, changes to the ASID must flush the | 3 | Allwinner R40 (sun8i) SoC features a Quad-Core Cortex-A7 ARM CPU, |
4 | tlb. However, if the ASID does not change there is no reason to flush. | 4 | and a Mali400 MP2 GPU from ARM. It's also known as the Allwinner T3 |
5 | 5 | for In-Car Entertainment usage; the A40i and A40pro are variants that |
6 | In testing a boot of the Ubuntu installer to the first menu, this reduces | 6 | differ in their applicable temperature ranges (industrial and military). |
5 | 7 | ||
6 | In testing a boot of the Ubuntu installer to the first menu, this reduces | 8 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
7 | the number of flushes by 30%, or nearly 600k instances. | 9 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
8 | |||
9 | Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
13 | Message-id: 20181019015617.22583-3-richard.henderson@linaro.org | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 11 | --- |
16 | target/arm/helper.c | 8 +++----- | 12 | include/hw/arm/allwinner-r40.h | 110 +++++++++ |
17 | 1 file changed, 3 insertions(+), 5 deletions(-) | 13 | hw/arm/allwinner-r40.c | 415 +++++++++++++++++++++++++++++++++ |
14 | hw/arm/bananapi_m2u.c | 129 ++++++++++ | ||
15 | hw/arm/Kconfig | 10 + | ||
16 | hw/arm/meson.build | 1 + | ||
17 | 5 files changed, 665 insertions(+) | ||
18 | create mode 100644 include/hw/arm/allwinner-r40.h | ||
19 | create mode 100644 hw/arm/allwinner-r40.c | ||
20 | create mode 100644 hw/arm/bananapi_m2u.c | ||
18 | 21 | ||
19 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 22 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
23 | new file mode 100644 | ||
24 | index XXXXXXX..XXXXXXX | ||
25 | --- /dev/null | ||
26 | +++ b/include/hw/arm/allwinner-r40.h | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | +/* | ||
29 | + * Allwinner R40/A40i/T3 System on Chip emulation | ||
30 | + * | ||
31 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
32 | + * | ||
33 | + * This program is free software: you can redistribute it and/or modify | ||
34 | + * it under the terms of the GNU General Public License as published by | ||
35 | + * the Free Software Foundation, either version 2 of the License, or | ||
36 | + * (at your option) any later version. | ||
37 | + * | ||
38 | + * This program is distributed in the hope that it will be useful, | ||
39 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
40 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
41 | + * GNU General Public License for more details. | ||
42 | + * | ||
43 | + * You should have received a copy of the GNU General Public License | ||
44 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
45 | + */ | ||
46 | + | ||
47 | +#ifndef HW_ARM_ALLWINNER_R40_H | ||
48 | +#define HW_ARM_ALLWINNER_R40_H | ||
49 | + | ||
50 | +#include "qom/object.h" | ||
51 | +#include "hw/arm/boot.h" | ||
52 | +#include "hw/timer/allwinner-a10-pit.h" | ||
53 | +#include "hw/intc/arm_gic.h" | ||
54 | +#include "hw/sd/allwinner-sdhost.h" | ||
55 | +#include "target/arm/cpu.h" | ||
56 | +#include "sysemu/block-backend.h" | ||
57 | + | ||
58 | +enum { | ||
59 | + AW_R40_DEV_SRAM_A1, | ||
60 | + AW_R40_DEV_SRAM_A2, | ||
61 | + AW_R40_DEV_SRAM_A3, | ||
62 | + AW_R40_DEV_SRAM_A4, | ||
63 | + AW_R40_DEV_MMC0, | ||
64 | + AW_R40_DEV_MMC1, | ||
65 | + AW_R40_DEV_MMC2, | ||
66 | + AW_R40_DEV_MMC3, | ||
67 | + AW_R40_DEV_CCU, | ||
68 | + AW_R40_DEV_PIT, | ||
69 | + AW_R40_DEV_UART0, | ||
70 | + AW_R40_DEV_GIC_DIST, | ||
71 | + AW_R40_DEV_GIC_CPU, | ||
72 | + AW_R40_DEV_GIC_HYP, | ||
73 | + AW_R40_DEV_GIC_VCPU, | ||
74 | + AW_R40_DEV_SDRAM | ||
75 | +}; | ||
76 | + | ||
77 | +#define AW_R40_NUM_CPUS (4) | ||
78 | + | ||
79 | +/** | ||
80 | + * Allwinner R40 object model | ||
81 | + * @{ | ||
82 | + */ | ||
83 | + | ||
84 | +/** Object type for the Allwinner R40 SoC */ | ||
85 | +#define TYPE_AW_R40 "allwinner-r40" | ||
86 | + | ||
87 | +/** Convert input object to Allwinner R40 state object */ | ||
88 | +OBJECT_DECLARE_SIMPLE_TYPE(AwR40State, AW_R40) | ||
89 | + | ||
90 | +/** @} */ | ||
91 | + | ||
92 | +/** | ||
93 | + * Allwinner R40 object | ||
94 | + * | ||
95 | + * This struct contains the state of all the devices | ||
96 | + * which are currently emulated by the R40 SoC code. | ||
97 | + */ | ||
98 | +#define AW_R40_NUM_MMCS 4 | ||
99 | + | ||
100 | +struct AwR40State { | ||
101 | + /*< private >*/ | ||
102 | + DeviceState parent_obj; | ||
103 | + /*< public >*/ | ||
104 | + | ||
105 | + ARMCPU cpus[AW_R40_NUM_CPUS]; | ||
106 | + const hwaddr *memmap; | ||
107 | + AwA10PITState timer; | ||
108 | + AwSdHostState mmc[AW_R40_NUM_MMCS]; | ||
109 | + GICState gic; | ||
110 | + MemoryRegion sram_a1; | ||
111 | + MemoryRegion sram_a2; | ||
112 | + MemoryRegion sram_a3; | ||
113 | + MemoryRegion sram_a4; | ||
114 | +}; | ||
115 | + | ||
116 | +/** | ||
117 | + * Emulate Boot ROM firmware setup functionality. | ||
118 | + * | ||
119 | + * A real Allwinner R40 SoC contains a Boot ROM | ||
120 | + * which is the first code that runs right after | ||
121 | + * the SoC is powered on. The Boot ROM is responsible | ||
122 | + * for loading user code (e.g. a bootloader) from any | ||
123 | + * of the supported external devices and writing the | ||
124 | + * downloaded code to internal SRAM. After loading, the SoC | ||
125 | + * begins executing the code written to SRAM. | ||
126 | + * | ||
127 | + * This function emulates the Boot ROM by copying 32 KiB | ||
128 | + * of data from the given block device and writing it to | ||
129 | + * the start of the first internal SRAM memory. | ||
130 | + * | ||
131 | + * @s: Allwinner R40 state object pointer | ||
132 | + * @blk: Block backend device object pointer | ||
133 | + * @unit: the MMC controller's unit number | ||
134 | + */ | ||
135 | +bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit); | ||
136 | + | ||
137 | +#endif /* HW_ARM_ALLWINNER_R40_H */ | ||
138 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
139 | new file mode 100644 | ||
140 | index XXXXXXX..XXXXXXX | ||
141 | --- /dev/null | ||
142 | +++ b/hw/arm/allwinner-r40.c | ||
143 | @@ -XXX,XX +XXX,XX @@ | ||
144 | +/* | ||
145 | + * Allwinner R40/A40i/T3 System on Chip emulation | ||
146 | + * | ||
147 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
148 | + * | ||
149 | + * This program is free software: you can redistribute it and/or modify | ||
150 | + * it under the terms of the GNU General Public License as published by | ||
151 | + * the Free Software Foundation, either version 2 of the License, or | ||
152 | + * (at your option) any later version. | ||
153 | + * | ||
154 | + * This program is distributed in the hope that it will be useful, | ||
155 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
156 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
157 | + * GNU General Public License for more details. | ||
158 | + * | ||
159 | + * You should have received a copy of the GNU General Public License | ||
160 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
161 | + */ | ||
162 | + | ||
163 | +#include "qemu/osdep.h" | ||
164 | +#include "qapi/error.h" | ||
165 | +#include "qemu/error-report.h" | ||
166 | +#include "qemu/bswap.h" | ||
167 | +#include "qemu/module.h" | ||
168 | +#include "qemu/units.h" | ||
169 | +#include "hw/qdev-core.h" | ||
170 | +#include "hw/sysbus.h" | ||
171 | +#include "hw/char/serial.h" | ||
172 | +#include "hw/misc/unimp.h" | ||
173 | +#include "hw/usb/hcd-ehci.h" | ||
174 | +#include "hw/loader.h" | ||
175 | +#include "sysemu/sysemu.h" | ||
176 | +#include "hw/arm/allwinner-r40.h" | ||
177 | + | ||
178 | +/* Memory map */ | ||
179 | +const hwaddr allwinner_r40_memmap[] = { | ||
180 | + [AW_R40_DEV_SRAM_A1] = 0x00000000, | ||
181 | + [AW_R40_DEV_SRAM_A2] = 0x00004000, | ||
182 | + [AW_R40_DEV_SRAM_A3] = 0x00008000, | ||
183 | + [AW_R40_DEV_SRAM_A4] = 0x0000b400, | ||
184 | + [AW_R40_DEV_MMC0] = 0x01c0f000, | ||
185 | + [AW_R40_DEV_MMC1] = 0x01c10000, | ||
186 | + [AW_R40_DEV_MMC2] = 0x01c11000, | ||
187 | + [AW_R40_DEV_MMC3] = 0x01c12000, | ||
188 | + [AW_R40_DEV_PIT] = 0x01c20c00, | ||
189 | + [AW_R40_DEV_UART0] = 0x01c28000, | ||
190 | + [AW_R40_DEV_GIC_DIST] = 0x01c81000, | ||
191 | + [AW_R40_DEV_GIC_CPU] = 0x01c82000, | ||
192 | + [AW_R40_DEV_GIC_HYP] = 0x01c84000, | ||
193 | + [AW_R40_DEV_GIC_VCPU] = 0x01c86000, | ||
194 | + [AW_R40_DEV_SDRAM] = 0x40000000 | ||
195 | +}; | ||
196 | + | ||
197 | +/* List of unimplemented devices */ | ||
198 | +struct AwR40Unimplemented { | ||
199 | + const char *device_name; | ||
200 | + hwaddr base; | ||
201 | + hwaddr size; | ||
202 | +}; | ||
203 | + | ||
204 | +static struct AwR40Unimplemented r40_unimplemented[] = { | ||
205 | + { "d-engine", 0x01000000, 4 * MiB }, | ||
206 | + { "d-inter", 0x01400000, 128 * KiB }, | ||
207 | + { "sram-c", 0x01c00000, 4 * KiB }, | ||
208 | + { "dma", 0x01c02000, 4 * KiB }, | ||
209 | + { "nfdc", 0x01c03000, 4 * KiB }, | ||
210 | + { "ts", 0x01c04000, 4 * KiB }, | ||
211 | + { "spi0", 0x01c05000, 4 * KiB }, | ||
212 | + { "spi1", 0x01c06000, 4 * KiB }, | ||
213 | + { "cs0", 0x01c09000, 4 * KiB }, | ||
214 | + { "keymem", 0x01c0a000, 4 * KiB }, | ||
215 | + { "emac", 0x01c0b000, 4 * KiB }, | ||
216 | + { "usb0-otg", 0x01c13000, 4 * KiB }, | ||
217 | + { "usb0-host", 0x01c14000, 4 * KiB }, | ||
218 | + { "crypto", 0x01c15000, 4 * KiB }, | ||
219 | + { "spi2", 0x01c17000, 4 * KiB }, | ||
220 | + { "sata", 0x01c18000, 4 * KiB }, | ||
221 | + { "usb1-host", 0x01c19000, 4 * KiB }, | ||
222 | + { "sid", 0x01c1b000, 4 * KiB }, | ||
223 | + { "usb2-host", 0x01c1c000, 4 * KiB }, | ||
224 | + { "cs1", 0x01c1d000, 4 * KiB }, | ||
225 | + { "spi3", 0x01c1f000, 4 * KiB }, | ||
226 | + { "ccu", 0x01c20000, 1 * KiB }, | ||
227 | + { "rtc", 0x01c20400, 1 * KiB }, | ||
228 | + { "pio", 0x01c20800, 1 * KiB }, | ||
229 | + { "owa", 0x01c21000, 1 * KiB }, | ||
230 | + { "ac97", 0x01c21400, 1 * KiB }, | ||
231 | + { "cir0", 0x01c21800, 1 * KiB }, | ||
232 | + { "cir1", 0x01c21c00, 1 * KiB }, | ||
233 | + { "pcm0", 0x01c22000, 1 * KiB }, | ||
234 | + { "pcm1", 0x01c22400, 1 * KiB }, | ||
235 | + { "pcm2", 0x01c22800, 1 * KiB }, | ||
236 | + { "audio", 0x01c22c00, 1 * KiB }, | ||
237 | + { "keypad", 0x01c23000, 1 * KiB }, | ||
238 | + { "pwm", 0x01c23400, 1 * KiB }, | ||
239 | + { "keyadc", 0x01c24400, 1 * KiB }, | ||
240 | + { "ths", 0x01c24c00, 1 * KiB }, | ||
241 | + { "rtp", 0x01c25000, 1 * KiB }, | ||
242 | + { "pmu", 0x01c25400, 1 * KiB }, | ||
243 | + { "cpu-cfg", 0x01c25c00, 1 * KiB }, | ||
244 | + { "uart0", 0x01c28000, 1 * KiB }, | ||
245 | + { "uart1", 0x01c28400, 1 * KiB }, | ||
246 | + { "uart2", 0x01c28800, 1 * KiB }, | ||
247 | + { "uart3", 0x01c28c00, 1 * KiB }, | ||
248 | + { "uart4", 0x01c29000, 1 * KiB }, | ||
249 | + { "uart5", 0x01c29400, 1 * KiB }, | ||
250 | + { "uart6", 0x01c29800, 1 * KiB }, | ||
251 | + { "uart7", 0x01c29c00, 1 * KiB }, | ||
252 | + { "ps20", 0x01c2a000, 1 * KiB }, | ||
253 | + { "ps21", 0x01c2a400, 1 * KiB }, | ||
254 | + { "twi0", 0x01c2ac00, 1 * KiB }, | ||
255 | + { "twi1", 0x01c2b000, 1 * KiB }, | ||
256 | + { "twi2", 0x01c2b400, 1 * KiB }, | ||
257 | + { "twi3", 0x01c2b800, 1 * KiB }, | ||
258 | + { "twi4", 0x01c2c000, 1 * KiB }, | ||
259 | + { "scr", 0x01c2c400, 1 * KiB }, | ||
260 | + { "tvd-top", 0x01c30000, 4 * KiB }, | ||
261 | + { "tvd0", 0x01c31000, 4 * KiB }, | ||
262 | + { "tvd1", 0x01c32000, 4 * KiB }, | ||
263 | + { "tvd2", 0x01c33000, 4 * KiB }, | ||
264 | + { "tvd3", 0x01c34000, 4 * KiB }, | ||
265 | + { "gpu", 0x01c40000, 64 * KiB }, | ||
266 | + { "gmac", 0x01c50000, 64 * KiB }, | ||
267 | + { "hstmr", 0x01c60000, 4 * KiB }, | ||
268 | + { "dram-com", 0x01c62000, 4 * KiB }, | ||
269 | + { "dram-ctl", 0x01c63000, 4 * KiB }, | ||
270 | + { "tcon-top", 0x01c70000, 4 * KiB }, | ||
271 | + { "lcd0", 0x01c71000, 4 * KiB }, | ||
272 | + { "lcd1", 0x01c72000, 4 * KiB }, | ||
273 | + { "tv0", 0x01c73000, 4 * KiB }, | ||
274 | + { "tv1", 0x01c74000, 4 * KiB }, | ||
275 | + { "tve-top", 0x01c90000, 16 * KiB }, | ||
276 | + { "tve0", 0x01c94000, 16 * KiB }, | ||
277 | + { "tve1", 0x01c98000, 16 * KiB }, | ||
278 | + { "mipi_dsi", 0x01ca0000, 4 * KiB }, | ||
279 | + { "mipi_dphy", 0x01ca1000, 4 * KiB }, | ||
280 | + { "ve", 0x01d00000, 1024 * KiB }, | ||
281 | + { "mp", 0x01e80000, 128 * KiB }, | ||
282 | + { "hdmi", 0x01ee0000, 128 * KiB }, | ||
283 | + { "prcm", 0x01f01400, 1 * KiB }, | ||
284 | + { "debug", 0x3f500000, 64 * KiB }, | ||
285 | + { "cpubist", 0x3f501000, 4 * KiB }, | ||
286 | + { "dcu", 0x3fff0000, 64 * KiB }, | ||
287 | + { "hstmr", 0x01c60000, 4 * KiB }, | ||
288 | + { "brom", 0xffff0000, 36 * KiB } | ||
289 | +}; | ||
290 | + | ||
291 | +/* Per Processor Interrupts */ | ||
292 | +enum { | ||
293 | + AW_R40_GIC_PPI_MAINT = 9, | ||
294 | + AW_R40_GIC_PPI_HYPTIMER = 10, | ||
295 | + AW_R40_GIC_PPI_VIRTTIMER = 11, | ||
296 | + AW_R40_GIC_PPI_SECTIMER = 13, | ||
297 | + AW_R40_GIC_PPI_PHYSTIMER = 14 | ||
298 | +}; | ||
299 | + | ||
300 | +/* Shared Processor Interrupts */ | ||
301 | +enum { | ||
302 | + AW_R40_GIC_SPI_UART0 = 1, | ||
303 | + AW_R40_GIC_SPI_TIMER0 = 22, | ||
304 | + AW_R40_GIC_SPI_TIMER1 = 23, | ||
305 | + AW_R40_GIC_SPI_MMC0 = 32, | ||
306 | + AW_R40_GIC_SPI_MMC1 = 33, | ||
307 | + AW_R40_GIC_SPI_MMC2 = 34, | ||
308 | + AW_R40_GIC_SPI_MMC3 = 35, | ||
309 | +}; | ||
310 | + | ||
311 | +/* Allwinner R40 general constants */ | ||
312 | +enum { | ||
313 | + AW_R40_GIC_NUM_SPI = 128 | ||
314 | +}; | ||
315 | + | ||
316 | +#define BOOT0_MAGIC "eGON.BT0" | ||
317 | + | ||
318 | +/* The low 8-bits of the 'boot_media' field in the SPL header */ | ||
319 | +#define SUNXI_BOOTED_FROM_MMC0 0 | ||
320 | +#define SUNXI_BOOTED_FROM_NAND 1 | ||
321 | +#define SUNXI_BOOTED_FROM_MMC2 2 | ||
322 | +#define SUNXI_BOOTED_FROM_SPI 3 | ||
323 | + | ||
324 | +struct boot_file_head { | ||
325 | + uint32_t b_instruction; | ||
326 | + uint8_t magic[8]; | ||
327 | + uint32_t check_sum; | ||
328 | + uint32_t length; | ||
329 | + uint32_t pub_head_size; | ||
330 | + uint32_t fel_script_address; | ||
331 | + uint32_t fel_uEnv_length; | ||
332 | + uint32_t dt_name_offset; | ||
333 | + uint32_t dram_size; | ||
334 | + uint32_t boot_media; | ||
335 | + uint32_t string_pool[13]; | ||
336 | +}; | ||
337 | + | ||
338 | +bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit) | ||
339 | +{ | ||
340 | + const int64_t rom_size = 32 * KiB; | ||
341 | + g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); | ||
342 | + struct boot_file_head *head = (struct boot_file_head *)buffer; | ||
343 | + | ||
344 | + if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { | ||
345 | + error_setg(&error_fatal, "%s: failed to read BlockBackend data", | ||
346 | + __func__); | ||
347 | + return false; | ||
348 | + } | ||
349 | + | ||
350 | + /* we only check the magic string here. */ | ||
351 | + if (memcmp(head->magic, BOOT0_MAGIC, sizeof(head->magic))) { | ||
352 | + return false; | ||
353 | + } | ||
354 | + | ||
355 | + /* | ||
356 | + * Simulate the behavior of the bootROM: it changes the boot_media | ||
357 | + * flag to indicate where the chip is booting from. The R40 can boot from | ||
358 | + * mmc0 or mmc2, and the default value of boot_media is zero | ||
359 | + * (SUNXI_BOOTED_FROM_MMC0), so fix up this flag when booting from | ||
360 | + * the other controller. | ||
361 | + */ | ||
362 | + if (unit == 2) { | ||
363 | + head->boot_media = cpu_to_le32(SUNXI_BOOTED_FROM_MMC2); | ||
364 | + } else { | ||
365 | + head->boot_media = cpu_to_le32(SUNXI_BOOTED_FROM_MMC0); | ||
366 | + } | ||
367 | + | ||
368 | + rom_add_blob("allwinner-r40.bootrom", buffer, rom_size, | ||
369 | + rom_size, s->memmap[AW_R40_DEV_SRAM_A1], | ||
370 | + NULL, NULL, NULL, NULL, false); | ||
371 | + return true; | ||
372 | +} | ||
373 | + | ||
374 | +static void allwinner_r40_init(Object *obj) | ||
375 | +{ | ||
376 | + static const char *mmc_names[AW_R40_NUM_MMCS] = { | ||
377 | + "mmc0", "mmc1", "mmc2", "mmc3" | ||
378 | + }; | ||
379 | + AwR40State *s = AW_R40(obj); | ||
380 | + | ||
381 | + s->memmap = allwinner_r40_memmap; | ||
382 | + | ||
383 | + for (int i = 0; i < AW_R40_NUM_CPUS; i++) { | ||
384 | + object_initialize_child(obj, "cpu[*]", &s->cpus[i], | ||
385 | + ARM_CPU_TYPE_NAME("cortex-a7")); | ||
386 | + } | ||
387 | + | ||
388 | + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); | ||
389 | + | ||
390 | + object_initialize_child(obj, "timer", &s->timer, TYPE_AW_A10_PIT); | ||
391 | + object_property_add_alias(obj, "clk0-freq", OBJECT(&s->timer), | ||
392 | + "clk0-freq"); | ||
393 | + object_property_add_alias(obj, "clk1-freq", OBJECT(&s->timer), | ||
394 | + "clk1-freq"); | ||
395 | + | ||
396 | + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { | ||
397 | + object_initialize_child(obj, mmc_names[i], &s->mmc[i], | ||
398 | + TYPE_AW_SDHOST_SUN5I); | ||
399 | + } | ||
400 | +} | ||
401 | + | ||
402 | +static void allwinner_r40_realize(DeviceState *dev, Error **errp) | ||
403 | +{ | ||
404 | + AwR40State *s = AW_R40(dev); | ||
405 | + unsigned i; | ||
406 | + | ||
407 | + /* CPUs */ | ||
408 | + for (i = 0; i < AW_R40_NUM_CPUS; i++) { | ||
409 | + | ||
410 | + /* | ||
411 | + * Disable secondary CPUs. Guest EL3 firmware will start | ||
412 | + * them via CPU reset control registers. | ||
413 | + */ | ||
414 | + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off", | ||
415 | + i > 0); | ||
416 | + | ||
417 | + /* All exception levels required */ | ||
418 | + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el3", true); | ||
419 | + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el2", true); | ||
420 | + | ||
421 | + /* Mark realized */ | ||
422 | + qdev_realize(DEVICE(&s->cpus[i]), NULL, &error_fatal); | ||
423 | + } | ||
424 | + | ||
425 | + /* Generic Interrupt Controller */ | ||
426 | + qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", AW_R40_GIC_NUM_SPI + | ||
427 | + GIC_INTERNAL); | ||
428 | + qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); | ||
429 | + qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", AW_R40_NUM_CPUS); | ||
430 | + qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", false); | ||
431 | + qdev_prop_set_bit(DEVICE(&s->gic), "has-virtualization-extensions", true); | ||
432 | + sysbus_realize(SYS_BUS_DEVICE(&s->gic), &error_fatal); | ||
433 | + | ||
434 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, s->memmap[AW_R40_DEV_GIC_DIST]); | ||
435 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, s->memmap[AW_R40_DEV_GIC_CPU]); | ||
436 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 2, s->memmap[AW_R40_DEV_GIC_HYP]); | ||
437 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 3, s->memmap[AW_R40_DEV_GIC_VCPU]); | ||
438 | + | ||
439 | + /* | ||
440 | + * Wire the outputs from each CPU's generic timer and the GICv2 | ||
441 | + * maintenance interrupt signal to the appropriate GIC PPI inputs, | ||
442 | + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. | ||
443 | + */ | ||
444 | + for (i = 0; i < AW_R40_NUM_CPUS; i++) { | ||
445 | + DeviceState *cpudev = DEVICE(&s->cpus[i]); | ||
446 | + int ppibase = AW_R40_GIC_NUM_SPI + i * GIC_INTERNAL + GIC_NR_SGIS; | ||
447 | + int irq; | ||
448 | + /* | ||
449 | + * Mapping from the output timer irq lines from the CPU to the | ||
450 | + * GIC PPI inputs used for this board. | ||
451 | + */ | ||
452 | + const int timer_irq[] = { | ||
453 | + [GTIMER_PHYS] = AW_R40_GIC_PPI_PHYSTIMER, | ||
454 | + [GTIMER_VIRT] = AW_R40_GIC_PPI_VIRTTIMER, | ||
455 | + [GTIMER_HYP] = AW_R40_GIC_PPI_HYPTIMER, | ||
456 | + [GTIMER_SEC] = AW_R40_GIC_PPI_SECTIMER, | ||
457 | + }; | ||
458 | + | ||
459 | + /* Connect CPU timer outputs to GIC PPI inputs */ | ||
460 | + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { | ||
461 | + qdev_connect_gpio_out(cpudev, irq, | ||
462 | + qdev_get_gpio_in(DEVICE(&s->gic), | ||
463 | + ppibase + timer_irq[irq])); | ||
464 | + } | ||
465 | + | ||
466 | + /* Connect GIC outputs to CPU interrupt inputs */ | ||
467 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, | ||
468 | + qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); | ||
469 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + AW_R40_NUM_CPUS, | ||
470 | + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); | ||
471 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (2 * AW_R40_NUM_CPUS), | ||
472 | + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); | ||
473 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (3 * AW_R40_NUM_CPUS), | ||
474 | + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); | ||
475 | + | ||
476 | + /* GIC maintenance signal */ | ||
477 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (4 * AW_R40_NUM_CPUS), | ||
478 | + qdev_get_gpio_in(DEVICE(&s->gic), | ||
479 | + ppibase + AW_R40_GIC_PPI_MAINT)); | ||
480 | + } | ||
481 | + | ||
482 | + /* Timer */ | ||
483 | + sysbus_realize(SYS_BUS_DEVICE(&s->timer), &error_fatal); | ||
484 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer), 0, s->memmap[AW_R40_DEV_PIT]); | ||
485 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 0, | ||
486 | + qdev_get_gpio_in(DEVICE(&s->gic), | ||
487 | + AW_R40_GIC_SPI_TIMER0)); | ||
488 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 1, | ||
489 | + qdev_get_gpio_in(DEVICE(&s->gic), | ||
490 | + AW_R40_GIC_SPI_TIMER1)); | ||
491 | + | ||
492 | + /* SRAM */ | ||
493 | + memory_region_init_ram(&s->sram_a1, OBJECT(dev), "sram A1", | ||
494 | + 16 * KiB, &error_abort); | ||
495 | + memory_region_init_ram(&s->sram_a2, OBJECT(dev), "sram A2", | ||
496 | + 16 * KiB, &error_abort); | ||
497 | + memory_region_init_ram(&s->sram_a3, OBJECT(dev), "sram A3", | ||
498 | + 13 * KiB, &error_abort); | ||
499 | + memory_region_init_ram(&s->sram_a4, OBJECT(dev), "sram A4", | ||
500 | + 3 * KiB, &error_abort); | ||
501 | + memory_region_add_subregion(get_system_memory(), | ||
502 | + s->memmap[AW_R40_DEV_SRAM_A1], &s->sram_a1); | ||
503 | + memory_region_add_subregion(get_system_memory(), | ||
504 | + s->memmap[AW_R40_DEV_SRAM_A2], &s->sram_a2); | ||
505 | + memory_region_add_subregion(get_system_memory(), | ||
506 | + s->memmap[AW_R40_DEV_SRAM_A3], &s->sram_a3); | ||
507 | + memory_region_add_subregion(get_system_memory(), | ||
508 | + s->memmap[AW_R40_DEV_SRAM_A4], &s->sram_a4); | ||
509 | + | ||
510 | + /* SD/MMC */ | ||
511 | + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { | ||
512 | + qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->gic), | ||
513 | + AW_R40_GIC_SPI_MMC0 + i); | ||
514 | + const hwaddr addr = s->memmap[AW_R40_DEV_MMC0 + i]; | ||
515 | + | ||
516 | + object_property_set_link(OBJECT(&s->mmc[i]), "dma-memory", | ||
517 | + OBJECT(get_system_memory()), &error_fatal); | ||
518 | + sysbus_realize(SYS_BUS_DEVICE(&s->mmc[i]), &error_fatal); | ||
519 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc[i]), 0, addr); | ||
520 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc[i]), 0, irq); | ||
521 | + } | ||
522 | + | ||
523 | + /* UART0. For future clocktree API: all UARTs are connected to APB2_CLK. */ | ||
524 | + serial_mm_init(get_system_memory(), s->memmap[AW_R40_DEV_UART0], 2, | ||
525 | + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_UART0), | ||
526 | + 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); | ||
527 | + | ||
528 | + /* Unimplemented devices */ | ||
529 | + for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { | ||
530 | + create_unimplemented_device(r40_unimplemented[i].device_name, | ||
531 | + r40_unimplemented[i].base, | ||
532 | + r40_unimplemented[i].size); | ||
533 | + } | ||
534 | +} | ||
535 | + | ||
536 | +static void allwinner_r40_class_init(ObjectClass *oc, void *data) | ||
537 | +{ | ||
538 | + DeviceClass *dc = DEVICE_CLASS(oc); | ||
539 | + | ||
540 | + dc->realize = allwinner_r40_realize; | ||
541 | + /* Reason: uses serial_hd() in realize function */ | ||
542 | + dc->user_creatable = false; | ||
543 | +} | ||
544 | + | ||
545 | +static const TypeInfo allwinner_r40_type_info = { | ||
546 | + .name = TYPE_AW_R40, | ||
547 | + .parent = TYPE_DEVICE, | ||
548 | + .instance_size = sizeof(AwR40State), | ||
549 | + .instance_init = allwinner_r40_init, | ||
550 | + .class_init = allwinner_r40_class_init, | ||
551 | +}; | ||
552 | + | ||
553 | +static void allwinner_r40_register_types(void) | ||
554 | +{ | ||
555 | + type_register_static(&allwinner_r40_type_info); | ||
556 | +} | ||
557 | + | ||
558 | +type_init(allwinner_r40_register_types) | ||
559 | diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c | ||
560 | new file mode 100644 | ||
561 | index XXXXXXX..XXXXXXX | ||
562 | --- /dev/null | ||
563 | +++ b/hw/arm/bananapi_m2u.c | ||
564 | @@ -XXX,XX +XXX,XX @@ | ||
565 | +/* | ||
566 | + * Bananapi M2U emulation | ||
567 | + * | ||
568 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
569 | + * | ||
570 | + * This program is free software: you can redistribute it and/or modify | ||
571 | + * it under the terms of the GNU General Public License as published by | ||
572 | + * the Free Software Foundation, either version 2 of the License, or | ||
573 | + * (at your option) any later version. | ||
574 | + * | ||
575 | + * This program is distributed in the hope that it will be useful, | ||
576 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
577 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
578 | + * GNU General Public License for more details. | ||
579 | + * | ||
580 | + * You should have received a copy of the GNU General Public License | ||
581 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
582 | + */ | ||
583 | + | ||
584 | +#include "qemu/osdep.h" | ||
585 | +#include "qemu/units.h" | ||
586 | +#include "exec/address-spaces.h" | ||
587 | +#include "qapi/error.h" | ||
588 | +#include "qemu/error-report.h" | ||
589 | +#include "hw/boards.h" | ||
590 | +#include "hw/qdev-properties.h" | ||
591 | +#include "hw/arm/allwinner-r40.h" | ||
592 | + | ||
593 | +static struct arm_boot_info bpim2u_binfo; | ||
594 | + | ||
595 | +/* | ||
596 | + * R40 can boot from mmc0 and mmc2, and the bpim2u has two MMC interfaces: one | ||
597 | + * is connected to an SD card and the other to an eMMC device. | ||
598 | + * Attach the MMC drives and try to load the bootloader. | ||
599 | + */ | ||
600 | +static void mmc_attach_drive(AwR40State *s, AwSdHostState *mmc, int unit, | ||
601 | + bool load_bootroom, bool *bootroom_loaded) | ||
602 | +{ | ||
603 | + DriveInfo *di = drive_get(IF_SD, 0, unit); | ||
604 | + BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; | ||
605 | + BusState *bus; | ||
606 | + DeviceState *carddev; | ||
607 | + | ||
608 | + bus = qdev_get_child_bus(DEVICE(mmc), "sd-bus"); | ||
609 | + if (bus == NULL) { | ||
610 | + error_report("No SD bus found in SOC object"); | ||
611 | + exit(1); | ||
612 | + } | ||
613 | + | ||
614 | + carddev = qdev_new(TYPE_SD_CARD); | ||
615 | + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); | ||
616 | + qdev_realize_and_unref(carddev, bus, &error_fatal); | ||
617 | + | ||
618 | + if (load_bootroom && blk && blk_is_available(blk)) { | ||
619 | + /* Use Boot ROM to copy data from SD card to SRAM */ | ||
620 | + *bootroom_loaded = allwinner_r40_bootrom_setup(s, blk, unit); | ||
621 | + } | ||
622 | +} | ||
623 | + | ||
624 | +static void bpim2u_init(MachineState *machine) | ||
625 | +{ | ||
626 | + bool bootroom_loaded = false; | ||
627 | + AwR40State *r40; | ||
628 | + | ||
629 | + /* BIOS is not supported by this board */ | ||
630 | + if (machine->firmware) { | ||
631 | + error_report("BIOS not supported for this machine"); | ||
632 | + exit(1); | ||
633 | + } | ||
634 | + | ||
635 | + /* Only allow Cortex-A7 for this board */ | ||
636 | + if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { | ||
637 | + error_report("This board can only be used with cortex-a7 CPU"); | ||
638 | + exit(1); | ||
639 | + } | ||
640 | + | ||
641 | + r40 = AW_R40(object_new(TYPE_AW_R40)); | ||
642 | + object_property_add_child(OBJECT(machine), "soc", OBJECT(r40)); | ||
643 | + object_unref(OBJECT(r40)); | ||
644 | + | ||
645 | + /* Setup timer properties */ | ||
646 | + object_property_set_int(OBJECT(r40), "clk0-freq", 32768, &error_abort); | ||
647 | + object_property_set_int(OBJECT(r40), "clk1-freq", 24 * 1000 * 1000, | ||
648 | + &error_abort); | ||
649 | + | ||
650 | + /* Mark R40 object realized */ | ||
651 | + qdev_realize(DEVICE(r40), NULL, &error_abort); | ||
652 | + | ||
653 | + /* | ||
654 | + * Plug in the SD cards and try to load the boot ROM. The R40 has 4 MMC | ||
655 | + * controllers but can only boot from mmc0 and mmc2. | ||
656 | + */ | ||
657 | + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { | ||
658 | + switch (i) { | ||
659 | + case 0: | ||
660 | + case 2: | ||
661 | + mmc_attach_drive(r40, &r40->mmc[i], i, | ||
662 | + !machine->kernel_filename && !bootroom_loaded, | ||
663 | + &bootroom_loaded); | ||
664 | + break; | ||
665 | + default: | ||
666 | + mmc_attach_drive(r40, &r40->mmc[i], i, false, NULL); | ||
667 | + break; | ||
668 | + } | ||
669 | + } | ||
670 | + | ||
671 | + /* SDRAM */ | ||
672 | + memory_region_add_subregion(get_system_memory(), | ||
673 | + r40->memmap[AW_R40_DEV_SDRAM], machine->ram); | ||
674 | + | ||
675 | + bpim2u_binfo.loader_start = r40->memmap[AW_R40_DEV_SDRAM]; | ||
676 | + bpim2u_binfo.ram_size = machine->ram_size; | ||
677 | + bpim2u_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC; | ||
678 | + arm_load_kernel(ARM_CPU(first_cpu), machine, &bpim2u_binfo); | ||
679 | +} | ||
680 | + | ||
681 | +static void bpim2u_machine_init(MachineClass *mc) | ||
682 | +{ | ||
683 | + mc->desc = "Bananapi M2U (Cortex-A7)"; | ||
684 | + mc->init = bpim2u_init; | ||
685 | + mc->min_cpus = AW_R40_NUM_CPUS; | ||
686 | + mc->max_cpus = AW_R40_NUM_CPUS; | ||
687 | + mc->default_cpus = AW_R40_NUM_CPUS; | ||
688 | + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); | ||
689 | + mc->default_ram_size = 1 * GiB; | ||
690 | + mc->default_ram_id = "bpim2u.ram"; | ||
691 | +} | ||
692 | + | ||
693 | +DEFINE_MACHINE("bpim2u", bpim2u_machine_init) | ||
694 | diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig | ||
20 | index XXXXXXX..XXXXXXX 100644 | 695 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/helper.c | 696 | --- a/hw/arm/Kconfig |
22 | +++ b/target/arm/helper.c | 697 | +++ b/hw/arm/Kconfig |
23 | @@ -XXX,XX +XXX,XX @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 698 | @@ -XXX,XX +XXX,XX @@ config ALLWINNER_H3 |
24 | static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, | 699 | select USB_EHCI_SYSBUS |
25 | uint64_t value) | 700 | select SD |
26 | { | 701 | |
27 | - /* 64 bit accesses to the TTBRs can change the ASID and so we | 702 | +config ALLWINNER_R40 |
28 | - * must flush the TLB. | 703 | + bool |
29 | - */ | 704 | + default y if TCG && ARM |
30 | - if (cpreg_field_is_64bit(ri)) { | 705 | + select ALLWINNER_A10_PIT |
31 | + /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ | 706 | + select SERIAL |
32 | + if (cpreg_field_is_64bit(ri) && | 707 | + select ARM_TIMER |
33 | + extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { | 708 | + select ARM_GIC |
34 | ARMCPU *cpu = arm_env_get_cpu(env); | 709 | + select UNIMP |
35 | - | 710 | + select SD |
36 | tlb_flush(CPU(cpu)); | 711 | + |
37 | } | 712 | config RASPI |
38 | raw_write(env, ri, value); | 713 | bool |
714 | default y | ||
715 | diff --git a/hw/arm/meson.build b/hw/arm/meson.build | ||
716 | index XXXXXXX..XXXXXXX 100644 | ||
717 | --- a/hw/arm/meson.build | ||
718 | +++ b/hw/arm/meson.build | ||
719 | @@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c', 'omap2.c')) | ||
720 | arm_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) | ||
721 | arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) | ||
722 | arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) | ||
723 | +arm_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c')) | ||
724 | arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c')) | ||
725 | arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) | ||
726 | arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) | ||
39 | -- | 727 | -- |
40 | 2.19.1 | 728 | 2.34.1 |
41 | |||
42 | diff view generated by jsdifflib |
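[Editor's note: a quick way to exercise the new board is something along the lines of "qemu-system-arm -M bpim2u -nographic -kernel zImage -sd sd.img" (the image names here are placeholders). As a minimal sketch of guest-side code, assuming only what the patch above establishes (UART0 mapped at 0x01c28000 with a 4-byte register stride from serial_mm_init(..., 2, ...) and a 16550-style register layout), a bare-metal guest could print on the serial console like this:

    /* Hypothetical bare-metal snippet for the bpim2u machine; not part of the series. */
    #include <stdint.h>

    #define UART0_BASE  0x01c28000u              /* AW_R40_DEV_UART0 in the memmap above */
    #define UART_THR    (*(volatile uint32_t *)(UART0_BASE + (0 << 2)))  /* transmit holding */
    #define UART_LSR    (*(volatile uint32_t *)(UART0_BASE + (5 << 2)))  /* line status */
    #define LSR_THRE    (1u << 5)                /* transmitter holding register empty */

    static void uart0_putc(char c)
    {
        while (!(UART_LSR & LSR_THRE)) {
            /* busy-wait until the device reports the transmitter as empty */
        }
        UART_THR = (uint32_t)c;
    }

This only illustrates the memory map and register stride wired up above; it is not code from the patch.]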
1 | From: Dongjiu Geng <gengdongjiu@huawei.com> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | This patch extends the qemu-kvm state sync logic with support for | 3 | The CCU provides the registers used to program the PLLs and controls |
4 | KVM_GET/SET_VCPU_EVENTS, giving access to the previously missing SError exception state. | 4 | most of the clock generation, division, distribution, synchronization |
5 | It also supports migration of the exception state. | 5 | and gating. |
6 | 6 | ||
7 | The SError exception state consists of the SError pending state and the ESR | 7 | This commit adds support for the Clock Control Unit, which emulates |
8 | value; kvm_put/get_vcpu_events() is called when the system registers are | 8 | a simple read/write register interface. |
9 | set or read. During migration, if the source machine has an SError pending, | ||
10 | QEMU migrates it regardless of whether the target machine supports | ||
11 | specifying a guest ESR value, because if the target machine does not | ||
12 | support that, it can still inject the SError with a zero ESR value. | ||
13 | 9 | ||
14 | Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com> | 10 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
15 | Reviewed-by: Andrew Jones <drjones@redhat.com> | 11 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
16 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
17 | Message-id: 1538067351-23931-3-git-send-email-gengdongjiu@huawei.com | ||
18 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
19 | --- | 13 | --- |
20 | target/arm/cpu.h | 7 ++++++ | 14 | include/hw/arm/allwinner-r40.h | 2 + |
21 | target/arm/kvm_arm.h | 24 ++++++++++++++++++ | 15 | include/hw/misc/allwinner-r40-ccu.h | 65 +++++++++ |
22 | target/arm/kvm.c | 60 ++++++++++++++++++++++++++++++++++++++++++++ | 16 | hw/arm/allwinner-r40.c | 8 +- |
23 | target/arm/kvm32.c | 13 ++++++++++ | 17 | hw/misc/allwinner-r40-ccu.c | 209 ++++++++++++++++++++++++++++ |
24 | target/arm/kvm64.c | 13 ++++++++++ | 18 | hw/misc/meson.build | 1 + |
25 | target/arm/machine.c | 22 ++++++++++++++++ | 19 | 5 files changed, 284 insertions(+), 1 deletion(-) |
26 | 6 files changed, 139 insertions(+) | 20 | create mode 100644 include/hw/misc/allwinner-r40-ccu.h |
21 | create mode 100644 hw/misc/allwinner-r40-ccu.c | ||
27 | 22 | ||
28 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 23 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
29 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/cpu.h | 25 | --- a/include/hw/arm/allwinner-r40.h |
31 | +++ b/target/arm/cpu.h | 26 | +++ b/include/hw/arm/allwinner-r40.h |
32 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | 27 | @@ -XXX,XX +XXX,XX @@ |
33 | */ | 28 | #include "hw/timer/allwinner-a10-pit.h" |
34 | } exception; | 29 | #include "hw/intc/arm_gic.h" |
35 | 30 | #include "hw/sd/allwinner-sdhost.h" | |
36 | + /* Information associated with an SError */ | 31 | +#include "hw/misc/allwinner-r40-ccu.h" |
37 | + struct { | 32 | #include "target/arm/cpu.h" |
38 | + uint8_t pending; | 33 | #include "sysemu/block-backend.h" |
39 | + uint8_t has_esr; | 34 | |
40 | + uint64_t esr; | 35 | @@ -XXX,XX +XXX,XX @@ struct AwR40State { |
41 | + } serror; | 36 | const hwaddr *memmap; |
42 | + | 37 | AwA10PITState timer; |
43 | /* Thumb-2 EE state. */ | 38 | AwSdHostState mmc[AW_R40_NUM_MMCS]; |
44 | uint32_t teecr; | 39 | + AwR40ClockCtlState ccu; |
45 | uint32_t teehbr; | 40 | GICState gic; |
46 | diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h | 41 | MemoryRegion sram_a1; |
42 | MemoryRegion sram_a2; | ||
43 | diff --git a/include/hw/misc/allwinner-r40-ccu.h b/include/hw/misc/allwinner-r40-ccu.h | ||
44 | new file mode 100644 | ||
45 | index XXXXXXX..XXXXXXX | ||
46 | --- /dev/null | ||
47 | +++ b/include/hw/misc/allwinner-r40-ccu.h | ||
48 | @@ -XXX,XX +XXX,XX @@ | ||
49 | +/* | ||
50 | + * Allwinner R40 Clock Control Unit emulation | ||
51 | + * | ||
52 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
53 | + * | ||
54 | + * This program is free software: you can redistribute it and/or modify | ||
55 | + * it under the terms of the GNU General Public License as published by | ||
56 | + * the Free Software Foundation, either version 2 of the License, or | ||
57 | + * (at your option) any later version. | ||
58 | + * | ||
59 | + * This program is distributed in the hope that it will be useful, | ||
60 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
61 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
62 | + * GNU General Public License for more details. | ||
63 | + * | ||
64 | + * You should have received a copy of the GNU General Public License | ||
65 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
66 | + */ | ||
67 | + | ||
68 | +#ifndef HW_MISC_ALLWINNER_R40_CCU_H | ||
69 | +#define HW_MISC_ALLWINNER_R40_CCU_H | ||
70 | + | ||
71 | +#include "qom/object.h" | ||
72 | +#include "hw/sysbus.h" | ||
73 | + | ||
74 | +/** | ||
75 | + * @name Constants | ||
76 | + * @{ | ||
77 | + */ | ||
78 | + | ||
79 | +/** Size of register I/O address space used by CCU device */ | ||
80 | +#define AW_R40_CCU_IOSIZE (0x400) | ||
81 | + | ||
82 | +/** Total number of known registers */ | ||
83 | +#define AW_R40_CCU_REGS_NUM (AW_R40_CCU_IOSIZE / sizeof(uint32_t)) | ||
84 | + | ||
85 | +/** @} */ | ||
86 | + | ||
87 | +/** | ||
88 | + * @name Object model | ||
89 | + * @{ | ||
90 | + */ | ||
91 | + | ||
92 | +#define TYPE_AW_R40_CCU "allwinner-r40-ccu" | ||
93 | +OBJECT_DECLARE_SIMPLE_TYPE(AwR40ClockCtlState, AW_R40_CCU) | ||
94 | + | ||
95 | +/** @} */ | ||
96 | + | ||
97 | +/** | ||
98 | + * Allwinner R40 CCU object instance state. | ||
99 | + */ | ||
100 | +struct AwR40ClockCtlState { | ||
101 | + /*< private >*/ | ||
102 | + SysBusDevice parent_obj; | ||
103 | + /*< public >*/ | ||
104 | + | ||
105 | + /** Maps I/O registers in physical memory */ | ||
106 | + MemoryRegion iomem; | ||
107 | + | ||
108 | + /** Array of hardware registers */ | ||
109 | + uint32_t regs[AW_R40_CCU_REGS_NUM]; | ||
110 | + | ||
111 | +}; | ||
112 | + | ||
113 | +#endif /* HW_MISC_ALLWINNER_R40_CCU_H */ | ||
114 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
47 | index XXXXXXX..XXXXXXX 100644 | 115 | index XXXXXXX..XXXXXXX 100644 |
48 | --- a/target/arm/kvm_arm.h | 116 | --- a/hw/arm/allwinner-r40.c |
49 | +++ b/target/arm/kvm_arm.h | 117 | +++ b/hw/arm/allwinner-r40.c |
50 | @@ -XXX,XX +XXX,XX @@ bool write_kvmstate_to_list(ARMCPU *cpu); | 118 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { |
51 | */ | 119 | [AW_R40_DEV_MMC1] = 0x01c10000, |
52 | void kvm_arm_reset_vcpu(ARMCPU *cpu); | 120 | [AW_R40_DEV_MMC2] = 0x01c11000, |
53 | 121 | [AW_R40_DEV_MMC3] = 0x01c12000, | |
54 | +/** | 122 | + [AW_R40_DEV_CCU] = 0x01c20000, |
55 | + * kvm_arm_init_serror_injection: | 123 | [AW_R40_DEV_PIT] = 0x01c20c00, |
56 | + * @cs: CPUState | 124 | [AW_R40_DEV_UART0] = 0x01c28000, |
57 | + * | 125 | [AW_R40_DEV_GIC_DIST] = 0x01c81000, |
58 | + * Check whether KVM can set guest SError syndrome. | 126 | @@ -XXX,XX +XXX,XX @@ static struct AwR40Unimplemented r40_unimplemented[] = { |
59 | + */ | 127 | { "usb2-host", 0x01c1c000, 4 * KiB }, |
60 | +void kvm_arm_init_serror_injection(CPUState *cs); | 128 | { "cs1", 0x01c1d000, 4 * KiB }, |
61 | + | 129 | { "spi3", 0x01c1f000, 4 * KiB }, |
62 | +/** | 130 | - { "ccu", 0x01c20000, 1 * KiB }, |
63 | + * kvm_get_vcpu_events: | 131 | { "rtc", 0x01c20400, 1 * KiB }, |
64 | + * @cpu: ARMCPU | 132 | { "pio", 0x01c20800, 1 * KiB }, |
65 | + * | 133 | { "owa", 0x01c21000, 1 * KiB }, |
66 | + * Get VCPU related state from kvm. | 134 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) |
67 | + */ | 135 | object_property_add_alias(obj, "clk1-freq", OBJECT(&s->timer), |
68 | +int kvm_get_vcpu_events(ARMCPU *cpu); | 136 | "clk1-freq"); |
69 | + | 137 | |
70 | +/** | 138 | + object_initialize_child(obj, "ccu", &s->ccu, TYPE_AW_R40_CCU); |
71 | + * kvm_put_vcpu_events: | 139 | + |
72 | + * @cpu: ARMCPU | 140 | for (int i = 0; i < AW_R40_NUM_MMCS; i++) { |
73 | + * | 141 | object_initialize_child(obj, mmc_names[i], &s->mmc[i], |
74 | + * Put VCPU related state to kvm. | 142 | TYPE_AW_SDHOST_SUN5I); |
75 | + */ | 143 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
76 | +int kvm_put_vcpu_events(ARMCPU *cpu); | 144 | memory_region_add_subregion(get_system_memory(), |
77 | + | 145 | s->memmap[AW_R40_DEV_SRAM_A4], &s->sram_a4); |
78 | #ifdef CONFIG_KVM | 146 | |
79 | /** | 147 | + /* Clock Control Unit */ |
80 | * kvm_arm_create_scratch_host_vcpu: | 148 | + sysbus_realize(SYS_BUS_DEVICE(&s->ccu), &error_fatal); |
81 | diff --git a/target/arm/kvm.c b/target/arm/kvm.c | 149 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccu), 0, s->memmap[AW_R40_DEV_CCU]); |
82 | index XXXXXXX..XXXXXXX 100644 | 150 | + |
83 | --- a/target/arm/kvm.c | 151 | /* SD/MMC */ |
84 | +++ b/target/arm/kvm.c | 152 | for (int i = 0; i < AW_R40_NUM_MMCS; i++) { |
85 | @@ -XXX,XX +XXX,XX @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { | 153 | qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->gic), |
86 | }; | 154 | diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c |
87 | 155 | new file mode 100644 | |
88 | static bool cap_has_mp_state; | 156 | index XXXXXXX..XXXXXXX |
89 | +static bool cap_has_inject_serror_esr; | 157 | --- /dev/null |
90 | 158 | +++ b/hw/misc/allwinner-r40-ccu.c | |
91 | static ARMHostCPUFeatures arm_host_cpu_features; | 159 | @@ -XXX,XX +XXX,XX @@ |
92 | 160 | +/* | |
93 | @@ -XXX,XX +XXX,XX @@ int kvm_arm_vcpu_init(CPUState *cs) | 161 | + * Allwinner R40 Clock Control Unit emulation |
94 | return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); | 162 | + * |
95 | } | 163 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> |
96 | 164 | + * | |
97 | +void kvm_arm_init_serror_injection(CPUState *cs) | 165 | + * This program is free software: you can redistribute it and/or modify |
98 | +{ | 166 | + * it under the terms of the GNU General Public License as published by |
99 | + cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, | 167 | + * the Free Software Foundation, either version 2 of the License, or |
100 | + KVM_CAP_ARM_INJECT_SERROR_ESR); | 168 | + * (at your option) any later version. |
101 | +} | 169 | + * |
102 | + | 170 | + * This program is distributed in the hope that it will be useful, |
103 | bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, | 171 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
104 | int *fdarray, | 172 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
105 | struct kvm_vcpu_init *init) | 173 | + * GNU General Public License for more details. |
106 | @@ -XXX,XX +XXX,XX @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) | 174 | + * |
107 | return 0; | 175 | + * You should have received a copy of the GNU General Public License |
108 | } | 176 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. |
109 | 177 | + */ | |
110 | +int kvm_put_vcpu_events(ARMCPU *cpu) | 178 | + |
111 | +{ | 179 | +#include "qemu/osdep.h" |
112 | + CPUARMState *env = &cpu->env; | 180 | +#include "qemu/units.h" |
113 | + struct kvm_vcpu_events events; | 181 | +#include "hw/sysbus.h" |
114 | + int ret; | 182 | +#include "migration/vmstate.h" |
115 | + | 183 | +#include "qemu/log.h" |
116 | + if (!kvm_has_vcpu_events()) { | 184 | +#include "qemu/module.h" |
185 | +#include "hw/misc/allwinner-r40-ccu.h" | ||
186 | + | ||
187 | +/* CCU register offsets */ | ||
188 | +enum { | ||
189 | + REG_PLL_CPUX_CTRL = 0x0000, | ||
190 | + REG_PLL_AUDIO_CTRL = 0x0008, | ||
191 | + REG_PLL_VIDEO0_CTRL = 0x0010, | ||
192 | + REG_PLL_VE_CTRL = 0x0018, | ||
193 | + REG_PLL_DDR0_CTRL = 0x0020, | ||
194 | + REG_PLL_PERIPH0_CTRL = 0x0028, | ||
195 | + REG_PLL_PERIPH1_CTRL = 0x002c, | ||
196 | + REG_PLL_VIDEO1_CTRL = 0x0030, | ||
197 | + REG_PLL_SATA_CTRL = 0x0034, | ||
198 | + REG_PLL_GPU_CTRL = 0x0038, | ||
199 | + REG_PLL_MIPI_CTRL = 0x0040, | ||
200 | + REG_PLL_DE_CTRL = 0x0048, | ||
201 | + REG_PLL_DDR1_CTRL = 0x004c, | ||
202 | + REG_AHB1_APB1_CFG = 0x0054, | ||
203 | + REG_APB2_CFG = 0x0058, | ||
204 | + REG_MMC0_CLK = 0x0088, | ||
205 | + REG_MMC1_CLK = 0x008c, | ||
206 | + REG_MMC2_CLK = 0x0090, | ||
207 | + REG_MMC3_CLK = 0x0094, | ||
208 | + REG_USBPHY_CFG = 0x00cc, | ||
209 | + REG_PLL_DDR_AUX = 0x00f0, | ||
210 | + REG_DRAM_CFG = 0x00f4, | ||
211 | + REG_PLL_DDR1_CFG = 0x00f8, | ||
212 | + REG_DRAM_CLK_GATING = 0x0100, | ||
213 | + REG_GMAC_CLK = 0x0164, | ||
214 | + REG_SYS_32K_CLK = 0x0310, | ||
215 | + REG_PLL_LOCK_CTRL = 0x0320, | ||
216 | +}; | ||
217 | + | ||
218 | +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) | ||
219 | + | ||
220 | +/* CCU register flags */ | ||
221 | +enum { | ||
222 | + REG_PLL_ENABLE = (1 << 31), | ||
223 | + REG_PLL_LOCK = (1 << 28), | ||
224 | +}; | ||
225 | + | ||
226 | +static uint64_t allwinner_r40_ccu_read(void *opaque, hwaddr offset, | ||
227 | + unsigned size) | ||
228 | +{ | ||
229 | + const AwR40ClockCtlState *s = AW_R40_CCU(opaque); | ||
230 | + const uint32_t idx = REG_INDEX(offset); | ||
231 | + | ||
232 | + switch (offset) { | ||
233 | + case 0x324 ... AW_R40_CCU_IOSIZE: | ||
234 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
235 | + __func__, (uint32_t)offset); | ||
117 | + return 0; | 236 | + return 0; |
118 | + } | 237 | + } |
119 | + | 238 | + |
120 | + memset(&events, 0, sizeof(events)); | 239 | + return s->regs[idx]; |
121 | + events.exception.serror_pending = env->serror.pending; | 240 | +} |
122 | + | 241 | + |
123 | + /* Inject SError to guest with specified syndrome if host kernel | 242 | +static void allwinner_r40_ccu_write(void *opaque, hwaddr offset, |
124 | + * supports it, otherwise inject SError without syndrome. | 243 | + uint64_t val, unsigned size) |
125 | + */ | 244 | +{ |
126 | + if (cap_has_inject_serror_esr) { | 245 | + AwR40ClockCtlState *s = AW_R40_CCU(opaque); |
127 | + events.exception.serror_has_esr = env->serror.has_esr; | 246 | + |
128 | + events.exception.serror_esr = env->serror.esr; | 247 | + switch (offset) { |
248 | + case REG_DRAM_CFG: /* DRAM Configuration(for DDR0) */ | ||
249 | + /* bit16: SDRCLK_UPD (SDRCLK configuration 0 update) */ | ||
250 | + val &= ~(1 << 16); | ||
251 | + break; | ||
252 | + case REG_PLL_DDR1_CTRL: /* DDR1 Control register */ | ||
253 | + /* bit30: SDRPLL_UPD */ | ||
254 | + val &= ~(1 << 30); | ||
255 | + if (val & REG_PLL_ENABLE) { | ||
256 | + val |= REG_PLL_LOCK; | ||
257 | + } | ||
258 | + break; | ||
259 | + case REG_PLL_CPUX_CTRL: | ||
260 | + case REG_PLL_AUDIO_CTRL: | ||
261 | + case REG_PLL_VE_CTRL: | ||
262 | + case REG_PLL_VIDEO0_CTRL: | ||
263 | + case REG_PLL_DDR0_CTRL: | ||
264 | + case REG_PLL_PERIPH0_CTRL: | ||
265 | + case REG_PLL_PERIPH1_CTRL: | ||
266 | + case REG_PLL_VIDEO1_CTRL: | ||
267 | + case REG_PLL_SATA_CTRL: | ||
268 | + case REG_PLL_GPU_CTRL: | ||
269 | + case REG_PLL_MIPI_CTRL: | ||
270 | + case REG_PLL_DE_CTRL: | ||
271 | + if (val & REG_PLL_ENABLE) { | ||
272 | + val |= REG_PLL_LOCK; | ||
273 | + } | ||
274 | + break; | ||
275 | + case 0x324 ... AW_R40_CCU_IOSIZE: | ||
276 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
277 | + __func__, (uint32_t)offset); | ||
278 | + break; | ||
279 | + default: | ||
280 | + qemu_log_mask(LOG_UNIMP, "%s: unimplemented write offset 0x%04x\n", | ||
281 | + __func__, (uint32_t)offset); | ||
282 | + break; | ||
129 | + } | 283 | + } |
130 | + | 284 | + |
131 | + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); | 285 | + s->regs[REG_INDEX(offset)] = (uint32_t) val; |
132 | + if (ret) { | 286 | +} |
133 | + error_report("failed to put vcpu events"); | 287 | + |
134 | + } | 288 | +static const MemoryRegionOps allwinner_r40_ccu_ops = { |
135 | + | 289 | + .read = allwinner_r40_ccu_read, |
136 | + return ret; | 290 | + .write = allwinner_r40_ccu_write, |
137 | +} | 291 | + .endianness = DEVICE_NATIVE_ENDIAN, |
138 | + | 292 | + .valid = { |
139 | +int kvm_get_vcpu_events(ARMCPU *cpu) | 293 | + .min_access_size = 4, |
140 | +{ | 294 | + .max_access_size = 4, |
141 | + CPUARMState *env = &cpu->env; | 295 | + }, |
142 | + struct kvm_vcpu_events events; | 296 | + .impl.min_access_size = 4, |
143 | + int ret; | 297 | +}; |
144 | + | 298 | + |
145 | + if (!kvm_has_vcpu_events()) { | 299 | +static void allwinner_r40_ccu_reset(DeviceState *dev) |
146 | + return 0; | 300 | +{ |
147 | + } | 301 | + AwR40ClockCtlState *s = AW_R40_CCU(dev); |
148 | + | 302 | + |
149 | + memset(&events, 0, sizeof(events)); | 303 | + memset(s->regs, 0, sizeof(s->regs)); |
150 | + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); | 304 | + |
151 | + if (ret) { | 305 | + /* Set default values for registers */ |
152 | + error_report("failed to get vcpu events"); | 306 | + s->regs[REG_INDEX(REG_PLL_CPUX_CTRL)] = 0x00001000; |
153 | + return ret; | 307 | + s->regs[REG_INDEX(REG_PLL_AUDIO_CTRL)] = 0x00035514; |
154 | + } | 308 | + s->regs[REG_INDEX(REG_PLL_VIDEO0_CTRL)] = 0x03006207; |
155 | + | 309 | + s->regs[REG_INDEX(REG_PLL_VE_CTRL)] = 0x03006207; |
156 | + env->serror.pending = events.exception.serror_pending; | 310 | + s->regs[REG_INDEX(REG_PLL_DDR0_CTRL)] = 0x00001000, |
157 | + env->serror.has_esr = events.exception.serror_has_esr; | 311 | + s->regs[REG_INDEX(REG_PLL_PERIPH0_CTRL)] = 0x00041811; |
158 | + env->serror.esr = events.exception.serror_esr; | 312 | + s->regs[REG_INDEX(REG_PLL_PERIPH1_CTRL)] = 0x00041811; |
159 | + | 313 | + s->regs[REG_INDEX(REG_PLL_VIDEO1_CTRL)] = 0x03006207; |
160 | + return 0; | 314 | + s->regs[REG_INDEX(REG_PLL_SATA_CTRL)] = 0x00001811; |
161 | +} | 315 | + s->regs[REG_INDEX(REG_PLL_GPU_CTRL)] = 0x03006207; |
162 | + | 316 | + s->regs[REG_INDEX(REG_PLL_MIPI_CTRL)] = 0x00000515; |
163 | void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) | 317 | + s->regs[REG_INDEX(REG_PLL_DE_CTRL)] = 0x03006207; |
164 | { | 318 | + s->regs[REG_INDEX(REG_PLL_DDR1_CTRL)] = 0x00001800; |
165 | } | 319 | + s->regs[REG_INDEX(REG_AHB1_APB1_CFG)] = 0x00001010; |
166 | diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c | 320 | + s->regs[REG_INDEX(REG_APB2_CFG)] = 0x01000000; |
167 | index XXXXXXX..XXXXXXX 100644 | 321 | + s->regs[REG_INDEX(REG_PLL_DDR_AUX)] = 0x00000001; |
168 | --- a/target/arm/kvm32.c | 322 | + s->regs[REG_INDEX(REG_PLL_DDR1_CFG)] = 0x0ccca000; |
169 | +++ b/target/arm/kvm32.c | 323 | + s->regs[REG_INDEX(REG_SYS_32K_CLK)] = 0x0000000f; |
170 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs) | 324 | +} |
171 | } | 325 | + |
172 | cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK; | 326 | +static void allwinner_r40_ccu_init(Object *obj) |
173 | 327 | +{ | |
174 | + /* Check whether userspace can specify guest syndrome value */ | 328 | + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); |
175 | + kvm_arm_init_serror_injection(cs); | 329 | + AwR40ClockCtlState *s = AW_R40_CCU(obj); |
176 | + | 330 | + |
177 | return kvm_arm_init_cpreg_list(cpu); | 331 | + /* Memory mapping */ |
178 | } | 332 | + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_r40_ccu_ops, s, |
179 | 333 | + TYPE_AW_R40_CCU, AW_R40_CCU_IOSIZE); | |
180 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level) | 334 | + sysbus_init_mmio(sbd, &s->iomem); |
181 | return ret; | 335 | +} |
182 | } | 336 | + |
183 | 337 | +static const VMStateDescription allwinner_r40_ccu_vmstate = { | |
184 | + ret = kvm_put_vcpu_events(cpu); | 338 | + .name = "allwinner-r40-ccu", |
185 | + if (ret) { | ||
186 | + return ret; | ||
187 | + } | ||
188 | + | ||
189 | /* Note that we do not call write_cpustate_to_list() | ||
190 | * here, so we are only writing the tuple list back to | ||
191 | * KVM. This is safe because nothing can change the | ||
192 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs) | ||
193 | } | ||
194 | vfp_set_fpscr(env, fpscr); | ||
195 | |||
196 | + ret = kvm_get_vcpu_events(cpu); | ||
197 | + if (ret) { | ||
198 | + return ret; | ||
199 | + } | ||
200 | + | ||
201 | if (!write_kvmstate_to_list(cpu)) { | ||
202 | return EINVAL; | ||
203 | } | ||
204 | diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c | ||
205 | index XXXXXXX..XXXXXXX 100644 | ||
206 | --- a/target/arm/kvm64.c | ||
207 | +++ b/target/arm/kvm64.c | ||
208 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs) | ||
209 | |||
210 | kvm_arm_init_debug(cs); | ||
211 | |||
212 | + /* Check whether user space can specify guest syndrome value */ | ||
213 | + kvm_arm_init_serror_injection(cs); | ||
214 | + | ||
215 | return kvm_arm_init_cpreg_list(cpu); | ||
216 | } | ||
217 | |||
218 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level) | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | + ret = kvm_put_vcpu_events(cpu); | ||
223 | + if (ret) { | ||
224 | + return ret; | ||
225 | + } | ||
226 | + | ||
227 | if (!write_list_to_kvmstate(cpu, level)) { | ||
228 | return EINVAL; | ||
229 | } | ||
230 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs) | ||
231 | } | ||
232 | vfp_set_fpcr(env, fpr); | ||
233 | |||
234 | + ret = kvm_get_vcpu_events(cpu); | ||
235 | + if (ret) { | ||
236 | + return ret; | ||
237 | + } | ||
238 | + | ||
239 | if (!write_kvmstate_to_list(cpu)) { | ||
240 | return EINVAL; | ||
241 | } | ||
242 | diff --git a/target/arm/machine.c b/target/arm/machine.c | ||
243 | index XXXXXXX..XXXXXXX 100644 | ||
244 | --- a/target/arm/machine.c | ||
245 | +++ b/target/arm/machine.c | ||
246 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sve = { | ||
247 | }; | ||
248 | #endif /* AARCH64 */ | ||
249 | |||
250 | +static bool serror_needed(void *opaque) | ||
251 | +{ | ||
252 | + ARMCPU *cpu = opaque; | ||
253 | + CPUARMState *env = &cpu->env; | ||
254 | + | ||
255 | + return env->serror.pending != 0; | ||
256 | +} | ||
257 | + | ||
258 | +static const VMStateDescription vmstate_serror = { | ||
259 | + .name = "cpu/serror", | ||
260 | + .version_id = 1, | 339 | + .version_id = 1, |
261 | + .minimum_version_id = 1, | 340 | + .minimum_version_id = 1, |
262 | + .needed = serror_needed, | ||
263 | + .fields = (VMStateField[]) { | 341 | + .fields = (VMStateField[]) { |
264 | + VMSTATE_UINT8(env.serror.pending, ARMCPU), | 342 | + VMSTATE_UINT32_ARRAY(regs, AwR40ClockCtlState, AW_R40_CCU_REGS_NUM), |
265 | + VMSTATE_UINT8(env.serror.has_esr, ARMCPU), | ||
266 | + VMSTATE_UINT64(env.serror.esr, ARMCPU), | ||
267 | + VMSTATE_END_OF_LIST() | 343 | + VMSTATE_END_OF_LIST() |
268 | + } | 344 | + } |
269 | +}; | 345 | +}; |
270 | + | 346 | + |
271 | static bool m_needed(void *opaque) | 347 | +static void allwinner_r40_ccu_class_init(ObjectClass *klass, void *data) |
272 | { | 348 | +{ |
273 | ARMCPU *cpu = opaque; | 349 | + DeviceClass *dc = DEVICE_CLASS(klass); |
274 | @@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_arm_cpu = { | 350 | + |
275 | #ifdef TARGET_AARCH64 | 351 | + dc->reset = allwinner_r40_ccu_reset; |
276 | &vmstate_sve, | 352 | + dc->vmsd = &allwinner_r40_ccu_vmstate; |
277 | #endif | 353 | +} |
278 | + &vmstate_serror, | 354 | + |
279 | NULL | 355 | +static const TypeInfo allwinner_r40_ccu_info = { |
280 | } | 356 | + .name = TYPE_AW_R40_CCU, |
281 | }; | 357 | + .parent = TYPE_SYS_BUS_DEVICE, |
358 | + .instance_init = allwinner_r40_ccu_init, | ||
359 | + .instance_size = sizeof(AwR40ClockCtlState), | ||
360 | + .class_init = allwinner_r40_ccu_class_init, | ||
361 | +}; | ||
362 | + | ||
363 | +static void allwinner_r40_ccu_register(void) | ||
364 | +{ | ||
365 | + type_register_static(&allwinner_r40_ccu_info); | ||
366 | +} | ||
367 | + | ||
368 | +type_init(allwinner_r40_ccu_register) | ||
369 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
370 | index XXXXXXX..XXXXXXX 100644 | ||
371 | --- a/hw/misc/meson.build | ||
372 | +++ b/hw/misc/meson.build | ||
373 | @@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-cpucfg.c' | ||
374 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c')) | ||
375 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) | ||
376 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) | ||
377 | +softmmu_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c')) | ||
378 | softmmu_ss.add(when: 'CONFIG_AXP209_PMU', if_true: files('axp209.c')) | ||
379 | softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) | ||
380 | softmmu_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) | ||
282 | -- | 381 | -- |
283 | 2.19.1 | 382 | 2.34.1 |
284 | |||
285 | diff view generated by jsdifflib |
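[Editor's note: the PLL handling above simply reflects the ENABLE bit back as LOCK, so guest code polling for lock completes immediately. A minimal sketch of such guest code, assuming only the offsets and bit definitions from allwinner-r40-ccu.c in this patch (CCU base 0x01c20000, REG_PLL_GPU_CTRL at 0x0038, ENABLE bit 31, LOCK bit 28):

    /* Hypothetical guest-side snippet; not part of the series. */
    #include <stdint.h>

    #define CCU_BASE          0x01c20000u   /* AW_R40_DEV_CCU in the R40 memmap */
    #define REG_PLL_GPU_CTRL  0x0038u
    #define PLL_ENABLE        (1u << 31)
    #define PLL_LOCK          (1u << 28)

    static volatile uint32_t *ccu_reg(uint32_t off)
    {
        return (volatile uint32_t *)(uintptr_t)(CCU_BASE + off);
    }

    static void gpu_pll_enable(void)
    {
        *ccu_reg(REG_PLL_GPU_CTRL) |= PLL_ENABLE;
        while (!(*ccu_reg(REG_PLL_GPU_CTRL) & PLL_LOCK)) {
            /* real hardware may take time to lock; this model reports lock at once */
        }
    }

The same pattern applies to the other PLL control registers handled in allwinner_r40_ccu_write().]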
1 | From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Announce the availability of the various priority queues. | 3 | The R40 has eight UARTs, supporting both 16450- and 16550-compatible modes. |
4 | This fixes an issue where guest kernels would fail to | 4 |
5 | configure secondary queues due to improper feature bits. | 5 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
6 | 4 | ||
7 | Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 5 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
8 | Message-id: 20181017213932.19973-2-edgar.iglesias@gmail.com | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 7 | --- |
12 | hw/net/cadence_gem.c | 8 +++++++- | 8 | include/hw/arm/allwinner-r40.h | 8 ++++++++ |
13 | 1 file changed, 7 insertions(+), 1 deletion(-) | 9 | hw/arm/allwinner-r40.c | 34 +++++++++++++++++++++++++++++++--- |
10 | 2 files changed, 39 insertions(+), 3 deletions(-) | ||
14 | 11 | ||
15 | diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c | 12 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
16 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/hw/net/cadence_gem.c | 14 | --- a/include/hw/arm/allwinner-r40.h |
18 | +++ b/hw/net/cadence_gem.c | 15 | +++ b/include/hw/arm/allwinner-r40.h |
19 | @@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d) | 16 | @@ -XXX,XX +XXX,XX @@ enum { |
20 | int i; | 17 | AW_R40_DEV_CCU, |
21 | CadenceGEMState *s = CADENCE_GEM(d); | 18 | AW_R40_DEV_PIT, |
22 | const uint8_t *a; | 19 | AW_R40_DEV_UART0, |
23 | + uint32_t queues_mask = 0; | 20 | + AW_R40_DEV_UART1, |
24 | 21 | + AW_R40_DEV_UART2, | |
25 | DB_PRINT("\n"); | 22 | + AW_R40_DEV_UART3, |
26 | 23 | + AW_R40_DEV_UART4, | |
27 | @@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d) | 24 | + AW_R40_DEV_UART5, |
28 | s->regs[GEM_DESCONF] = 0x02500111; | 25 | + AW_R40_DEV_UART6, |
29 | s->regs[GEM_DESCONF2] = 0x2ab13fff; | 26 | + AW_R40_DEV_UART7, |
30 | s->regs[GEM_DESCONF5] = 0x002f2045; | 27 | AW_R40_DEV_GIC_DIST, |
31 | - s->regs[GEM_DESCONF6] = 0x00000200; | 28 | AW_R40_DEV_GIC_CPU, |
32 | + s->regs[GEM_DESCONF6] = 0x0; | 29 | AW_R40_DEV_GIC_HYP, |
30 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(AwR40State, AW_R40) | ||
31 | * which are currently emulated by the R40 SoC code. | ||
32 | */ | ||
33 | #define AW_R40_NUM_MMCS 4 | ||
34 | +#define AW_R40_NUM_UARTS 8 | ||
35 | |||
36 | struct AwR40State { | ||
37 | /*< private >*/ | ||
38 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/hw/arm/allwinner-r40.c | ||
41 | +++ b/hw/arm/allwinner-r40.c | ||
42 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { | ||
43 | [AW_R40_DEV_CCU] = 0x01c20000, | ||
44 | [AW_R40_DEV_PIT] = 0x01c20c00, | ||
45 | [AW_R40_DEV_UART0] = 0x01c28000, | ||
46 | + [AW_R40_DEV_UART1] = 0x01c28400, | ||
47 | + [AW_R40_DEV_UART2] = 0x01c28800, | ||
48 | + [AW_R40_DEV_UART3] = 0x01c28c00, | ||
49 | + [AW_R40_DEV_UART4] = 0x01c29000, | ||
50 | + [AW_R40_DEV_UART5] = 0x01c29400, | ||
51 | + [AW_R40_DEV_UART6] = 0x01c29800, | ||
52 | + [AW_R40_DEV_UART7] = 0x01c29c00, | ||
53 | [AW_R40_DEV_GIC_DIST] = 0x01c81000, | ||
54 | [AW_R40_DEV_GIC_CPU] = 0x01c82000, | ||
55 | [AW_R40_DEV_GIC_HYP] = 0x01c84000, | ||
56 | @@ -XXX,XX +XXX,XX @@ enum { | ||
57 | /* Shared Processor Interrupts */ | ||
58 | enum { | ||
59 | AW_R40_GIC_SPI_UART0 = 1, | ||
60 | + AW_R40_GIC_SPI_UART1 = 2, | ||
61 | + AW_R40_GIC_SPI_UART2 = 3, | ||
62 | + AW_R40_GIC_SPI_UART3 = 4, | ||
63 | + AW_R40_GIC_SPI_UART4 = 17, | ||
64 | + AW_R40_GIC_SPI_UART5 = 18, | ||
65 | + AW_R40_GIC_SPI_UART6 = 19, | ||
66 | + AW_R40_GIC_SPI_UART7 = 20, | ||
67 | AW_R40_GIC_SPI_TIMER0 = 22, | ||
68 | AW_R40_GIC_SPI_TIMER1 = 23, | ||
69 | AW_R40_GIC_SPI_MMC0 = 32, | ||
70 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) | ||
71 | } | ||
72 | |||
73 | /* UART0. For future clocktree API: All UARTS are connected to APB2_CLK. */ | ||
74 | - serial_mm_init(get_system_memory(), s->memmap[AW_R40_DEV_UART0], 2, | ||
75 | - qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_UART0), | ||
76 | - 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); | ||
77 | + for (int i = 0; i < AW_R40_NUM_UARTS; i++) { | ||
78 | + static const int uart_irqs[AW_R40_NUM_UARTS] = { | ||
79 | + AW_R40_GIC_SPI_UART0, | ||
80 | + AW_R40_GIC_SPI_UART1, | ||
81 | + AW_R40_GIC_SPI_UART2, | ||
82 | + AW_R40_GIC_SPI_UART3, | ||
83 | + AW_R40_GIC_SPI_UART4, | ||
84 | + AW_R40_GIC_SPI_UART5, | ||
85 | + AW_R40_GIC_SPI_UART6, | ||
86 | + AW_R40_GIC_SPI_UART7, | ||
87 | + }; | ||
88 | + const hwaddr addr = s->memmap[AW_R40_DEV_UART0 + i]; | ||
33 | + | 89 | + |
34 | + if (s->num_priority_queues > 1) { | 90 | + serial_mm_init(get_system_memory(), addr, 2, |
35 | + queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1); | 91 | + qdev_get_gpio_in(DEVICE(&s->gic), uart_irqs[i]), |
36 | + s->regs[GEM_DESCONF6] |= queues_mask; | 92 | + 115200, serial_hd(i), DEVICE_NATIVE_ENDIAN); |
37 | + } | 93 | + } |
38 | 94 | ||
39 | /* Set MAC address */ | 95 | /* Unimplemented devices */ |
40 | a = &s->conf.macaddr.a[0]; | 96 | for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { |
41 | -- | 97 | -- |
42 | 2.19.1 | 98 | 2.34.1 |
43 | |||
44 | diff view generated by jsdifflib |
1 | From: Richard Henderson <rth@twiddle.net> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | This can reduce the number of opcodes required for certain | 3 | TWI(i2c) is designed to be used as an interface between CPU host and the |
4 | complex forms of load-multiple (e.g. ld4.16b). | 4 | serial 2-Wire bus. It can support all standard 2-Wire transfer, can be |
5 | operated in standard mode(100kbit/s) or fast-mode, supporting data rate | ||
6 | up to 400kbit/s. | ||
5 | 7 | ||
6 | Signed-off-by: Richard Henderson <rth@twiddle.net> | 8 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
7 | Message-id: 20181011205206.3552-2-richard.henderson@linaro.org | 9 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 11 | --- |
11 | target/arm/translate-a64.c | 12 ++++++++---- | 12 | include/hw/arm/allwinner-r40.h | 3 +++ |
12 | 1 file changed, 8 insertions(+), 4 deletions(-) | 13 | hw/arm/allwinner-r40.c | 11 ++++++++++- |
14 | 2 files changed, 13 insertions(+), 1 deletion(-) | ||
13 | 15 | ||
14 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 16 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
15 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/translate-a64.c | 18 | --- a/include/hw/arm/allwinner-r40.h |
17 | +++ b/target/arm/translate-a64.c | 19 | +++ b/include/hw/arm/allwinner-r40.h |
18 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 20 | @@ -XXX,XX +XXX,XX @@ |
19 | bool is_store = !extract32(insn, 22, 1); | 21 | #include "hw/intc/arm_gic.h" |
20 | bool is_postidx = extract32(insn, 23, 1); | 22 | #include "hw/sd/allwinner-sdhost.h" |
21 | bool is_q = extract32(insn, 30, 1); | 23 | #include "hw/misc/allwinner-r40-ccu.h" |
22 | - TCGv_i64 tcg_addr, tcg_rn; | 24 | +#include "hw/i2c/allwinner-i2c.h" |
23 | + TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes; | 25 | #include "target/arm/cpu.h" |
24 | 26 | #include "sysemu/block-backend.h" | |
25 | int ebytes = 1 << size; | 27 | |
26 | int elements = (is_q ? 128 : 64) / (8 << size); | 28 | @@ -XXX,XX +XXX,XX @@ enum { |
27 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 29 | AW_R40_DEV_UART5, |
28 | tcg_rn = cpu_reg_sp(s, rn); | 30 | AW_R40_DEV_UART6, |
29 | tcg_addr = tcg_temp_new_i64(); | 31 | AW_R40_DEV_UART7, |
30 | tcg_gen_mov_i64(tcg_addr, tcg_rn); | 32 | + AW_R40_DEV_TWI0, |
31 | + tcg_ebytes = tcg_const_i64(ebytes); | 33 | AW_R40_DEV_GIC_DIST, |
32 | 34 | AW_R40_DEV_GIC_CPU, | |
33 | for (r = 0; r < rpt; r++) { | 35 | AW_R40_DEV_GIC_HYP, |
34 | int e; | 36 | @@ -XXX,XX +XXX,XX @@ struct AwR40State { |
35 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 37 | AwA10PITState timer; |
36 | clear_vec_high(s, is_q, tt); | 38 | AwSdHostState mmc[AW_R40_NUM_MMCS]; |
37 | } | 39 | AwR40ClockCtlState ccu; |
38 | } | 40 | + AWI2CState i2c0; |
39 | - tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes); | 41 | GICState gic; |
40 | + tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); | 42 | MemoryRegion sram_a1; |
41 | tt = (tt + 1) % 32; | 43 | MemoryRegion sram_a2; |
42 | } | 44 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c |
43 | } | 45 | index XXXXXXX..XXXXXXX 100644 |
44 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | 46 | --- a/hw/arm/allwinner-r40.c |
45 | tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); | 47 | +++ b/hw/arm/allwinner-r40.c |
46 | } | 48 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { |
49 | [AW_R40_DEV_UART5] = 0x01c29400, | ||
50 | [AW_R40_DEV_UART6] = 0x01c29800, | ||
51 | [AW_R40_DEV_UART7] = 0x01c29c00, | ||
52 | + [AW_R40_DEV_TWI0] = 0x01c2ac00, | ||
53 | [AW_R40_DEV_GIC_DIST] = 0x01c81000, | ||
54 | [AW_R40_DEV_GIC_CPU] = 0x01c82000, | ||
55 | [AW_R40_DEV_GIC_HYP] = 0x01c84000, | ||
56 | @@ -XXX,XX +XXX,XX @@ static struct AwR40Unimplemented r40_unimplemented[] = { | ||
57 | { "uart7", 0x01c29c00, 1 * KiB }, | ||
58 | { "ps20", 0x01c2a000, 1 * KiB }, | ||
59 | { "ps21", 0x01c2a400, 1 * KiB }, | ||
60 | - { "twi0", 0x01c2ac00, 1 * KiB }, | ||
61 | { "twi1", 0x01c2b000, 1 * KiB }, | ||
62 | { "twi2", 0x01c2b400, 1 * KiB }, | ||
63 | { "twi3", 0x01c2b800, 1 * KiB }, | ||
64 | @@ -XXX,XX +XXX,XX @@ enum { | ||
65 | AW_R40_GIC_SPI_UART1 = 2, | ||
66 | AW_R40_GIC_SPI_UART2 = 3, | ||
67 | AW_R40_GIC_SPI_UART3 = 4, | ||
68 | + AW_R40_GIC_SPI_TWI0 = 7, | ||
69 | AW_R40_GIC_SPI_UART4 = 17, | ||
70 | AW_R40_GIC_SPI_UART5 = 18, | ||
71 | AW_R40_GIC_SPI_UART6 = 19, | ||
72 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) | ||
73 | object_initialize_child(obj, mmc_names[i], &s->mmc[i], | ||
74 | TYPE_AW_SDHOST_SUN5I); | ||
47 | } | 75 | } |
48 | + tcg_temp_free_i64(tcg_ebytes); | 76 | + |
49 | tcg_temp_free_i64(tcg_addr); | 77 | + object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); |
50 | } | 78 | } |
51 | 79 | ||
52 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 80 | static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
53 | bool replicate = false; | 81 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
54 | int index = is_q << 3 | S << 2 | size; | 82 | 115200, serial_hd(i), DEVICE_NATIVE_ENDIAN); |
55 | int ebytes, xs; | ||
56 | - TCGv_i64 tcg_addr, tcg_rn; | ||
57 | + TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes; | ||
58 | |||
59 | switch (scale) { | ||
60 | case 3: | ||
61 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | ||
62 | tcg_rn = cpu_reg_sp(s, rn); | ||
63 | tcg_addr = tcg_temp_new_i64(); | ||
64 | tcg_gen_mov_i64(tcg_addr, tcg_rn); | ||
65 | + tcg_ebytes = tcg_const_i64(ebytes); | ||
66 | |||
67 | for (xs = 0; xs < selem; xs++) { | ||
68 | if (replicate) { | ||
69 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | ||
70 | do_vec_st(s, rt, index, tcg_addr, scale); | ||
71 | } | ||
72 | } | ||
73 | - tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes); | ||
74 | + tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); | ||
75 | rt = (rt + 1) % 32; | ||
76 | } | 83 | } |
77 | 84 | ||
78 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 85 | + /* I2C */ |
79 | tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); | 86 | + sysbus_realize(SYS_BUS_DEVICE(&s->i2c0), &error_fatal); |
80 | } | 87 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c0), 0, s->memmap[AW_R40_DEV_TWI0]); |
81 | } | 88 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, |
82 | + tcg_temp_free_i64(tcg_ebytes); | 89 | + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_TWI0)); |
83 | tcg_temp_free_i64(tcg_addr); | 90 | + |
84 | } | 91 | /* Unimplemented devices */ |
85 | 92 | for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { | |
93 | create_unimplemented_device(r40_unimplemented[i].device_name, | ||
86 | -- | 94 | -- |
87 | 2.19.1 | 95 | 2.34.1 |
88 | |||
89 | diff view generated by jsdifflib |
1 | The HCR.DC virtualization configuration register bit has the | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | following effects: | ||
3 | * SCTLR.M behaves as if it is 0 for all purposes except | ||
4 | direct reads of the bit | ||
5 | * HCR.VM behaves as if it is 1 for all purposes except | ||
6 | direct reads of the bit | ||
7 | * the memory type produced by the first stage of the EL1&EL0 | ||
8 | translation regime is Normal Non-Shareable, | ||
9 | Inner Write-Back Read-Allocate Write-Allocate, | ||
10 | Outer Write-Back Read-Allocate Write-Allocate. | ||
11 | 2 | ||
12 | Implement this behaviour. | 3 | This patch adds minimal support for AXP-221 PMU and connect it to |
4 | bananapi M2U board. | ||
13 | 5 | ||
6 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Message-id: 20181012144235.19646-5-peter.maydell@linaro.org | ||
17 | --- | 8 | --- |
18 | target/arm/helper.c | 23 +++++++++++++++++++++-- | 9 | hw/arm/bananapi_m2u.c | 6 + |
19 | 1 file changed, 21 insertions(+), 2 deletions(-) | 10 | hw/misc/axp209.c | 238 ----------------------------------- |
11 | hw/misc/axp2xx.c | 283 ++++++++++++++++++++++++++++++++++++++++++ | ||
12 | hw/arm/Kconfig | 3 +- | ||
13 | hw/misc/Kconfig | 2 +- | ||
14 | hw/misc/meson.build | 2 +- | ||
15 | hw/misc/trace-events | 8 +- | ||
16 | 7 files changed, 297 insertions(+), 245 deletions(-) | ||
17 | delete mode 100644 hw/misc/axp209.c | ||
18 | create mode 100644 hw/misc/axp2xx.c | ||
20 | 19 | ||
21 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 20 | diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c |
22 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/target/arm/helper.c | 22 | --- a/hw/arm/bananapi_m2u.c |
24 | +++ b/target/arm/helper.c | 23 | +++ b/hw/arm/bananapi_m2u.c |
25 | @@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, | 24 | @@ -XXX,XX +XXX,XX @@ |
26 | * * The Non-secure TTBCR.EAE bit is set to 1 | 25 | #include "qapi/error.h" |
27 | * * The implementation includes EL2, and the value of HCR.VM is 1 | 26 | #include "qemu/error-report.h" |
28 | * | 27 | #include "hw/boards.h" |
29 | + * (Note that HCR.DC makes HCR.VM behave as if it is 1.) | 28 | +#include "hw/i2c/i2c.h" |
30 | + * | 29 | #include "hw/qdev-properties.h" |
31 | * ATS1Hx always uses the 64bit format (not supported yet). | 30 | #include "hw/arm/allwinner-r40.h" |
32 | */ | 31 | |
33 | format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); | 32 | @@ -XXX,XX +XXX,XX @@ static void bpim2u_init(MachineState *machine) |
34 | 33 | { | |
35 | if (arm_feature(env, ARM_FEATURE_EL2)) { | 34 | bool bootroom_loaded = false; |
36 | if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { | 35 | AwR40State *r40; |
37 | - format64 |= env->cp15.hcr_el2 & HCR_VM; | 36 | + I2CBus *i2c; |
38 | + format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); | 37 | |
39 | } else { | 38 | /* BIOS is not supported by this board */ |
40 | format64 |= arm_current_el(env) == 2; | 39 | if (machine->firmware) { |
41 | } | 40 | @@ -XXX,XX +XXX,XX @@ static void bpim2u_init(MachineState *machine) |
42 | @@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env, | ||
43 | } | ||
44 | |||
45 | if (mmu_idx == ARMMMUIdx_S2NS) { | ||
46 | - return (env->cp15.hcr_el2 & HCR_VM) == 0; | ||
47 | + /* HCR.DC means HCR.VM behaves as 1 */ | ||
48 | + return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; | ||
49 | } | ||
50 | |||
51 | if (env->cp15.hcr_el2 & HCR_TGE) { | ||
52 | @@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env, | ||
53 | } | 41 | } |
54 | } | 42 | } |
55 | 43 | ||
56 | + if ((env->cp15.hcr_el2 & HCR_DC) && | 44 | + /* Connect AXP221 */ |
57 | + (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) { | 45 | + i2c = I2C_BUS(qdev_get_child_bus(DEVICE(&r40->i2c0), "i2c")); |
58 | + /* HCR.DC means SCTLR_EL1.M behaves as 0 */ | 46 | + i2c_slave_create_simple(i2c, "axp221_pmu", 0x34); |
59 | + return true; | 47 | + |
48 | /* SDRAM */ | ||
49 | memory_region_add_subregion(get_system_memory(), | ||
50 | r40->memmap[AW_R40_DEV_SDRAM], machine->ram); | ||
51 | diff --git a/hw/misc/axp209.c b/hw/misc/axp209.c | ||
52 | deleted file mode 100644 | ||
53 | index XXXXXXX..XXXXXXX | ||
54 | --- a/hw/misc/axp209.c | ||
55 | +++ /dev/null | ||
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | -/* | ||
58 | - * AXP-209 PMU Emulation | ||
59 | - * | ||
60 | - * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com> | ||
61 | - * | ||
62 | - * Permission is hereby granted, free of charge, to any person obtaining a | ||
63 | - * copy of this software and associated documentation files (the "Software"), | ||
64 | - * to deal in the Software without restriction, including without limitation | ||
65 | - * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
66 | - * and/or sell copies of the Software, and to permit persons to whom the | ||
67 | - * Software is furnished to do so, subject to the following conditions: | ||
68 | - * | ||
69 | - * The above copyright notice and this permission notice shall be included in | ||
70 | - * all copies or substantial portions of the Software. | ||
71 | - * | ||
72 | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
73 | - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
74 | - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
75 | - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
76 | - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
77 | - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
78 | - * DEALINGS IN THE SOFTWARE. | ||
79 | - * | ||
80 | - * SPDX-License-Identifier: MIT | ||
81 | - */ | ||
82 | - | ||
83 | -#include "qemu/osdep.h" | ||
84 | -#include "qemu/log.h" | ||
85 | -#include "trace.h" | ||
86 | -#include "hw/i2c/i2c.h" | ||
87 | -#include "migration/vmstate.h" | ||
88 | - | ||
89 | -#define TYPE_AXP209_PMU "axp209_pmu" | ||
90 | - | ||
91 | -#define AXP209(obj) \ | ||
92 | - OBJECT_CHECK(AXP209I2CState, (obj), TYPE_AXP209_PMU) | ||
93 | - | ||
94 | -/* registers */ | ||
95 | -enum { | ||
96 | - REG_POWER_STATUS = 0x0u, | ||
97 | - REG_OPERATING_MODE, | ||
98 | - REG_OTG_VBUS_STATUS, | ||
99 | - REG_CHIP_VERSION, | ||
100 | - REG_DATA_CACHE_0, | ||
101 | - REG_DATA_CACHE_1, | ||
102 | - REG_DATA_CACHE_2, | ||
103 | - REG_DATA_CACHE_3, | ||
104 | - REG_DATA_CACHE_4, | ||
105 | - REG_DATA_CACHE_5, | ||
106 | - REG_DATA_CACHE_6, | ||
107 | - REG_DATA_CACHE_7, | ||
108 | - REG_DATA_CACHE_8, | ||
109 | - REG_DATA_CACHE_9, | ||
110 | - REG_DATA_CACHE_A, | ||
111 | - REG_DATA_CACHE_B, | ||
112 | - REG_POWER_OUTPUT_CTRL = 0x12u, | ||
113 | - REG_DC_DC2_OUT_V_CTRL = 0x23u, | ||
114 | - REG_DC_DC2_DVS_CTRL = 0x25u, | ||
115 | - REG_DC_DC3_OUT_V_CTRL = 0x27u, | ||
116 | - REG_LDO2_4_OUT_V_CTRL, | ||
117 | - REG_LDO3_OUT_V_CTRL, | ||
118 | - REG_VBUS_CH_MGMT = 0x30u, | ||
119 | - REG_SHUTDOWN_V_CTRL, | ||
120 | - REG_SHUTDOWN_CTRL, | ||
121 | - REG_CHARGE_CTRL_1, | ||
122 | - REG_CHARGE_CTRL_2, | ||
123 | - REG_SPARE_CHARGE_CTRL, | ||
124 | - REG_PEK_KEY_CTRL, | ||
125 | - REG_DC_DC_FREQ_SET, | ||
126 | - REG_CHR_TEMP_TH_SET, | ||
127 | - REG_CHR_HIGH_TEMP_TH_CTRL, | ||
128 | - REG_IPSOUT_WARN_L1, | ||
129 | - REG_IPSOUT_WARN_L2, | ||
130 | - REG_DISCHR_TEMP_TH_SET, | ||
131 | - REG_DISCHR_HIGH_TEMP_TH_CTRL, | ||
132 | - REG_IRQ_BANK_1_CTRL = 0x40u, | ||
133 | - REG_IRQ_BANK_2_CTRL, | ||
134 | - REG_IRQ_BANK_3_CTRL, | ||
135 | - REG_IRQ_BANK_4_CTRL, | ||
136 | - REG_IRQ_BANK_5_CTRL, | ||
137 | - REG_IRQ_BANK_1_STAT = 0x48u, | ||
138 | - REG_IRQ_BANK_2_STAT, | ||
139 | - REG_IRQ_BANK_3_STAT, | ||
140 | - REG_IRQ_BANK_4_STAT, | ||
141 | - REG_IRQ_BANK_5_STAT, | ||
142 | - REG_ADC_ACIN_V_H = 0x56u, | ||
143 | - REG_ADC_ACIN_V_L, | ||
144 | - REG_ADC_ACIN_CURR_H, | ||
145 | - REG_ADC_ACIN_CURR_L, | ||
146 | - REG_ADC_VBUS_V_H, | ||
147 | - REG_ADC_VBUS_V_L, | ||
148 | - REG_ADC_VBUS_CURR_H, | ||
149 | - REG_ADC_VBUS_CURR_L, | ||
150 | - REG_ADC_INT_TEMP_H, | ||
151 | - REG_ADC_INT_TEMP_L, | ||
152 | - REG_ADC_TEMP_SENS_V_H = 0x62u, | ||
153 | - REG_ADC_TEMP_SENS_V_L, | ||
154 | - REG_ADC_BAT_V_H = 0x78u, | ||
155 | - REG_ADC_BAT_V_L, | ||
156 | - REG_ADC_BAT_DISCHR_CURR_H, | ||
157 | - REG_ADC_BAT_DISCHR_CURR_L, | ||
158 | - REG_ADC_BAT_CHR_CURR_H, | ||
159 | - REG_ADC_BAT_CHR_CURR_L, | ||
160 | - REG_ADC_IPSOUT_V_H, | ||
161 | - REG_ADC_IPSOUT_V_L, | ||
162 | - REG_DC_DC_MOD_SEL = 0x80u, | ||
163 | - REG_ADC_EN_1, | ||
164 | - REG_ADC_EN_2, | ||
165 | - REG_ADC_SR_CTRL, | ||
166 | - REG_ADC_IN_RANGE, | ||
167 | - REG_GPIO1_ADC_IRQ_RISING_TH, | ||
168 | - REG_GPIO1_ADC_IRQ_FALLING_TH, | ||
169 | - REG_TIMER_CTRL = 0x8au, | ||
170 | - REG_VBUS_CTRL_MON_SRP, | ||
171 | - REG_OVER_TEMP_SHUTDOWN = 0x8fu, | ||
172 | - REG_GPIO0_FEAT_SET, | ||
173 | - REG_GPIO_OUT_HIGH_SET, | ||
174 | - REG_GPIO1_FEAT_SET, | ||
175 | - REG_GPIO2_FEAT_SET, | ||
176 | - REG_GPIO_SIG_STATE_SET_MON, | ||
177 | - REG_GPIO3_SET, | ||
178 | - REG_COULOMB_CNTR_CTRL = 0xb8u, | ||
179 | - REG_POWER_MEAS_RES, | ||
180 | - NR_REGS | ||
181 | -}; | ||
182 | - | ||
183 | -#define AXP209_CHIP_VERSION_ID (0x01) | ||
184 | -#define AXP209_DC_DC2_OUT_V_CTRL_RESET (0x16) | ||
185 | -#define AXP209_IRQ_BANK_1_CTRL_RESET (0xd8) | ||
186 | - | ||
187 | -/* A simple I2C slave which returns values of ID or CNT register. */ | ||
188 | -typedef struct AXP209I2CState { | ||
189 | - /*< private >*/ | ||
190 | - I2CSlave i2c; | ||
191 | - /*< public >*/ | ||
192 | - uint8_t regs[NR_REGS]; /* peripheral registers */ | ||
193 | - uint8_t ptr; /* current register index */ | ||
194 | - uint8_t count; /* counter used for tx/rx */ | ||
195 | -} AXP209I2CState; | ||
196 | - | ||
197 | -/* Reset all counters and load ID register */ | ||
198 | -static void axp209_reset_enter(Object *obj, ResetType type) | ||
199 | -{ | ||
200 | - AXP209I2CState *s = AXP209(obj); | ||
201 | - | ||
202 | - memset(s->regs, 0, NR_REGS); | ||
203 | - s->ptr = 0; | ||
204 | - s->count = 0; | ||
205 | - s->regs[REG_CHIP_VERSION] = AXP209_CHIP_VERSION_ID; | ||
206 | - s->regs[REG_DC_DC2_OUT_V_CTRL] = AXP209_DC_DC2_OUT_V_CTRL_RESET; | ||
207 | - s->regs[REG_IRQ_BANK_1_CTRL] = AXP209_IRQ_BANK_1_CTRL_RESET; | ||
208 | -} | ||
209 | - | ||
210 | -/* Handle events from master. */ | ||
211 | -static int axp209_event(I2CSlave *i2c, enum i2c_event event) | ||
212 | -{ | ||
213 | - AXP209I2CState *s = AXP209(i2c); | ||
214 | - | ||
215 | - s->count = 0; | ||
216 | - | ||
217 | - return 0; | ||
218 | -} | ||
219 | - | ||
220 | -/* Called when master requests read */ | ||
221 | -static uint8_t axp209_rx(I2CSlave *i2c) | ||
222 | -{ | ||
223 | - AXP209I2CState *s = AXP209(i2c); | ||
224 | - uint8_t ret = 0xff; | ||
225 | - | ||
226 | - if (s->ptr < NR_REGS) { | ||
227 | - ret = s->regs[s->ptr++]; | ||
228 | - } | ||
229 | - | ||
230 | - trace_axp209_rx(s->ptr - 1, ret); | ||
231 | - | ||
232 | - return ret; | ||
233 | -} | ||
234 | - | ||
235 | -/* | ||
236 | - * Called when master sends write. | ||
237 | - * Update ptr with byte 0, then perform write with second byte. | ||
238 | - */ | ||
239 | -static int axp209_tx(I2CSlave *i2c, uint8_t data) | ||
240 | -{ | ||
241 | - AXP209I2CState *s = AXP209(i2c); | ||
242 | - | ||
243 | - if (s->count == 0) { | ||
244 | - /* Store register address */ | ||
245 | - s->ptr = data; | ||
246 | - s->count++; | ||
247 | - trace_axp209_select(data); | ||
248 | - } else { | ||
249 | - trace_axp209_tx(s->ptr, data); | ||
250 | - if (s->ptr == REG_DC_DC2_OUT_V_CTRL) { | ||
251 | - s->regs[s->ptr++] = data; | ||
252 | - } | ||
253 | - } | ||
254 | - | ||
255 | - return 0; | ||
256 | -} | ||
257 | - | ||
258 | -static const VMStateDescription vmstate_axp209 = { | ||
259 | - .name = TYPE_AXP209_PMU, | ||
260 | - .version_id = 1, | ||
261 | - .fields = (VMStateField[]) { | ||
262 | - VMSTATE_UINT8_ARRAY(regs, AXP209I2CState, NR_REGS), | ||
263 | - VMSTATE_UINT8(count, AXP209I2CState), | ||
264 | - VMSTATE_UINT8(ptr, AXP209I2CState), | ||
265 | - VMSTATE_END_OF_LIST() | ||
266 | - } | ||
267 | -}; | ||
268 | - | ||
269 | -static void axp209_class_init(ObjectClass *oc, void *data) | ||
270 | -{ | ||
271 | - DeviceClass *dc = DEVICE_CLASS(oc); | ||
272 | - I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); | ||
273 | - ResettableClass *rc = RESETTABLE_CLASS(oc); | ||
274 | - | ||
275 | - rc->phases.enter = axp209_reset_enter; | ||
276 | - dc->vmsd = &vmstate_axp209; | ||
277 | - isc->event = axp209_event; | ||
278 | - isc->recv = axp209_rx; | ||
279 | - isc->send = axp209_tx; | ||
280 | -} | ||
281 | - | ||
282 | -static const TypeInfo axp209_info = { | ||
283 | - .name = TYPE_AXP209_PMU, | ||
284 | - .parent = TYPE_I2C_SLAVE, | ||
285 | - .instance_size = sizeof(AXP209I2CState), | ||
286 | - .class_init = axp209_class_init | ||
287 | -}; | ||
288 | - | ||
289 | -static void axp209_register_devices(void) | ||
290 | -{ | ||
291 | - type_register_static(&axp209_info); | ||
292 | -} | ||
293 | - | ||
294 | -type_init(axp209_register_devices); | ||
295 | diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c | ||
296 | new file mode 100644 | ||
297 | index XXXXXXX..XXXXXXX | ||
298 | --- /dev/null | ||
299 | +++ b/hw/misc/axp2xx.c | ||
300 | @@ -XXX,XX +XXX,XX @@ | ||
301 | +/* | ||
302 | + * AXP-2XX PMU Emulation, supported chips: | ||
303 | + * AXP209 | ||
304 | + * AXP221 | ||
305 | + * | ||
306 | + * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com> | ||
307 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
308 | + * | ||
309 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
310 | + * copy of this software and associated documentation files (the "Software"), | ||
311 | + * to deal in the Software without restriction, including without limitation | ||
312 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
313 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
314 | + * Software is furnished to do so, subject to the following conditions: | ||
315 | + * | ||
316 | + * The above copyright notice and this permission notice shall be included in | ||
317 | + * all copies or substantial portions of the Software. | ||
318 | + * | ||
319 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
320 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
321 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
322 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
323 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
324 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
325 | + * DEALINGS IN THE SOFTWARE. | ||
326 | + * | ||
327 | + * SPDX-License-Identifier: MIT | ||
328 | + */ | ||
329 | + | ||
330 | +#include "qemu/osdep.h" | ||
331 | +#include "qemu/log.h" | ||
332 | +#include "qom/object.h" | ||
333 | +#include "trace.h" | ||
334 | +#include "hw/i2c/i2c.h" | ||
335 | +#include "migration/vmstate.h" | ||
336 | + | ||
337 | +#define TYPE_AXP2XX "axp2xx_pmu" | ||
338 | +#define TYPE_AXP209_PMU "axp209_pmu" | ||
339 | +#define TYPE_AXP221_PMU "axp221_pmu" | ||
340 | + | ||
341 | +OBJECT_DECLARE_TYPE(AXP2xxI2CState, AXP2xxClass, AXP2XX) | ||
342 | + | ||
343 | +#define NR_REGS (0xff) | ||
344 | + | ||
345 | +/* A simple I2C slave which returns values of ID or CNT register. */ | ||
346 | +typedef struct AXP2xxI2CState { | ||
347 | + /*< private >*/ | ||
348 | + I2CSlave i2c; | ||
349 | + /*< public >*/ | ||
350 | + uint8_t regs[NR_REGS]; /* peripheral registers */ | ||
351 | + uint8_t ptr; /* current register index */ | ||
352 | + uint8_t count; /* counter used for tx/rx */ | ||
353 | +} AXP2xxI2CState; | ||
354 | + | ||
355 | +typedef struct AXP2xxClass { | ||
356 | + /*< private >*/ | ||
357 | + I2CSlaveClass parent_class; | ||
358 | + /*< public >*/ | ||
359 | + void (*reset_enter)(AXP2xxI2CState *s, ResetType type); | ||
360 | +} AXP2xxClass; | ||
361 | + | ||
362 | +#define AXP209_CHIP_VERSION_ID (0x01) | ||
363 | +#define AXP209_DC_DC2_OUT_V_CTRL_RESET (0x16) | ||
364 | + | ||
365 | +/* Reset all counters and load ID register */ | ||
366 | +static void axp209_reset_enter(AXP2xxI2CState *s, ResetType type) | ||
367 | +{ | ||
368 | + memset(s->regs, 0, NR_REGS); | ||
369 | + s->ptr = 0; | ||
370 | + s->count = 0; | ||
371 | + | ||
372 | + s->regs[0x03] = AXP209_CHIP_VERSION_ID; | ||
373 | + s->regs[0x23] = AXP209_DC_DC2_OUT_V_CTRL_RESET; | ||
374 | + | ||
375 | + s->regs[0x30] = 0x60; | ||
376 | + s->regs[0x32] = 0x46; | ||
377 | + s->regs[0x34] = 0x41; | ||
378 | + s->regs[0x35] = 0x22; | ||
379 | + s->regs[0x36] = 0x5d; | ||
380 | + s->regs[0x37] = 0x08; | ||
381 | + s->regs[0x38] = 0xa5; | ||
382 | + s->regs[0x39] = 0x1f; | ||
383 | + s->regs[0x3a] = 0x68; | ||
384 | + s->regs[0x3b] = 0x5f; | ||
385 | + s->regs[0x3c] = 0xfc; | ||
386 | + s->regs[0x3d] = 0x16; | ||
387 | + s->regs[0x40] = 0xd8; | ||
388 | + s->regs[0x42] = 0xff; | ||
389 | + s->regs[0x43] = 0x3b; | ||
390 | + s->regs[0x80] = 0xe0; | ||
391 | + s->regs[0x82] = 0x83; | ||
392 | + s->regs[0x83] = 0x80; | ||
393 | + s->regs[0x84] = 0x32; | ||
394 | + s->regs[0x86] = 0xff; | ||
395 | + s->regs[0x90] = 0x07; | ||
396 | + s->regs[0x91] = 0xa0; | ||
397 | + s->regs[0x92] = 0x07; | ||
398 | + s->regs[0x93] = 0x07; | ||
399 | +} | ||
400 | + | ||
401 | +#define AXP221_PWR_STATUS_ACIN_PRESENT BIT(7) | ||
402 | +#define AXP221_PWR_STATUS_ACIN_AVAIL BIT(6) | ||
403 | +#define AXP221_PWR_STATUS_VBUS_PRESENT BIT(5) | ||
404 | +#define AXP221_PWR_STATUS_VBUS_USED BIT(4) | ||
405 | +#define AXP221_PWR_STATUS_BAT_CHARGING BIT(2) | ||
406 | +#define AXP221_PWR_STATUS_ACIN_VBUS_POWERED BIT(1) | ||
407 | + | ||
408 | +/* Reset all counters and load ID register */ | ||
409 | +static void axp221_reset_enter(AXP2xxI2CState *s, ResetType type) | ||
410 | +{ | ||
411 | + memset(s->regs, 0, NR_REGS); | ||
412 | + s->ptr = 0; | ||
413 | + s->count = 0; | ||
414 | + | ||
415 | + /* input power status register */ | ||
416 | + s->regs[0x00] = AXP221_PWR_STATUS_ACIN_PRESENT | ||
417 | + | AXP221_PWR_STATUS_ACIN_AVAIL | ||
418 | + | AXP221_PWR_STATUS_ACIN_VBUS_POWERED; | ||
419 | + | ||
420 | + s->regs[0x01] = 0x00; /* no battery is connected */ | ||
421 | + | ||
422 | + /* | ||
423 | + * CHIPID register, not documented in the datasheet, but checked by | ||
424 | + * the u-boot SPL. Reading it on a real AXP221s returned 0x06, | ||
425 | + * so leave 0x06 here. | ||
426 | + */ | ||
427 | + s->regs[0x03] = 0x06; | ||
428 | + | ||
429 | + s->regs[0x10] = 0xbf; | ||
430 | + s->regs[0x13] = 0x01; | ||
431 | + s->regs[0x30] = 0x60; | ||
432 | + s->regs[0x31] = 0x03; | ||
433 | + s->regs[0x32] = 0x43; | ||
434 | + s->regs[0x33] = 0xc6; | ||
435 | + s->regs[0x34] = 0x45; | ||
436 | + s->regs[0x35] = 0x0e; | ||
437 | + s->regs[0x36] = 0x5d; | ||
438 | + s->regs[0x37] = 0x08; | ||
439 | + s->regs[0x38] = 0xa5; | ||
440 | + s->regs[0x39] = 0x1f; | ||
441 | + s->regs[0x3c] = 0xfc; | ||
442 | + s->regs[0x3d] = 0x16; | ||
443 | + s->regs[0x80] = 0x80; | ||
444 | + s->regs[0x82] = 0xe0; | ||
445 | + s->regs[0x84] = 0x32; | ||
446 | + s->regs[0x8f] = 0x01; | ||
447 | + | ||
448 | + s->regs[0x90] = 0x07; | ||
449 | + s->regs[0x91] = 0x1f; | ||
450 | + s->regs[0x92] = 0x07; | ||
451 | + s->regs[0x93] = 0x1f; | ||
452 | + | ||
453 | + s->regs[0x40] = 0xd8; | ||
454 | + s->regs[0x41] = 0xff; | ||
455 | + s->regs[0x42] = 0x03; | ||
456 | + s->regs[0x43] = 0x03; | ||
457 | + | ||
458 | + s->regs[0xb8] = 0xc0; | ||
459 | + s->regs[0xb9] = 0x64; | ||
460 | + s->regs[0xe6] = 0xa0; | ||
461 | +} | ||
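With these reset values the emulated PMU looks like a board running from mains power with no battery attached, so firmware power-supply probing succeeds without having to model a charger. As a worked example, a guest that reads the input power status register straight after reset sees

    regs[0x00] == AXP221_PWR_STATUS_ACIN_PRESENT | AXP221_PWR_STATUS_ACIN_AVAIL
                | AXP221_PWR_STATUS_ACIN_VBUS_POWERED    /* 0x80 | 0x40 | 0x02 == 0xc2 */

and regs[0x01] == 0x00, i.e. "no battery is connected".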
462 | + | ||
463 | +static void axp2xx_reset_enter(Object *obj, ResetType type) | ||
464 | +{ | ||
465 | + AXP2xxI2CState *s = AXP2XX(obj); | ||
466 | + AXP2xxClass *sc = AXP2XX_GET_CLASS(s); | ||
467 | + | ||
468 | + sc->reset_enter(s, type); | ||
469 | +} | ||
470 | + | ||
471 | +/* Handle events from master. */ | ||
472 | +static int axp2xx_event(I2CSlave *i2c, enum i2c_event event) | ||
473 | +{ | ||
474 | + AXP2xxI2CState *s = AXP2XX(i2c); | ||
475 | + | ||
476 | + s->count = 0; | ||
477 | + | ||
478 | + return 0; | ||
479 | +} | ||
480 | + | ||
481 | +/* Called when master requests read */ | ||
482 | +static uint8_t axp2xx_rx(I2CSlave *i2c) | ||
483 | +{ | ||
484 | + AXP2xxI2CState *s = AXP2XX(i2c); | ||
485 | + uint8_t ret = 0xff; | ||
486 | + | ||
487 | + if (s->ptr < NR_REGS) { | ||
488 | + ret = s->regs[s->ptr++]; | ||
60 | + } | 489 | + } |
61 | + | 490 | + |
62 | return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; | 491 | + trace_axp2xx_rx(s->ptr - 1, ret); |
63 | } | 492 | + |
64 | 493 | + return ret; | |
65 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, | 494 | +} |
66 | 495 | + | |
67 | /* Combine the S1 and S2 cache attributes, if needed */ | 496 | +/* |
68 | if (!ret && cacheattrs != NULL) { | 497 | + * Called when master sends write. |
69 | + if (env->cp15.hcr_el2 & HCR_DC) { | 498 | + * Update ptr with byte 0, then perform write with second byte. |
70 | + /* | 499 | + */ |
71 | + * HCR.DC forces the first stage attributes to | 500 | +static int axp2xx_tx(I2CSlave *i2c, uint8_t data) |
72 | + * Normal Non-Shareable, | 501 | +{ |
73 | + * Inner Write-Back Read-Allocate Write-Allocate, | 502 | + AXP2xxI2CState *s = AXP2XX(i2c); |
74 | + * Outer Write-Back Read-Allocate Write-Allocate. | 503 | + |
75 | + */ | 504 | + if (s->count == 0) { |
76 | + cacheattrs->attrs = 0xff; | 505 | + /* Store register address */ |
77 | + cacheattrs->shareability = 0; | 506 | + s->ptr = data; |
78 | + } | 507 | + s->count++; |
79 | *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); | 508 | + trace_axp2xx_select(data); |
80 | } | 509 | + } else { |
81 | 510 | + trace_axp2xx_tx(s->ptr, data); | |
511 | + s->regs[s->ptr++] = data; | ||
512 | + } | ||
513 | + | ||
514 | + return 0; | ||
515 | +} | ||
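Taken together, axp2xx_event(), axp2xx_tx() and axp2xx_rx() implement the usual register-pointer I2C convention: a START condition resets the byte counter, the first byte written selects the register pointer, and every subsequent write or read accesses regs[ptr] and post-increments it. A rough sketch of what that means for a caller inside QEMU (this helper is illustrative only and not part of the patch; a real guest performs the same transaction through the TWI controller's own registers):

    /* Illustrative sketch: read the AXP221 chip-id register (0x03). */
    static uint8_t axp221_read_chipid(I2CBus *bus)
    {
        uint8_t id;

        i2c_start_send(bus, 0x34);   /* START, address the PMU for writing */
        i2c_send(bus, 0x03);         /* first data byte selects the register */
        i2c_start_recv(bus, 0x34);   /* repeated START, switch to reading */
        id = i2c_recv(bus);          /* axp2xx_rx() returns regs[0x03], i.e. 0x06 */
        i2c_end_transfer(bus);       /* STOP */
        return id;
    }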
516 | + | ||
517 | +static const VMStateDescription vmstate_axp2xx = { | ||
518 | + .name = TYPE_AXP2XX, | ||
519 | + .version_id = 1, | ||
520 | + .fields = (VMStateField[]) { | ||
521 | + VMSTATE_UINT8_ARRAY(regs, AXP2xxI2CState, NR_REGS), | ||
522 | + VMSTATE_UINT8(ptr, AXP2xxI2CState), | ||
523 | + VMSTATE_UINT8(count, AXP2xxI2CState), | ||
524 | + VMSTATE_END_OF_LIST() | ||
525 | + } | ||
526 | +}; | ||
527 | + | ||
528 | +static void axp2xx_class_init(ObjectClass *oc, void *data) | ||
529 | +{ | ||
530 | + DeviceClass *dc = DEVICE_CLASS(oc); | ||
531 | + I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); | ||
532 | + ResettableClass *rc = RESETTABLE_CLASS(oc); | ||
533 | + | ||
534 | + rc->phases.enter = axp2xx_reset_enter; | ||
535 | + dc->vmsd = &vmstate_axp2xx; | ||
536 | + isc->event = axp2xx_event; | ||
537 | + isc->recv = axp2xx_rx; | ||
538 | + isc->send = axp2xx_tx; | ||
539 | +} | ||
540 | + | ||
541 | +static const TypeInfo axp2xx_info = { | ||
542 | + .name = TYPE_AXP2XX, | ||
543 | + .parent = TYPE_I2C_SLAVE, | ||
544 | + .instance_size = sizeof(AXP2xxI2CState), | ||
545 | + .class_size = sizeof(AXP2xxClass), | ||
546 | + .class_init = axp2xx_class_init, | ||
547 | + .abstract = true, | ||
548 | +}; | ||
549 | + | ||
550 | +static void axp209_class_init(ObjectClass *oc, void *data) | ||
551 | +{ | ||
552 | + AXP2xxClass *sc = AXP2XX_CLASS(oc); | ||
553 | + | ||
554 | + sc->reset_enter = axp209_reset_enter; | ||
555 | +} | ||
556 | + | ||
557 | +static const TypeInfo axp209_info = { | ||
558 | + .name = TYPE_AXP209_PMU, | ||
559 | + .parent = TYPE_AXP2XX, | ||
560 | + .class_init = axp209_class_init | ||
561 | +}; | ||
562 | + | ||
563 | +static void axp221_class_init(ObjectClass *oc, void *data) | ||
564 | +{ | ||
565 | + AXP2xxClass *sc = AXP2XX_CLASS(oc); | ||
566 | + | ||
567 | + sc->reset_enter = axp221_reset_enter; | ||
568 | +} | ||
569 | + | ||
570 | +static const TypeInfo axp221_info = { | ||
571 | + .name = TYPE_AXP221_PMU, | ||
572 | + .parent = TYPE_AXP2XX, | ||
573 | + .class_init = axp221_class_init, | ||
574 | +}; | ||
575 | + | ||
576 | +static void axp2xx_register_devices(void) | ||
577 | +{ | ||
578 | + type_register_static(&axp2xx_info); | ||
579 | + type_register_static(&axp209_info); | ||
580 | + type_register_static(&axp221_info); | ||
581 | +} | ||
582 | + | ||
583 | +type_init(axp2xx_register_devices); | ||
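Because the chip-specific behaviour is reduced to a single reset_enter hook on AXP2xxClass, further AXP variants slot in with a handful of lines. A hedged sketch of what that would look like (the chip name, type string and register value below are invented for illustration):

    static void axp9xx_reset_enter(AXP2xxI2CState *s, ResetType type)
    {
        memset(s->regs, 0, NR_REGS);
        s->ptr = 0;
        s->count = 0;
        s->regs[0x03] = 0x42;                /* made-up chip version */
    }

    static void axp9xx_class_init(ObjectClass *oc, void *data)
    {
        AXP2xxClass *sc = AXP2XX_CLASS(oc);

        sc->reset_enter = axp9xx_reset_enter;
    }

    static const TypeInfo axp9xx_info = {
        .name = "axp9xx_pmu",                /* hypothetical */
        .parent = TYPE_AXP2XX,
        .class_init = axp9xx_class_init,
    };

registered from axp2xx_register_devices() just like the AXP209 and AXP221 types above.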
584 | diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig | ||
585 | index XXXXXXX..XXXXXXX 100644 | ||
586 | --- a/hw/arm/Kconfig | ||
587 | +++ b/hw/arm/Kconfig | ||
588 | @@ -XXX,XX +XXX,XX @@ config ALLWINNER_A10 | ||
589 | select ALLWINNER_WDT | ||
590 | select ALLWINNER_EMAC | ||
591 | select ALLWINNER_I2C | ||
592 | - select AXP209_PMU | ||
593 | + select AXP2XX_PMU | ||
594 | select SERIAL | ||
595 | select UNIMP | ||
596 | |||
597 | @@ -XXX,XX +XXX,XX @@ config ALLWINNER_R40 | ||
598 | bool | ||
599 | default y if TCG && ARM | ||
600 | select ALLWINNER_A10_PIT | ||
601 | + select AXP2XX_PMU | ||
602 | select SERIAL | ||
603 | select ARM_TIMER | ||
604 | select ARM_GIC | ||
605 | diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig | ||
606 | index XXXXXXX..XXXXXXX 100644 | ||
607 | --- a/hw/misc/Kconfig | ||
608 | +++ b/hw/misc/Kconfig | ||
609 | @@ -XXX,XX +XXX,XX @@ config ALLWINNER_A10_CCM | ||
610 | config ALLWINNER_A10_DRAMC | ||
611 | bool | ||
612 | |||
613 | -config AXP209_PMU | ||
614 | +config AXP2XX_PMU | ||
615 | bool | ||
616 | depends on I2C | ||
617 | |||
618 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
619 | index XXXXXXX..XXXXXXX 100644 | ||
620 | --- a/hw/misc/meson.build | ||
621 | +++ b/hw/misc/meson.build | ||
622 | @@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c | ||
623 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) | ||
624 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) | ||
625 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c')) | ||
626 | -softmmu_ss.add(when: 'CONFIG_AXP209_PMU', if_true: files('axp209.c')) | ||
627 | +softmmu_ss.add(when: 'CONFIG_AXP2XX_PMU', if_true: files('axp2xx.c')) | ||
628 | softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) | ||
629 | softmmu_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) | ||
630 | softmmu_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c')) | ||
631 | diff --git a/hw/misc/trace-events b/hw/misc/trace-events | ||
632 | index XXXXXXX..XXXXXXX 100644 | ||
633 | --- a/hw/misc/trace-events | ||
634 | +++ b/hw/misc/trace-events | ||
635 | @@ -XXX,XX +XXX,XX @@ allwinner_sid_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" | ||
636 | avr_power_read(uint8_t value) "power_reduc read value:%u" | ||
637 | avr_power_write(uint8_t value) "power_reduc write value:%u" | ||
638 | |||
639 | -# axp209.c | ||
640 | -axp209_rx(uint8_t reg, uint8_t data) "Read reg 0x%" PRIx8 " : 0x%" PRIx8 | ||
641 | -axp209_select(uint8_t reg) "Accessing reg 0x%" PRIx8 | ||
642 | -axp209_tx(uint8_t reg, uint8_t data) "Write reg 0x%" PRIx8 " : 0x%" PRIx8 | ||
643 | +# axp2xx | ||
644 | +axp2xx_rx(uint8_t reg, uint8_t data) "Read reg 0x%" PRIx8 " : 0x%" PRIx8 | ||
645 | +axp2xx_select(uint8_t reg) "Accessing reg 0x%" PRIx8 | ||
646 | +axp2xx_tx(uint8_t reg, uint8_t data) "Write reg 0x%" PRIx8 " : 0x%" PRIx8 | ||
647 | |||
648 | # eccmemctl.c | ||
649 | ecc_mem_writel_mer(uint32_t val) "Write memory enable 0x%08x" | ||
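The renamed trace points can be watched at run time in the usual way, e.g. by starting QEMU with something like -trace "axp2xx_*" (or toggling them from the monitor with the trace-event command), assuming the build's trace backend supports dynamic enabling, which the default "log" backend does.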
82 | -- | 650 | -- |
83 | 2.19.1 | 651 | 2.34.1 |
84 | |||
85 | diff view generated by jsdifflib |
1 | The HCR.FB virtualization configuration register bit requests that | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | TLB maintenance, branch predictor invalidate-all and icache | ||
3 | invalidate-all operations performed in NS EL1 should be upgraded | ||
4 | from "local CPU only to "broadcast within Inner Shareable domain". | ||
5 | For QEMU we NOP the branch predictor and icache operations, so | ||
6 | we only need to upgrade the TLB invalidates: | ||
7 | AArch32 TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID, | ||
8 | ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVAL, TLBIMVAAL | ||
9 | AArch64 TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1, | ||
10 | TLBI VALE1, TLBI VAALE1 | ||
11 | 2 | ||
3 | Types of memory that the SDRAM controller supports are DDR2/DDR3 | ||
4 | and capacities of up to 2GiB. This commit adds emulation support | ||
5 | of the Allwinner R40 SDRAM controller. | ||
6 | |||
7 | This driver only support 256M, 512M and 1024M memory now. | ||
8 | |||
9 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Message-id: 20181012144235.19646-4-peter.maydell@linaro.org | ||
15 | --- | 11 | --- |
16 | target/arm/helper.c | 191 +++++++++++++++++++++++++++----------------- | 12 | include/hw/arm/allwinner-r40.h | 13 +- |
17 | 1 file changed, 116 insertions(+), 75 deletions(-) | 13 | include/hw/misc/allwinner-r40-dramc.h | 108 ++++++ |
14 | hw/arm/allwinner-r40.c | 21 +- | ||
15 | hw/arm/bananapi_m2u.c | 7 + | ||
16 | hw/misc/allwinner-r40-dramc.c | 513 ++++++++++++++++++++++++++ | ||
17 | hw/misc/meson.build | 1 + | ||
18 | hw/misc/trace-events | 14 + | ||
19 | 7 files changed, 674 insertions(+), 3 deletions(-) | ||
20 | create mode 100644 include/hw/misc/allwinner-r40-dramc.h | ||
21 | create mode 100644 hw/misc/allwinner-r40-dramc.c | ||
18 | 22 | ||
19 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 23 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
20 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/helper.c | 25 | --- a/include/hw/arm/allwinner-r40.h |
22 | +++ b/target/arm/helper.c | 26 | +++ b/include/hw/arm/allwinner-r40.h |
23 | @@ -XXX,XX +XXX,XX @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, | 27 | @@ -XXX,XX +XXX,XX @@ |
24 | raw_write(env, ri, value); | 28 | #include "hw/intc/arm_gic.h" |
29 | #include "hw/sd/allwinner-sdhost.h" | ||
30 | #include "hw/misc/allwinner-r40-ccu.h" | ||
31 | +#include "hw/misc/allwinner-r40-dramc.h" | ||
32 | #include "hw/i2c/allwinner-i2c.h" | ||
33 | #include "target/arm/cpu.h" | ||
34 | #include "sysemu/block-backend.h" | ||
35 | @@ -XXX,XX +XXX,XX @@ enum { | ||
36 | AW_R40_DEV_GIC_CPU, | ||
37 | AW_R40_DEV_GIC_HYP, | ||
38 | AW_R40_DEV_GIC_VCPU, | ||
39 | - AW_R40_DEV_SDRAM | ||
40 | + AW_R40_DEV_SDRAM, | ||
41 | + AW_R40_DEV_DRAMCOM, | ||
42 | + AW_R40_DEV_DRAMCTL, | ||
43 | + AW_R40_DEV_DRAMPHY, | ||
44 | }; | ||
45 | |||
46 | #define AW_R40_NUM_CPUS (4) | ||
47 | @@ -XXX,XX +XXX,XX @@ struct AwR40State { | ||
48 | DeviceState parent_obj; | ||
49 | /*< public >*/ | ||
50 | |||
51 | + /** Physical base address for start of RAM */ | ||
52 | + hwaddr ram_addr; | ||
53 | + | ||
54 | + /** Total RAM size in megabytes */ | ||
55 | + uint32_t ram_size; | ||
56 | + | ||
57 | ARMCPU cpus[AW_R40_NUM_CPUS]; | ||
58 | const hwaddr *memmap; | ||
59 | AwA10PITState timer; | ||
60 | AwSdHostState mmc[AW_R40_NUM_MMCS]; | ||
61 | AwR40ClockCtlState ccu; | ||
62 | + AwR40DramCtlState dramc; | ||
63 | AWI2CState i2c0; | ||
64 | GICState gic; | ||
65 | MemoryRegion sram_a1; | ||
66 | diff --git a/include/hw/misc/allwinner-r40-dramc.h b/include/hw/misc/allwinner-r40-dramc.h | ||
67 | new file mode 100644 | ||
68 | index XXXXXXX..XXXXXXX | ||
69 | --- /dev/null | ||
70 | +++ b/include/hw/misc/allwinner-r40-dramc.h | ||
71 | @@ -XXX,XX +XXX,XX @@ | ||
72 | +/* | ||
73 | + * Allwinner R40 SDRAM Controller emulation | ||
74 | + * | ||
75 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
76 | + * | ||
77 | + * This program is free software: you can redistribute it and/or modify | ||
78 | + * it under the terms of the GNU General Public License as published by | ||
79 | + * the Free Software Foundation, either version 2 of the License, or | ||
80 | + * (at your option) any later version. | ||
81 | + * | ||
82 | + * This program is distributed in the hope that it will be useful, | ||
83 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
84 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
85 | + * GNU General Public License for more details. | ||
86 | + * | ||
87 | + * You should have received a copy of the GNU General Public License | ||
88 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
89 | + */ | ||
90 | + | ||
91 | +#ifndef HW_MISC_ALLWINNER_R40_DRAMC_H | ||
92 | +#define HW_MISC_ALLWINNER_R40_DRAMC_H | ||
93 | + | ||
94 | +#include "qom/object.h" | ||
95 | +#include "hw/sysbus.h" | ||
96 | +#include "exec/hwaddr.h" | ||
97 | + | ||
98 | +/** | ||
99 | + * Constants | ||
100 | + * @{ | ||
101 | + */ | ||
102 | + | ||
103 | +/** Highest register address used by DRAMCOM module */ | ||
104 | +#define AW_R40_DRAMCOM_REGS_MAXADDR (0x804) | ||
105 | + | ||
106 | +/** Total number of known DRAMCOM registers */ | ||
107 | +#define AW_R40_DRAMCOM_REGS_NUM (AW_R40_DRAMCOM_REGS_MAXADDR / \ | ||
108 | + sizeof(uint32_t)) | ||
109 | + | ||
110 | +/** Highest register address used by DRAMCTL module */ | ||
111 | +#define AW_R40_DRAMCTL_REGS_MAXADDR (0x88c) | ||
112 | + | ||
113 | +/** Total number of known DRAMCTL registers */ | ||
114 | +#define AW_R40_DRAMCTL_REGS_NUM (AW_R40_DRAMCTL_REGS_MAXADDR / \ | ||
115 | + sizeof(uint32_t)) | ||
116 | + | ||
117 | +/** Highest register address used by DRAMPHY module */ | ||
118 | +#define AW_R40_DRAMPHY_REGS_MAXADDR (0x4) | ||
119 | + | ||
120 | +/** Total number of known DRAMPHY registers */ | ||
121 | +#define AW_R40_DRAMPHY_REGS_NUM (AW_R40_DRAMPHY_REGS_MAXADDR / \ | ||
122 | + sizeof(uint32_t)) | ||
123 | + | ||
124 | +/** @} */ | ||
125 | + | ||
126 | +/** | ||
127 | + * Object model | ||
128 | + * @{ | ||
129 | + */ | ||
130 | + | ||
131 | +#define TYPE_AW_R40_DRAMC "allwinner-r40-dramc" | ||
132 | +OBJECT_DECLARE_SIMPLE_TYPE(AwR40DramCtlState, AW_R40_DRAMC) | ||
133 | + | ||
134 | +/** @} */ | ||
135 | + | ||
136 | +/** | ||
137 | + * Allwinner R40 SDRAM Controller object instance state. | ||
138 | + */ | ||
139 | +struct AwR40DramCtlState { | ||
140 | + /*< private >*/ | ||
141 | + SysBusDevice parent_obj; | ||
142 | + /*< public >*/ | ||
143 | + | ||
144 | + /** Physical base address for start of RAM */ | ||
145 | + hwaddr ram_addr; | ||
146 | + | ||
147 | + /** Total RAM size in megabytes */ | ||
148 | + uint32_t ram_size; | ||
149 | + | ||
150 | + uint8_t set_row_bits; | ||
151 | + uint8_t set_bank_bits; | ||
152 | + uint8_t set_col_bits; | ||
153 | + | ||
154 | + /** | ||
155 | + * @name Memory Regions | ||
156 | + * @{ | ||
157 | + */ | ||
158 | + MemoryRegion dramcom_iomem; /**< DRAMCOM module I/O registers */ | ||
159 | + MemoryRegion dramctl_iomem; /**< DRAMCTL module I/O registers */ | ||
160 | + MemoryRegion dramphy_iomem; /**< DRAMPHY module I/O registers */ | ||
161 | + MemoryRegion dram_high; /**< The high 1G dram for dualrank detect */ | ||
162 | + MemoryRegion detect_cells; /**< DRAM memory cells for auto detect */ | ||
163 | + | ||
164 | + /** @} */ | ||
165 | + | ||
166 | + /** | ||
167 | + * @name Hardware Registers | ||
168 | + * @{ | ||
169 | + */ | ||
170 | + | ||
171 | + uint32_t dramcom[AW_R40_DRAMCOM_REGS_NUM]; /**< DRAMCOM registers */ | ||
172 | + uint32_t dramctl[AW_R40_DRAMCTL_REGS_NUM]; /**< DRAMCTL registers */ | ||
173 | + uint32_t dramphy[AW_R40_DRAMPHY_REGS_NUM] ;/**< DRAMPHY registers */ | ||
174 | + | ||
175 | + /** @} */ | ||
176 | + | ||
177 | +}; | ||
178 | + | ||
179 | +#endif /* HW_MISC_ALLWINNER_R40_DRAMC_H */ | ||
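The *_REGS_NUM constants above size the backing arrays as one uint32_t slot per word offset, and the MMIO handlers in allwinner-r40-dramc.c turn a byte offset into an index with REG_INDEX(). A couple of worked values, for illustration only:

    /* Not part of the patch -- just the index arithmetic the handlers use: */
    #define REG_INDEX(offset)  (offset / sizeof(uint32_t))   /* as defined in allwinner-r40-dramc.c */
    /* REG_DRAMCTL_PGSR (byte offset 0x10)  ->  dramctl[0x10 / 4] == dramctl[4] */
    /* AW_R40_DRAMCOM_REGS_NUM              ==  0x804 / 4       == 513 slots    */

The row/bank/column widths used later for size auto-detection add up the same way: 14 row + 3 bank + 13 column bits address 2^30 bytes, i.e. the 1 GiB configuration.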
180 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
181 | index XXXXXXX..XXXXXXX 100644 | ||
182 | --- a/hw/arm/allwinner-r40.c | ||
183 | +++ b/hw/arm/allwinner-r40.c | ||
184 | @@ -XXX,XX +XXX,XX @@ | ||
185 | #include "hw/loader.h" | ||
186 | #include "sysemu/sysemu.h" | ||
187 | #include "hw/arm/allwinner-r40.h" | ||
188 | +#include "hw/misc/allwinner-r40-dramc.h" | ||
189 | |||
190 | /* Memory map */ | ||
191 | const hwaddr allwinner_r40_memmap[] = { | ||
192 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { | ||
193 | [AW_R40_DEV_UART6] = 0x01c29800, | ||
194 | [AW_R40_DEV_UART7] = 0x01c29c00, | ||
195 | [AW_R40_DEV_TWI0] = 0x01c2ac00, | ||
196 | + [AW_R40_DEV_DRAMCOM] = 0x01c62000, | ||
197 | + [AW_R40_DEV_DRAMCTL] = 0x01c63000, | ||
198 | + [AW_R40_DEV_DRAMPHY] = 0x01c65000, | ||
199 | [AW_R40_DEV_GIC_DIST] = 0x01c81000, | ||
200 | [AW_R40_DEV_GIC_CPU] = 0x01c82000, | ||
201 | [AW_R40_DEV_GIC_HYP] = 0x01c84000, | ||
202 | @@ -XXX,XX +XXX,XX @@ static struct AwR40Unimplemented r40_unimplemented[] = { | ||
203 | { "gpu", 0x01c40000, 64 * KiB }, | ||
204 | { "gmac", 0x01c50000, 64 * KiB }, | ||
205 | { "hstmr", 0x01c60000, 4 * KiB }, | ||
206 | - { "dram-com", 0x01c62000, 4 * KiB }, | ||
207 | - { "dram-ctl", 0x01c63000, 4 * KiB }, | ||
208 | { "tcon-top", 0x01c70000, 4 * KiB }, | ||
209 | { "lcd0", 0x01c71000, 4 * KiB }, | ||
210 | { "lcd1", 0x01c72000, 4 * KiB }, | ||
211 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) | ||
212 | } | ||
213 | |||
214 | object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); | ||
215 | + | ||
216 | + object_initialize_child(obj, "dramc", &s->dramc, TYPE_AW_R40_DRAMC); | ||
217 | + object_property_add_alias(obj, "ram-addr", OBJECT(&s->dramc), | ||
218 | + "ram-addr"); | ||
219 | + object_property_add_alias(obj, "ram-size", OBJECT(&s->dramc), | ||
220 | + "ram-size"); | ||
25 | } | 221 | } |
26 | 222 | ||
27 | -static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, | 223 | static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
28 | - uint64_t value) | 224 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
29 | -{ | 225 | sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, |
30 | - /* Invalidate all (TLBIALL) */ | 226 | qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_TWI0)); |
31 | - ARMCPU *cpu = arm_env_get_cpu(env); | 227 | |
32 | - | 228 | + /* DRAMC */ |
33 | - tlb_flush(CPU(cpu)); | 229 | + sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal); |
34 | -} | 230 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 0, |
35 | - | 231 | + s->memmap[AW_R40_DEV_DRAMCOM]); |
36 | -static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, | 232 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 1, |
37 | - uint64_t value) | 233 | + s->memmap[AW_R40_DEV_DRAMCTL]); |
38 | -{ | 234 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, |
39 | - /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ | 235 | + s->memmap[AW_R40_DEV_DRAMPHY]); |
40 | - ARMCPU *cpu = arm_env_get_cpu(env); | 236 | + |
41 | - | 237 | /* Unimplemented devices */ |
42 | - tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); | 238 | for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { |
43 | -} | 239 | create_unimplemented_device(r40_unimplemented[i].device_name, |
44 | - | 240 | diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c |
45 | -static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, | 241 | index XXXXXXX..XXXXXXX 100644 |
46 | - uint64_t value) | 242 | --- a/hw/arm/bananapi_m2u.c |
47 | -{ | 243 | +++ b/hw/arm/bananapi_m2u.c |
48 | - /* Invalidate by ASID (TLBIASID) */ | 244 | @@ -XXX,XX +XXX,XX @@ static void bpim2u_init(MachineState *machine) |
49 | - ARMCPU *cpu = arm_env_get_cpu(env); | 245 | object_property_set_int(OBJECT(r40), "clk1-freq", 24 * 1000 * 1000, |
50 | - | 246 | &error_abort); |
51 | - tlb_flush(CPU(cpu)); | 247 | |
52 | -} | 248 | + /* DRAMC */ |
53 | - | 249 | + r40->ram_size = machine->ram_size / MiB; |
54 | -static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, | 250 | + object_property_set_uint(OBJECT(r40), "ram-addr", |
55 | - uint64_t value) | 251 | + r40->memmap[AW_R40_DEV_SDRAM], &error_abort); |
56 | -{ | 252 | + object_property_set_int(OBJECT(r40), "ram-size", |
57 | - /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ | 253 | + r40->ram_size, &error_abort); |
58 | - ARMCPU *cpu = arm_env_get_cpu(env); | 254 | + |
59 | - | 255 | /* Mark R40 object realized */ |
60 | - tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); | 256 | qdev_realize(DEVICE(r40), NULL, &error_abort); |
61 | -} | 257 | |
62 | - | 258 | diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c |
63 | /* IS variants of TLB operations must affect all cores */ | 259 | new file mode 100644 |
64 | static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 260 | index XXXXXXX..XXXXXXX |
65 | uint64_t value) | 261 | --- /dev/null |
66 | @@ -XXX,XX +XXX,XX @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 262 | +++ b/hw/misc/allwinner-r40-dramc.c |
67 | tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); | 263 | @@ -XXX,XX +XXX,XX @@ |
68 | } | ||
69 | |||
70 | +/* | 264 | +/* |
71 | + * Non-IS variants of TLB operations are upgraded to | 265 | + * Allwinner R40 SDRAM Controller emulation |
72 | + * IS versions if we are at NS EL1 and HCR_EL2.FB is set to | 266 | + * |
73 | + * force broadcast of these operations. | 267 | + * CCopyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> |
268 | + * | ||
269 | + * This program is free software: you can redistribute it and/or modify | ||
270 | + * it under the terms of the GNU General Public License as published by | ||
271 | + * the Free Software Foundation, either version 2 of the License, or | ||
272 | + * (at your option) any later version. | ||
273 | + * | ||
274 | + * This program is distributed in the hope that it will be useful, | ||
275 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
276 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
277 | + * GNU General Public License for more details. | ||
278 | + * | ||
279 | + * You should have received a copy of the GNU General Public License | ||
280 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
74 | + */ | 281 | + */ |
75 | +static bool tlb_force_broadcast(CPUARMState *env) | 282 | + |
76 | +{ | 283 | +#include "qemu/osdep.h" |
77 | + return (env->cp15.hcr_el2 & HCR_FB) && | 284 | +#include "qemu/units.h" |
78 | + arm_current_el(env) == 1 && arm_is_secure_below_el3(env); | 285 | +#include "qemu/error-report.h" |
79 | +} | 286 | +#include "hw/sysbus.h" |
80 | + | 287 | +#include "migration/vmstate.h" |
81 | +static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, | 288 | +#include "qemu/log.h" |
82 | + uint64_t value) | 289 | +#include "qemu/module.h" |
83 | +{ | 290 | +#include "exec/address-spaces.h" |
84 | + /* Invalidate all (TLBIALL) */ | 291 | +#include "hw/qdev-properties.h" |
85 | + ARMCPU *cpu = arm_env_get_cpu(env); | 292 | +#include "qapi/error.h" |
86 | + | 293 | +#include "qemu/bitops.h" |
87 | + if (tlb_force_broadcast(env)) { | 294 | +#include "hw/misc/allwinner-r40-dramc.h" |
88 | + tlbiall_is_write(env, NULL, value); | 295 | +#include "trace.h" |
296 | + | ||
297 | +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) | ||
298 | + | ||
299 | +/* DRAMCOM register offsets */ | ||
300 | +enum { | ||
301 | + REG_DRAMCOM_CR = 0x0000, /* Control Register */ | ||
302 | +}; | ||
303 | + | ||
304 | +/* DRAMCOMM register flags */ | ||
305 | +enum { | ||
306 | + REG_DRAMCOM_CR_DUAL_RANK = (1 << 0), | ||
307 | +}; | ||
308 | + | ||
309 | +/* DRAMCTL register offsets */ | ||
310 | +enum { | ||
311 | + REG_DRAMCTL_PIR = 0x0000, /* PHY Initialization Register */ | ||
312 | + REG_DRAMCTL_PGSR = 0x0010, /* PHY General Status Register */ | ||
313 | + REG_DRAMCTL_STATR = 0x0018, /* Status Register */ | ||
314 | + REG_DRAMCTL_PGCR = 0x0100, /* PHY general configuration registers */ | ||
315 | +}; | ||
316 | + | ||
317 | +/* DRAMCTL register flags */ | ||
318 | +enum { | ||
319 | + REG_DRAMCTL_PGSR_INITDONE = (1 << 0), | ||
320 | + REG_DRAMCTL_PGSR_READ_TIMEOUT = (1 << 13), | ||
321 | + REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT = (1 << 25), | ||
322 | +}; | ||
323 | + | ||
324 | +enum { | ||
325 | + REG_DRAMCTL_STATR_ACTIVE = (1 << 0), | ||
326 | +}; | ||
327 | + | ||
328 | +#define DRAM_MAX_ROW_BITS 16 | ||
329 | +#define DRAM_MAX_COL_BITS 13 /* 8192 */ | ||
330 | +#define DRAM_MAX_BANK 3 | ||
331 | + | ||
332 | +static uint64_t dram_autodetect_cells[DRAM_MAX_ROW_BITS] | ||
333 | + [DRAM_MAX_BANK] | ||
334 | + [DRAM_MAX_COL_BITS]; | ||
335 | +struct VirtualDDRChip { | ||
336 | + uint32_t ram_size; | ||
337 | + uint8_t bank_bits; | ||
338 | + uint8_t row_bits; | ||
339 | + uint8_t col_bits; | ||
340 | +}; | ||
341 | + | ||
342 | +/* | ||
343 | + * Only power of 2 RAM sizes from 256MiB up to 2048MiB are supported, | ||
344 | + * 2GiB memory is not supported due to dual rank feature. | ||
345 | + */ | ||
346 | +static const struct VirtualDDRChip dummy_ddr_chips[] = { | ||
347 | + { | ||
348 | + .ram_size = 256, | ||
349 | + .bank_bits = 3, | ||
350 | + .row_bits = 12, | ||
351 | + .col_bits = 13, | ||
352 | + }, { | ||
353 | + .ram_size = 512, | ||
354 | + .bank_bits = 3, | ||
355 | + .row_bits = 13, | ||
356 | + .col_bits = 13, | ||
357 | + }, { | ||
358 | + .ram_size = 1024, | ||
359 | + .bank_bits = 3, | ||
360 | + .row_bits = 14, | ||
361 | + .col_bits = 13, | ||
362 | + }, { | ||
363 | + 0 | ||
364 | + } | ||
365 | +}; | ||
366 | + | ||
367 | +static const struct VirtualDDRChip *get_match_ddr(uint32_t ram_size) | ||
368 | +{ | ||
369 | + const struct VirtualDDRChip *ddr; | ||
370 | + | ||
371 | + for (ddr = &dummy_ddr_chips[0]; ddr->ram_size; ddr++) { | ||
372 | + if (ddr->ram_size == ram_size) { | ||
373 | + return ddr; | ||
374 | + } | ||
375 | + } | ||
376 | + | ||
377 | + return NULL; | ||
378 | +} | ||
379 | + | ||
380 | +static uint64_t *address_to_autodetect_cells(AwR40DramCtlState *s, | ||
381 | + const struct VirtualDDRChip *ddr, | ||
382 | + uint32_t offset) | ||
383 | +{ | ||
384 | + int row_index = 0, bank_index = 0, col_index = 0; | ||
385 | + uint32_t row_addr, bank_addr, col_addr; | ||
386 | + | ||
387 | + row_addr = extract32(offset, s->set_col_bits + s->set_bank_bits, | ||
388 | + s->set_row_bits); | ||
389 | + bank_addr = extract32(offset, s->set_col_bits, s->set_bank_bits); | ||
390 | + col_addr = extract32(offset, 0, s->set_col_bits); | ||
391 | + | ||
392 | + for (int i = 0; i < ddr->row_bits; i++) { | ||
393 | + if (row_addr & BIT(i)) { | ||
394 | + row_index = i; | ||
395 | + } | ||
396 | + } | ||
397 | + | ||
398 | + for (int i = 0; i < ddr->bank_bits; i++) { | ||
399 | + if (bank_addr & BIT(i)) { | ||
400 | + bank_index = i; | ||
401 | + } | ||
402 | + } | ||
403 | + | ||
404 | + for (int i = 0; i < ddr->col_bits; i++) { | ||
405 | + if (col_addr & BIT(i)) { | ||
406 | + col_index = i; | ||
407 | + } | ||
408 | + } | ||
409 | + | ||
410 | + trace_allwinner_r40_dramc_offset_to_cell(offset, row_index, bank_index, | ||
411 | + col_index); | ||
412 | + return &dram_autodetect_cells[row_index][bank_index][col_index]; | ||
413 | +} | ||
414 | + | ||
415 | +static void allwinner_r40_dramc_map_rows(AwR40DramCtlState *s, uint8_t row_bits, | ||
416 | + uint8_t bank_bits, uint8_t col_bits) | ||
417 | +{ | ||
418 | + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); | ||
419 | + bool enable_detect_cells; | ||
420 | + | ||
421 | + trace_allwinner_r40_dramc_map_rows(row_bits, bank_bits, col_bits); | ||
422 | + | ||
423 | + if (!ddr) { | ||
89 | + return; | 424 | + return; |
90 | + } | 425 | + } |
91 | + | 426 | + |
92 | + tlb_flush(CPU(cpu)); | 427 | + s->set_row_bits = row_bits; |
93 | +} | 428 | + s->set_bank_bits = bank_bits; |
94 | + | 429 | + s->set_col_bits = col_bits; |
95 | +static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, | 430 | + |
96 | + uint64_t value) | 431 | + enable_detect_cells = ddr->bank_bits != bank_bits |
97 | +{ | 432 | + || ddr->row_bits != row_bits |
98 | + /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ | 433 | + || ddr->col_bits != col_bits; |
99 | + ARMCPU *cpu = arm_env_get_cpu(env); | 434 | + |
100 | + | 435 | + if (enable_detect_cells) { |
101 | + if (tlb_force_broadcast(env)) { | 436 | + trace_allwinner_r40_dramc_detect_cells_enable(); |
102 | + tlbimva_is_write(env, NULL, value); | 437 | + } else { |
438 | + trace_allwinner_r40_dramc_detect_cells_disable(); | ||
439 | + } | ||
440 | + | ||
441 | + memory_region_set_enabled(&s->detect_cells, enable_detect_cells); | ||
442 | +} | ||
443 | + | ||
444 | +static uint64_t allwinner_r40_dramcom_read(void *opaque, hwaddr offset, | ||
445 | + unsigned size) | ||
446 | +{ | ||
447 | + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
448 | + const uint32_t idx = REG_INDEX(offset); | ||
449 | + | ||
450 | + if (idx >= AW_R40_DRAMCOM_REGS_NUM) { | ||
451 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
452 | + __func__, (uint32_t)offset); | ||
453 | + return 0; | ||
454 | + } | ||
455 | + | ||
456 | + trace_allwinner_r40_dramcom_read(offset, s->dramcom[idx], size); | ||
457 | + return s->dramcom[idx]; | ||
458 | +} | ||
459 | + | ||
460 | +static void allwinner_r40_dramcom_write(void *opaque, hwaddr offset, | ||
461 | + uint64_t val, unsigned size) | ||
462 | +{ | ||
463 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
464 | + const uint32_t idx = REG_INDEX(offset); | ||
465 | + | ||
466 | + trace_allwinner_r40_dramcom_write(offset, val, size); | ||
467 | + | ||
468 | + if (idx >= AW_R40_DRAMCOM_REGS_NUM) { | ||
469 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
470 | + __func__, (uint32_t)offset); | ||
103 | + return; | 471 | + return; |
104 | + } | 472 | + } |
105 | + | 473 | + |
106 | + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); | 474 | + switch (offset) { |
107 | +} | 475 | + case REG_DRAMCOM_CR: /* Control Register */ |
108 | + | 476 | + if (!(val & REG_DRAMCOM_CR_DUAL_RANK)) { |
109 | +static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, | 477 | + allwinner_r40_dramc_map_rows(s, ((val >> 4) & 0xf) + 1, |
110 | + uint64_t value) | 478 | + ((val >> 2) & 0x1) + 2, |
111 | +{ | 479 | + (((val >> 8) & 0xf) + 3)); |
112 | + /* Invalidate by ASID (TLBIASID) */ | 480 | + } |
113 | + ARMCPU *cpu = arm_env_get_cpu(env); | 481 | + break; |
114 | + | 482 | + }; |
115 | + if (tlb_force_broadcast(env)) { | 483 | + |
116 | + tlbiasid_is_write(env, NULL, value); | 484 | + s->dramcom[idx] = (uint32_t) val; |
485 | +} | ||
486 | + | ||
487 | +static uint64_t allwinner_r40_dramctl_read(void *opaque, hwaddr offset, | ||
488 | + unsigned size) | ||
489 | +{ | ||
490 | + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
491 | + const uint32_t idx = REG_INDEX(offset); | ||
492 | + | ||
493 | + if (idx >= AW_R40_DRAMCTL_REGS_NUM) { | ||
494 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
495 | + __func__, (uint32_t)offset); | ||
496 | + return 0; | ||
497 | + } | ||
498 | + | ||
499 | + trace_allwinner_r40_dramctl_read(offset, s->dramctl[idx], size); | ||
500 | + return s->dramctl[idx]; | ||
501 | +} | ||
502 | + | ||
503 | +static void allwinner_r40_dramctl_write(void *opaque, hwaddr offset, | ||
504 | + uint64_t val, unsigned size) | ||
505 | +{ | ||
506 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
507 | + const uint32_t idx = REG_INDEX(offset); | ||
508 | + | ||
509 | + trace_allwinner_r40_dramctl_write(offset, val, size); | ||
510 | + | ||
511 | + if (idx >= AW_R40_DRAMCTL_REGS_NUM) { | ||
512 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
513 | + __func__, (uint32_t)offset); | ||
117 | + return; | 514 | + return; |
118 | + } | 515 | + } |
119 | + | 516 | + |
120 | + tlb_flush(CPU(cpu)); | 517 | + switch (offset) { |
121 | +} | 518 | + case REG_DRAMCTL_PIR: /* PHY Initialization Register */ |
122 | + | 519 | + s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |= REG_DRAMCTL_PGSR_INITDONE; |
123 | +static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, | 520 | + s->dramctl[REG_INDEX(REG_DRAMCTL_STATR)] |= REG_DRAMCTL_STATR_ACTIVE; |
124 | + uint64_t value) | 521 | + break; |
125 | +{ | 522 | + } |
126 | + /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ | 523 | + |
127 | + ARMCPU *cpu = arm_env_get_cpu(env); | 524 | + s->dramctl[idx] = (uint32_t) val; |
128 | + | 525 | +} |
129 | + if (tlb_force_broadcast(env)) { | 526 | + |
130 | + tlbimvaa_is_write(env, NULL, value); | 527 | +static uint64_t allwinner_r40_dramphy_read(void *opaque, hwaddr offset, |
528 | + unsigned size) | ||
529 | +{ | ||
530 | + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
531 | + const uint32_t idx = REG_INDEX(offset); | ||
532 | + | ||
533 | + if (idx >= AW_R40_DRAMPHY_REGS_NUM) { | ||
534 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
535 | + __func__, (uint32_t)offset); | ||
536 | + return 0; | ||
537 | + } | ||
538 | + | ||
539 | + trace_allwinner_r40_dramphy_read(offset, s->dramphy[idx], size); | ||
540 | + return s->dramphy[idx]; | ||
541 | +} | ||
542 | + | ||
543 | +static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset, | ||
544 | + uint64_t val, unsigned size) | ||
545 | +{ | ||
546 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); | ||
547 | + const uint32_t idx = REG_INDEX(offset); | ||
548 | + | ||
549 | + trace_allwinner_r40_dramphy_write(offset, val, size); | ||
550 | + | ||
551 | + if (idx >= AW_R40_DRAMPHY_REGS_NUM) { | ||
552 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", | ||
553 | + __func__, (uint32_t)offset); | ||
131 | + return; | 554 | + return; |
132 | + } | 555 | + } |
133 | + | 556 | + |
134 | + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); | 557 | + s->dramphy[idx] = (uint32_t) val; |
135 | +} | 558 | +} |
136 | + | 559 | + |
137 | static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, | 560 | +static const MemoryRegionOps allwinner_r40_dramcom_ops = { |
138 | uint64_t value) | 561 | + .read = allwinner_r40_dramcom_read, |
139 | { | 562 | + .write = allwinner_r40_dramcom_write, |
140 | @@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env, | 563 | + .endianness = DEVICE_NATIVE_ENDIAN, |
141 | * Page D4-1736 (DDI0487A.b) | 564 | + .valid = { |
142 | */ | 565 | + .min_access_size = 4, |
143 | 566 | + .max_access_size = 4, | |
144 | -static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 567 | + }, |
145 | - uint64_t value) | 568 | + .impl.min_access_size = 4, |
146 | -{ | 569 | +}; |
147 | - CPUState *cs = ENV_GET_CPU(env); | 570 | + |
148 | - | 571 | +static const MemoryRegionOps allwinner_r40_dramctl_ops = { |
149 | - if (arm_is_secure_below_el3(env)) { | 572 | + .read = allwinner_r40_dramctl_read, |
150 | - tlb_flush_by_mmuidx(cs, | 573 | + .write = allwinner_r40_dramctl_write, |
151 | - ARMMMUIdxBit_S1SE1 | | 574 | + .endianness = DEVICE_NATIVE_ENDIAN, |
152 | - ARMMMUIdxBit_S1SE0); | 575 | + .valid = { |
153 | - } else { | 576 | + .min_access_size = 4, |
154 | - tlb_flush_by_mmuidx(cs, | 577 | + .max_access_size = 4, |
155 | - ARMMMUIdxBit_S12NSE1 | | 578 | + }, |
156 | - ARMMMUIdxBit_S12NSE0); | 579 | + .impl.min_access_size = 4, |
157 | - } | 580 | +}; |
158 | -} | 581 | + |
159 | - | 582 | +static const MemoryRegionOps allwinner_r40_dramphy_ops = { |
160 | static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 583 | + .read = allwinner_r40_dramphy_read, |
161 | uint64_t value) | 584 | + .write = allwinner_r40_dramphy_write, |
162 | { | 585 | + .endianness = DEVICE_NATIVE_ENDIAN, |
163 | @@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 586 | + .valid = { |
164 | } | 587 | + .min_access_size = 4, |
165 | } | 588 | + .max_access_size = 4, |
166 | 589 | + }, | |
167 | +static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 590 | + .impl.min_access_size = 4, |
168 | + uint64_t value) | 591 | +}; |
169 | +{ | 592 | + |
170 | + CPUState *cs = ENV_GET_CPU(env); | 593 | +static uint64_t allwinner_r40_detect_read(void *opaque, hwaddr offset, |
171 | + | 594 | + unsigned size) |
172 | + if (tlb_force_broadcast(env)) { | 595 | +{ |
173 | + tlbi_aa64_vmalle1_write(env, NULL, value); | 596 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); |
174 | + return; | 597 | + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); |
175 | + } | 598 | + uint64_t data = 0; |
176 | + | 599 | + |
177 | + if (arm_is_secure_below_el3(env)) { | 600 | + if (ddr) { |
178 | + tlb_flush_by_mmuidx(cs, | 601 | + data = *address_to_autodetect_cells(s, ddr, (uint32_t)offset); |
179 | + ARMMMUIdxBit_S1SE1 | | 602 | + } |
180 | + ARMMMUIdxBit_S1SE0); | 603 | + |
181 | + } else { | 604 | + trace_allwinner_r40_dramc_detect_cell_read(offset, data); |
182 | + tlb_flush_by_mmuidx(cs, | 605 | + return data; |
183 | + ARMMMUIdxBit_S12NSE1 | | 606 | +} |
184 | + ARMMMUIdxBit_S12NSE0); | 607 | + |
185 | + } | 608 | +static void allwinner_r40_detect_write(void *opaque, hwaddr offset, |
186 | +} | 609 | + uint64_t data, unsigned size) |
187 | + | 610 | +{ |
188 | static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 611 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); |
189 | uint64_t value) | 612 | + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); |
190 | { | 613 | + |
191 | @@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 614 | + if (ddr) { |
192 | tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); | 615 | + uint64_t *cell = address_to_autodetect_cells(s, ddr, (uint32_t)offset); |
193 | } | 616 | + trace_allwinner_r40_dramc_detect_cell_write(offset, data); |
194 | 617 | + *cell = data; | |
195 | -static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 618 | + } |
196 | - uint64_t value) | 619 | +} |
197 | -{ | 620 | + |
198 | - /* Invalidate by VA, EL1&0 (AArch64 version). | 621 | +static const MemoryRegionOps allwinner_r40_detect_ops = { |
199 | - * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, | 622 | + .read = allwinner_r40_detect_read, |
200 | - * since we don't support flush-for-specific-ASID-only or | 623 | + .write = allwinner_r40_detect_write, |
201 | - * flush-last-level-only. | 624 | + .endianness = DEVICE_NATIVE_ENDIAN, |
202 | - */ | 625 | + .valid = { |
203 | - ARMCPU *cpu = arm_env_get_cpu(env); | 626 | + .min_access_size = 4, |
204 | - CPUState *cs = CPU(cpu); | 627 | + .max_access_size = 4, |
205 | - uint64_t pageaddr = sextract64(value << 12, 0, 56); | 628 | + }, |
206 | - | 629 | + .impl.min_access_size = 4, |
207 | - if (arm_is_secure_below_el3(env)) { | 630 | +}; |
208 | - tlb_flush_page_by_mmuidx(cs, pageaddr, | 631 | + |
209 | - ARMMMUIdxBit_S1SE1 | | 632 | +/* |
210 | - ARMMMUIdxBit_S1SE0); | 633 | + * mctl_r40_detect_rank_count in u-boot will write the high 1G of DDR |
211 | - } else { | 634 | + * to detect wether the board support dual_rank or not. Create a virtual memory |
212 | - tlb_flush_page_by_mmuidx(cs, pageaddr, | 635 | + * if the board's ram_size less or equal than 1G, and set read time out flag of |
213 | - ARMMMUIdxBit_S12NSE1 | | 636 | + * REG_DRAMCTL_PGSR when the user touch this high dram. |
214 | - ARMMMUIdxBit_S12NSE0); | 637 | + */ |
215 | - } | 638 | +static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset, |
216 | -} | 639 | + unsigned size) |
217 | - | 640 | +{ |
218 | static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, | 641 | + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); |
219 | uint64_t value) | 642 | + uint32_t reg; |
220 | { | 643 | + |
221 | @@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 644 | + reg = s->dramctl[REG_INDEX(REG_DRAMCTL_PGCR)]; |
222 | } | 645 | + if (reg & REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT) { /* Enable read time out */ |
223 | } | 646 | + /* |
224 | 647 | + * this driver only support one rank, mark READ_TIMEOUT when try | |
225 | +static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, | 648 | + * read the second rank. |
226 | + uint64_t value) | 649 | + */ |
227 | +{ | 650 | + s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |
228 | + /* Invalidate by VA, EL1&0 (AArch64 version). | 651 | + |= REG_DRAMCTL_PGSR_READ_TIMEOUT; |
229 | + * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, | 652 | + } |
230 | + * since we don't support flush-for-specific-ASID-only or | 653 | + |
231 | + * flush-last-level-only. | 654 | + return 0; |
655 | +} | ||
656 | + | ||
657 | +static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = { | ||
658 | + .read = allwinner_r40_dualrank_detect_read, | ||
659 | + .endianness = DEVICE_NATIVE_ENDIAN, | ||
660 | + .valid = { | ||
661 | + .min_access_size = 4, | ||
662 | + .max_access_size = 4, | ||
663 | + }, | ||
664 | + .impl.min_access_size = 4, | ||
665 | +}; | ||
666 | + | ||
667 | +static void allwinner_r40_dramc_reset(DeviceState *dev) | ||
668 | +{ | ||
669 | + AwR40DramCtlState *s = AW_R40_DRAMC(dev); | ||
670 | + | ||
671 | + /* Set default values for registers */ | ||
672 | + memset(&s->dramcom, 0, sizeof(s->dramcom)); | ||
673 | + memset(&s->dramctl, 0, sizeof(s->dramctl)); | ||
674 | + memset(&s->dramphy, 0, sizeof(s->dramphy)); | ||
675 | +} | ||
676 | + | ||
677 | +static void allwinner_r40_dramc_realize(DeviceState *dev, Error **errp) | ||
678 | +{ | ||
679 | + AwR40DramCtlState *s = AW_R40_DRAMC(dev); | ||
680 | + | ||
681 | + if (!get_match_ddr(s->ram_size)) { | ||
682 | + error_report("%s: ram-size %u MiB is not supported", | ||
683 | + __func__, s->ram_size); | ||
684 | + exit(1); | ||
685 | + } | ||
686 | + | ||
687 | + /* detect_cells */ | ||
688 | + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s), 3, s->ram_addr, 10); | ||
689 | + memory_region_set_enabled(&s->detect_cells, false); | ||
690 | + | ||
691 | + /* | ||
692 | + * We only support DRAM size up to 1G now, so prepare a high memory page | ||
693 | + * after 1G for dualrank detect. index = 4 | ||
232 | + */ | 694 | + */ |
233 | + ARMCPU *cpu = arm_env_get_cpu(env); | 695 | + memory_region_init_io(&s->dram_high, OBJECT(s), |
234 | + CPUState *cs = CPU(cpu); | 696 | + &allwinner_r40_dualrank_detect_ops, s, |
235 | + uint64_t pageaddr = sextract64(value << 12, 0, 56); | 697 | + "DRAMHIGH", KiB); |
236 | + | 698 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->dram_high); |
237 | + if (tlb_force_broadcast(env)) { | 699 | + sysbus_mmio_map(SYS_BUS_DEVICE(s), 4, s->ram_addr + GiB); |
238 | + tlbi_aa64_vae1is_write(env, NULL, value); | 700 | +} |
239 | + return; | 701 | + |
240 | + } | 702 | +static void allwinner_r40_dramc_init(Object *obj) |
241 | + | 703 | +{ |
242 | + if (arm_is_secure_below_el3(env)) { | 704 | + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); |
243 | + tlb_flush_page_by_mmuidx(cs, pageaddr, | 705 | + AwR40DramCtlState *s = AW_R40_DRAMC(obj); |
244 | + ARMMMUIdxBit_S1SE1 | | 706 | + |
245 | + ARMMMUIdxBit_S1SE0); | 707 | + /* DRAMCOM registers, index 0 */ |
246 | + } else { | 708 | + memory_region_init_io(&s->dramcom_iomem, OBJECT(s), |
247 | + tlb_flush_page_by_mmuidx(cs, pageaddr, | 709 | + &allwinner_r40_dramcom_ops, s, |
248 | + ARMMMUIdxBit_S12NSE1 | | 710 | + "DRAMCOM", 4 * KiB); |
249 | + ARMMMUIdxBit_S12NSE0); | 711 | + sysbus_init_mmio(sbd, &s->dramcom_iomem); |
250 | + } | 712 | + |
251 | +} | 713 | + /* DRAMCTL registers, index 1 */ |
252 | + | 714 | + memory_region_init_io(&s->dramctl_iomem, OBJECT(s), |
253 | static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, | 715 | + &allwinner_r40_dramctl_ops, s, |
254 | uint64_t value) | 716 | + "DRAMCTL", 4 * KiB); |
255 | { | 717 | + sysbus_init_mmio(sbd, &s->dramctl_iomem); |
718 | + | ||
719 | + /* DRAMPHY registers. index 2 */ | ||
720 | + memory_region_init_io(&s->dramphy_iomem, OBJECT(s), | ||
721 | + &allwinner_r40_dramphy_ops, s, | ||
722 | + "DRAMPHY", 4 * KiB); | ||
723 | + sysbus_init_mmio(sbd, &s->dramphy_iomem); | ||
724 | + | ||
725 | + /* R40 supports a max of 2G memory but we only support up to 1G now. index 3 */ | ||
726 | + memory_region_init_io(&s->detect_cells, OBJECT(s), | ||
727 | + &allwinner_r40_detect_ops, s, | ||
728 | + "DRAMCELLS", 1 * GiB); | ||
729 | + sysbus_init_mmio(sbd, &s->detect_cells); | ||
730 | +} | ||
731 | + | ||
732 | +static Property allwinner_r40_dramc_properties[] = { | ||
733 | + DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0), | ||
734 | + DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */ | ||
735 | + DEFINE_PROP_END_OF_LIST() | ||
736 | +}; | ||
737 | + | ||
738 | +static const VMStateDescription allwinner_r40_dramc_vmstate = { | ||
739 | + .name = "allwinner-r40-dramc", | ||
740 | + .version_id = 1, | ||
741 | + .minimum_version_id = 1, | ||
742 | + .fields = (VMStateField[]) { | ||
743 | + VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState, | ||
744 | + AW_R40_DRAMCOM_REGS_NUM), | ||
745 | + VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState, | ||
746 | + AW_R40_DRAMCTL_REGS_NUM), | ||
747 | + VMSTATE_UINT32_ARRAY(dramphy, AwR40DramCtlState, | ||
748 | + AW_R40_DRAMPHY_REGS_NUM), | ||
749 | + VMSTATE_END_OF_LIST() | ||
750 | + } | ||
751 | +}; | ||
752 | + | ||
753 | +static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data) | ||
754 | +{ | ||
755 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
756 | + | ||
757 | + dc->reset = allwinner_r40_dramc_reset; | ||
758 | + dc->vmsd = &allwinner_r40_dramc_vmstate; | ||
759 | + dc->realize = allwinner_r40_dramc_realize; | ||
760 | + device_class_set_props(dc, allwinner_r40_dramc_properties); | ||
761 | +} | ||
762 | + | ||
763 | +static const TypeInfo allwinner_r40_dramc_info = { | ||
764 | + .name = TYPE_AW_R40_DRAMC, | ||
765 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
766 | + .instance_init = allwinner_r40_dramc_init, | ||
767 | + .instance_size = sizeof(AwR40DramCtlState), | ||
768 | + .class_init = allwinner_r40_dramc_class_init, | ||
769 | +}; | ||
770 | + | ||
771 | +static void allwinner_r40_dramc_register(void) | ||
772 | +{ | ||
773 | + type_register_static(&allwinner_r40_dramc_info); | ||
774 | +} | ||
775 | + | ||
776 | +type_init(allwinner_r40_dramc_register) | ||
777 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
778 | index XXXXXXX..XXXXXXX 100644 | ||
779 | --- a/hw/misc/meson.build | ||
780 | +++ b/hw/misc/meson.build | ||
781 | @@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c | ||
782 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) | ||
783 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) | ||
784 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c')) | ||
785 | +softmmu_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-dramc.c')) | ||
786 | softmmu_ss.add(when: 'CONFIG_AXP2XX_PMU', if_true: files('axp2xx.c')) | ||
787 | softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) | ||
788 | softmmu_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) | ||
789 | diff --git a/hw/misc/trace-events b/hw/misc/trace-events | ||
790 | index XXXXXXX..XXXXXXX 100644 | ||
791 | --- a/hw/misc/trace-events | ||
792 | +++ b/hw/misc/trace-events | ||
793 | @@ -XXX,XX +XXX,XX @@ allwinner_h3_dramctl_write(uint64_t offset, uint64_t data, unsigned size) "Write | ||
794 | allwinner_h3_dramphy_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
795 | allwinner_h3_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
796 | |||
797 | +# allwinner-r40-dramc.c | ||
798 | +allwinner_r40_dramc_detect_cells_disable(void) "Disable detect cells" | ||
799 | +allwinner_r40_dramc_detect_cells_enable(void) "Enable detect cells" | ||
800 | +allwinner_r40_dramc_map_rows(uint8_t row_bits, uint8_t bank_bits, uint8_t col_bits) "DRAM layout: row_bits %d, bank_bits %d, col_bits %d" | ||
801 | +allwinner_r40_dramc_offset_to_cell(uint64_t offset, int row, int bank, int col) "offset 0x%" PRIx64 " row %d bank %d col %d" | ||
802 | +allwinner_r40_dramc_detect_cell_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 "" | ||
803 | +allwinner_r40_dramc_detect_cell_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 "" | ||
804 | +allwinner_r40_dramcom_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
805 | +allwinner_r40_dramcom_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
806 | +allwinner_r40_dramctl_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
807 | +allwinner_r40_dramctl_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
808 | +allwinner_r40_dramphy_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
809 | +allwinner_r40_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
810 | + | ||
811 | # allwinner-sid.c | ||
812 | allwinner_sid_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
813 | allwinner_sid_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
256 | -- | 814 | -- |
257 | 2.19.1 | 815 | 2.34.1 |
258 | |||
259 | diff view generated by jsdifflib |
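A rough usage sketch (illustrative only, not part of the patch above): the new DRAM controller exposes "ram-addr" and "ram-size" (in MiB) as qdev properties, which the R40 SoC code aliases to the dramc child, so board code is expected to set them before realize. The base address and size below are assumed example values.

    /* Illustrative sketch only: configure the R40 dramc through its
     * "ram-addr" / "ram-size" (MiB) properties before realizing the SoC.
     * 0x40000000 and 1024 MiB are example values, not mandated by the patch.
     */
    object_property_set_uint(OBJECT(r40), "ram-addr", 0x40000000, &error_abort);
    object_property_set_uint(OBJECT(r40), "ram-size", 1024, &error_abort);
    qdev_realize(DEVICE(r40), NULL, &error_abort);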
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Move mla_op and mls_op expanders from translate-a64.c. | 3 | The A64's SD registers are similar to the H3's, but it introduces a new |
4 | 4 | register named SAMP_DL_REG, located at 0x144. The dma descriptor buffer size |
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | of mmc2 is only 8K while the other mmc controllers have 64K. |
6 | Message-id: 20181011205206.3552-16-richard.henderson@linaro.org | 6 | |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Also fix allwinner-r40's mmc controller type. |
8 | |||
9 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 11 | --- |
10 | target/arm/translate.h | 2 + | 12 | include/hw/sd/allwinner-sdhost.h | 9 ++++ |
11 | target/arm/translate-a64.c | 106 ----------------------------- | 13 | hw/arm/allwinner-r40.c | 2 +- |
12 | target/arm/translate.c | 134 ++++++++++++++++++++++++++++++++----- | 14 | hw/sd/allwinner-sdhost.c | 72 ++++++++++++++++++++++++++++++-- |
13 | 3 files changed, 120 insertions(+), 122 deletions(-) | 15 | 3 files changed, 79 insertions(+), 4 deletions(-) |
14 | 16 | ||
15 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 17 | diff --git a/include/hw/sd/allwinner-sdhost.h b/include/hw/sd/allwinner-sdhost.h |
16 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate.h | 19 | --- a/include/hw/sd/allwinner-sdhost.h |
18 | +++ b/target/arm/translate.h | 20 | +++ b/include/hw/sd/allwinner-sdhost.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void) | 21 | @@ -XXX,XX +XXX,XX @@ |
20 | extern const GVecGen3 bsl_op; | 22 | /** Allwinner sun5i family and newer (A13, H2+, H3, etc) */ |
21 | extern const GVecGen3 bit_op; | 23 | #define TYPE_AW_SDHOST_SUN5I TYPE_AW_SDHOST "-sun5i" |
22 | extern const GVecGen3 bif_op; | 24 | |
23 | +extern const GVecGen3 mla_op[4]; | 25 | +/** Allwinner sun50i-a64 */ |
24 | +extern const GVecGen3 mls_op[4]; | 26 | +#define TYPE_AW_SDHOST_SUN50I_A64 TYPE_AW_SDHOST "-sun50i-a64" |
25 | extern const GVecGen2i ssra_op[4]; | 27 | + |
26 | extern const GVecGen2i usra_op[4]; | 28 | +/** Allwinner sun50i-a64 emmc */ |
27 | extern const GVecGen2i sri_op[4]; | 29 | +#define TYPE_AW_SDHOST_SUN50I_A64_EMMC TYPE_AW_SDHOST "-sun50i-a64-emmc" |
28 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 30 | + |
31 | /** @} */ | ||
32 | |||
33 | /** | ||
34 | @@ -XXX,XX +XXX,XX @@ struct AwSdHostState { | ||
35 | uint32_t startbit_detect; /**< eMMC DDR Start Bit Detection Control */ | ||
36 | uint32_t response_crc; /**< Response CRC */ | ||
37 | uint32_t data_crc[8]; /**< Data CRC */ | ||
38 | + uint32_t sample_delay; /**< Sample delay control */ | ||
39 | uint32_t status_crc; /**< Status CRC */ | ||
40 | |||
41 | /** @} */ | ||
42 | @@ -XXX,XX +XXX,XX @@ struct AwSdHostClass { | ||
43 | size_t max_desc_size; | ||
44 | bool is_sun4i; | ||
45 | |||
46 | + /** does the IP block support autocalibration? */ | ||
47 | + bool can_calibrate; | ||
48 | }; | ||
49 | |||
50 | #endif /* HW_SD_ALLWINNER_SDHOST_H */ | ||
51 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | 52 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/translate-a64.c | 53 | --- a/hw/arm/allwinner-r40.c |
31 | +++ b/target/arm/translate-a64.c | 54 | +++ b/hw/arm/allwinner-r40.c |
32 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) | 55 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) |
33 | } | 56 | |
34 | } | 57 | for (int i = 0; i < AW_R40_NUM_MMCS; i++) { |
35 | 58 | object_initialize_child(obj, mmc_names[i], &s->mmc[i], | |
36 | -static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 59 | - TYPE_AW_SDHOST_SUN5I); |
37 | -{ | 60 | + TYPE_AW_SDHOST_SUN50I_A64); |
38 | - gen_helper_neon_mul_u8(a, a, b); | 61 | } |
39 | - gen_helper_neon_add_u8(d, d, a); | 62 | |
40 | -} | 63 | object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); |
41 | - | 64 | diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c |
42 | -static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 65 | index XXXXXXX..XXXXXXX 100644 |
43 | -{ | 66 | --- a/hw/sd/allwinner-sdhost.c |
44 | - gen_helper_neon_mul_u16(a, a, b); | 67 | +++ b/hw/sd/allwinner-sdhost.c |
45 | - gen_helper_neon_add_u16(d, d, a); | 68 | @@ -XXX,XX +XXX,XX @@ enum { |
46 | -} | 69 | REG_SD_DATA1_CRC = 0x12C, /* CRC Data 1 from card/eMMC */ |
47 | - | 70 | REG_SD_DATA0_CRC = 0x130, /* CRC Data 0 from card/eMMC */ |
48 | -static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 71 | REG_SD_CRC_STA = 0x134, /* CRC status from card/eMMC during write */ |
49 | -{ | 72 | + REG_SD_SAMP_DL = 0x144, /* Sample Delay Control (sun50i-a64) */ |
50 | - tcg_gen_mul_i32(a, a, b); | 73 | REG_SD_FIFO = 0x200, /* Read/Write FIFO */ |
51 | - tcg_gen_add_i32(d, d, a); | 74 | }; |
52 | -} | 75 | |
53 | - | 76 | @@ -XXX,XX +XXX,XX @@ enum { |
54 | -static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | 77 | REG_SD_RES_CRC_RST = 0x0, |
55 | -{ | 78 | REG_SD_DATA_CRC_RST = 0x0, |
56 | - tcg_gen_mul_i64(a, a, b); | 79 | REG_SD_CRC_STA_RST = 0x0, |
57 | - tcg_gen_add_i64(d, d, a); | 80 | + REG_SD_SAMPLE_DL_RST = 0x00002000, |
58 | -} | 81 | REG_SD_FIFO_RST = 0x0, |
59 | - | 82 | }; |
60 | -static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | 83 | |
61 | -{ | 84 | @@ -XXX,XX +XXX,XX @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, |
62 | - tcg_gen_mul_vec(vece, a, a, b); | ||
63 | - tcg_gen_add_vec(vece, d, d, a); | ||
64 | -} | ||
65 | - | ||
66 | -static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
67 | -{ | ||
68 | - gen_helper_neon_mul_u8(a, a, b); | ||
69 | - gen_helper_neon_sub_u8(d, d, a); | ||
70 | -} | ||
71 | - | ||
72 | -static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
73 | -{ | ||
74 | - gen_helper_neon_mul_u16(a, a, b); | ||
75 | - gen_helper_neon_sub_u16(d, d, a); | ||
76 | -} | ||
77 | - | ||
78 | -static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
79 | -{ | ||
80 | - tcg_gen_mul_i32(a, a, b); | ||
81 | - tcg_gen_sub_i32(d, d, a); | ||
82 | -} | ||
83 | - | ||
84 | -static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
85 | -{ | ||
86 | - tcg_gen_mul_i64(a, a, b); | ||
87 | - tcg_gen_sub_i64(d, d, a); | ||
88 | -} | ||
89 | - | ||
90 | -static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | ||
91 | -{ | ||
92 | - tcg_gen_mul_vec(vece, a, a, b); | ||
93 | - tcg_gen_sub_vec(vece, d, d, a); | ||
94 | -} | ||
95 | - | ||
96 | /* Integer op subgroup of C3.6.16. */ | ||
97 | static void disas_simd_3same_int(DisasContext *s, uint32_t insn) | ||
98 | { | 85 | { |
99 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) | 86 | AwSdHostState *s = AW_SDHOST(opaque); |
100 | .prefer_i64 = TCG_TARGET_REG_BITS == 64, | 87 | AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); |
101 | .vece = MO_64 }, | 88 | + bool out_of_bounds = false; |
102 | }; | 89 | uint32_t res = 0; |
103 | - static const GVecGen3 mla_op[4] = { | 90 | |
104 | - { .fni4 = gen_mla8_i32, | 91 | switch (offset) { |
105 | - .fniv = gen_mla_vec, | 92 | @@ -XXX,XX +XXX,XX @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, |
106 | - .opc = INDEX_op_mul_vec, | 93 | case REG_SD_FIFO: /* Read/Write FIFO */ |
107 | - .load_dest = true, | 94 | res = allwinner_sdhost_fifo_read(s); |
108 | - .vece = MO_8 }, | 95 | break; |
109 | - { .fni4 = gen_mla16_i32, | 96 | + case REG_SD_SAMP_DL: /* Sample Delay */ |
110 | - .fniv = gen_mla_vec, | 97 | + if (sc->can_calibrate) { |
111 | - .opc = INDEX_op_mul_vec, | 98 | + res = s->sample_delay; |
112 | - .load_dest = true, | 99 | + } else { |
113 | - .vece = MO_16 }, | 100 | + out_of_bounds = true; |
114 | - { .fni4 = gen_mla32_i32, | 101 | + } |
115 | - .fniv = gen_mla_vec, | 102 | + break; |
116 | - .opc = INDEX_op_mul_vec, | 103 | default: |
117 | - .load_dest = true, | 104 | - qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" |
118 | - .vece = MO_32 }, | 105 | - HWADDR_PRIx"\n", __func__, offset); |
119 | - { .fni8 = gen_mla64_i64, | 106 | + out_of_bounds = true; |
120 | - .fniv = gen_mla_vec, | 107 | res = 0; |
121 | - .opc = INDEX_op_mul_vec, | 108 | break; |
122 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | 109 | } |
123 | - .load_dest = true, | 110 | |
124 | - .vece = MO_64 }, | 111 | + if (out_of_bounds) { |
125 | - }; | 112 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" |
126 | - static const GVecGen3 mls_op[4] = { | 113 | + HWADDR_PRIx"\n", __func__, offset); |
127 | - { .fni4 = gen_mls8_i32, | 114 | + } |
128 | - .fniv = gen_mls_vec, | 115 | + |
129 | - .opc = INDEX_op_mul_vec, | 116 | trace_allwinner_sdhost_read(offset, res, size); |
130 | - .load_dest = true, | 117 | return res; |
131 | - .vece = MO_8 }, | 118 | } |
132 | - { .fni4 = gen_mls16_i32, | 119 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, |
133 | - .fniv = gen_mls_vec, | 120 | { |
134 | - .opc = INDEX_op_mul_vec, | 121 | AwSdHostState *s = AW_SDHOST(opaque); |
135 | - .load_dest = true, | 122 | AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); |
136 | - .vece = MO_16 }, | 123 | + bool out_of_bounds = false; |
137 | - { .fni4 = gen_mls32_i32, | 124 | |
138 | - .fniv = gen_mls_vec, | 125 | trace_allwinner_sdhost_write(offset, value, size); |
139 | - .opc = INDEX_op_mul_vec, | 126 | |
140 | - .load_dest = true, | 127 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, |
141 | - .vece = MO_32 }, | 128 | case REG_SD_DATA0_CRC: /* CRC Data 0 from card/eMMC */ |
142 | - { .fni8 = gen_mls64_i64, | 129 | case REG_SD_CRC_STA: /* CRC status from card/eMMC in write operation */ |
143 | - .fniv = gen_mls_vec, | 130 | break; |
144 | - .opc = INDEX_op_mul_vec, | 131 | + case REG_SD_SAMP_DL: /* Sample delay control */ |
145 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | 132 | + if (sc->can_calibrate) { |
146 | - .load_dest = true, | 133 | + s->sample_delay = value; |
147 | - .vece = MO_64 }, | 134 | + } else { |
148 | - }; | 135 | + out_of_bounds = true; |
149 | 136 | + } | |
150 | int is_q = extract32(insn, 30, 1); | 137 | + break; |
151 | int u = extract32(insn, 29, 1); | 138 | default: |
152 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 139 | + out_of_bounds = true; |
153 | index XXXXXXX..XXXXXXX 100644 | 140 | + break; |
154 | --- a/target/arm/translate.c | 141 | + } |
155 | +++ b/target/arm/translate.c | 142 | + |
156 | @@ -XXX,XX +XXX,XX @@ static void gen_neon_narrow_op(int op, int u, int size, | 143 | + if (out_of_bounds) { |
157 | #define NEON_3R_VABA 15 | 144 | qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" |
158 | #define NEON_3R_VADD_VSUB 16 | 145 | HWADDR_PRIx"\n", __func__, offset); |
159 | #define NEON_3R_VTST_VCEQ 17 | 146 | - break; |
160 | -#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */ | 147 | } |
161 | +#define NEON_3R_VML 18 /* VMLA, VMLS */ | 148 | } |
162 | #define NEON_3R_VMUL 19 | 149 | |
163 | #define NEON_3R_VPMAX 20 | 150 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_allwinner_sdhost = { |
164 | #define NEON_3R_VPMIN 21 | 151 | VMSTATE_UINT32(response_crc, AwSdHostState), |
165 | @@ -XXX,XX +XXX,XX @@ const GVecGen2i sli_op[4] = { | 152 | VMSTATE_UINT32_ARRAY(data_crc, AwSdHostState, 8), |
166 | .vece = MO_64 }, | 153 | VMSTATE_UINT32(status_crc, AwSdHostState), |
167 | }; | 154 | + VMSTATE_UINT32(sample_delay, AwSdHostState), |
168 | 155 | VMSTATE_END_OF_LIST() | |
169 | +static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 156 | } |
157 | }; | ||
158 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_realize(DeviceState *dev, Error **errp) | ||
159 | static void allwinner_sdhost_reset(DeviceState *dev) | ||
160 | { | ||
161 | AwSdHostState *s = AW_SDHOST(dev); | ||
162 | + AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); | ||
163 | |||
164 | s->global_ctl = REG_SD_GCTL_RST; | ||
165 | s->clock_ctl = REG_SD_CKCR_RST; | ||
166 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_reset(DeviceState *dev) | ||
167 | } | ||
168 | |||
169 | s->status_crc = REG_SD_CRC_STA_RST; | ||
170 | + | ||
171 | + if (sc->can_calibrate) { | ||
172 | + s->sample_delay = REG_SD_SAMPLE_DL_RST; | ||
173 | + } | ||
174 | } | ||
175 | |||
176 | static void allwinner_sdhost_bus_class_init(ObjectClass *klass, void *data) | ||
177 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) | ||
178 | AwSdHostClass *sc = AW_SDHOST_CLASS(klass); | ||
179 | sc->max_desc_size = 8 * KiB; | ||
180 | sc->is_sun4i = true; | ||
181 | + sc->can_calibrate = false; | ||
182 | } | ||
183 | |||
184 | static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) | ||
185 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) | ||
186 | AwSdHostClass *sc = AW_SDHOST_CLASS(klass); | ||
187 | sc->max_desc_size = 64 * KiB; | ||
188 | sc->is_sun4i = false; | ||
189 | + sc->can_calibrate = false; | ||
190 | +} | ||
191 | + | ||
192 | +static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass, | ||
193 | + void *data) | ||
170 | +{ | 194 | +{ |
171 | + gen_helper_neon_mul_u8(a, a, b); | 195 | + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); |
172 | + gen_helper_neon_add_u8(d, d, a); | 196 | + sc->max_desc_size = 64 * KiB; |
197 | + sc->is_sun4i = false; | ||
198 | + sc->can_calibrate = true; | ||
173 | +} | 199 | +} |
174 | + | 200 | + |
175 | +static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 201 | +static void allwinner_sdhost_sun50i_a64_emmc_class_init(ObjectClass *klass, |
202 | + void *data) | ||
176 | +{ | 203 | +{ |
177 | + gen_helper_neon_mul_u8(a, a, b); | 204 | + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); |
178 | + gen_helper_neon_sub_u8(d, d, a); | 205 | + sc->max_desc_size = 8 * KiB; |
179 | +} | 206 | + sc->is_sun4i = false; |
180 | + | 207 | + sc->can_calibrate = true; |
181 | +static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 208 | } |
182 | +{ | 209 | |
183 | + gen_helper_neon_mul_u16(a, a, b); | 210 | static const TypeInfo allwinner_sdhost_info = { |
184 | + gen_helper_neon_add_u16(d, d, a); | 211 | @@ -XXX,XX +XXX,XX @@ static const TypeInfo allwinner_sdhost_sun5i_info = { |
185 | +} | 212 | .class_init = allwinner_sdhost_sun5i_class_init, |
186 | + | 213 | }; |
187 | +static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 214 | |
188 | +{ | 215 | +static const TypeInfo allwinner_sdhost_sun50i_a64_info = { |
189 | + gen_helper_neon_mul_u16(a, a, b); | 216 | + .name = TYPE_AW_SDHOST_SUN50I_A64, |
190 | + gen_helper_neon_sub_u16(d, d, a); | 217 | + .parent = TYPE_AW_SDHOST, |
191 | +} | 218 | + .class_init = allwinner_sdhost_sun50i_a64_class_init, |
192 | + | ||
193 | +static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
194 | +{ | ||
195 | + tcg_gen_mul_i32(a, a, b); | ||
196 | + tcg_gen_add_i32(d, d, a); | ||
197 | +} | ||
198 | + | ||
199 | +static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
200 | +{ | ||
201 | + tcg_gen_mul_i32(a, a, b); | ||
202 | + tcg_gen_sub_i32(d, d, a); | ||
203 | +} | ||
204 | + | ||
205 | +static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
206 | +{ | ||
207 | + tcg_gen_mul_i64(a, a, b); | ||
208 | + tcg_gen_add_i64(d, d, a); | ||
209 | +} | ||
210 | + | ||
211 | +static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
212 | +{ | ||
213 | + tcg_gen_mul_i64(a, a, b); | ||
214 | + tcg_gen_sub_i64(d, d, a); | ||
215 | +} | ||
216 | + | ||
217 | +static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | ||
218 | +{ | ||
219 | + tcg_gen_mul_vec(vece, a, a, b); | ||
220 | + tcg_gen_add_vec(vece, d, d, a); | ||
221 | +} | ||
222 | + | ||
223 | +static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | ||
224 | +{ | ||
225 | + tcg_gen_mul_vec(vece, a, a, b); | ||
226 | + tcg_gen_sub_vec(vece, d, d, a); | ||
227 | +} | ||
228 | + | ||
229 | +/* Note that while NEON does not support VMLA and VMLS as 64-bit ops, | ||
230 | + * these tables are shared with AArch64 which does support them. | ||
231 | + */ | ||
232 | +const GVecGen3 mla_op[4] = { | ||
233 | + { .fni4 = gen_mla8_i32, | ||
234 | + .fniv = gen_mla_vec, | ||
235 | + .opc = INDEX_op_mul_vec, | ||
236 | + .load_dest = true, | ||
237 | + .vece = MO_8 }, | ||
238 | + { .fni4 = gen_mla16_i32, | ||
239 | + .fniv = gen_mla_vec, | ||
240 | + .opc = INDEX_op_mul_vec, | ||
241 | + .load_dest = true, | ||
242 | + .vece = MO_16 }, | ||
243 | + { .fni4 = gen_mla32_i32, | ||
244 | + .fniv = gen_mla_vec, | ||
245 | + .opc = INDEX_op_mul_vec, | ||
246 | + .load_dest = true, | ||
247 | + .vece = MO_32 }, | ||
248 | + { .fni8 = gen_mla64_i64, | ||
249 | + .fniv = gen_mla_vec, | ||
250 | + .opc = INDEX_op_mul_vec, | ||
251 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
252 | + .load_dest = true, | ||
253 | + .vece = MO_64 }, | ||
254 | +}; | 219 | +}; |
255 | + | 220 | + |
256 | +const GVecGen3 mls_op[4] = { | 221 | +static const TypeInfo allwinner_sdhost_sun50i_a64_emmc_info = { |
257 | + { .fni4 = gen_mls8_i32, | 222 | + .name = TYPE_AW_SDHOST_SUN50I_A64_EMMC, |
258 | + .fniv = gen_mls_vec, | 223 | + .parent = TYPE_AW_SDHOST, |
259 | + .opc = INDEX_op_mul_vec, | 224 | + .class_init = allwinner_sdhost_sun50i_a64_emmc_class_init, |
260 | + .load_dest = true, | ||
261 | + .vece = MO_8 }, | ||
262 | + { .fni4 = gen_mls16_i32, | ||
263 | + .fniv = gen_mls_vec, | ||
264 | + .opc = INDEX_op_mul_vec, | ||
265 | + .load_dest = true, | ||
266 | + .vece = MO_16 }, | ||
267 | + { .fni4 = gen_mls32_i32, | ||
268 | + .fniv = gen_mls_vec, | ||
269 | + .opc = INDEX_op_mul_vec, | ||
270 | + .load_dest = true, | ||
271 | + .vece = MO_32 }, | ||
272 | + { .fni8 = gen_mls64_i64, | ||
273 | + .fniv = gen_mls_vec, | ||
274 | + .opc = INDEX_op_mul_vec, | ||
275 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
276 | + .load_dest = true, | ||
277 | + .vece = MO_64 }, | ||
278 | +}; | 225 | +}; |
279 | + | 226 | + |
280 | /* Translate a NEON data processing instruction. Return nonzero if the | 227 | static const TypeInfo allwinner_sdhost_bus_info = { |
281 | instruction is invalid. | 228 | .name = TYPE_AW_SDHOST_BUS, |
282 | We process data in a mixture of 32-bit and 64-bit chunks. | 229 | .parent = TYPE_SD_BUS, |
283 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 230 | @@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_register_types(void) |
284 | return 0; | 231 | type_register_static(&allwinner_sdhost_info); |
285 | } | 232 | type_register_static(&allwinner_sdhost_sun4i_info); |
286 | break; | 233 | type_register_static(&allwinner_sdhost_sun5i_info); |
287 | + | 234 | + type_register_static(&allwinner_sdhost_sun50i_a64_info); |
288 | + case NEON_3R_VML: /* VMLA, VMLS */ | 235 | + type_register_static(&allwinner_sdhost_sun50i_a64_emmc_info); |
289 | + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, | 236 | type_register_static(&allwinner_sdhost_bus_info); |
290 | + u ? &mls_op[size] : &mla_op[size]); | 237 | } |
291 | + return 0; | 238 | |
292 | } | ||
293 | + | ||
294 | if (size == 3) { | ||
295 | /* 64-bit element instructions. */ | ||
296 | for (pass = 0; pass < (q ? 2 : 1); pass++) { | ||
297 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
298 | } | ||
299 | } | ||
300 | break; | ||
301 | - case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ | ||
302 | - switch (size) { | ||
303 | - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; | ||
304 | - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; | ||
305 | - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; | ||
306 | - default: abort(); | ||
307 | - } | ||
308 | - tcg_temp_free_i32(tmp2); | ||
309 | - tmp2 = neon_load_reg(rd, pass); | ||
310 | - if (u) { /* VMLS */ | ||
311 | - gen_neon_rsb(size, tmp, tmp2); | ||
312 | - } else { /* VMLA */ | ||
313 | - gen_neon_add(size, tmp, tmp2); | ||
314 | - } | ||
315 | - break; | ||
316 | case NEON_3R_VMUL: | ||
317 | /* VMUL.P8; other cases already eliminated. */ | ||
318 | gen_helper_neon_mul_p8(tmp, tmp, tmp2); | ||
319 | -- | 239 | -- |
320 | 2.19.1 | 240 | 2.34.1 |
321 | |||
322 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | R40 has two ethernet controllers named as emac and gmac. The emac is |
4 | Message-id: 20181011205206.3552-13-richard.henderson@linaro.org | 4 | compatibled with A10, and the GMAC is compatibled with H3. |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 5 | |
6 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 8 | --- |
8 | target/arm/translate.c | 70 +++++++++++++++++++++++++++++------------- | 9 | include/hw/arm/allwinner-r40.h | 6 ++++ |
9 | 1 file changed, 48 insertions(+), 22 deletions(-) | 10 | hw/arm/allwinner-r40.c | 50 ++++++++++++++++++++++++++++++++-- |
11 | hw/arm/bananapi_m2u.c | 3 ++ | ||
12 | 3 files changed, 57 insertions(+), 2 deletions(-) | ||
10 | 13 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 14 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
12 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 16 | --- a/include/hw/arm/allwinner-r40.h |
14 | +++ b/target/arm/translate.c | 17 | +++ b/include/hw/arm/allwinner-r40.h |
15 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 18 | @@ -XXX,XX +XXX,XX @@ |
16 | size--; | 19 | #include "hw/misc/allwinner-r40-ccu.h" |
17 | } | 20 | #include "hw/misc/allwinner-r40-dramc.h" |
18 | shift = (insn >> 16) & ((1 << (3 + size)) - 1); | 21 | #include "hw/i2c/allwinner-i2c.h" |
19 | - /* To avoid excessive duplication of ops we implement shift | 22 | +#include "hw/net/allwinner_emac.h" |
20 | - by immediate using the variable shift operations. */ | 23 | +#include "hw/net/allwinner-sun8i-emac.h" |
21 | if (op < 8) { | 24 | #include "target/arm/cpu.h" |
22 | /* Shift by immediate: | 25 | #include "sysemu/block-backend.h" |
23 | VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ | 26 | |
24 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 27 | @@ -XXX,XX +XXX,XX @@ enum { |
25 | } | 28 | AW_R40_DEV_SRAM_A2, |
26 | /* Right shifts are encoded as N - shift, where N is the | 29 | AW_R40_DEV_SRAM_A3, |
27 | element size in bits. */ | 30 | AW_R40_DEV_SRAM_A4, |
28 | - if (op <= 4) | 31 | + AW_R40_DEV_EMAC, |
29 | + if (op <= 4) { | 32 | AW_R40_DEV_MMC0, |
30 | shift = shift - (1 << (size + 3)); | 33 | AW_R40_DEV_MMC1, |
31 | + } | 34 | AW_R40_DEV_MMC2, |
35 | @@ -XXX,XX +XXX,XX @@ enum { | ||
36 | AW_R40_DEV_UART6, | ||
37 | AW_R40_DEV_UART7, | ||
38 | AW_R40_DEV_TWI0, | ||
39 | + AW_R40_DEV_GMAC, | ||
40 | AW_R40_DEV_GIC_DIST, | ||
41 | AW_R40_DEV_GIC_CPU, | ||
42 | AW_R40_DEV_GIC_HYP, | ||
43 | @@ -XXX,XX +XXX,XX @@ struct AwR40State { | ||
44 | AwR40ClockCtlState ccu; | ||
45 | AwR40DramCtlState dramc; | ||
46 | AWI2CState i2c0; | ||
47 | + AwEmacState emac; | ||
48 | + AwSun8iEmacState gmac; | ||
49 | GICState gic; | ||
50 | MemoryRegion sram_a1; | ||
51 | MemoryRegion sram_a2; | ||
52 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/hw/arm/allwinner-r40.c | ||
55 | +++ b/hw/arm/allwinner-r40.c | ||
56 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { | ||
57 | [AW_R40_DEV_SRAM_A2] = 0x00004000, | ||
58 | [AW_R40_DEV_SRAM_A3] = 0x00008000, | ||
59 | [AW_R40_DEV_SRAM_A4] = 0x0000b400, | ||
60 | + [AW_R40_DEV_EMAC] = 0x01c0b000, | ||
61 | [AW_R40_DEV_MMC0] = 0x01c0f000, | ||
62 | [AW_R40_DEV_MMC1] = 0x01c10000, | ||
63 | [AW_R40_DEV_MMC2] = 0x01c11000, | ||
64 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { | ||
65 | [AW_R40_DEV_UART6] = 0x01c29800, | ||
66 | [AW_R40_DEV_UART7] = 0x01c29c00, | ||
67 | [AW_R40_DEV_TWI0] = 0x01c2ac00, | ||
68 | + [AW_R40_DEV_GMAC] = 0x01c50000, | ||
69 | [AW_R40_DEV_DRAMCOM] = 0x01c62000, | ||
70 | [AW_R40_DEV_DRAMCTL] = 0x01c63000, | ||
71 | [AW_R40_DEV_DRAMPHY] = 0x01c65000, | ||
72 | @@ -XXX,XX +XXX,XX @@ static struct AwR40Unimplemented r40_unimplemented[] = { | ||
73 | { "spi1", 0x01c06000, 4 * KiB }, | ||
74 | { "cs0", 0x01c09000, 4 * KiB }, | ||
75 | { "keymem", 0x01c0a000, 4 * KiB }, | ||
76 | - { "emac", 0x01c0b000, 4 * KiB }, | ||
77 | { "usb0-otg", 0x01c13000, 4 * KiB }, | ||
78 | { "usb0-host", 0x01c14000, 4 * KiB }, | ||
79 | { "crypto", 0x01c15000, 4 * KiB }, | ||
80 | @@ -XXX,XX +XXX,XX @@ static struct AwR40Unimplemented r40_unimplemented[] = { | ||
81 | { "tvd2", 0x01c33000, 4 * KiB }, | ||
82 | { "tvd3", 0x01c34000, 4 * KiB }, | ||
83 | { "gpu", 0x01c40000, 64 * KiB }, | ||
84 | - { "gmac", 0x01c50000, 64 * KiB }, | ||
85 | { "hstmr", 0x01c60000, 4 * KiB }, | ||
86 | { "tcon-top", 0x01c70000, 4 * KiB }, | ||
87 | { "lcd0", 0x01c71000, 4 * KiB }, | ||
88 | @@ -XXX,XX +XXX,XX @@ enum { | ||
89 | AW_R40_GIC_SPI_MMC1 = 33, | ||
90 | AW_R40_GIC_SPI_MMC2 = 34, | ||
91 | AW_R40_GIC_SPI_MMC3 = 35, | ||
92 | + AW_R40_GIC_SPI_EMAC = 55, | ||
93 | + AW_R40_GIC_SPI_GMAC = 85, | ||
94 | }; | ||
95 | |||
96 | /* Allwinner R40 general constants */ | ||
97 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) | ||
98 | |||
99 | object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); | ||
100 | |||
101 | + object_initialize_child(obj, "emac", &s->emac, TYPE_AW_EMAC); | ||
102 | + object_initialize_child(obj, "gmac", &s->gmac, TYPE_AW_SUN8I_EMAC); | ||
103 | + object_property_add_alias(obj, "gmac-phy-addr", | ||
104 | + OBJECT(&s->gmac), "phy-addr"); | ||
32 | + | 105 | + |
33 | + switch (op) { | 106 | object_initialize_child(obj, "dramc", &s->dramc, TYPE_AW_R40_DRAMC); |
34 | + case 0: /* VSHR */ | 107 | object_property_add_alias(obj, "ram-addr", OBJECT(&s->dramc), |
35 | + /* Right shift comes here negative. */ | 108 | "ram-addr"); |
36 | + shift = -shift; | 109 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) |
37 | + /* Shifts larger than the element size are architecturally | 110 | |
38 | + * valid. Unsigned results in all zeros; signed results | 111 | static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
39 | + * in all sign bits. | 112 | { |
40 | + */ | 113 | + const char *r40_nic_models[] = { "gmac", "emac", NULL }; |
41 | + if (!u) { | 114 | AwR40State *s = AW_R40(dev); |
42 | + tcg_gen_gvec_sari(size, rd_ofs, rm_ofs, | 115 | unsigned i; |
43 | + MIN(shift, (8 << size) - 1), | 116 | |
44 | + vec_size, vec_size); | 117 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
45 | + } else if (shift >= 8 << size) { | 118 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, |
46 | + tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0); | 119 | s->memmap[AW_R40_DEV_DRAMPHY]); |
47 | + } else { | 120 | |
48 | + tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift, | 121 | + /* nic support gmac and emac */ |
49 | + vec_size, vec_size); | 122 | + for (int i = 0; i < ARRAY_SIZE(r40_nic_models) - 1; i++) { |
50 | + } | 123 | + NICInfo *nic = &nd_table[i]; |
51 | + return 0; | ||
52 | + | 124 | + |
53 | + case 5: /* VSHL, VSLI */ | 125 | + if (!nic->used) { |
54 | + if (!u) { /* VSHL */ | 126 | + continue; |
55 | + /* Shifts larger than the element size are | 127 | + } |
56 | + * architecturally valid and results in zero. | 128 | + if (qemu_show_nic_models(nic->model, r40_nic_models)) { |
57 | + */ | 129 | + exit(0); |
58 | + if (shift >= 8 << size) { | 130 | + } |
59 | + tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0); | ||
60 | + } else { | ||
61 | + tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift, | ||
62 | + vec_size, vec_size); | ||
63 | + } | ||
64 | + return 0; | ||
65 | + } | ||
66 | + break; | ||
67 | + } | ||
68 | + | 131 | + |
69 | if (size == 3) { | 132 | + switch (qemu_find_nic_model(nic, r40_nic_models, r40_nic_models[0])) { |
70 | count = q + 1; | 133 | + case 0: /* gmac */ |
71 | } else { | 134 | + qdev_set_nic_properties(DEVICE(&s->gmac), nic); |
72 | count = q ? 4: 2; | 135 | + break; |
73 | } | 136 | + case 1: /* emac */ |
74 | - switch (size) { | 137 | + qdev_set_nic_properties(DEVICE(&s->emac), nic); |
75 | - case 0: | 138 | + break; |
76 | - imm = (uint8_t) shift; | 139 | + default: |
77 | - imm |= imm << 8; | 140 | + exit(1); |
78 | - imm |= imm << 16; | 141 | + break; |
79 | - break; | 142 | + } |
80 | - case 1: | 143 | + } |
81 | - imm = (uint16_t) shift; | ||
82 | - imm |= imm << 16; | ||
83 | - break; | ||
84 | - case 2: | ||
85 | - case 3: | ||
86 | - imm = shift; | ||
87 | - break; | ||
88 | - default: | ||
89 | - abort(); | ||
90 | - } | ||
91 | + | 144 | + |
92 | + /* To avoid excessive duplication of ops we implement shift | 145 | + /* GMAC */ |
93 | + * by immediate using the variable shift operations. | 146 | + object_property_set_link(OBJECT(&s->gmac), "dma-memory", |
94 | + */ | 147 | + OBJECT(get_system_memory()), &error_fatal); |
95 | + imm = dup_const(size, shift); | 148 | + sysbus_realize(SYS_BUS_DEVICE(&s->gmac), &error_fatal); |
96 | 149 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gmac), 0, s->memmap[AW_R40_DEV_GMAC]); | |
97 | for (pass = 0; pass < count; pass++) { | 150 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gmac), 0, |
98 | if (size == 3) { | 151 | + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_GMAC)); |
99 | neon_load_reg64(cpu_V0, rm + pass); | 152 | + |
100 | tcg_gen_movi_i64(cpu_V1, imm); | 153 | + /* EMAC */ |
101 | switch (op) { | 154 | + sysbus_realize(SYS_BUS_DEVICE(&s->emac), &error_fatal); |
102 | - case 0: /* VSHR */ | 155 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->emac), 0, s->memmap[AW_R40_DEV_EMAC]); |
103 | case 1: /* VSRA */ | 156 | + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emac), 0, |
104 | if (u) | 157 | + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_EMAC)); |
105 | gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); | 158 | + |
106 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 159 | /* Unimplemented devices */ |
107 | cpu_V0, cpu_V1); | 160 | for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { |
108 | } | 161 | create_unimplemented_device(r40_unimplemented[i].device_name, |
109 | break; | 162 | diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c |
110 | + default: | 163 | index XXXXXXX..XXXXXXX 100644 |
111 | + g_assert_not_reached(); | 164 | --- a/hw/arm/bananapi_m2u.c |
112 | } | 165 | +++ b/hw/arm/bananapi_m2u.c |
113 | if (op == 1 || op == 3) { | 166 | @@ -XXX,XX +XXX,XX @@ static void bpim2u_init(MachineState *machine) |
114 | /* Accumulate. */ | 167 | object_property_set_int(OBJECT(r40), "ram-size", |
115 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 168 | r40->ram_size, &error_abort); |
116 | tmp2 = tcg_temp_new_i32(); | 169 | |
117 | tcg_gen_movi_i32(tmp2, imm); | 170 | + /* GMAC PHY */ |
118 | switch (op) { | 171 | + object_property_set_uint(OBJECT(r40), "gmac-phy-addr", 1, &error_abort); |
119 | - case 0: /* VSHR */ | 172 | + |
120 | case 1: /* VSRA */ | 173 | /* Mark R40 object realized */ |
121 | GEN_NEON_INTEGER_OP(shl); | 174 | qdev_realize(DEVICE(r40), NULL, &error_abort); |
122 | break; | ||
123 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
124 | case 7: /* VQSHL */ | ||
125 | GEN_NEON_INTEGER_OP_ENV(qshl); | ||
126 | break; | ||
127 | + default: | ||
128 | + g_assert_not_reached(); | ||
129 | } | ||
130 | tcg_temp_free_i32(tmp2); | ||
131 | 175 | ||
132 | -- | 176 | -- |
133 | 2.19.1 | 177 | 2.34.1 |
134 | |||
135 | diff view generated by jsdifflib |
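A brief usage note (illustrative, not taken from the patch): with the NIC model selection added to allwinner_r40_realize() above, the board's network device model is expected to be selectable on the command line, defaulting to gmac, along the lines of:

    qemu-system-arm -M bpim2u -nic user,model=gmac ...
    qemu-system-arm -M bpim2u -nic user,model=emac ...

The machine name and the -nic option syntax here are assumed from QEMU's generic NIC handling rather than spelled out in the patch.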
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Instead of shifts and masks, use direct loads and stores from | 3 | Only a few important registers are added, especially the SRAM_VER |
4 | the neon register file. | 4 | register. |
5 | 5 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
7 | Message-id: 20181011205206.3552-21-richard.henderson@linaro.org | 7 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 9 | --- |
11 | target/arm/translate.c | 92 +++++++++++++++++++++++------------------- | 10 | include/hw/arm/allwinner-r40.h | 3 + |
12 | 1 file changed, 50 insertions(+), 42 deletions(-) | 11 | include/hw/misc/allwinner-sramc.h | 69 +++++++++++ |
12 | hw/arm/allwinner-r40.c | 7 +- | ||
13 | hw/misc/allwinner-sramc.c | 184 ++++++++++++++++++++++++++++++ | ||
14 | hw/arm/Kconfig | 1 + | ||
15 | hw/misc/Kconfig | 3 + | ||
16 | hw/misc/meson.build | 1 + | ||
17 | hw/misc/trace-events | 4 + | ||
18 | 8 files changed, 271 insertions(+), 1 deletion(-) | ||
19 | create mode 100644 include/hw/misc/allwinner-sramc.h | ||
20 | create mode 100644 hw/misc/allwinner-sramc.c | ||
13 | 21 | ||
14 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 22 | diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h |
15 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/translate.c | 24 | --- a/include/hw/arm/allwinner-r40.h |
17 | +++ b/target/arm/translate.c | 25 | +++ b/include/hw/arm/allwinner-r40.h |
18 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass) | 26 | @@ -XXX,XX +XXX,XX @@ |
19 | return tmp; | 27 | #include "hw/sd/allwinner-sdhost.h" |
28 | #include "hw/misc/allwinner-r40-ccu.h" | ||
29 | #include "hw/misc/allwinner-r40-dramc.h" | ||
30 | +#include "hw/misc/allwinner-sramc.h" | ||
31 | #include "hw/i2c/allwinner-i2c.h" | ||
32 | #include "hw/net/allwinner_emac.h" | ||
33 | #include "hw/net/allwinner-sun8i-emac.h" | ||
34 | @@ -XXX,XX +XXX,XX @@ enum { | ||
35 | AW_R40_DEV_SRAM_A2, | ||
36 | AW_R40_DEV_SRAM_A3, | ||
37 | AW_R40_DEV_SRAM_A4, | ||
38 | + AW_R40_DEV_SRAMC, | ||
39 | AW_R40_DEV_EMAC, | ||
40 | AW_R40_DEV_MMC0, | ||
41 | AW_R40_DEV_MMC1, | ||
42 | @@ -XXX,XX +XXX,XX @@ struct AwR40State { | ||
43 | |||
44 | ARMCPU cpus[AW_R40_NUM_CPUS]; | ||
45 | const hwaddr *memmap; | ||
46 | + AwSRAMCState sramc; | ||
47 | AwA10PITState timer; | ||
48 | AwSdHostState mmc[AW_R40_NUM_MMCS]; | ||
49 | AwR40ClockCtlState ccu; | ||
50 | diff --git a/include/hw/misc/allwinner-sramc.h b/include/hw/misc/allwinner-sramc.h | ||
51 | new file mode 100644 | ||
52 | index XXXXXXX..XXXXXXX | ||
53 | --- /dev/null | ||
54 | +++ b/include/hw/misc/allwinner-sramc.h | ||
55 | @@ -XXX,XX +XXX,XX @@ | ||
56 | +/* | ||
57 | + * Allwinner SRAM controller emulation | ||
58 | + * | ||
59 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
60 | + * | ||
61 | + * This program is free software: you can redistribute it and/or modify | ||
62 | + * it under the terms of the GNU General Public License as published by | ||
63 | + * the Free Software Foundation, either version 2 of the License, or | ||
64 | + * (at your option) any later version. | ||
65 | + * | ||
66 | + * This program is distributed in the hope that it will be useful, | ||
67 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
68 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
69 | + * GNU General Public License for more details. | ||
70 | + * | ||
71 | + * You should have received a copy of the GNU General Public License | ||
72 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
73 | + */ | ||
74 | + | ||
75 | +#ifndef HW_MISC_ALLWINNER_SRAMC_H | ||
76 | +#define HW_MISC_ALLWINNER_SRAMC_H | ||
77 | + | ||
78 | +#include "qom/object.h" | ||
79 | +#include "hw/sysbus.h" | ||
80 | +#include "qemu/uuid.h" | ||
81 | + | ||
82 | +/** | ||
83 | + * Object model | ||
84 | + * @{ | ||
85 | + */ | ||
86 | +#define TYPE_AW_SRAMC "allwinner-sramc" | ||
87 | +#define TYPE_AW_SRAMC_SUN8I_R40 TYPE_AW_SRAMC "-sun8i-r40" | ||
88 | +OBJECT_DECLARE_TYPE(AwSRAMCState, AwSRAMCClass, AW_SRAMC) | ||
89 | + | ||
90 | +/** @} */ | ||
91 | + | ||
92 | +/** | ||
93 | + * Allwinner SRAMC object instance state | ||
94 | + */ | ||
95 | +struct AwSRAMCState { | ||
96 | + /*< private >*/ | ||
97 | + SysBusDevice parent_obj; | ||
98 | + /*< public >*/ | ||
99 | + | ||
100 | + /** Maps I/O registers in physical memory */ | ||
101 | + MemoryRegion iomem; | ||
102 | + | ||
103 | + /* registers */ | ||
104 | + uint32_t sram_ctl1; | ||
105 | + uint32_t sram_ver; | ||
106 | + uint32_t sram_soft_entry_reg0; | ||
107 | +}; | ||
108 | + | ||
109 | +/** | ||
110 | + * Allwinner SRAM Controller class-level struct. | ||
111 | + * | ||
112 | + * This struct is filled by each sunxi device specific code | ||
113 | + * such that the generic code can use this struct to support | ||
114 | + * all devices. | ||
115 | + */ | ||
116 | +struct AwSRAMCClass { | ||
117 | + /*< private >*/ | ||
118 | + SysBusDeviceClass parent_class; | ||
119 | + /*< public >*/ | ||
120 | + | ||
121 | + uint32_t sram_version_code; | ||
122 | +}; | ||
123 | + | ||
124 | +#endif /* HW_MISC_ALLWINNER_SRAMC_H */ | ||
125 | diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c | ||
126 | index XXXXXXX..XXXXXXX 100644 | ||
127 | --- a/hw/arm/allwinner-r40.c | ||
128 | +++ b/hw/arm/allwinner-r40.c | ||
129 | @@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_r40_memmap[] = { | ||
130 | [AW_R40_DEV_SRAM_A2] = 0x00004000, | ||
131 | [AW_R40_DEV_SRAM_A3] = 0x00008000, | ||
132 | [AW_R40_DEV_SRAM_A4] = 0x0000b400, | ||
133 | + [AW_R40_DEV_SRAMC] = 0x01c00000, | ||
134 | [AW_R40_DEV_EMAC] = 0x01c0b000, | ||
135 | [AW_R40_DEV_MMC0] = 0x01c0f000, | ||
136 | [AW_R40_DEV_MMC1] = 0x01c10000, | ||
137 | @@ -XXX,XX +XXX,XX @@ struct AwR40Unimplemented { | ||
138 | static struct AwR40Unimplemented r40_unimplemented[] = { | ||
139 | { "d-engine", 0x01000000, 4 * MiB }, | ||
140 | { "d-inter", 0x01400000, 128 * KiB }, | ||
141 | - { "sram-c", 0x01c00000, 4 * KiB }, | ||
142 | { "dma", 0x01c02000, 4 * KiB }, | ||
143 | { "nfdc", 0x01c03000, 4 * KiB }, | ||
144 | { "ts", 0x01c04000, 4 * KiB }, | ||
145 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_init(Object *obj) | ||
146 | "ram-addr"); | ||
147 | object_property_add_alias(obj, "ram-size", OBJECT(&s->dramc), | ||
148 | "ram-size"); | ||
149 | + | ||
150 | + object_initialize_child(obj, "sramc", &s->sramc, TYPE_AW_SRAMC_SUN8I_R40); | ||
20 | } | 151 | } |
21 | 152 | ||
22 | +static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) | 153 | static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
23 | +{ | 154 | @@ -XXX,XX +XXX,XX @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) |
24 | + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); | 155 | AW_R40_GIC_SPI_TIMER1)); |
25 | + | 156 | |
26 | + switch (mop) { | 157 | /* SRAM */ |
27 | + case MO_UB: | 158 | + sysbus_realize(SYS_BUS_DEVICE(&s->sramc), &error_fatal); |
28 | + tcg_gen_ld8u_i32(var, cpu_env, offset); | 159 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sramc), 0, s->memmap[AW_R40_DEV_SRAMC]); |
29 | + break; | 160 | + |
30 | + case MO_UW: | 161 | memory_region_init_ram(&s->sram_a1, OBJECT(dev), "sram A1", |
31 | + tcg_gen_ld16u_i32(var, cpu_env, offset); | 162 | 16 * KiB, &error_abort); |
32 | + break; | 163 | memory_region_init_ram(&s->sram_a2, OBJECT(dev), "sram A2", |
33 | + case MO_UL: | 164 | diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c |
34 | + tcg_gen_ld_i32(var, cpu_env, offset); | 165 | new file mode 100644 |
166 | index XXXXXXX..XXXXXXX | ||
167 | --- /dev/null | ||
168 | +++ b/hw/misc/allwinner-sramc.c | ||
169 | @@ -XXX,XX +XXX,XX @@ | ||
170 | +/* | ||
171 | + * Allwinner R40 SRAM controller emulation | ||
172 | + * | ||
173 | + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> | ||
174 | + * | ||
175 | + * This program is free software: you can redistribute it and/or modify | ||
176 | + * it under the terms of the GNU General Public License as published by | ||
177 | + * the Free Software Foundation, either version 2 of the License, or | ||
178 | + * (at your option) any later version. | ||
179 | + * | ||
180 | + * This program is distributed in the hope that it will be useful, | ||
181 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
182 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
183 | + * GNU General Public License for more details. | ||
184 | + * | ||
185 | + * You should have received a copy of the GNU General Public License | ||
186 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
187 | + */ | ||
188 | + | ||
189 | +#include "qemu/osdep.h" | ||
190 | +#include "qemu/units.h" | ||
191 | +#include "hw/sysbus.h" | ||
192 | +#include "migration/vmstate.h" | ||
193 | +#include "qemu/log.h" | ||
194 | +#include "qemu/module.h" | ||
195 | +#include "qapi/error.h" | ||
196 | +#include "hw/qdev-properties.h" | ||
197 | +#include "hw/qdev-properties-system.h" | ||
198 | +#include "hw/misc/allwinner-sramc.h" | ||
199 | +#include "trace.h" | ||
200 | + | ||
201 | +/* | ||
202 | + * register offsets | ||
203 | + * https://linux-sunxi.org/SRAM_Controller_Register_Guide | ||
204 | + */ | ||
205 | +enum { | ||
206 | + REG_SRAM_CTL1_CFG = 0x04, /* SRAM Control register 1 */ | ||
207 | + REG_SRAM_VER = 0x24, /* SRAM Version register */ | ||
208 | + REG_SRAM_R40_SOFT_ENTRY_REG0 = 0xbc, | ||
209 | +}; | ||
210 | + | ||
211 | +/* REG_SRAMC_VERSION bit defines */ | ||
212 | +#define SRAM_VER_READ_ENABLE (1 << 15) | ||
213 | +#define SRAM_VER_VERSION_SHIFT 16 | ||
214 | +#define SRAM_VERSION_SUN8I_R40 0x1701 | ||
215 | + | ||
216 | +static uint64_t allwinner_sramc_read(void *opaque, hwaddr offset, | ||
217 | + unsigned size) | ||
218 | +{ | ||
219 | + AwSRAMCState *s = AW_SRAMC(opaque); | ||
220 | + AwSRAMCClass *sc = AW_SRAMC_GET_CLASS(s); | ||
221 | + uint64_t val = 0; | ||
222 | + | ||
223 | + switch (offset) { | ||
224 | + case REG_SRAM_CTL1_CFG: | ||
225 | + val = s->sram_ctl1; | ||
226 | + break; | ||
227 | + case REG_SRAM_VER: | ||
228 | + /* bit15: lock bit, set this bit before reading this register */ | ||
229 | + if (s->sram_ver & SRAM_VER_READ_ENABLE) { | ||
230 | + val = SRAM_VER_READ_ENABLE | | ||
231 | + (sc->sram_version_code << SRAM_VER_VERSION_SHIFT); | ||
232 | + } | ||
233 | + break; | ||
234 | + case REG_SRAM_R40_SOFT_ENTRY_REG0: | ||
235 | + val = s->sram_soft_entry_reg0; | ||
35 | + break; | 236 | + break; |
36 | + default: | 237 | + default: |
37 | + g_assert_not_reached(); | 238 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", |
239 | + __func__, (uint32_t)offset); | ||
240 | + return 0; | ||
38 | + } | 241 | + } |
39 | +} | 242 | + |
40 | + | 243 | + trace_allwinner_sramc_read(offset, val); |
41 | static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop) | 244 | + |
42 | { | 245 | + return val; |
43 | long offset = neon_element_offset(reg, ele, mop & MO_SIZE); | 246 | +} |
44 | @@ -XXX,XX +XXX,XX @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var) | 247 | + |
45 | tcg_temp_free_i32(var); | 248 | +static void allwinner_sramc_write(void *opaque, hwaddr offset, |
46 | } | 249 | + uint64_t val, unsigned size) |
47 | 250 | +{ | |
48 | +static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) | 251 | + AwSRAMCState *s = AW_SRAMC(opaque); |
49 | +{ | 252 | + |
50 | + long offset = neon_element_offset(reg, ele, size); | 253 | + trace_allwinner_sramc_write(offset, val); |
51 | + | 254 | + |
52 | + switch (size) { | 255 | + switch (offset) { |
53 | + case MO_8: | 256 | + case REG_SRAM_CTL1_CFG: |
54 | + tcg_gen_st8_i32(var, cpu_env, offset); | 257 | + s->sram_ctl1 = val; |
55 | + break; | 258 | + break; |
56 | + case MO_16: | 259 | + case REG_SRAM_VER: |
57 | + tcg_gen_st16_i32(var, cpu_env, offset); | 260 | + /* Only the READ_ENABLE bit is writeable */ |
58 | + break; | 261 | + s->sram_ver = val & SRAM_VER_READ_ENABLE; |
59 | + case MO_32: | 262 | + break; |
60 | + tcg_gen_st_i32(var, cpu_env, offset); | 263 | + case REG_SRAM_R40_SOFT_ENTRY_REG0: |
264 | + s->sram_soft_entry_reg0 = val; | ||
61 | + break; | 265 | + break; |
62 | + default: | 266 | + default: |
63 | + g_assert_not_reached(); | 267 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", |
268 | + __func__, (uint32_t)offset); | ||
269 | + break; | ||
64 | + } | 270 | + } |
65 | +} | 271 | +} |
66 | + | 272 | + |
67 | static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var) | 273 | +static const MemoryRegionOps allwinner_sramc_ops = { |
68 | { | 274 | + .read = allwinner_sramc_read, |
69 | long offset = neon_element_offset(reg, ele, size); | 275 | + .write = allwinner_sramc_write, |
70 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 276 | + .endianness = DEVICE_NATIVE_ENDIAN, |
71 | int stride; | 277 | + .valid = { |
72 | int size; | 278 | + .min_access_size = 4, |
73 | int reg; | 279 | + .max_access_size = 4, |
74 | - int pass; | 280 | + }, |
75 | int load; | 281 | + .impl.min_access_size = 4, |
76 | - int shift; | 282 | +}; |
77 | int n; | 283 | + |
78 | int vec_size; | 284 | +static const VMStateDescription allwinner_sramc_vmstate = { |
79 | int mmu_idx; | 285 | + .name = "allwinner-sramc", |
80 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 286 | + .version_id = 1, |
81 | } else { | 287 | + .minimum_version_id = 1, |
82 | /* Single element. */ | 288 | + .fields = (VMStateField[]) { |
83 | int idx = (insn >> 4) & 0xf; | 289 | + VMSTATE_UINT32(sram_ver, AwSRAMCState), |
84 | - pass = (insn >> 7) & 1; | 290 | + VMSTATE_UINT32(sram_soft_entry_reg0, AwSRAMCState), |
85 | + int reg_idx; | 291 | + VMSTATE_END_OF_LIST() |
86 | switch (size) { | 292 | + } |
87 | case 0: | 293 | +}; |
88 | - shift = ((insn >> 5) & 3) * 8; | 294 | + |
89 | + reg_idx = (insn >> 5) & 7; | 295 | +static void allwinner_sramc_reset(DeviceState *dev) |
90 | stride = 1; | 296 | +{ |
91 | break; | 297 | + AwSRAMCState *s = AW_SRAMC(dev); |
92 | case 1: | 298 | + AwSRAMCClass *sc = AW_SRAMC_GET_CLASS(s); |
93 | - shift = ((insn >> 6) & 1) * 16; | 299 | + |
94 | + reg_idx = (insn >> 6) & 3; | 300 | + switch (sc->sram_version_code) { |
95 | stride = (insn & (1 << 5)) ? 2 : 1; | 301 | + case SRAM_VERSION_SUN8I_R40: |
96 | break; | 302 | + s->sram_ctl1 = 0x1300; |
97 | case 2: | 303 | + break; |
98 | - shift = 0; | 304 | + } |
99 | + reg_idx = (insn >> 7) & 1; | 305 | +} |
100 | stride = (insn & (1 << 6)) ? 2 : 1; | 306 | + |
101 | break; | 307 | +static void allwinner_sramc_class_init(ObjectClass *klass, void *data) |
102 | default: | 308 | +{ |
103 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 309 | + DeviceClass *dc = DEVICE_CLASS(klass); |
104 | */ | 310 | + |
105 | return 1; | 311 | + dc->reset = allwinner_sramc_reset; |
106 | } | 312 | + dc->vmsd = &allwinner_sramc_vmstate; |
107 | + tmp = tcg_temp_new_i32(); | 313 | +} |
108 | addr = tcg_temp_new_i32(); | 314 | + |
109 | load_reg_var(s, addr, rn); | 315 | +static void allwinner_sramc_init(Object *obj) |
110 | for (reg = 0; reg < nregs; reg++) { | 316 | +{ |
111 | if (load) { | 317 | + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); |
112 | - tmp = tcg_temp_new_i32(); | 318 | + AwSRAMCState *s = AW_SRAMC(obj); |
113 | - switch (size) { | 319 | + |
114 | - case 0: | 320 | + /* Memory mapping */ |
115 | - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); | 321 | + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_sramc_ops, s, |
116 | - break; | 322 | + TYPE_AW_SRAMC, 1 * KiB); |
117 | - case 1: | 323 | + sysbus_init_mmio(sbd, &s->iomem); |
118 | - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); | 324 | +} |
119 | - break; | 325 | + |
120 | - case 2: | 326 | +static const TypeInfo allwinner_sramc_info = { |
121 | - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); | 327 | + .name = TYPE_AW_SRAMC, |
122 | - break; | 328 | + .parent = TYPE_SYS_BUS_DEVICE, |
123 | - default: /* Avoid compiler warnings. */ | 329 | + .instance_init = allwinner_sramc_init, |
124 | - abort(); | 330 | + .instance_size = sizeof(AwSRAMCState), |
125 | - } | 331 | + .class_init = allwinner_sramc_class_init, |
126 | - if (size != 2) { | 332 | +}; |
127 | - tmp2 = neon_load_reg(rd, pass); | 333 | + |
128 | - tcg_gen_deposit_i32(tmp, tmp2, tmp, | 334 | +static void allwinner_r40_sramc_class_init(ObjectClass *klass, void *data) |
129 | - shift, size ? 16 : 8); | 335 | +{ |
130 | - tcg_temp_free_i32(tmp2); | 336 | + AwSRAMCClass *sc = AW_SRAMC_CLASS(klass); |
131 | - } | 337 | + |
132 | - neon_store_reg(rd, pass, tmp); | 338 | + sc->sram_version_code = SRAM_VERSION_SUN8I_R40; |
133 | + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), | 339 | +} |
134 | + s->be_data | size); | 340 | + |
135 | + neon_store_element(rd, reg_idx, size, tmp); | 341 | +static const TypeInfo allwinner_r40_sramc_info = { |
136 | } else { /* Store */ | 342 | + .name = TYPE_AW_SRAMC_SUN8I_R40, |
137 | - tmp = neon_load_reg(rd, pass); | 343 | + .parent = TYPE_AW_SRAMC, |
138 | - if (shift) | 344 | + .class_init = allwinner_r40_sramc_class_init, |
139 | - tcg_gen_shri_i32(tmp, tmp, shift); | 345 | +}; |
140 | - switch (size) { | 346 | + |
141 | - case 0: | 347 | +static void allwinner_sramc_register(void) |
142 | - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); | 348 | +{ |
143 | - break; | 349 | + type_register_static(&allwinner_sramc_info); |
144 | - case 1: | 350 | + type_register_static(&allwinner_r40_sramc_info); |
145 | - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); | 351 | +} |
146 | - break; | 352 | + |
147 | - case 2: | 353 | +type_init(allwinner_sramc_register) |
148 | - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); | 354 | diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig |
149 | - break; | 355 | index XXXXXXX..XXXXXXX 100644 |
150 | - } | 356 | --- a/hw/arm/Kconfig |
151 | - tcg_temp_free_i32(tmp); | 357 | +++ b/hw/arm/Kconfig |
152 | + neon_load_element(tmp, rd, reg_idx, size); | 358 | @@ -XXX,XX +XXX,XX @@ config ALLWINNER_H3 |
153 | + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), | 359 | config ALLWINNER_R40 |
154 | + s->be_data | size); | 360 | bool |
155 | } | 361 | default y if TCG && ARM |
156 | rd += stride; | 362 | + select ALLWINNER_SRAMC |
157 | tcg_gen_addi_i32(addr, addr, 1 << size); | 363 | select ALLWINNER_A10_PIT |
158 | } | 364 | select AXP2XX_PMU |
159 | tcg_temp_free_i32(addr); | 365 | select SERIAL |
160 | + tcg_temp_free_i32(tmp); | 366 | diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig |
161 | stride = nregs * (1 << size); | 367 | index XXXXXXX..XXXXXXX 100644 |
162 | } | 368 | --- a/hw/misc/Kconfig |
163 | } | 369 | +++ b/hw/misc/Kconfig |
370 | @@ -XXX,XX +XXX,XX @@ config VIRT_CTRL | ||
371 | config LASI | ||
372 | bool | ||
373 | |||
374 | +config ALLWINNER_SRAMC | ||
375 | + bool | ||
376 | + | ||
377 | config ALLWINNER_A10_CCM | ||
378 | bool | ||
379 | |||
380 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
381 | index XXXXXXX..XXXXXXX 100644 | ||
382 | --- a/hw/misc/meson.build | ||
383 | +++ b/hw/misc/meson.build | ||
384 | @@ -XXX,XX +XXX,XX @@ subdir('macio') | ||
385 | |||
386 | softmmu_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c')) | ||
387 | |||
388 | +softmmu_ss.add(when: 'CONFIG_ALLWINNER_SRAMC', if_true: files('allwinner-sramc.c')) | ||
389 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_CCM', if_true: files('allwinner-a10-ccm.c')) | ||
390 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_DRAMC', if_true: files('allwinner-a10-dramc.c')) | ||
391 | softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-ccu.c')) | ||
392 | diff --git a/hw/misc/trace-events b/hw/misc/trace-events | ||
393 | index XXXXXXX..XXXXXXX 100644 | ||
394 | --- a/hw/misc/trace-events | ||
395 | +++ b/hw/misc/trace-events | ||
396 | @@ -XXX,XX +XXX,XX @@ allwinner_r40_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "writ | ||
397 | allwinner_sid_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
398 | allwinner_sid_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 | ||
399 | |||
400 | +# allwinner-sramc.c | ||
401 | +allwinner_sramc_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 | ||
402 | +allwinner_sramc_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 | ||
403 | + | ||
404 | # avr_power.c | ||
405 | avr_power_read(uint8_t value) "power_reduc read value:%u" | ||
406 | avr_power_write(uint8_t value) "power_reduc write value:%u" | ||
164 | -- | 407 | -- |
165 | 2.19.1 | 408 | 2.34.1 |
166 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | For a sequence of loads or stores from a single register, | 3 | Add test case for booting from initrd and sd card. |
4 | little-endian operations can be promoted to an 8-byte op. | ||
5 | This can reduce the number of operations by a factor of 8. | ||
6 | 4 | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
8 | Message-id: 20181011205206.3552-20-richard.henderson@linaro.org | 6 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 7 | Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 9 | --- |
13 | target/arm/translate.c | 10 ++++++++++ | 10 | tests/avocado/boot_linux_console.py | 176 ++++++++++++++++++++++++++++ |
14 | 1 file changed, 10 insertions(+) | 11 | 1 file changed, 176 insertions(+) |
15 | 12 | ||
16 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 13 | diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/translate.c | 15 | --- a/tests/avocado/boot_linux_console.py |
19 | +++ b/target/arm/translate.c | 16 | +++ b/tests/avocado/boot_linux_console.py |
20 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 17 | @@ -XXX,XX +XXX,XX @@ def test_arm_quanta_gsj_initrd(self): |
21 | if (size == 3 && (interleave | spacing) != 1) { | 18 | self.wait_for_console_pattern( |
22 | return 1; | 19 | 'Give root password for system maintenance') |
23 | } | 20 | |
24 | + /* For our purposes, bytes are always little-endian. */ | 21 | + def test_arm_bpim2u(self): |
25 | + if (size == 0) { | 22 | + """ |
26 | + endian = MO_LE; | 23 | + :avocado: tags=arch:arm |
27 | + } | 24 | + :avocado: tags=machine:bpim2u |
28 | + /* Consecutive little-endian elements from a single register | 25 | + :avocado: tags=accel:tcg |
29 | + * can be promoted to a larger little-endian operation. | 26 | + """ |
30 | + */ | 27 | + deb_url = ('https://apt.armbian.com/pool/main/l/linux-5.10.16-sunxi/' |
31 | + if (interleave == 1 && endian == MO_LE) { | 28 | + 'linux-image-current-sunxi_21.02.2_armhf.deb') |
32 | + size = 3; | 29 | + deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' |
33 | + } | 30 | + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) |
34 | tmp64 = tcg_temp_new_i64(); | 31 | + kernel_path = self.extract_from_deb(deb_path, |
35 | addr = tcg_temp_new_i32(); | 32 | + '/boot/vmlinuz-5.10.16-sunxi') |
36 | tmp2 = tcg_const_i32(1 << size); | 33 | + dtb_path = ('/usr/lib/linux-image-current-sunxi/' |
34 | + 'sun8i-r40-bananapi-m2-ultra.dtb') | ||
35 | + dtb_path = self.extract_from_deb(deb_path, dtb_path) | ||
36 | + | ||
37 | + self.vm.set_console() | ||
38 | + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + | ||
39 | + 'console=ttyS0,115200n8 ' | ||
40 | + 'earlycon=uart,mmio32,0x1c28000') | ||
41 | + self.vm.add_args('-kernel', kernel_path, | ||
42 | + '-dtb', dtb_path, | ||
43 | + '-append', kernel_command_line) | ||
44 | + self.vm.launch() | ||
45 | + console_pattern = 'Kernel command line: %s' % kernel_command_line | ||
46 | + self.wait_for_console_pattern(console_pattern) | ||
47 | + | ||
48 | + def test_arm_bpim2u_initrd(self): | ||
49 | + """ | ||
50 | + :avocado: tags=arch:arm | ||
51 | + :avocado: tags=accel:tcg | ||
52 | + :avocado: tags=machine:bpim2u | ||
53 | + """ | ||
54 | + deb_url = ('https://apt.armbian.com/pool/main/l/linux-5.10.16-sunxi/' | ||
55 | + 'linux-image-current-sunxi_21.02.2_armhf.deb') | ||
56 | + deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' | ||
57 | + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) | ||
58 | + kernel_path = self.extract_from_deb(deb_path, | ||
59 | + '/boot/vmlinuz-5.10.16-sunxi') | ||
60 | + dtb_path = ('/usr/lib/linux-image-current-sunxi/' | ||
61 | + 'sun8i-r40-bananapi-m2-ultra.dtb') | ||
62 | + dtb_path = self.extract_from_deb(deb_path, dtb_path) | ||
63 | + initrd_url = ('https://github.com/groeck/linux-build-test/raw/' | ||
64 | + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' | ||
65 | + 'arm/rootfs-armv7a.cpio.gz') | ||
66 | + initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' | ||
67 | + initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) | ||
68 | + initrd_path = os.path.join(self.workdir, 'rootfs.cpio') | ||
69 | + archive.gzip_uncompress(initrd_path_gz, initrd_path) | ||
70 | + | ||
71 | + self.vm.set_console() | ||
72 | + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + | ||
73 | + 'console=ttyS0,115200 ' | ||
74 | + 'panic=-1 noreboot') | ||
75 | + self.vm.add_args('-kernel', kernel_path, | ||
76 | + '-dtb', dtb_path, | ||
77 | + '-initrd', initrd_path, | ||
78 | + '-append', kernel_command_line, | ||
79 | + '-no-reboot') | ||
80 | + self.vm.launch() | ||
81 | + self.wait_for_console_pattern('Boot successful.') | ||
82 | + | ||
83 | + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', | ||
84 | + 'Allwinner sun8i Family') | ||
85 | + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', | ||
86 | + 'system-control@1c00000') | ||
87 | + exec_command_and_wait_for_pattern(self, 'reboot', | ||
88 | + 'reboot: Restarting system') | ||
89 | + # Wait for VM to shut down gracefully | ||
90 | + self.vm.wait() | ||
91 | + | ||
92 | + def test_arm_bpim2u_gmac(self): | ||
93 | + """ | ||
94 | + :avocado: tags=arch:arm | ||
95 | + :avocado: tags=accel:tcg | ||
96 | + :avocado: tags=machine:bpim2u | ||
97 | + :avocado: tags=device:sd | ||
98 | + """ | ||
99 | + self.require_netdev('user') | ||
100 | + | ||
101 | + deb_url = ('https://apt.armbian.com/pool/main/l/linux-5.10.16-sunxi/' | ||
102 | + 'linux-image-current-sunxi_21.02.2_armhf.deb') | ||
103 | + deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' | ||
104 | + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) | ||
105 | + kernel_path = self.extract_from_deb(deb_path, | ||
106 | + '/boot/vmlinuz-5.10.16-sunxi') | ||
107 | + dtb_path = ('/usr/lib/linux-image-current-sunxi/' | ||
108 | + 'sun8i-r40-bananapi-m2-ultra.dtb') | ||
109 | + dtb_path = self.extract_from_deb(deb_path, dtb_path) | ||
110 | + rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' | ||
111 | + 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') | ||
112 | + rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' | ||
113 | + rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) | ||
114 | + rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') | ||
115 | + archive.lzma_uncompress(rootfs_path_xz, rootfs_path) | ||
116 | + image_pow2ceil_expand(rootfs_path) | ||
117 | + | ||
118 | + self.vm.set_console() | ||
119 | + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + | ||
120 | + 'console=ttyS0,115200 ' | ||
121 | + 'root=/dev/mmcblk0 rootwait rw ' | ||
122 | + 'panic=-1 noreboot') | ||
123 | + self.vm.add_args('-kernel', kernel_path, | ||
124 | + '-dtb', dtb_path, | ||
125 | + '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', | ||
126 | + '-net', 'nic,model=gmac,netdev=host_gmac', | ||
127 | + '-netdev', 'user,id=host_gmac', | ||
128 | + '-append', kernel_command_line, | ||
129 | + '-no-reboot') | ||
130 | + self.vm.launch() | ||
131 | + shell_ready = "/bin/sh: can't access tty; job control turned off" | ||
132 | + self.wait_for_console_pattern(shell_ready) | ||
133 | + | ||
134 | + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', | ||
135 | + 'Allwinner sun8i Family') | ||
136 | + exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', | ||
137 | + 'mmcblk0') | ||
138 | + exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', | ||
139 | + 'eth0: Link is Up') | ||
140 | + exec_command_and_wait_for_pattern(self, 'udhcpc eth0', | ||
141 | + 'udhcpc: lease of 10.0.2.15 obtained') | ||
142 | + exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', | ||
143 | + '3 packets transmitted, 3 packets received, 0% packet loss') | ||
144 | + exec_command_and_wait_for_pattern(self, 'reboot', | ||
145 | + 'reboot: Restarting system') | ||
146 | + # Wait for VM to shut down gracefully | ||
147 | + self.vm.wait() | ||
148 | + | ||
149 | + @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') | ||
150 | + def test_arm_bpim2u_openwrt_22_03_3(self): | ||
151 | + """ | ||
152 | + :avocado: tags=arch:arm | ||
153 | + :avocado: tags=machine:bpim2u | ||
154 | + :avocado: tags=device:sd | ||
155 | + """ | ||
156 | + | ||
157 | + # This test downloads an 8.9 MiB compressed image and expands it | ||
158 | + # to 127 MiB. | ||
159 | + image_url = ('https://downloads.openwrt.org/releases/22.03.3/targets/' | ||
160 | + 'sunxi/cortexa7/openwrt-22.03.3-sunxi-cortexa7-' | ||
161 | + 'sinovoip_bananapi-m2-ultra-ext4-sdcard.img.gz') | ||
162 | + image_hash = ('5b41b4e11423e562c6011640f9a7cd3b' | ||
163 | + 'dd0a3d42b83430f7caa70a432e6cd82c') | ||
164 | + image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash, | ||
165 | + algorithm='sha256') | ||
166 | + image_path = archive.extract(image_path_gz, self.workdir) | ||
167 | + image_pow2ceil_expand(image_path) | ||
168 | + | ||
169 | + self.vm.set_console() | ||
170 | + self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', | ||
171 | + '-nic', 'user', | ||
172 | + '-no-reboot') | ||
173 | + self.vm.launch() | ||
174 | + | ||
175 | + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + | ||
176 | + 'usbcore.nousb ' | ||
177 | + 'noreboot') | ||
178 | + | ||
179 | + self.wait_for_console_pattern('U-Boot SPL') | ||
180 | + | ||
181 | + interrupt_interactive_console_until_pattern( | ||
182 | + self, 'Hit any key to stop autoboot:', '=>') | ||
183 | + exec_command_and_wait_for_pattern(self, "setenv extraargs '" + | ||
184 | + kernel_command_line + "'", '=>') | ||
185 | + exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); | ||
186 | + | ||
187 | + self.wait_for_console_pattern( | ||
188 | + 'Please press Enter to activate this console.') | ||
189 | + | ||
190 | + exec_command_and_wait_for_pattern(self, ' ', 'root@') | ||
191 | + | ||
192 | + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', | ||
193 | + 'Allwinner sun8i Family') | ||
194 | + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', | ||
195 | + 'system-control@1c00000') | ||
196 | + | ||
197 | def test_arm_orangepi(self): | ||
198 | """ | ||
199 | :avocado: tags=arch:arm | ||
37 | -- | 200 | -- |
38 | 2.19.1 | 201 | 2.34.1 |
39 | |||
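The little-endian promotion in the patch above relies on a simple layout fact: N consecutive little-endian elements at ascending addresses occupy exactly the same bytes as one N-times-wider little-endian element. A standalone C sketch of that equivalence (illustrative only, not QEMU code; the byte values are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Eight consecutive byte elements, as a VLD1.8 {d0}, [rN] would see them. */
    uint8_t mem[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
    uint64_t wide = 0, eight_narrow = 0;

    /* One 8-byte little-endian load... */
    for (int i = 7; i >= 0; i--) {
        wide = (wide << 8) | mem[i];
    }
    /* ...versus eight 1-byte loads deposited at increasing positions. */
    for (int i = 0; i < 8; i++) {
        eight_narrow |= (uint64_t)mem[i] << (8 * i);
    }
    assert(wide == eight_narrow);
    return 0;
}

With interleave == 1 the elements really are one consecutive run, which is why the hunk above can bump size to 3 (a 64-bit access) and cut the number of memory operations by up to a factor of eight.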
1 | From: Markus Armbruster <armbru@redhat.com> | 1 | From: qianfan Zhao <qianfanguijin@163.com> |
---|---|---|---|
2 | 2 | ||
3 | Device models aren't supposed to go on fishing expeditions for | 3 | Add documents for Banana Pi M2U |
4 | backends. They should expose suitable properties for the user to set. | ||
5 | For onboard devices, board code sets them. | ||
6 | 4 | ||
7 | Device ssi-sd picks up its block backend in its init() method with | 5 | Signed-off-by: qianfan Zhao <qianfanguijin@163.com> |
8 | drive_get_next() instead. This mistake is already marked FIXME since | 6 | Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com> |
9 | commit af9e40a. | 7 | [PMM: Minor format fixes to correct sphinx errors] |
10 | |||
11 | Unset user_creatable to remove the mistake from our external | ||
12 | interface. Since the SSI bus doesn't support hotplug, only -device | ||
13 | can be affected. Only certain ARM machines have ssi-sd and provide an | ||
14 | SSI bus for it; this patch breaks -device ssi-sd for these machines. | ||
15 | No actual use of -device ssi-sd is known. | ||
16 | |||
17 | Signed-off-by: Markus Armbruster <armbru@redhat.com> | ||
18 | Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
19 | Acked-by: Thomas Huth <thuth@redhat.com> | ||
20 | Message-id: 20181009060835.4608-1-armbru@redhat.com | ||
21 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
22 | --- | 9 | --- |
23 | hw/sd/ssi-sd.c | 2 ++ | 10 | docs/system/arm/bananapi_m2u.rst | 139 +++++++++++++++++++++++++++++++ |
24 | 1 file changed, 2 insertions(+) | 11 | docs/system/target-arm.rst | 1 + |
12 | 2 files changed, 140 insertions(+) | ||
13 | create mode 100644 docs/system/arm/bananapi_m2u.rst | ||
25 | 14 | ||
26 | diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c | 15 | diff --git a/docs/system/arm/bananapi_m2u.rst b/docs/system/arm/bananapi_m2u.rst |
16 | new file mode 100644 | ||
17 | index XXXXXXX..XXXXXXX | ||
18 | --- /dev/null | ||
19 | +++ b/docs/system/arm/bananapi_m2u.rst | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | +Banana Pi BPI-M2U (``bpim2u``) | ||
22 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | ||
23 | + | ||
24 | +Banana Pi BPI-M2 Ultra is a quad-core mini single board computer built with | ||
25 | +Allwinner A40i/R40/V40 SoC. It features 2GB of RAM and 8GB eMMC. It also | ||
26 | +has onboard WiFi and BT. On the ports side, the BPI-M2 Ultra has 2 USB A | ||
27 | +2.0 ports, 1 USB OTG port, 1 HDMI port, 1 audio jack, a DC power port, | ||
28 | +and last but not least, a SATA port. | ||
29 | + | ||
30 | +Supported devices | ||
31 | +""""""""""""""""" | ||
32 | + | ||
33 | +The Banana Pi M2U machine supports the following devices: | ||
34 | + | ||
35 | + * SMP (Quad Core Cortex-A7) | ||
36 | + * Generic Interrupt Controller configuration | ||
37 | + * SRAM mappings | ||
38 | + * SDRAM controller | ||
39 | + * Timer device (re-used from Allwinner A10) | ||
40 | + * UART | ||
41 | + * SD/MMC storage controller | ||
42 | + * EMAC ethernet | ||
43 | + * GMAC ethernet | ||
44 | + * Clock Control Unit | ||
45 | + * TWI (I2C) | ||
46 | + | ||
47 | +Limitations | ||
48 | +""""""""""" | ||
49 | + | ||
50 | +Currently, Banana Pi M2U does *not* support the following features: | ||
51 | + | ||
52 | +- Graphical output via HDMI, GPU and/or the Display Engine | ||
53 | +- Audio output | ||
54 | +- Hardware Watchdog | ||
55 | +- Real Time Clock | ||
56 | +- USB 2.0 interfaces | ||
57 | + | ||
58 | +Also see the 'unimplemented' array in the Allwinner R40 SoC module | ||
59 | +for a complete list of unimplemented I/O devices: ``./hw/arm/allwinner-r40.c`` | ||
60 | + | ||
61 | +Boot options | ||
62 | +"""""""""""" | ||
63 | + | ||
64 | +The Banana Pi M2U machine can be started using the standard -kernel functionality | ||
65 | +for loading a Linux kernel or ELF executable. Additionally, the Banana Pi M2U | ||
66 | +machine can also emulate the BootROM which is present on an actual Allwinner R40 | ||
67 | +based SoC, which loads the bootloader from an SD card, specified via the -sd | ||
68 | +argument to qemu-system-arm. | ||
69 | + | ||
70 | +Running mainline Linux | ||
71 | +"""""""""""""""""""""" | ||
72 | + | ||
73 | +To build a Linux mainline kernel that can be booted by the Banana Pi M2U machine, | ||
74 | +simply configure the kernel using the sunxi_defconfig configuration: | ||
75 | + | ||
76 | +.. code-block:: bash | ||
77 | + | ||
78 | + $ ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- make mrproper | ||
79 | + $ ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- make sunxi_defconfig | ||
80 | + | ||
81 | +To boot the newly built Linux kernel in QEMU with the Banana Pi M2U machine, use: | ||
82 | + | ||
83 | +.. code-block:: bash | ||
84 | + | ||
85 | + $ qemu-system-arm -M bpim2u -nographic \ | ||
86 | + -kernel /path/to/linux/arch/arm/boot/zImage \ | ||
87 | + -append 'console=ttyS0,115200' \ | ||
88 | + -dtb /path/to/linux/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dtb | ||
89 | + | ||
90 | +Banana Pi M2U images | ||
91 | +"""""""""""""""""""" | ||
92 | + | ||
93 | +Note that the mainline kernel does not have a root filesystem. You can choose | ||
94 | +to build your own image with buildroot using the bananapi_m2_ultra_defconfig. | ||
95 | +Also see https://buildroot.org for more information. | ||
96 | + | ||
97 | +Another possibility is to run an OpenWrt image for Banana Pi M2U which | ||
98 | +can be downloaded from: | ||
99 | + | ||
100 | + https://downloads.openwrt.org/releases/22.03.3/targets/sunxi/cortexa7/ | ||
101 | + | ||
102 | +When using an image as an SD card, it must be resized to a power of two. This can be | ||
103 | +done with the ``qemu-img`` command. It is recommended to only increase the image size | ||
104 | +instead of shrinking it to a power of two, to avoid loss of data. For example, | ||
105 | +to prepare a downloaded OpenWrt image, first extract it and then increase | ||
106 | +its size to one gigabyte as follows: | ||
107 | + | ||
108 | +.. code-block:: bash | ||
109 | + | ||
110 | + $ qemu-img resize \ | ||
111 | + openwrt-22.03.3-sunxi-cortexa7-sinovoip_bananapi-m2-ultra-ext4-sdcard.img \ | ||
112 | + 1G | ||
113 | + | ||
114 | +Instead of providing a custom Linux kernel via the -kernel command you may also | ||
115 | +choose to let the Banana Pi M2U machine load the bootloader from SD card, just like | ||
116 | +a real board would do using the BootROM. Simply pass the selected image via the -sd | ||
117 | +argument and remove the -kernel, -append, -dtb and -initrd arguments: | ||
118 | + | ||
119 | +.. code-block:: bash | ||
120 | + | ||
121 | + $ qemu-system-arm -M bpim2u -nic user -nographic \ | ||
122 | + -sd openwrt-22.03.3-sunxi-cortexa7-sinovoip_bananapi-m2-ultra-ext4-sdcard.img | ||
123 | + | ||
124 | +Running U-Boot | ||
125 | +"""""""""""""" | ||
126 | + | ||
127 | +U-Boot mainline can be built and configured using the Bananapi_M2_Ultra_defconfig | ||
128 | +using similar commands as described above for Linux. Note that it is recommended | ||
129 | +for development/testing to select the following configuration setting in U-Boot: | ||
130 | + | ||
131 | + Device Tree Control > Provider for DTB for DT Control > Embedded DTB | ||
132 | + | ||
133 | +The BootROM of the Allwinner R40 loads U-Boot from the 8 KiB offset on the SD card. | ||
134 | +Let's create a bootable disk image: | ||
135 | + | ||
136 | +.. code-block:: bash | ||
137 | + | ||
138 | + $ dd if=/dev/zero of=sd.img bs=32M count=1 | ||
139 | + $ dd if=u-boot-sunxi-with-spl.bin of=sd.img bs=1k seek=8 conv=notrunc | ||
140 | + | ||
141 | +And then boot it. | ||
142 | + | ||
143 | +.. code-block:: bash | ||
144 | + | ||
145 | + $ qemu-system-arm -M bpim2u -nographic -sd sd.img | ||
146 | + | ||
147 | +Banana Pi M2U integration tests | ||
148 | +""""""""""""""""""""""""""""""" | ||
149 | + | ||
150 | +The Banana Pi M2U machine has several integration tests included. | ||
151 | +To run the whole set of tests, build QEMU from source and simply | ||
152 | +provide the following command: | ||
153 | + | ||
154 | +.. code-block:: bash | ||
155 | + | ||
156 | + $ cd qemu-build-dir | ||
157 | + $ AVOCADO_ALLOW_LARGE_STORAGE=yes tests/venv/bin/avocado \ | ||
158 | + --verbose --show=app,console run -t machine:bpim2u \ | ||
159 | + ../tests/avocado/boot_linux_console.py | ||
160 | diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst | ||
27 | index XXXXXXX..XXXXXXX 100644 | 161 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/hw/sd/ssi-sd.c | 162 | --- a/docs/system/target-arm.rst |
29 | +++ b/hw/sd/ssi-sd.c | 163 | +++ b/docs/system/target-arm.rst |
30 | @@ -XXX,XX +XXX,XX @@ static void ssi_sd_class_init(ObjectClass *klass, void *data) | 164 | @@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running |
31 | k->cs_polarity = SSI_CS_LOW; | 165 | arm/versatile |
32 | dc->vmsd = &vmstate_ssi_sd; | 166 | arm/vexpress |
33 | dc->reset = ssi_sd_reset; | 167 | arm/aspeed |
34 | + /* Reason: init() method uses drive_get_next() */ | 168 | + arm/bananapi_m2u.rst |
35 | + dc->user_creatable = false; | 169 | arm/sabrelite |
36 | } | 170 | arm/digic |
37 | 171 | arm/cubieboard | |
38 | static const TypeInfo ssi_sd_info = { | ||
39 | -- | 172 | -- |
40 | 2.19.1 | 173 | 2.34.1 |
41 | |||
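Both the avocado tests above (via image_pow2ceil_expand()) and the new documentation require the SD card image to be grown to a power-of-two size before it is handed to the bpim2u machine. The rounding rule itself is simple; a standalone sketch of it (plain C, not the avocado helper itself):

#include <assert.h>
#include <stdint.h>

/* Round a size up to the next power of two, e.g. 127 MiB -> 128 MiB. */
static uint64_t pow2ceil64(uint64_t n)
{
    uint64_t p = 1;

    while (p < n) {
        p <<= 1;
    }
    return p;
}

int main(void)
{
    assert(pow2ceil64(127 * 1024 * 1024ull) == 128 * 1024 * 1024ull);
    assert(pow2ceil64(8 * 1024) == 8 * 1024);   /* already a power of two */
    assert(pow2ceil64(1) == 1);
    return 0;
}

Only growing the image (never shrinking it) preserves its contents, which is why the documentation recommends qemu-img resize to a larger size rather than truncating the file.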
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Both arm and thumb2 division are controlled by the same ISAR field, | 3 | Document the meaning of exclusive_high in a big-endian context, |
4 | which takes care of the arm implies thumb case. Having M imply | 4 | and why we can't change it now. |
5 | thumb2 division was wrong for cortex-m0, which is v6m and does not | ||
6 | have thumb2 at all, much less thumb2 division. | ||
7 | 5 | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Message-id: 20181016223115.24100-5-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-2-richard.henderson@linaro.org |
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 10 | --- |
14 | target/arm/cpu.h | 12 ++++++++++-- | 11 | target/arm/cpu.h | 8 ++++++++ |
15 | linux-user/elfload.c | 4 ++-- | 12 | 1 file changed, 8 insertions(+) |
16 | target/arm/cpu.c | 10 +--------- | ||
17 | target/arm/translate.c | 4 ++-- | ||
18 | 4 files changed, 15 insertions(+), 15 deletions(-) | ||
19 | 13 | ||
20 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 14 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
21 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/target/arm/cpu.h | 16 | --- a/target/arm/cpu.h |
23 | +++ b/target/arm/cpu.h | 17 | +++ b/target/arm/cpu.h |
24 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 18 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState { |
25 | ARM_FEATURE_VFP3, | 19 | uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ |
26 | ARM_FEATURE_VFP_FP16, | 20 | uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ |
27 | ARM_FEATURE_NEON, | 21 | } vfp; |
28 | - ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ | ||
29 | ARM_FEATURE_M, /* Microcontroller profile. */ | ||
30 | ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */ | ||
31 | ARM_FEATURE_THUMB2EE, | ||
32 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | ||
33 | ARM_FEATURE_V5, | ||
34 | ARM_FEATURE_STRONGARM, | ||
35 | ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ | ||
36 | - ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ | ||
37 | ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ | ||
38 | ARM_FEATURE_GENERIC_TIMER, | ||
39 | ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ | ||
40 | @@ -XXX,XX +XXX,XX @@ extern const uint64_t pred_esz_masks[4]; | ||
41 | /* | ||
42 | * 32-bit feature tests via id registers. | ||
43 | */ | ||
44 | +static inline bool isar_feature_thumb_div(const ARMISARegisters *id) | ||
45 | +{ | ||
46 | + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; | ||
47 | +} | ||
48 | + | 22 | + |
49 | +static inline bool isar_feature_arm_div(const ARMISARegisters *id) | 23 | uint64_t exclusive_addr; |
50 | +{ | 24 | uint64_t exclusive_val; |
51 | + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; | 25 | + /* |
52 | +} | 26 | + * Contains the 'val' for the second 64-bit register of LDXP, which comes |
53 | + | 27 | + * from the higher address, not the high part of a complete 128-bit value. |
54 | static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) | 28 | + * In some ways it might be more convenient to record the exclusive value |
55 | { | 29 | + * as the low and high halves of a 128 bit data value, but the current |
56 | return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; | 30 | + * semantics of these fields are baked into the migration format. |
57 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | 31 | + */ |
58 | index XXXXXXX..XXXXXXX 100644 | 32 | uint64_t exclusive_high; |
59 | --- a/linux-user/elfload.c | 33 | |
60 | +++ b/linux-user/elfload.c | 34 | /* iwMMXt coprocessor state. */ |
61 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | ||
62 | GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); | ||
63 | GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); | ||
64 | GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4); | ||
65 | - GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA); | ||
66 | - GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT); | ||
67 | + GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA); | ||
68 | + GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT); | ||
69 | /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c. | ||
70 | * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of | ||
71 | * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated | ||
72 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
73 | index XXXXXXX..XXXXXXX 100644 | ||
74 | --- a/target/arm/cpu.c | ||
75 | +++ b/target/arm/cpu.c | ||
76 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
77 | * Presence of EL2 itself is ARM_FEATURE_EL2, and of the | ||
78 | * Security Extensions is ARM_FEATURE_EL3. | ||
79 | */ | ||
80 | - set_feature(env, ARM_FEATURE_ARM_DIV); | ||
81 | + assert(cpu_isar_feature(arm_div, cpu)); | ||
82 | set_feature(env, ARM_FEATURE_LPAE); | ||
83 | set_feature(env, ARM_FEATURE_V7); | ||
84 | } | ||
85 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
86 | if (arm_feature(env, ARM_FEATURE_V5)) { | ||
87 | set_feature(env, ARM_FEATURE_V4T); | ||
88 | } | ||
89 | - if (arm_feature(env, ARM_FEATURE_M)) { | ||
90 | - set_feature(env, ARM_FEATURE_THUMB_DIV); | ||
91 | - } | ||
92 | - if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { | ||
93 | - set_feature(env, ARM_FEATURE_THUMB_DIV); | ||
94 | - } | ||
95 | if (arm_feature(env, ARM_FEATURE_VFP4)) { | ||
96 | set_feature(env, ARM_FEATURE_VFP3); | ||
97 | set_feature(env, ARM_FEATURE_VFP_FP16); | ||
98 | @@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj) | ||
99 | ARMCPU *cpu = ARM_CPU(obj); | ||
100 | |||
101 | set_feature(&cpu->env, ARM_FEATURE_V7); | ||
102 | - set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV); | ||
103 | - set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); | ||
104 | set_feature(&cpu->env, ARM_FEATURE_V7MP); | ||
105 | set_feature(&cpu->env, ARM_FEATURE_PMSA); | ||
106 | cpu->midr = 0x411fc153; /* r1p3 */ | ||
107 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
108 | index XXXXXXX..XXXXXXX 100644 | ||
109 | --- a/target/arm/translate.c | ||
110 | +++ b/target/arm/translate.c | ||
111 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) | ||
112 | case 1: | ||
113 | case 3: | ||
114 | /* SDIV, UDIV */ | ||
115 | - if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) { | ||
116 | + if (!dc_isar_feature(arm_div, s)) { | ||
117 | goto illegal_op; | ||
118 | } | ||
119 | if (((insn >> 5) & 7) || (rd != 15)) { | ||
120 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) | ||
121 | tmp2 = load_reg(s, rm); | ||
122 | if ((op & 0x50) == 0x10) { | ||
123 | /* sdiv, udiv */ | ||
124 | - if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) { | ||
125 | + if (!dc_isar_feature(thumb_div, s)) { | ||
126 | goto illegal_op; | ||
127 | } | ||
128 | if (op & 0x20) | ||
129 | -- | 35 | -- |
130 | 2.19.1 | 36 | 2.34.1 |
131 | |||
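The two predicates added above encode the architected meaning of ID_ISAR0.DIVIDE: 0 means no divide instructions, 1 means SDIV/UDIV in the Thumb encoding only, and 2 or more adds the Arm encoding as well. A standalone sketch of that decoding, using a hand-rolled field extract in place of QEMU's FIELD_EX32() (the sample register values are illustrative; real ID_ISAR0 values set other fields too):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* DIVIDE is the four-bit field at bits [27:24] of ID_ISAR0. */
static unsigned id_isar0_divide(uint32_t id_isar0)
{
    return (id_isar0 >> 24) & 0xf;
}

/* 1 or more: SDIV/UDIV present in the Thumb encoding. */
static bool has_thumb_div(uint32_t id_isar0)
{
    return id_isar0_divide(id_isar0) != 0;
}

/* 2 or more: SDIV/UDIV also present in the Arm encoding. */
static bool has_arm_div(uint32_t id_isar0)
{
    return id_isar0_divide(id_isar0) > 1;
}

int main(void)
{
    uint32_t m_profile = 1u << 24;   /* Thumb divide only */
    uint32_t a_profile = 2u << 24;   /* Arm and Thumb divide */

    assert(has_thumb_div(m_profile) && !has_arm_div(m_profile));
    assert(has_thumb_div(a_profile) && has_arm_div(a_profile));
    return 0;
}

Deriving both answers from the one ID-register field is what lets the patch drop the separate ARM_FEATURE_ARM_DIV/THUMB_DIV flags without changing behaviour.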
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20181016223115.24100-7-richard.henderson@linaro.org | 6 | Message-id: 20230530191438.411344-3-richard.henderson@linaro.org |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | --- | 8 | --- |
9 | target/arm/cpu.h | 6 +++++- | 9 | target/arm/cpu.h | 5 +++++ |
10 | linux-user/elfload.c | 2 +- | 10 | 1 file changed, 5 insertions(+) |
11 | target/arm/cpu.c | 4 ---- | ||
12 | target/arm/helper.c | 2 +- | ||
13 | target/arm/machine.c | 3 +-- | ||
14 | 5 files changed, 8 insertions(+), 9 deletions(-) | ||
15 | 11 | ||
16 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 12 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
17 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/cpu.h | 14 | --- a/target/arm/cpu.h |
19 | +++ b/target/arm/cpu.h | 15 | +++ b/target/arm/cpu.h |
20 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 16 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_st(const ARMISARegisters *id) |
21 | ARM_FEATURE_NEON, | 17 | return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; |
22 | ARM_FEATURE_M, /* Microcontroller profile. */ | ||
23 | ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */ | ||
24 | - ARM_FEATURE_THUMB2EE, | ||
25 | ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ | ||
26 | ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */ | ||
27 | ARM_FEATURE_V4T, | ||
28 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_jazelle(const ARMISARegisters *id) | ||
29 | return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; | ||
30 | } | 18 | } |
31 | 19 | ||
32 | +static inline bool isar_feature_t32ee(const ARMISARegisters *id) | 20 | +static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id) |
33 | +{ | 21 | +{ |
34 | + return FIELD_EX32(id->id_isar3, ID_ISAR3, T32EE) != 0; | 22 | + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0; |
35 | +} | 23 | +} |
36 | + | 24 | + |
37 | static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) | 25 | static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id) |
38 | { | 26 | { |
39 | return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; | 27 | return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0; |
40 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/linux-user/elfload.c | ||
43 | +++ b/linux-user/elfload.c | ||
44 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | ||
45 | GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); | ||
46 | GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP); | ||
47 | GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); | ||
48 | - GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); | ||
49 | + GET_FEATURE_ID(t32ee, ARM_HWCAP_ARM_THUMBEE); | ||
50 | GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); | ||
51 | GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); | ||
52 | GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); | ||
53 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/target/arm/cpu.c | ||
56 | +++ b/target/arm/cpu.c | ||
57 | @@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj) | ||
58 | set_feature(&cpu->env, ARM_FEATURE_V7); | ||
59 | set_feature(&cpu->env, ARM_FEATURE_VFP3); | ||
60 | set_feature(&cpu->env, ARM_FEATURE_NEON); | ||
61 | - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); | ||
62 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | ||
63 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
64 | cpu->midr = 0x410fc080; | ||
65 | @@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj) | ||
66 | set_feature(&cpu->env, ARM_FEATURE_VFP3); | ||
67 | set_feature(&cpu->env, ARM_FEATURE_VFP_FP16); | ||
68 | set_feature(&cpu->env, ARM_FEATURE_NEON); | ||
69 | - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); | ||
70 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
71 | /* Note that A9 supports the MP extensions even for | ||
72 | * A9UP and single-core A9MP (which are both different | ||
73 | @@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj) | ||
74 | set_feature(&cpu->env, ARM_FEATURE_V7VE); | ||
75 | set_feature(&cpu->env, ARM_FEATURE_VFP4); | ||
76 | set_feature(&cpu->env, ARM_FEATURE_NEON); | ||
77 | - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); | ||
78 | set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); | ||
79 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | ||
80 | set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); | ||
81 | @@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj) | ||
82 | set_feature(&cpu->env, ARM_FEATURE_V7VE); | ||
83 | set_feature(&cpu->env, ARM_FEATURE_VFP4); | ||
84 | set_feature(&cpu->env, ARM_FEATURE_NEON); | ||
85 | - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); | ||
86 | set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); | ||
87 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | ||
88 | set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); | ||
89 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
90 | index XXXXXXX..XXXXXXX 100644 | ||
91 | --- a/target/arm/helper.c | ||
92 | +++ b/target/arm/helper.c | ||
93 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | ||
94 | define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); | ||
95 | define_arm_cp_regs(cpu, vmsa_cp_reginfo); | ||
96 | } | ||
97 | - if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { | ||
98 | + if (cpu_isar_feature(t32ee, cpu)) { | ||
99 | define_arm_cp_regs(cpu, t2ee_cp_reginfo); | ||
100 | } | ||
101 | if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { | ||
102 | diff --git a/target/arm/machine.c b/target/arm/machine.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/target/arm/machine.c | ||
105 | +++ b/target/arm/machine.c | ||
106 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m = { | ||
107 | static bool thumb2ee_needed(void *opaque) | ||
108 | { | ||
109 | ARMCPU *cpu = opaque; | ||
110 | - CPUARMState *env = &cpu->env; | ||
111 | |||
112 | - return arm_feature(env, ARM_FEATURE_THUMB2EE); | ||
113 | + return cpu_isar_feature(t32ee, cpu); | ||
114 | } | ||
115 | |||
116 | static const VMStateDescription vmstate_thumb2ee = { | ||
117 | -- | 28 | -- |
118 | 2.19.1 | 29 | 2.34.1 |
119 | 30 | ||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Move expanders for VBSL, VBIT, and VBIF from translate-a64.c. | 3 | Let finalize_memop_atom be the new basic function, with |
4 | finalize_memop and finalize_memop_pair testing FEAT_LSE2 | ||
5 | to apply the appropriate atomicity. | ||
4 | 6 | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20181011205206.3552-9-richard.henderson@linaro.org | 10 | Message-id: 20230530191438.411344-4-richard.henderson@linaro.org |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 12 | --- |
10 | target/arm/translate.h | 6 ++ | 13 | target/arm/tcg/translate.h | 39 +++++++++++++++++++++++++++++----- |
11 | target/arm/translate-a64.c | 61 -------------- | 14 | target/arm/tcg/translate-a64.c | 2 ++ |
12 | target/arm/translate.c | 162 +++++++++++++++++++++++++++---------- | 15 | target/arm/tcg/translate.c | 1 + |
13 | 3 files changed, 124 insertions(+), 105 deletions(-) | 16 | 3 files changed, 37 insertions(+), 5 deletions(-) |
14 | 17 | ||
15 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 18 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate.h | 20 | --- a/target/arm/tcg/translate.h |
18 | +++ b/target/arm/translate.h | 21 | +++ b/target/arm/tcg/translate.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void) | 22 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { |
20 | return ret; | 23 | uint64_t features; /* CPU features bits */ |
24 | bool aarch64; | ||
25 | bool thumb; | ||
26 | + bool lse2; | ||
27 | /* Because unallocated encodings generate different exception syndrome | ||
28 | * information from traps due to FP being disabled, we can't do a single | ||
29 | * "is fp access disabled" check at a high level in the decode tree. | ||
30 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) | ||
21 | } | 31 | } |
22 | 32 | ||
23 | + | 33 | /** |
24 | +/* Vector operations shared between ARM and AArch64. */ | 34 | - * finalize_memop: |
25 | +extern const GVecGen3 bsl_op; | 35 | + * finalize_memop_atom: |
26 | +extern const GVecGen3 bit_op; | 36 | * @s: DisasContext |
27 | +extern const GVecGen3 bif_op; | 37 | * @opc: size+sign+align of the memory operation |
28 | + | 38 | + * @atom: atomicity of the memory operation |
29 | /* | 39 | * |
30 | * Forward to the isar_feature_* tests given a DisasContext pointer. | 40 | - * Build the complete MemOp for a memory operation, including alignment |
41 | - * and endianness. | ||
42 | + * Build the complete MemOp for a memory operation, including alignment, | ||
43 | + * endianness, and atomicity. | ||
44 | * | ||
45 | * If (op & MO_AMASK) then the operation already contains the required | ||
46 | * alignment, e.g. for AccType_ATOMIC. Otherwise, this an optionally | ||
47 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) | ||
48 | * and this is applied here. Note that there is no way to indicate that | ||
49 | * no alignment should ever be enforced; this must be handled manually. | ||
31 | */ | 50 | */ |
32 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 51 | -static inline MemOp finalize_memop(DisasContext *s, MemOp opc) |
33 | index XXXXXXX..XXXXXXX 100644 | 52 | +static inline MemOp finalize_memop_atom(DisasContext *s, MemOp opc, MemOp atom) |
34 | --- a/target/arm/translate-a64.c | 53 | { |
35 | +++ b/target/arm/translate-a64.c | 54 | if (s->align_mem && !(opc & MO_AMASK)) { |
36 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) | 55 | opc |= MO_ALIGN; |
37 | } | 56 | } |
38 | } | 57 | - return opc | s->be_data; |
39 | 58 | + return opc | atom | s->be_data; | |
40 | -static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | ||
41 | -{ | ||
42 | - tcg_gen_xor_i64(rn, rn, rm); | ||
43 | - tcg_gen_and_i64(rn, rn, rd); | ||
44 | - tcg_gen_xor_i64(rd, rm, rn); | ||
45 | -} | ||
46 | - | ||
47 | -static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | ||
48 | -{ | ||
49 | - tcg_gen_xor_i64(rn, rn, rd); | ||
50 | - tcg_gen_and_i64(rn, rn, rm); | ||
51 | - tcg_gen_xor_i64(rd, rd, rn); | ||
52 | -} | ||
53 | - | ||
54 | -static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | ||
55 | -{ | ||
56 | - tcg_gen_xor_i64(rn, rn, rd); | ||
57 | - tcg_gen_andc_i64(rn, rn, rm); | ||
58 | - tcg_gen_xor_i64(rd, rd, rn); | ||
59 | -} | ||
60 | - | ||
61 | -static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | ||
62 | -{ | ||
63 | - tcg_gen_xor_vec(vece, rn, rn, rm); | ||
64 | - tcg_gen_and_vec(vece, rn, rn, rd); | ||
65 | - tcg_gen_xor_vec(vece, rd, rm, rn); | ||
66 | -} | ||
67 | - | ||
68 | -static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | ||
69 | -{ | ||
70 | - tcg_gen_xor_vec(vece, rn, rn, rd); | ||
71 | - tcg_gen_and_vec(vece, rn, rn, rm); | ||
72 | - tcg_gen_xor_vec(vece, rd, rd, rn); | ||
73 | -} | ||
74 | - | ||
75 | -static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | ||
76 | -{ | ||
77 | - tcg_gen_xor_vec(vece, rn, rn, rd); | ||
78 | - tcg_gen_andc_vec(vece, rn, rn, rm); | ||
79 | - tcg_gen_xor_vec(vece, rd, rd, rn); | ||
80 | -} | ||
81 | - | ||
82 | /* Logic op (opcode == 3) subgroup of C3.6.16. */ | ||
83 | static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) | ||
84 | { | ||
85 | - static const GVecGen3 bsl_op = { | ||
86 | - .fni8 = gen_bsl_i64, | ||
87 | - .fniv = gen_bsl_vec, | ||
88 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
89 | - .load_dest = true | ||
90 | - }; | ||
91 | - static const GVecGen3 bit_op = { | ||
92 | - .fni8 = gen_bit_i64, | ||
93 | - .fniv = gen_bit_vec, | ||
94 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
95 | - .load_dest = true | ||
96 | - }; | ||
97 | - static const GVecGen3 bif_op = { | ||
98 | - .fni8 = gen_bif_i64, | ||
99 | - .fniv = gen_bif_vec, | ||
100 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
101 | - .load_dest = true | ||
102 | - }; | ||
103 | - | ||
104 | int rd = extract32(insn, 0, 5); | ||
105 | int rn = extract32(insn, 5, 5); | ||
106 | int rm = extract32(insn, 16, 5); | ||
107 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
108 | index XXXXXXX..XXXXXXX 100644 | ||
109 | --- a/target/arm/translate.c | ||
110 | +++ b/target/arm/translate.c | ||
111 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | -/* Bitwise select. dest = c ? t : f. Clobbers T and F. */ | ||
116 | -static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c) | ||
117 | -{ | ||
118 | - tcg_gen_and_i32(t, t, c); | ||
119 | - tcg_gen_andc_i32(f, f, c); | ||
120 | - tcg_gen_or_i32(dest, t, f); | ||
121 | -} | ||
122 | - | ||
123 | static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src) | ||
124 | { | ||
125 | switch (size) { | ||
126 | @@ -XXX,XX +XXX,XX @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, | ||
127 | return 1; | ||
128 | } | ||
129 | |||
130 | +/* | ||
131 | + * Expanders for VBitOps_VBIF, VBIT, VBSL. | ||
132 | + */ | ||
133 | +static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | ||
134 | +{ | ||
135 | + tcg_gen_xor_i64(rn, rn, rm); | ||
136 | + tcg_gen_and_i64(rn, rn, rd); | ||
137 | + tcg_gen_xor_i64(rd, rm, rn); | ||
138 | +} | 59 | +} |
139 | + | 60 | + |
140 | +static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | 61 | +/** |
62 | + * finalize_memop: | ||
63 | + * @s: DisasContext | ||
64 | + * @opc: size+sign+align of the memory operation | ||
65 | + * | ||
66 | + * Like finalize_memop_atom, but with default atomicity. | ||
67 | + */ | ||
68 | +static inline MemOp finalize_memop(DisasContext *s, MemOp opc) | ||
141 | +{ | 69 | +{ |
142 | + tcg_gen_xor_i64(rn, rn, rd); | 70 | + MemOp atom = s->lse2 ? MO_ATOM_WITHIN16 : MO_ATOM_IFALIGN; |
143 | + tcg_gen_and_i64(rn, rn, rm); | 71 | + return finalize_memop_atom(s, opc, atom); |
144 | + tcg_gen_xor_i64(rd, rd, rn); | ||
145 | +} | 72 | +} |
146 | + | 73 | + |
147 | +static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) | 74 | +/** |
75 | + * finalize_memop_pair: | ||
76 | + * @s: DisasContext | ||
77 | + * @opc: size+sign+align of the memory operation | ||
78 | + * | ||
79 | + * Like finalize_memop_atom, but with atomicity for a pair. | ||
80 | + * C.f. Pseudocode for Mem[], operand ispair. | ||
81 | + */ | ||
82 | +static inline MemOp finalize_memop_pair(DisasContext *s, MemOp opc) | ||
148 | +{ | 83 | +{ |
149 | + tcg_gen_xor_i64(rn, rn, rd); | 84 | + MemOp atom = s->lse2 ? MO_ATOM_WITHIN16_PAIR : MO_ATOM_IFALIGN_PAIR; |
150 | + tcg_gen_andc_i64(rn, rn, rm); | 85 | + return finalize_memop_atom(s, opc, atom); |
151 | + tcg_gen_xor_i64(rd, rd, rn); | 86 | } |
152 | +} | 87 | |
88 | /** | ||
89 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
90 | index XXXXXXX..XXXXXXX 100644 | ||
91 | --- a/target/arm/tcg/translate-a64.c | ||
92 | +++ b/target/arm/tcg/translate-a64.c | ||
93 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | ||
94 | tcg_debug_assert(dc->tbid & 1); | ||
95 | #endif | ||
96 | |||
97 | + dc->lse2 = dc_isar_feature(aa64_lse2, dc); | ||
153 | + | 98 | + |
154 | +static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | 99 | /* Single step state. The code-generation logic here is: |
155 | +{ | 100 | * SS_ACTIVE == 0: |
156 | + tcg_gen_xor_vec(vece, rn, rn, rm); | 101 | * generate code with no special handling for single-stepping (except |
157 | + tcg_gen_and_vec(vece, rn, rn, rd); | 102 | diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c |
158 | + tcg_gen_xor_vec(vece, rd, rm, rn); | 103 | index XXXXXXX..XXXXXXX 100644 |
159 | +} | 104 | --- a/target/arm/tcg/translate.c |
160 | + | 105 | +++ b/target/arm/tcg/translate.c |
161 | +static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | 106 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) |
162 | +{ | 107 | dc->sme_trap_nonstreaming = |
163 | + tcg_gen_xor_vec(vece, rn, rn, rd); | 108 | EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); |
164 | + tcg_gen_and_vec(vece, rn, rn, rm); | 109 | } |
165 | + tcg_gen_xor_vec(vece, rd, rd, rn); | 110 | + dc->lse2 = false; /* applies only to aarch64 */ |
166 | +} | 111 | dc->cp_regs = cpu->cp_regs; |
167 | + | 112 | dc->features = env->features; |
168 | +static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) | 113 | |
169 | +{ | ||
170 | + tcg_gen_xor_vec(vece, rn, rn, rd); | ||
171 | + tcg_gen_andc_vec(vece, rn, rn, rm); | ||
172 | + tcg_gen_xor_vec(vece, rd, rd, rn); | ||
173 | +} | ||
174 | + | ||
175 | +const GVecGen3 bsl_op = { | ||
176 | + .fni8 = gen_bsl_i64, | ||
177 | + .fniv = gen_bsl_vec, | ||
178 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
179 | + .load_dest = true | ||
180 | +}; | ||
181 | + | ||
182 | +const GVecGen3 bit_op = { | ||
183 | + .fni8 = gen_bit_i64, | ||
184 | + .fniv = gen_bit_vec, | ||
185 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
186 | + .load_dest = true | ||
187 | +}; | ||
188 | + | ||
189 | +const GVecGen3 bif_op = { | ||
190 | + .fni8 = gen_bif_i64, | ||
191 | + .fniv = gen_bif_vec, | ||
192 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
193 | + .load_dest = true | ||
194 | +}; | ||
195 | + | ||
196 | + | ||
197 | /* Translate a NEON data processing instruction. Return nonzero if the | ||
198 | instruction is invalid. | ||
199 | We process data in a mixture of 32-bit and 64-bit chunks. | ||
200 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
201 | { | ||
202 | int op; | ||
203 | int q; | ||
204 | - int rd, rn, rm; | ||
205 | + int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs; | ||
206 | int size; | ||
207 | int shift; | ||
208 | int pass; | ||
209 | int count; | ||
210 | int pairwise; | ||
211 | int u; | ||
212 | + int vec_size; | ||
213 | uint32_t imm, mask; | ||
214 | TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; | ||
215 | TCGv_ptr ptr1, ptr2, ptr3; | ||
216 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
217 | VFP_DREG_N(rn, insn); | ||
218 | VFP_DREG_M(rm, insn); | ||
219 | size = (insn >> 20) & 3; | ||
220 | + vec_size = q ? 16 : 8; | ||
221 | + rd_ofs = neon_reg_offset(rd, 0); | ||
222 | + rn_ofs = neon_reg_offset(rn, 0); | ||
223 | + rm_ofs = neon_reg_offset(rm, 0); | ||
224 | + | ||
225 | if ((insn & (1 << 23)) == 0) { | ||
226 | /* Three register same length. */ | ||
227 | op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); | ||
228 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
229 | q, rd, rn, rm); | ||
230 | } | ||
231 | return 1; | ||
232 | + | ||
233 | + case NEON_3R_LOGIC: /* Logic ops. */ | ||
234 | + switch ((u << 2) | size) { | ||
235 | + case 0: /* VAND */ | ||
236 | + tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs, | ||
237 | + vec_size, vec_size); | ||
238 | + break; | ||
239 | + case 1: /* VBIC */ | ||
240 | + tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs, | ||
241 | + vec_size, vec_size); | ||
242 | + break; | ||
243 | + case 2: | ||
244 | + if (rn == rm) { | ||
245 | + /* VMOV */ | ||
246 | + tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size); | ||
247 | + } else { | ||
248 | + /* VORR */ | ||
249 | + tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs, | ||
250 | + vec_size, vec_size); | ||
251 | + } | ||
252 | + break; | ||
253 | + case 3: /* VORN */ | ||
254 | + tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs, | ||
255 | + vec_size, vec_size); | ||
256 | + break; | ||
257 | + case 4: /* VEOR */ | ||
258 | + tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs, | ||
259 | + vec_size, vec_size); | ||
260 | + break; | ||
261 | + case 5: /* VBSL */ | ||
262 | + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, | ||
263 | + vec_size, vec_size, &bsl_op); | ||
264 | + break; | ||
265 | + case 6: /* VBIT */ | ||
266 | + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, | ||
267 | + vec_size, vec_size, &bit_op); | ||
268 | + break; | ||
269 | + case 7: /* VBIF */ | ||
270 | + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, | ||
271 | + vec_size, vec_size, &bif_op); | ||
272 | + break; | ||
273 | + } | ||
274 | + return 0; | ||
275 | } | ||
276 | - if (size == 3 && op != NEON_3R_LOGIC) { | ||
277 | + if (size == 3) { | ||
278 | /* 64-bit element instructions. */ | ||
279 | for (pass = 0; pass < (q ? 2 : 1); pass++) { | ||
280 | neon_load_reg64(cpu_V0, rn + pass); | ||
281 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
282 | case NEON_3R_VRHADD: | ||
283 | GEN_NEON_INTEGER_OP(rhadd); | ||
284 | break; | ||
285 | - case NEON_3R_LOGIC: /* Logic ops. */ | ||
286 | - switch ((u << 2) | size) { | ||
287 | - case 0: /* VAND */ | ||
288 | - tcg_gen_and_i32(tmp, tmp, tmp2); | ||
289 | - break; | ||
290 | - case 1: /* BIC */ | ||
291 | - tcg_gen_andc_i32(tmp, tmp, tmp2); | ||
292 | - break; | ||
293 | - case 2: /* VORR */ | ||
294 | - tcg_gen_or_i32(tmp, tmp, tmp2); | ||
295 | - break; | ||
296 | - case 3: /* VORN */ | ||
297 | - tcg_gen_orc_i32(tmp, tmp, tmp2); | ||
298 | - break; | ||
299 | - case 4: /* VEOR */ | ||
300 | - tcg_gen_xor_i32(tmp, tmp, tmp2); | ||
301 | - break; | ||
302 | - case 5: /* VBSL */ | ||
303 | - tmp3 = neon_load_reg(rd, pass); | ||
304 | - gen_neon_bsl(tmp, tmp, tmp2, tmp3); | ||
305 | - tcg_temp_free_i32(tmp3); | ||
306 | - break; | ||
307 | - case 6: /* VBIT */ | ||
308 | - tmp3 = neon_load_reg(rd, pass); | ||
309 | - gen_neon_bsl(tmp, tmp, tmp3, tmp2); | ||
310 | - tcg_temp_free_i32(tmp3); | ||
311 | - break; | ||
312 | - case 7: /* VBIF */ | ||
313 | - tmp3 = neon_load_reg(rd, pass); | ||
314 | - gen_neon_bsl(tmp, tmp3, tmp, tmp2); | ||
315 | - tcg_temp_free_i32(tmp3); | ||
316 | - break; | ||
317 | - } | ||
318 | - break; | ||
319 | case NEON_3R_VHSUB: | ||
320 | GEN_NEON_INTEGER_OP(hsub); | ||
321 | break; | ||
322 | -- | 114 | -- |
323 | 2.19.1 | 115 | 2.34.1 |
324 | 116 | ||
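The bit-select expanders moved above use an xor/and/xor formulation instead of the naive (t & c) | (f & ~c): gen_bsl_i64() computes rd = rm ^ ((rn ^ rm) & rd), three operations and no explicit NOT. A standalone check of the identity (plain C, illustrative only):

#include <assert.h>
#include <stdint.h>

/* Naive bitwise select: take t where c has a 1 bit, else f. */
static uint64_t bsl_naive(uint64_t c, uint64_t t, uint64_t f)
{
    return (t & c) | (f & ~c);
}

/* The xor form used by gen_bsl_i64(), with c in rd, t in rn, f in rm. */
static uint64_t bsl_xor(uint64_t c, uint64_t t, uint64_t f)
{
    return f ^ ((t ^ f) & c);
}

int main(void)
{
    /* A few arbitrary patterns; the identity holds bit by bit for any input. */
    uint64_t v[4] = { 0, ~(uint64_t)0, 0x0123456789abcdefull, 0xf0f00f0f3c3cc3c3ull };

    for (int c = 0; c < 4; c++) {
        for (int t = 0; t < 4; t++) {
            for (int f = 0; f < 4; f++) {
                assert(bsl_naive(v[c], v[t], v[f]) == bsl_xor(v[c], v[t], v[f]));
            }
        }
    }
    return 0;
}

The VBIT and VBIF expanders apply the same identity with the roles of the three registers permuted, which is why all three can share the load_dest gvec pattern.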
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | While we don't require 16-byte atomicity here, using a single larger | ||
4 | load simplifies the code, and makes it a closer match to STXP. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20181011205206.3552-8-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-5-richard.henderson@linaro.org |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate.c | 67 ++++++++++++++++++++++++------------------ | 11 | target/arm/tcg/translate-a64.c | 31 ++++++++++++++++++++----------- |
9 | 1 file changed, 39 insertions(+), 28 deletions(-) | 12 | 1 file changed, 20 insertions(+), 11 deletions(-) |
10 | 13 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 14 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
12 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 16 | --- a/target/arm/tcg/translate-a64.c |
14 | +++ b/target/arm/translate.c | 17 | +++ b/target/arm/tcg/translate-a64.c |
15 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 18 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, |
16 | return 1; | 19 | TCGv_i64 addr, int size, bool is_pair) |
20 | { | ||
21 | int idx = get_mem_index(s); | ||
22 | - MemOp memop = s->be_data; | ||
23 | + MemOp memop; | ||
24 | |||
25 | g_assert(size <= 3); | ||
26 | if (is_pair) { | ||
27 | g_assert(size >= 2); | ||
28 | if (size == 2) { | ||
29 | /* The pair must be single-copy atomic for the doubleword. */ | ||
30 | - memop |= MO_64 | MO_ALIGN; | ||
31 | + memop = finalize_memop(s, MO_64 | MO_ALIGN); | ||
32 | tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop); | ||
33 | if (s->be_data == MO_LE) { | ||
34 | tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32); | ||
35 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
36 | tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32); | ||
17 | } | 37 | } |
18 | } else { /* (insn & 0x00380080) == 0 */ | 38 | } else { |
19 | - int invert; | 39 | - /* The pair must be single-copy atomic for *each* doubleword, not |
20 | + int invert, reg_ofs, vec_size; | 40 | - the entire quadword, however it must be quadword aligned. */ |
21 | + | 41 | - memop |= MO_64; |
22 | if (q && (rd & 1)) { | 42 | - tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, |
23 | return 1; | 43 | - memop | MO_ALIGN_16); |
24 | } | 44 | + /* |
25 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 45 | + * The pair must be single-copy atomic for *each* doubleword, not |
26 | break; | 46 | + * the entire quadword, however it must be quadword aligned. |
27 | case 14: | 47 | + * Expose the complete load to tcg, for ease of tlb lookup, |
28 | imm |= (imm << 8) | (imm << 16) | (imm << 24); | 48 | + * but indicate that only 8-byte atomicity is required. |
29 | - if (invert) | 49 | + */ |
30 | + if (invert) { | 50 | + TCGv_i128 t16 = tcg_temp_new_i128(); |
31 | imm = ~imm; | 51 | |
32 | + } | 52 | - TCGv_i64 addr2 = tcg_temp_new_i64(); |
33 | break; | 53 | - tcg_gen_addi_i64(addr2, addr, 8); |
34 | case 15: | 54 | - tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop); |
35 | if (invert) { | 55 | + memop = finalize_memop_atom(s, MO_128 | MO_ALIGN_16, |
36 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 56 | + MO_ATOM_IFALIGN_PAIR); |
37 | | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); | 57 | + tcg_gen_qemu_ld_i128(t16, addr, idx, memop); |
38 | break; | 58 | |
39 | } | 59 | + if (s->be_data == MO_LE) { |
40 | - if (invert) | 60 | + tcg_gen_extr_i128_i64(cpu_exclusive_val, |
41 | + if (invert) { | 61 | + cpu_exclusive_high, t16); |
42 | imm = ~imm; | 62 | + } else { |
63 | + tcg_gen_extr_i128_i64(cpu_exclusive_high, | ||
64 | + cpu_exclusive_val, t16); | ||
43 | + } | 65 | + } |
44 | 66 | tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); | |
45 | - for (pass = 0; pass < (q ? 4 : 2); pass++) { | 67 | tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high); |
46 | - if (op & 1 && op < 12) { | ||
47 | - tmp = neon_load_reg(rd, pass); | ||
48 | - if (invert) { | ||
49 | - /* The immediate value has already been inverted, so | ||
50 | - BIC becomes AND. */ | ||
51 | - tcg_gen_andi_i32(tmp, tmp, imm); | ||
52 | - } else { | ||
53 | - tcg_gen_ori_i32(tmp, tmp, imm); | ||
54 | - } | ||
55 | + reg_ofs = neon_reg_offset(rd, 0); | ||
56 | + vec_size = q ? 16 : 8; | ||
57 | + | ||
58 | + if (op & 1 && op < 12) { | ||
59 | + if (invert) { | ||
60 | + /* The immediate value has already been inverted, | ||
61 | + * so BIC becomes AND. | ||
62 | + */ | ||
63 | + tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm, | ||
64 | + vec_size, vec_size); | ||
65 | } else { | ||
66 | - /* VMOV, VMVN. */ | ||
67 | - tmp = tcg_temp_new_i32(); | ||
68 | - if (op == 14 && invert) { | ||
69 | - int n; | ||
70 | - uint32_t val; | ||
71 | - val = 0; | ||
72 | - for (n = 0; n < 4; n++) { | ||
73 | - if (imm & (1 << (n + (pass & 1) * 4))) | ||
74 | - val |= 0xff << (n * 8); | ||
75 | - } | ||
76 | - tcg_gen_movi_i32(tmp, val); | ||
77 | - } else { | ||
78 | - tcg_gen_movi_i32(tmp, imm); | ||
79 | - } | ||
80 | + tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm, | ||
81 | + vec_size, vec_size); | ||
82 | + } | ||
83 | + } else { | ||
84 | + /* VMOV, VMVN. */ | ||
85 | + if (op == 14 && invert) { | ||
86 | + TCGv_i64 t64 = tcg_temp_new_i64(); | ||
87 | + | ||
88 | + for (pass = 0; pass <= q; ++pass) { | ||
89 | + uint64_t val = 0; | ||
90 | + int n; | ||
91 | + | ||
92 | + for (n = 0; n < 8; n++) { | ||
93 | + if (imm & (1 << (n + pass * 8))) { | ||
94 | + val |= 0xffull << (n * 8); | ||
95 | + } | ||
96 | + } | ||
97 | + tcg_gen_movi_i64(t64, val); | ||
98 | + neon_store_reg64(t64, rd + pass); | ||
99 | + } | ||
100 | + tcg_temp_free_i64(t64); | ||
101 | + } else { | ||
102 | + tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm); | ||
103 | } | ||
104 | - neon_store_reg(rd, pass, tmp); | ||
105 | } | ||
106 | } | 68 | } |
107 | } else { /* (insn & 0x00800010 == 0x00800000) */ | 69 | } else { |
70 | - memop |= size | MO_ALIGN; | ||
71 | + memop = finalize_memop(s, size | MO_ALIGN); | ||
72 | tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop); | ||
73 | tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); | ||
74 | } | ||
108 | -- | 75 | -- |
109 | 2.19.1 | 76 | 2.34.1 |
110 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 3 | While we don't require 16-byte atomicity here, using a single larger |
4 | operation simplifies the code. Introduce finalize_memop_asimd for this. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20181016223115.24100-9-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-6-richard.henderson@linaro.org |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
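The left-hand patch makes the ID registers the source of truth, so a feature test reduces to extracting a four-bit ID field and comparing it; a standalone sketch (field position as I read the Arm ARM; this is not the QEMU FIELD_EX64 macro):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* ID_AA64PFR0_EL1.FP is a four-bit field at bits [19:16]; value 1
     * advertises half-precision support, 0xf means no FP at all. */
    static bool aa64_fp16_supported(uint64_t id_aa64pfr0)
    {
        return ((id_aa64pfr0 >> 16) & 0xf) == 1;
    }

    int main(void)
    {
        printf("%d\n", aa64_fp16_supported(1ull << 16));   /* 1 */
        return 0;
    }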
8 | --- | 10 | --- |
9 | target/arm/cpu.h | 17 +++++++++++++++- | 11 | target/arm/tcg/translate.h | 24 +++++++++++++++++++++++ |
10 | linux-user/elfload.c | 6 +----- | 12 | target/arm/tcg/translate-a64.c | 35 +++++++++++----------------------- |
11 | target/arm/cpu64.c | 16 ++++++++------- | 13 | 2 files changed, 35 insertions(+), 24 deletions(-) |
12 | target/arm/helper.c | 2 +- | ||
13 | target/arm/translate-a64.c | 40 +++++++++++++++++++------------------- | ||
14 | target/arm/translate.c | 6 +++--- | ||
15 | 6 files changed, 50 insertions(+), 37 deletions(-) | ||
16 | 14 | ||
17 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/tcg/translate.h |
20 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/tcg/translate.h |
21 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 19 | @@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop_pair(DisasContext *s, MemOp opc) |
22 | ARM_FEATURE_PMU, /* has PMU support */ | 20 | return finalize_memop_atom(s, opc, atom); |
23 | ARM_FEATURE_VBAR, /* has cp15 VBAR */ | ||
24 | ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ | ||
25 | - ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ | ||
26 | ARM_FEATURE_M_MAIN, /* M profile Main Extension */ | ||
27 | }; | ||
28 | |||
29 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) | ||
30 | return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; | ||
31 | } | 21 | } |
32 | 22 | ||
33 | +static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) | 23 | +/** |
24 | + * finalize_memop_asimd: | ||
25 | + * @s: DisasContext | ||
26 | + * @opc: size+sign+align of the memory operation | ||
27 | + * | ||
28 | + * Like finalize_memop_atom, but with atomicity of AccessType_ASIMD. | ||
29 | + */ | ||
30 | +static inline MemOp finalize_memop_asimd(DisasContext *s, MemOp opc) | ||
34 | +{ | 31 | +{ |
35 | + /* | 32 | + /* |
36 | + * This is a placeholder for use by VCMA until the rest of | 33 | + * In the pseudocode for Mem[], with AccessType_ASIMD, size == 16, |
37 | + * the ARMv8.2-FP16 extension is implemented for aa32 mode. | 34 | + * if IsAligned(8), the first case provides separate atomicity for |
38 | + * At which point we can properly set and check MVFR1.FPHP. | 35 | + * the pair of 64-bit accesses. If !IsAligned(8), the middle cases |
36 | + * do not apply, and we're left with the final case of no atomicity. | ||
37 | + * Thus MO_ATOM_IFALIGN_PAIR. | ||
38 | + * | ||
39 | + * For other sizes, normal LSE2 rules apply. | ||
39 | + */ | 40 | + */ |
40 | + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; | 41 | + if ((opc & MO_SIZE) == MO_128) { |
42 | + return finalize_memop_atom(s, opc, MO_ATOM_IFALIGN_PAIR); | ||
43 | + } | ||
44 | + return finalize_memop(s, opc); | ||
41 | +} | 45 | +} |
42 | + | 46 | + |
43 | /* | 47 | /** |
44 | * 64-bit feature tests via id registers. | 48 | * asimd_imm_const: Expand an encoded SIMD constant value |
45 | */ | 49 | * |
46 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) | 50 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
47 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; | 51 | index XXXXXXX..XXXXXXX 100644 |
52 | --- a/target/arm/tcg/translate-a64.c | ||
53 | +++ b/target/arm/tcg/translate-a64.c | ||
54 | @@ -XXX,XX +XXX,XX @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) | ||
55 | { | ||
56 | /* This writes the bottom N bits of a 128 bit wide vector to memory */ | ||
57 | TCGv_i64 tmplo = tcg_temp_new_i64(); | ||
58 | - MemOp mop; | ||
59 | + MemOp mop = finalize_memop_asimd(s, size); | ||
60 | |||
61 | tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64)); | ||
62 | |||
63 | - if (size < 4) { | ||
64 | - mop = finalize_memop(s, size); | ||
65 | + if (size < MO_128) { | ||
66 | tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
67 | } else { | ||
68 | - bool be = s->be_data == MO_BE; | ||
69 | - TCGv_i64 tcg_hiaddr = tcg_temp_new_i64(); | ||
70 | TCGv_i64 tmphi = tcg_temp_new_i64(); | ||
71 | + TCGv_i128 t16 = tcg_temp_new_i128(); | ||
72 | |||
73 | tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx)); | ||
74 | + tcg_gen_concat_i64_i128(t16, tmplo, tmphi); | ||
75 | |||
76 | - mop = s->be_data | MO_UQ; | ||
77 | - tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), | ||
78 | - mop | (s->align_mem ? MO_ALIGN_16 : 0)); | ||
79 | - tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); | ||
80 | - tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr, | ||
81 | - get_mem_index(s), mop); | ||
82 | + tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop); | ||
83 | } | ||
48 | } | 84 | } |
49 | 85 | ||
50 | +static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) | 86 | @@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) |
51 | +{ | 87 | /* This always zero-extends and writes to a full 128 bit wide vector */ |
52 | + /* We always set the AdvSIMD and FP fields identically wrt FP16. */ | 88 | TCGv_i64 tmplo = tcg_temp_new_i64(); |
53 | + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; | 89 | TCGv_i64 tmphi = NULL; |
54 | +} | 90 | - MemOp mop; |
91 | + MemOp mop = finalize_memop_asimd(s, size); | ||
92 | |||
93 | - if (size < 4) { | ||
94 | - mop = finalize_memop(s, size); | ||
95 | + if (size < MO_128) { | ||
96 | tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
97 | } else { | ||
98 | - bool be = s->be_data == MO_BE; | ||
99 | - TCGv_i64 tcg_hiaddr; | ||
100 | + TCGv_i128 t16 = tcg_temp_new_i128(); | ||
55 | + | 101 | + |
56 | static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) | 102 | + tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop); |
57 | { | 103 | |
58 | return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; | 104 | tmphi = tcg_temp_new_i64(); |
59 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | 105 | - tcg_hiaddr = tcg_temp_new_i64(); |
60 | index XXXXXXX..XXXXXXX 100644 | 106 | - |
61 | --- a/linux-user/elfload.c | 107 | - mop = s->be_data | MO_UQ; |
62 | +++ b/linux-user/elfload.c | 108 | - tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s), |
63 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | 109 | - mop | (s->align_mem ? MO_ALIGN_16 : 0)); |
64 | hwcaps |= ARM_HWCAP_A64_ASIMD; | 110 | - tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); |
65 | 111 | - tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr, | |
66 | /* probe for the extra features */ | 112 | - get_mem_index(s), mop); |
67 | -#define GET_FEATURE(feat, hwcap) \ | 113 | + tcg_gen_extr_i128_i64(tmplo, tmphi, t16); |
68 | - do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) | ||
69 | #define GET_FEATURE_ID(feat, hwcap) \ | ||
70 | do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) | ||
71 | |||
72 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | ||
73 | GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); | ||
74 | GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); | ||
75 | GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); | ||
76 | - GET_FEATURE(ARM_FEATURE_V8_FP16, | ||
77 | - ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); | ||
78 | + GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); | ||
79 | GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); | ||
80 | GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); | ||
81 | GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); | ||
82 | GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); | ||
83 | GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); | ||
84 | |||
85 | -#undef GET_FEATURE | ||
86 | #undef GET_FEATURE_ID | ||
87 | |||
88 | return hwcaps; | ||
89 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | ||
90 | index XXXXXXX..XXXXXXX 100644 | ||
91 | --- a/target/arm/cpu64.c | ||
92 | +++ b/target/arm/cpu64.c | ||
93 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
94 | |||
95 | t = cpu->isar.id_aa64pfr0; | ||
96 | t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); | ||
97 | + t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); | ||
98 | + t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); | ||
99 | cpu->isar.id_aa64pfr0 = t; | ||
100 | |||
101 | /* Replicate the same data to the 32-bit id registers. */ | ||
102 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
103 | u = FIELD_DP32(u, ID_ISAR6, DP, 1); | ||
104 | cpu->isar.id_isar6 = u; | ||
105 | |||
106 | -#ifdef CONFIG_USER_ONLY | ||
107 | - /* We don't set these in system emulation mode for the moment, | ||
108 | - * since we don't correctly set the ID registers to advertise them, | ||
109 | - * and in some cases they're only available in AArch64 and not AArch32, | ||
110 | - * whereas the architecture requires them to be present in both if | ||
111 | - * present in either. | ||
112 | + /* | ||
113 | + * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet, | ||
114 | + * so do not set MVFR1.FPHP. Strictly speaking this is not legal, | ||
115 | + * but it is also not legal to enable SVE without support for FP16, | ||
116 | + * and enabling SVE in system mode is more useful in the short term. | ||
117 | */ | ||
118 | - set_feature(&cpu->env, ARM_FEATURE_V8_FP16); | ||
119 | + | ||
120 | +#ifdef CONFIG_USER_ONLY | ||
121 | /* For usermode -cpu max we can use a larger and more efficient DCZ | ||
122 | * blocksize since we don't have to follow what the hardware does. | ||
123 | */ | ||
124 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
125 | index XXXXXXX..XXXXXXX 100644 | ||
126 | --- a/target/arm/helper.c | ||
127 | +++ b/target/arm/helper.c | ||
128 | @@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) | ||
129 | uint32_t changed; | ||
130 | |||
131 | /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ | ||
132 | - if (!arm_feature(env, ARM_FEATURE_V8_FP16)) { | ||
133 | + if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) { | ||
134 | val &= ~FPCR_FZ16; | ||
135 | } | 114 | } |
136 | 115 | ||
137 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 116 | tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64)); |
138 | index XXXXXXX..XXXXXXX 100644 | ||
139 | --- a/target/arm/translate-a64.c | ||
140 | +++ b/target/arm/translate-a64.c | ||
141 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_compare(DisasContext *s, uint32_t insn) | ||
142 | break; | ||
143 | case 3: | ||
144 | size = MO_16; | ||
145 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
146 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
147 | break; | ||
148 | } | ||
149 | /* fallthru */ | ||
150 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) | ||
151 | break; | ||
152 | case 3: | ||
153 | size = MO_16; | ||
154 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
155 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
156 | break; | ||
157 | } | ||
158 | /* fallthru */ | ||
159 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) | ||
160 | break; | ||
161 | case 3: | ||
162 | sz = MO_16; | ||
163 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
164 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
165 | break; | ||
166 | } | ||
167 | /* fallthru */ | ||
168 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn) | ||
169 | handle_fp_1src_double(s, opcode, rd, rn); | ||
170 | break; | ||
171 | case 3: | ||
172 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
173 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
174 | unallocated_encoding(s); | ||
175 | return; | ||
176 | } | ||
177 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_2src(DisasContext *s, uint32_t insn) | ||
178 | handle_fp_2src_double(s, opcode, rd, rn, rm); | ||
179 | break; | ||
180 | case 3: | ||
181 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
182 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
183 | unallocated_encoding(s); | ||
184 | return; | ||
185 | } | ||
186 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) | ||
187 | handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra); | ||
188 | break; | ||
189 | case 3: | ||
190 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
191 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
192 | unallocated_encoding(s); | ||
193 | return; | ||
194 | } | ||
195 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) | ||
196 | break; | ||
197 | case 3: | ||
198 | sz = MO_16; | ||
199 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
200 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
201 | break; | ||
202 | } | ||
203 | /* fallthru */ | ||
204 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) | ||
205 | case 1: /* float64 */ | ||
206 | break; | ||
207 | case 3: /* float16 */ | ||
208 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
209 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
210 | break; | ||
211 | } | ||
212 | /* fallthru */ | ||
213 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) | ||
214 | break; | ||
215 | case 0x6: /* 16-bit float, 32-bit int */ | ||
216 | case 0xe: /* 16-bit float, 64-bit int */ | ||
217 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
218 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
219 | break; | ||
220 | } | ||
221 | /* fallthru */ | ||
222 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) | ||
223 | case 1: /* float64 */ | ||
224 | break; | ||
225 | case 3: /* float16 */ | ||
226 | - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
227 | + if (dc_isar_feature(aa64_fp16, s)) { | ||
228 | break; | ||
229 | } | ||
230 | /* fallthru */ | ||
231 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) | ||
232 | */ | ||
233 | is_min = extract32(size, 1, 1); | ||
234 | is_fp = true; | ||
235 | - if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
236 | + if (!is_u && dc_isar_feature(aa64_fp16, s)) { | ||
237 | size = 1; | ||
238 | } else if (!is_u || !is_q || extract32(size, 0, 1)) { | ||
239 | unallocated_encoding(s); | ||
240 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) | ||
241 | |||
242 | if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) { | ||
243 | /* Check for FMOV (vector, immediate) - half-precision */ | ||
244 | - if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) { | ||
245 | + if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) { | ||
246 | unallocated_encoding(s); | ||
247 | return; | ||
248 | } | ||
249 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) | ||
250 | case 0x2f: /* FMINP */ | ||
251 | /* FP op, size[0] is 32 or 64 bit*/ | ||
252 | if (!u) { | ||
253 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
254 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
255 | unallocated_encoding(s); | ||
256 | return; | ||
257 | } else { | ||
258 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, | ||
259 | size = MO_32; | ||
260 | } else if (immh & 2) { | ||
261 | size = MO_16; | ||
262 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
263 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
264 | unallocated_encoding(s); | ||
265 | return; | ||
266 | } | ||
267 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, | ||
268 | size = MO_32; | ||
269 | } else if (immh & 0x2) { | ||
270 | size = MO_16; | ||
271 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
272 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
273 | unallocated_encoding(s); | ||
274 | return; | ||
275 | } | ||
276 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, | ||
277 | return; | ||
278 | } | ||
279 | |||
280 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
281 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
282 | unallocated_encoding(s); | ||
283 | } | ||
284 | |||
285 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) | ||
286 | TCGv_ptr fpst; | ||
287 | bool pairwise = false; | ||
288 | |||
289 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
290 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
291 | unallocated_encoding(s); | ||
292 | return; | ||
293 | } | ||
294 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | ||
295 | case 0x1c: /* FCADD, #90 */ | ||
296 | case 0x1e: /* FCADD, #270 */ | ||
297 | if (size == 0 | ||
298 | - || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) | ||
299 | + || (size == 1 && !dc_isar_feature(aa64_fp16, s)) | ||
300 | || (size == 3 && !is_q)) { | ||
301 | unallocated_encoding(s); | ||
302 | return; | ||
303 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | ||
304 | bool need_fpst = true; | ||
305 | int rmode; | ||
306 | |||
307 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
308 | + if (!dc_isar_feature(aa64_fp16, s)) { | ||
309 | unallocated_encoding(s); | ||
310 | return; | ||
311 | } | ||
312 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
313 | } | ||
314 | break; | ||
315 | } | ||
316 | - if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
317 | + if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) { | ||
318 | unallocated_encoding(s); | ||
319 | return; | ||
320 | } | ||
321 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
322 | index XXXXXXX..XXXXXXX 100644 | ||
323 | --- a/target/arm/translate.c | ||
324 | +++ b/target/arm/translate.c | ||
325 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
326 | int size = extract32(insn, 20, 1); | ||
327 | data = extract32(insn, 23, 2); /* rot */ | ||
328 | if (!dc_isar_feature(aa32_vcma, s) | ||
329 | - || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { | ||
330 | + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { | ||
331 | return 1; | ||
332 | } | ||
333 | fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah; | ||
334 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
335 | int size = extract32(insn, 20, 1); | ||
336 | data = extract32(insn, 24, 1); /* rot */ | ||
337 | if (!dc_isar_feature(aa32_vcma, s) | ||
338 | - || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { | ||
339 | + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { | ||
340 | return 1; | ||
341 | } | ||
342 | fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh; | ||
343 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) | ||
344 | return 1; | ||
345 | } | ||
346 | if (size == 0) { | ||
347 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { | ||
348 | + if (!dc_isar_feature(aa32_fp16_arith, s)) { | ||
349 | return 1; | ||
350 | } | ||
351 | /* For fp16, rm is just Vm, and index is M. */ | ||
352 | -- | 117 | -- |
353 | 2.19.1 | 118 | 2.34.1 |
354 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | For a sequence of loads or stores from a single register, | 3 | This fixes a bug in that these two insns should have been using atomic |
4 | little-endian operations can be promoted to an 8-byte op. | 4 | 16-byte stores, since MTE is ARMv8.5 and LSE2 is mandatory from ARMv8.4. |
5 | This can reduce the number of operations by a factor of 8. | ||
6 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Message-id: 20181011205206.3552-5-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-7-richard.henderson@linaro.org |
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 10 | --- |
12 | target/arm/translate-a64.c | 66 +++++++++++++++++++++++--------------- | 11 | target/arm/tcg/translate-a64.c | 17 ++++++++++------- |
13 | 1 file changed, 40 insertions(+), 26 deletions(-) | 12 | 1 file changed, 10 insertions(+), 7 deletions(-) |
14 | 13 | ||
15 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 14 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
16 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate-a64.c | 16 | --- a/target/arm/tcg/translate-a64.c |
18 | +++ b/target/arm/translate-a64.c | 17 | +++ b/target/arm/tcg/translate-a64.c |
19 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, | 18 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn) |
20 | 19 | ||
21 | /* Store from vector register to memory */ | 20 | if (is_zero) { |
22 | static void do_vec_st(DisasContext *s, int srcidx, int element, | 21 | TCGv_i64 clean_addr = clean_data_tbi(s, addr); |
23 | - TCGv_i64 tcg_addr, int size) | 22 | - TCGv_i64 tcg_zero = tcg_constant_i64(0); |
24 | + TCGv_i64 tcg_addr, int size, TCGMemOp endian) | 23 | + TCGv_i64 zero64 = tcg_constant_i64(0); |
25 | { | 24 | + TCGv_i128 zero128 = tcg_temp_new_i128(); |
26 | - TCGMemOp memop = s->be_data + size; | 25 | int mem_index = get_mem_index(s); |
27 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | 26 | - int i, n = (1 + is_pair) << LOG2_TAG_GRANULE; |
28 | 27 | + MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN); | |
29 | read_vec_element(s, tcg_tmp, srcidx, element, size); | 28 | |
30 | - tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop); | 29 | - tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, |
31 | + tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); | 30 | - MO_UQ | MO_ALIGN_16); |
32 | 31 | - for (i = 8; i < n; i += 8) { | |
33 | tcg_temp_free_i64(tcg_tmp); | 32 | - tcg_gen_addi_i64(clean_addr, clean_addr, 8); |
34 | } | 33 | - tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ); |
35 | 34 | + tcg_gen_concat_i64_i128(zero128, zero64, zero64); | |
36 | /* Load from memory to vector register */ | ||
37 | static void do_vec_ld(DisasContext *s, int destidx, int element, | ||
38 | - TCGv_i64 tcg_addr, int size) | ||
39 | + TCGv_i64 tcg_addr, int size, TCGMemOp endian) | ||
40 | { | ||
41 | - TCGMemOp memop = s->be_data + size; | ||
42 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | ||
43 | |||
44 | - tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop); | ||
45 | + tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); | ||
46 | write_vec_element(s, tcg_tmp, destidx, element, size); | ||
47 | |||
48 | tcg_temp_free_i64(tcg_tmp); | ||
49 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
50 | bool is_postidx = extract32(insn, 23, 1); | ||
51 | bool is_q = extract32(insn, 30, 1); | ||
52 | TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes; | ||
53 | + TCGMemOp endian = s->be_data; | ||
54 | |||
55 | - int ebytes = 1 << size; | ||
56 | - int elements = (is_q ? 128 : 64) / (8 << size); | ||
57 | + int ebytes; /* bytes per element */ | ||
58 | + int elements; /* elements per vector */ | ||
59 | int rpt; /* num iterations */ | ||
60 | int selem; /* structure elements */ | ||
61 | int r; | ||
62 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
63 | gen_check_sp_alignment(s); | ||
64 | } | ||
65 | |||
66 | + /* For our purposes, bytes are always little-endian. */ | ||
67 | + if (size == 0) { | ||
68 | + endian = MO_LE; | ||
69 | + } | ||
70 | + | 35 | + |
71 | + /* Consecutive little-endian elements from a single register | 36 | + /* This is 1 or 2 atomic 16-byte operations. */ |
72 | + * can be promoted to a larger little-endian operation. | 37 | + tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop); |
73 | + */ | 38 | + if (is_pair) { |
74 | + if (selem == 1 && endian == MO_LE) { | 39 | + tcg_gen_addi_i64(clean_addr, clean_addr, 16); |
75 | + size = 3; | 40 | + tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop); |
76 | + } | ||
77 | + ebytes = 1 << size; | ||
78 | + elements = (is_q ? 16 : 8) / ebytes; | ||
79 | + | ||
80 | tcg_rn = cpu_reg_sp(s, rn); | ||
81 | tcg_addr = tcg_temp_new_i64(); | ||
82 | tcg_gen_mov_i64(tcg_addr, tcg_rn); | ||
83 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
84 | for (r = 0; r < rpt; r++) { | ||
85 | int e; | ||
86 | for (e = 0; e < elements; e++) { | ||
87 | - int tt = (rt + r) % 32; | ||
88 | int xs; | ||
89 | for (xs = 0; xs < selem; xs++) { | ||
90 | + int tt = (rt + r + xs) % 32; | ||
91 | if (is_store) { | ||
92 | - do_vec_st(s, tt, e, tcg_addr, size); | ||
93 | + do_vec_st(s, tt, e, tcg_addr, size, endian); | ||
94 | } else { | ||
95 | - do_vec_ld(s, tt, e, tcg_addr, size); | ||
96 | - | ||
97 | - /* For non-quad operations, setting a slice of the low | ||
98 | - * 64 bits of the register clears the high 64 bits (in | ||
99 | - * the ARM ARM pseudocode this is implicit in the fact | ||
100 | - * that 'rval' is a 64 bit wide variable). | ||
101 | - * For quad operations, we might still need to zero the | ||
102 | - * high bits of SVE. We optimize by noticing that we only | ||
103 | - * need to do this the first time we touch a register. | ||
104 | - */ | ||
105 | - if (e == 0 && (r == 0 || xs == selem - 1)) { | ||
106 | - clear_vec_high(s, is_q, tt); | ||
107 | - } | ||
108 | + do_vec_ld(s, tt, e, tcg_addr, size, endian); | ||
109 | } | ||
110 | tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); | ||
111 | - tt = (tt + 1) % 32; | ||
112 | } | ||
113 | } | 41 | } |
114 | } | 42 | } |
115 | 43 | ||
116 | + if (!is_store) { | ||
117 | + /* For non-quad operations, setting a slice of the low | ||
118 | + * 64 bits of the register clears the high 64 bits (in | ||
119 | + * the ARM ARM pseudocode this is implicit in the fact | ||
120 | + * that 'rval' is a 64 bit wide variable). | ||
121 | + * For quad operations, we might still need to zero the | ||
122 | + * high bits of SVE. | ||
123 | + */ | ||
124 | + for (r = 0; r < rpt * selem; r++) { | ||
125 | + int tt = (rt + r) % 32; | ||
126 | + clear_vec_high(s, is_q, tt); | ||
127 | + } | ||
128 | + } | ||
129 | + | ||
130 | if (is_postidx) { | ||
131 | int rm = extract32(insn, 16, 5); | ||
132 | if (rm == 31) { | ||
133 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | ||
134 | } else { | ||
135 | /* Load/store one element per register */ | ||
136 | if (is_load) { | ||
137 | - do_vec_ld(s, rt, index, tcg_addr, scale); | ||
138 | + do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data); | ||
139 | } else { | ||
140 | - do_vec_st(s, rt, index, tcg_addr, scale); | ||
141 | + do_vec_st(s, rt, index, tcg_addr, scale, s->be_data); | ||
142 | } | ||
143 | } | ||
144 | tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); | ||
145 | -- | 44 | -- |
146 | 2.19.1 | 45 | 2.34.1 |
147 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Round len_align to 16 instead of 8, handling an odd 8-byte as part | ||
4 | of the tail. Use MO_ATOM_NONE to indicate that all of these memory | ||
5 | ops have only byte atomicity. | ||
6 | |||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20181011205206.3552-12-richard.henderson@linaro.org | 9 | Message-id: 20230530191438.411344-8-richard.henderson@linaro.org |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
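The arithmetic for the right-hand patch works out as follows; a standalone sketch with an illustrative length (assuming vq = 13, giving a 26-byte predicate register):

    #include <stdio.h>

    /* Split a transfer the way the patch does: 16-byte chunks first, then
     * an optional 8-byte piece, then a 2/4/6-byte tail (predicate sizes
     * are any multiple of 2). */
    int main(void)
    {
        int len = 26;
        int len_align = len & ~15;     /* QEMU_ALIGN_DOWN(len, 16) -> 16 */
        int len_remain = len % 16;     /* -> 10 */

        printf("16-byte ops: %d\n", len_align / 16);          /* 1 */
        if (len_remain >= 8) {         /* peel one 8-byte op off the tail */
            printf("8-byte op at offset %d\n", len_align);    /* 16 */
            len_align += 8;
            len_remain -= 8;
        }
        printf("tail: %d bytes at offset %d\n", len_remain, len_align);
        return 0;                      /* tail: 2 bytes at offset 24 */
    }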
7 | --- | 11 | --- |
8 | target/arm/translate.c | 31 +++++++++++++++---------------- | 12 | target/arm/tcg/translate-sve.c | 95 +++++++++++++++++++++++++--------- |
9 | 1 file changed, 15 insertions(+), 16 deletions(-) | 13 | 1 file changed, 70 insertions(+), 25 deletions(-) |
10 | 14 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 15 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 17 | --- a/target/arm/tcg/translate-sve.c |
14 | +++ b/target/arm/translate.c | 18 | +++ b/target/arm/tcg/translate-sve.c |
15 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 19 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, |
16 | vec_size, vec_size); | 20 | void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, |
17 | } | 21 | int len, int rn, int imm) |
18 | return 0; | 22 | { |
19 | + | 23 | - int len_align = QEMU_ALIGN_DOWN(len, 8); |
20 | + case NEON_3R_VMUL: /* VMUL */ | 24 | - int len_remain = len % 8; |
21 | + if (u) { | 25 | - int nparts = len / 8 + ctpop8(len_remain); |
22 | + /* Polynomial case allows only P8 and is handled below. */ | 26 | + int len_align = QEMU_ALIGN_DOWN(len, 16); |
23 | + if (size != 0) { | 27 | + int len_remain = len % 16; |
24 | + return 1; | 28 | + int nparts = len / 16 + ctpop8(len_remain); |
25 | + } | 29 | int midx = get_mem_index(s); |
26 | + } else { | 30 | TCGv_i64 dirty_addr, clean_addr, t0, t1; |
27 | + tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs, | 31 | + TCGv_i128 t16; |
28 | + vec_size, vec_size); | 32 | |
29 | + return 0; | 33 | dirty_addr = tcg_temp_new_i64(); |
30 | + } | 34 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); |
31 | + break; | 35 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, |
36 | int i; | ||
37 | |||
38 | t0 = tcg_temp_new_i64(); | ||
39 | - for (i = 0; i < len_align; i += 8) { | ||
40 | - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); | ||
41 | + t1 = tcg_temp_new_i64(); | ||
42 | + t16 = tcg_temp_new_i128(); | ||
43 | + | ||
44 | + for (i = 0; i < len_align; i += 16) { | ||
45 | + tcg_gen_qemu_ld_i128(t16, clean_addr, midx, | ||
46 | + MO_LE | MO_128 | MO_ATOM_NONE); | ||
47 | + tcg_gen_extr_i128_i64(t0, t1, t16); | ||
48 | tcg_gen_st_i64(t0, base, vofs + i); | ||
49 | - tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
50 | + tcg_gen_st_i64(t1, base, vofs + i + 8); | ||
51 | + tcg_gen_addi_i64(clean_addr, clean_addr, 16); | ||
32 | } | 52 | } |
33 | if (size == 3) { | 53 | } else { |
34 | /* 64-bit element instructions. */ | 54 | TCGLabel *loop = gen_new_label(); |
35 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 55 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, |
36 | return 1; | 56 | tcg_gen_movi_ptr(i, 0); |
37 | } | 57 | gen_set_label(loop); |
38 | break; | 58 | |
39 | - case NEON_3R_VMUL: | 59 | - t0 = tcg_temp_new_i64(); |
40 | - if (u && (size != 0)) { | 60 | - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); |
41 | - /* UNDEF on invalid size for polynomial subcase */ | 61 | - tcg_gen_addi_i64(clean_addr, clean_addr, 8); |
42 | - return 1; | 62 | + t16 = tcg_temp_new_i128(); |
43 | - } | 63 | + tcg_gen_qemu_ld_i128(t16, clean_addr, midx, |
44 | - break; | 64 | + MO_LE | MO_128 | MO_ATOM_NONE); |
45 | case NEON_3R_VFM_VQRDMLSH: | 65 | + tcg_gen_addi_i64(clean_addr, clean_addr, 16); |
46 | if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) { | 66 | |
47 | return 1; | 67 | tp = tcg_temp_new_ptr(); |
48 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 68 | tcg_gen_add_ptr(tp, base, i); |
49 | } | 69 | - tcg_gen_addi_ptr(i, i, 8); |
50 | break; | 70 | + tcg_gen_addi_ptr(i, i, 16); |
51 | case NEON_3R_VMUL: | 71 | + |
52 | - if (u) { /* polynomial */ | 72 | + t0 = tcg_temp_new_i64(); |
53 | - gen_helper_neon_mul_p8(tmp, tmp, tmp2); | 73 | + t1 = tcg_temp_new_i64(); |
54 | - } else { /* Integer */ | 74 | + tcg_gen_extr_i128_i64(t0, t1, t16); |
55 | - switch (size) { | 75 | + |
56 | - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; | 76 | tcg_gen_st_i64(t0, tp, vofs); |
57 | - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; | 77 | + tcg_gen_st_i64(t1, tp, vofs + 8); |
58 | - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; | 78 | |
59 | - default: abort(); | 79 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); |
60 | - } | 80 | } |
61 | - } | 81 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, |
62 | + /* VMUL.P8; other cases already eliminated. */ | 82 | * Predicate register loads can be any multiple of 2. |
63 | + gen_helper_neon_mul_p8(tmp, tmp, tmp2); | 83 | * Note that we still store the entire 64-bit unit into cpu_env. |
64 | break; | 84 | */ |
65 | case NEON_3R_VPMAX: | 85 | + if (len_remain >= 8) { |
66 | GEN_NEON_INTEGER_OP(pmax); | 86 | + t0 = tcg_temp_new_i64(); |
87 | + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); | ||
88 | + tcg_gen_st_i64(t0, base, vofs + len_align); | ||
89 | + len_remain -= 8; | ||
90 | + len_align += 8; | ||
91 | + if (len_remain) { | ||
92 | + tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
93 | + } | ||
94 | + } | ||
95 | if (len_remain) { | ||
96 | t0 = tcg_temp_new_i64(); | ||
97 | switch (len_remain) { | ||
98 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, | ||
99 | case 4: | ||
100 | case 8: | ||
101 | tcg_gen_qemu_ld_i64(t0, clean_addr, midx, | ||
102 | - MO_LE | ctz32(len_remain)); | ||
103 | + MO_LE | ctz32(len_remain) | MO_ATOM_NONE); | ||
104 | break; | ||
105 | |||
106 | case 6: | ||
107 | t1 = tcg_temp_new_i64(); | ||
108 | - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL); | ||
109 | + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); | ||
110 | tcg_gen_addi_i64(clean_addr, clean_addr, 4); | ||
111 | - tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW); | ||
112 | + tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); | ||
113 | tcg_gen_deposit_i64(t0, t0, t1, 32, 32); | ||
114 | break; | ||
115 | |||
116 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, | ||
117 | void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
118 | int len, int rn, int imm) | ||
119 | { | ||
120 | - int len_align = QEMU_ALIGN_DOWN(len, 8); | ||
121 | - int len_remain = len % 8; | ||
122 | - int nparts = len / 8 + ctpop8(len_remain); | ||
123 | + int len_align = QEMU_ALIGN_DOWN(len, 16); | ||
124 | + int len_remain = len % 16; | ||
125 | + int nparts = len / 16 + ctpop8(len_remain); | ||
126 | int midx = get_mem_index(s); | ||
127 | - TCGv_i64 dirty_addr, clean_addr, t0; | ||
128 | + TCGv_i64 dirty_addr, clean_addr, t0, t1; | ||
129 | + TCGv_i128 t16; | ||
130 | |||
131 | dirty_addr = tcg_temp_new_i64(); | ||
132 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); | ||
133 | @@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
134 | int i; | ||
135 | |||
136 | t0 = tcg_temp_new_i64(); | ||
137 | + t1 = tcg_temp_new_i64(); | ||
138 | + t16 = tcg_temp_new_i128(); | ||
139 | for (i = 0; i < len_align; i += 8) { | ||
140 | tcg_gen_ld_i64(t0, base, vofs + i); | ||
141 | - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); | ||
142 | - tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
143 | + tcg_gen_ld_i64(t1, base, vofs + i + 8); | ||
144 | + tcg_gen_concat_i64_i128(t16, t0, t1); | ||
145 | + tcg_gen_qemu_st_i128(t16, clean_addr, midx, | ||
146 | + MO_LE | MO_128 | MO_ATOM_NONE); | ||
147 | + tcg_gen_addi_i64(clean_addr, clean_addr, 16); | ||
148 | } | ||
149 | } else { | ||
150 | TCGLabel *loop = gen_new_label(); | ||
151 | @@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
152 | gen_set_label(loop); | ||
153 | |||
154 | t0 = tcg_temp_new_i64(); | ||
155 | + t1 = tcg_temp_new_i64(); | ||
156 | tp = tcg_temp_new_ptr(); | ||
157 | tcg_gen_add_ptr(tp, base, i); | ||
158 | tcg_gen_ld_i64(t0, tp, vofs); | ||
159 | - tcg_gen_addi_ptr(i, i, 8); | ||
160 | + tcg_gen_ld_i64(t1, tp, vofs + 8); | ||
161 | + tcg_gen_addi_ptr(i, i, 16); | ||
162 | |||
163 | - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); | ||
164 | - tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
165 | + t16 = tcg_temp_new_i128(); | ||
166 | + tcg_gen_concat_i64_i128(t16, t0, t1); | ||
167 | + | ||
168 | + tcg_gen_qemu_st_i128(t16, clean_addr, midx, MO_LEUQ); | ||
169 | + tcg_gen_addi_i64(clean_addr, clean_addr, 16); | ||
170 | |||
171 | tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); | ||
172 | } | ||
173 | |||
174 | /* Predicate register stores can be any multiple of 2. */ | ||
175 | + if (len_remain >= 8) { | ||
176 | + t0 = tcg_temp_new_i64(); | ||
177 | + tcg_gen_st_i64(t0, base, vofs + len_align); | ||
178 | + tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); | ||
179 | + len_remain -= 8; | ||
180 | + len_align += 8; | ||
181 | + if (len_remain) { | ||
182 | + tcg_gen_addi_i64(clean_addr, clean_addr, 8); | ||
183 | + } | ||
184 | + } | ||
185 | if (len_remain) { | ||
186 | t0 = tcg_temp_new_i64(); | ||
187 | tcg_gen_ld_i64(t0, base, vofs + len_align); | ||
188 | @@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
189 | case 4: | ||
190 | case 8: | ||
191 | tcg_gen_qemu_st_i64(t0, clean_addr, midx, | ||
192 | - MO_LE | ctz32(len_remain)); | ||
193 | + MO_LE | ctz32(len_remain) | MO_ATOM_NONE); | ||
194 | break; | ||
195 | |||
196 | case 6: | ||
197 | - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL); | ||
198 | + tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); | ||
199 | tcg_gen_addi_i64(clean_addr, clean_addr, 4); | ||
200 | tcg_gen_shri_i64(t0, t0, 32); | ||
201 | - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW); | ||
202 | + tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); | ||
203 | break; | ||
204 | |||
205 | default: | ||
67 | -- | 206 | -- |
68 | 2.19.1 | 207 | 2.34.1 |
69 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Also introduces neon_element_offset to find the env offset | 3 | No need to duplicate this check across multiple call sites. |
4 | of a specific element within a neon register. | ||
5 | 4 | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20181011205206.3552-7-richard.henderson@linaro.org | 7 | Message-id: 20230530191438.411344-9-richard.henderson@linaro.org |
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 9 | --- |
11 | target/arm/translate.c | 63 ++++++++++++++++++++++++------------------ | 10 | target/arm/tcg/translate-a64.c | 44 ++++++++++++++++------------------ |
12 | 1 file changed, 36 insertions(+), 27 deletions(-) | 11 | 1 file changed, 21 insertions(+), 23 deletions(-) |
13 | 12 | ||
14 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 13 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
15 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/translate.c | 15 | --- a/target/arm/tcg/translate-a64.c |
17 | +++ b/target/arm/translate.c | 16 | +++ b/target/arm/tcg/translate-a64.c |
18 | @@ -XXX,XX +XXX,XX @@ neon_reg_offset (int reg, int n) | 17 | @@ -XXX,XX +XXX,XX @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) |
19 | return vfp_reg_offset(0, sreg); | 18 | * races in multi-threaded linux-user and when MTTCG softmmu is |
19 | * enabled. | ||
20 | */ | ||
21 | -static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
22 | - TCGv_i64 addr, int size, bool is_pair) | ||
23 | +static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, | ||
24 | + int size, bool is_pair) | ||
25 | { | ||
26 | int idx = get_mem_index(s); | ||
27 | MemOp memop; | ||
28 | + TCGv_i64 dirty_addr, clean_addr; | ||
29 | + | ||
30 | + s->is_ldex = true; | ||
31 | + dirty_addr = cpu_reg_sp(s, rn); | ||
32 | + clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, size); | ||
33 | |||
34 | g_assert(size <= 3); | ||
35 | if (is_pair) { | ||
36 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
37 | if (size == 2) { | ||
38 | /* The pair must be single-copy atomic for the doubleword. */ | ||
39 | memop = finalize_memop(s, MO_64 | MO_ALIGN); | ||
40 | - tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop); | ||
41 | + tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); | ||
42 | if (s->be_data == MO_LE) { | ||
43 | tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32); | ||
44 | tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32); | ||
45 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
46 | |||
47 | memop = finalize_memop_atom(s, MO_128 | MO_ALIGN_16, | ||
48 | MO_ATOM_IFALIGN_PAIR); | ||
49 | - tcg_gen_qemu_ld_i128(t16, addr, idx, memop); | ||
50 | + tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop); | ||
51 | |||
52 | if (s->be_data == MO_LE) { | ||
53 | tcg_gen_extr_i128_i64(cpu_exclusive_val, | ||
54 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
55 | } | ||
56 | } else { | ||
57 | memop = finalize_memop(s, size | MO_ALIGN); | ||
58 | - tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop); | ||
59 | + tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); | ||
60 | tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); | ||
61 | } | ||
62 | - tcg_gen_mov_i64(cpu_exclusive_addr, addr); | ||
63 | + tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr); | ||
20 | } | 64 | } |
21 | 65 | ||
22 | +/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE, | 66 | static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, |
23 | + * where 0 is the least significant end of the register. | 67 | - TCGv_i64 addr, int size, int is_pair) |
24 | + */ | 68 | + int rn, int size, int is_pair) |
25 | +static inline long | 69 | { |
26 | +neon_element_offset(int reg, int element, TCGMemOp size) | 70 | /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] |
27 | +{ | 71 | * && (!is_pair || env->exclusive_high == [addr + datasize])) { |
28 | + int element_size = 1 << size; | 72 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, |
29 | + int ofs = element * element_size; | 73 | */ |
30 | +#ifdef HOST_WORDS_BIGENDIAN | 74 | TCGLabel *fail_label = gen_new_label(); |
31 | + /* Calculate the offset assuming fully little-endian, | 75 | TCGLabel *done_label = gen_new_label(); |
32 | + * then XOR to account for the order of the 8-byte units. | 76 | - TCGv_i64 tmp; |
33 | + */ | 77 | + TCGv_i64 tmp, dirty_addr, clean_addr; |
34 | + if (element_size < 8) { | 78 | |
35 | + ofs ^= 8 - element_size; | 79 | - tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label); |
36 | + } | 80 | + dirty_addr = cpu_reg_sp(s, rn); |
37 | +#endif | 81 | + clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, size); |
38 | + return neon_reg_offset(reg, 0) + ofs; | ||
39 | +} | ||
40 | + | 82 | + |
41 | static TCGv_i32 neon_load_reg(int reg, int pass) | 83 | + tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label); |
42 | { | 84 | |
43 | TCGv_i32 tmp = tcg_temp_new_i32(); | 85 | tmp = tcg_temp_new_i64(); |
44 | @@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) | 86 | if (is_pair) { |
45 | tmp = load_reg(s, rd); | 87 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
46 | if (insn & (1 << 23)) { | 88 | if (is_lasr) { |
47 | /* VDUP */ | 89 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); |
48 | - if (size == 0) { | 90 | } |
49 | - gen_neon_dup_u8(tmp, 0); | 91 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
50 | - } else if (size == 1) { | 92 | - true, rn != 31, size); |
51 | - gen_neon_dup_low16(tmp); | 93 | - gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false); |
52 | - } | 94 | + gen_store_exclusive(s, rs, rt, rt2, rn, size, false); |
53 | - for (n = 0; n <= pass * 2; n++) { | 95 | return; |
54 | - tmp2 = tcg_temp_new_i32(); | 96 | |
55 | - tcg_gen_mov_i32(tmp2, tmp); | 97 | case 0x4: /* LDXR */ |
56 | - neon_store_reg(rn, n, tmp2); | 98 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
57 | - } | 99 | if (rn == 31) { |
58 | - neon_store_reg(rn, n, tmp); | 100 | gen_check_sp_alignment(s); |
59 | + int vec_size = pass ? 16 : 8; | 101 | } |
60 | + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0), | 102 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
61 | + vec_size, vec_size, tmp); | 103 | - false, rn != 31, size); |
62 | + tcg_temp_free_i32(tmp); | 104 | - s->is_ldex = true; |
63 | } else { | 105 | - gen_load_exclusive(s, rt, rt2, clean_addr, size, false); |
64 | /* VMOV */ | 106 | + gen_load_exclusive(s, rt, rt2, rn, size, false); |
65 | switch (size) { | 107 | if (is_lasr) { |
66 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 108 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); |
67 | tcg_temp_free_i32(tmp); | 109 | } |
68 | } else if ((insn & 0x380) == 0) { | 110 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
69 | /* VDUP */ | 111 | if (is_lasr) { |
70 | + int element; | 112 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); |
71 | + TCGMemOp size; | 113 | } |
72 | + | 114 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
73 | if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { | 115 | - true, rn != 31, size); |
74 | return 1; | 116 | - gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true); |
75 | } | 117 | + gen_store_exclusive(s, rs, rt, rt2, rn, size, true); |
76 | - if (insn & (1 << 19)) { | 118 | return; |
77 | - tmp = neon_load_reg(rm, 1); | 119 | } |
78 | - } else { | 120 | if (rt2 == 31 |
79 | - tmp = neon_load_reg(rm, 0); | 121 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
80 | - } | 122 | if (rn == 31) { |
81 | if (insn & (1 << 16)) { | 123 | gen_check_sp_alignment(s); |
82 | - gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8); | 124 | } |
83 | + size = MO_8; | 125 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
84 | + element = (insn >> 17) & 7; | 126 | - false, rn != 31, size); |
85 | } else if (insn & (1 << 17)) { | 127 | - s->is_ldex = true; |
86 | - if ((insn >> 18) & 1) | 128 | - gen_load_exclusive(s, rt, rt2, clean_addr, size, true); |
87 | - gen_neon_dup_high16(tmp); | 129 | + gen_load_exclusive(s, rt, rt2, rn, size, true); |
88 | - else | 130 | if (is_lasr) { |
89 | - gen_neon_dup_low16(tmp); | 131 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); |
90 | + size = MO_16; | ||
91 | + element = (insn >> 18) & 3; | ||
92 | + } else { | ||
93 | + size = MO_32; | ||
94 | + element = (insn >> 19) & 1; | ||
95 | } | ||
96 | - for (pass = 0; pass < (q ? 4 : 2); pass++) { | ||
97 | - tmp2 = tcg_temp_new_i32(); | ||
98 | - tcg_gen_mov_i32(tmp2, tmp); | ||
99 | - neon_store_reg(rd, pass, tmp2); | ||
100 | - } | ||
101 | - tcg_temp_free_i32(tmp); | ||
102 | + tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0), | ||
103 | + neon_element_offset(rm, element, size), | ||
104 | + q ? 16 : 8, q ? 16 : 8); | ||
105 | } else { | ||
106 | return 1; | ||
107 | } | 132 | } |
108 | -- | 133 | -- |
109 | 2.19.1 | 134 | 2.34.1 |
110 | |||
1 | From: Stewart Hildebrand <Stewart.Hildebrand@dornerworks.com> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | "The Image must be placed text_offset bytes from a 2MB aligned base | 3 | This is required for LSE2, where the pair must be treated atomically if |
4 | address anywhere in usable system RAM and called there." | 4 | it does not cross a 16-byte boundary. But it simplifies the code to do |
5 | this always. | ||
5 | 6 | ||
6 | For the virt board, we write our startup bootloader at the very | ||
7 | bottom of RAM, so that bit can't be used for the image. To avoid | ||
8 | overlap in case the image requests to be loaded at an offset | ||
9 | smaller than our bootloader, we increment the load offset to the | ||
10 | next 2MB. | ||
11 | |||
12 | This fixes a boot failure for Xen AArch64. | ||
13 | |||
14 | Signed-off-by: Stewart Hildebrand <stewart.hildebrand@dornerworks.com> | ||
15 | Tested-by: Andre Przywara <andre.przywara@arm.com> | ||
16 | Message-id: b8a89518794b4436af0c151ed10de4fa@dornerworks.com | ||
17 | [PMM: Rephrased a comment a bit] | ||
18 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Message-id: 20230530191438.411344-10-richard.henderson@linaro.org | ||
19 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
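The rule described above amounts to: if the image's requested text_offset would land inside the small boot stub at the base of RAM, push the load address up by 2 MiB, which preserves the 2 MiB base alignment. A rough standalone sketch of that rule, with an illustrative function name and RAM base rather than the actual hw/arm/boot.c code:

    #include <stdint.h>
    #include <stdio.h>

    #define KiB (1024ull)
    #define MiB (1024ull * KiB)

    static uint64_t kernel_load_addr(uint64_t ram_base, uint64_t text_offset,
                                     uint64_t bootloader_size)
    {
        uint64_t addr = ram_base + text_offset;
        if (text_offset < bootloader_size) {
            addr += 2 * MiB;           /* skip past the boot stub */
        }
        return addr;
    }

    int main(void)
    {
        /* text_offset 0x80000 clears a 4 KiB stub: no bump */
        printf("%#llx\n", (unsigned long long)
               kernel_load_addr(0x40000000, 0x80000, 4 * KiB));   /* 0x40080000 */
        /* text_offset 0 would overlap it: bumped by 2 MiB */
        printf("%#llx\n", (unsigned long long)
               kernel_load_addr(0x40000000, 0, 4 * KiB));         /* 0x40200000 */
        return 0;
    }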
20 | --- | 11 | --- |
21 | hw/arm/boot.c | 18 ++++++++++++++++++ | 12 | target/arm/tcg/translate-a64.c | 70 ++++++++++++++++++++++++++-------- |
22 | 1 file changed, 18 insertions(+) | 13 | 1 file changed, 55 insertions(+), 15 deletions(-) |
23 | 14 | ||
24 | diff --git a/hw/arm/boot.c b/hw/arm/boot.c | 15 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
25 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/hw/arm/boot.c | 17 | --- a/target/arm/tcg/translate-a64.c |
27 | +++ b/hw/arm/boot.c | 18 | +++ b/target/arm/tcg/translate-a64.c |
28 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) |
29 | #include "qemu/config-file.h" | 20 | } else { |
30 | #include "qemu/option.h" | 21 | TCGv_i64 tcg_rt = cpu_reg(s, rt); |
31 | #include "exec/address-spaces.h" | 22 | TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); |
32 | +#include "qemu/units.h" | 23 | + MemOp mop = size + 1; |
33 | |||
34 | /* Kernel boot protocol is specified in the kernel docs | ||
35 | * Documentation/arm/Booting and Documentation/arm64/booting.txt | ||
36 | @@ -XXX,XX +XXX,XX @@ | ||
37 | #define ARM64_TEXT_OFFSET_OFFSET 8 | ||
38 | #define ARM64_MAGIC_OFFSET 56 | ||
39 | |||
40 | +#define BOOTLOADER_MAX_SIZE (4 * KiB) | ||
41 | + | 24 | + |
42 | AddressSpace *arm_boot_address_space(ARMCPU *cpu, | 25 | + /* |
43 | const struct arm_boot_info *info) | 26 | + * With LSE2, non-sign-extending pairs are treated atomically if |
44 | { | 27 | + * aligned, and if unaligned one of the pair will be completely |
45 | @@ -XXX,XX +XXX,XX @@ static void write_bootloader(const char *name, hwaddr addr, | 28 | + * within a 16-byte block and that element will be atomic. |
46 | code[i] = tswap32(insn); | 29 | + * Otherwise each element is separately atomic. |
47 | } | 30 | + * In all cases, issue one operation with the correct atomicity. |
48 | 31 | + * | |
49 | + assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE); | 32 | + * This treats sign-extending loads like zero-extending loads, |
33 | + * since that reuses the most code below. | ||
34 | + */ | ||
35 | + if (s->align_mem) { | ||
36 | + mop |= (size == 2 ? MO_ALIGN_4 : MO_ALIGN_8); | ||
37 | + } | ||
38 | + mop = finalize_memop_pair(s, mop); | ||
39 | |||
40 | if (is_load) { | ||
41 | - TCGv_i64 tmp = tcg_temp_new_i64(); | ||
42 | + if (size == 2) { | ||
43 | + int o2 = s->be_data == MO_LE ? 32 : 0; | ||
44 | + int o1 = o2 ^ 32; | ||
45 | |||
46 | - /* Do not modify tcg_rt before recognizing any exception | ||
47 | - * from the second load. | ||
48 | - */ | ||
49 | - do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN, | ||
50 | - false, false, 0, false, false); | ||
51 | - tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size); | ||
52 | - do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN, | ||
53 | - false, false, 0, false, false); | ||
54 | + tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop); | ||
55 | + if (is_signed) { | ||
56 | + tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32); | ||
57 | + tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32); | ||
58 | + } else { | ||
59 | + tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32); | ||
60 | + tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32); | ||
61 | + } | ||
62 | + } else { | ||
63 | + TCGv_i128 tmp = tcg_temp_new_i128(); | ||
64 | |||
65 | - tcg_gen_mov_i64(tcg_rt, tmp); | ||
66 | + tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop); | ||
67 | + if (s->be_data == MO_LE) { | ||
68 | + tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp); | ||
69 | + } else { | ||
70 | + tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp); | ||
71 | + } | ||
72 | + } | ||
73 | } else { | ||
74 | - do_gpr_st(s, tcg_rt, clean_addr, size, | ||
75 | - false, 0, false, false); | ||
76 | - tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size); | ||
77 | - do_gpr_st(s, tcg_rt2, clean_addr, size, | ||
78 | - false, 0, false, false); | ||
79 | + if (size == 2) { | ||
80 | + TCGv_i64 tmp = tcg_temp_new_i64(); | ||
50 | + | 81 | + |
51 | rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as); | 82 | + if (s->be_data == MO_LE) { |
52 | 83 | + tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2); | |
53 | g_free(code); | 84 | + } else { |
54 | @@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, | 85 | + tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt); |
55 | memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals)); | 86 | + } |
56 | if (hdrvals[1] != 0) { | 87 | + tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop); |
57 | kernel_load_offset = le64_to_cpu(hdrvals[0]); | 88 | + } else { |
89 | + TCGv_i128 tmp = tcg_temp_new_i128(); | ||
58 | + | 90 | + |
59 | + /* | 91 | + if (s->be_data == MO_LE) { |
60 | + * We write our startup "bootloader" at the very bottom of RAM, | 92 | + tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2); |
61 | + * so that bit can't be used for the image. Luckily the Image | 93 | + } else { |
62 | + * format specification is that the image requests only an offset | 94 | + tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt); |
63 | + * from a 2MB boundary, not an absolute load address. So if the | 95 | + } |
64 | + * image requests an offset that might mean it overlaps with the | 96 | + tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop); |
65 | + * bootloader, we can just load it starting at 2MB+offset rather | ||
66 | + * than 0MB + offset. | ||
67 | + */ | ||
68 | + if (kernel_load_offset < BOOTLOADER_MAX_SIZE) { | ||
69 | + kernel_load_offset += 2 * MiB; | ||
70 | + } | 97 | + } |
71 | } | 98 | } |
72 | } | 99 | } |
73 | 100 | ||
74 | -- | 101 | -- |
75 | 2.19.1 | 102 | 2.34.1 |
76 | |||
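A quick illustration of the LDP change in the right-hand series above: the new code issues a single 16-byte load and then splits the result with tcg_gen_extr_i128_i64(), ordering the halves by endianness. The sketch below is plain C, not part of the patch, and the function name and parameters are invented for illustration; it only shows how the two 64-bit halves of that single load end up in Rt and Rt2.

    /*
     * Illustrative sketch only, not from the patch: assignment of the two
     * 64-bit halves of the single 16-byte LDP load to Rt and Rt2, mirroring
     * the tcg_gen_extr_i128_i64() calls in the diff above.
     */
    #include <stdbool.h>
    #include <stdint.h>

    static void ldp_assign_halves(uint64_t lo, uint64_t hi, bool big_endian,
                                  uint64_t *rt, uint64_t *rt2)
    {
        if (!big_endian) {      /* MO_LE: Rt takes the low half */
            *rt = lo;
            *rt2 = hi;
        } else {                /* MO_BE: Rt takes the high half */
            *rt = hi;
            *rt2 = lo;
        }
    }

Splitting after one wide load is what lets the access honour the LSE2 requirement quoted in the commit message above, namely that an aligned pair be treated atomically, instead of issuing two separate 8-byte accesses.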
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Move cmtst_op expanders from translate-a64.c. | 3 | We are going to need the complete memop beforehand, |
4 | so let's not compute it twice. | ||
4 | 5 | ||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20181011205206.3552-17-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-11-richard.henderson@linaro.org |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 10 | --- |
10 | target/arm/translate.h | 2 + | 11 | target/arm/tcg/translate-a64.c | 61 +++++++++++++++++++--------------- |
11 | target/arm/translate-a64.c | 38 ------------------ | 12 | 1 file changed, 35 insertions(+), 26 deletions(-) |
12 | target/arm/translate.c | 81 +++++++++++++++++++++++++++----------- | ||
13 | 3 files changed, 60 insertions(+), 61 deletions(-) | ||
14 | 13 | ||
15 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 14 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
16 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate.h | 16 | --- a/target/arm/tcg/translate-a64.c |
18 | +++ b/target/arm/translate.h | 17 | +++ b/target/arm/tcg/translate-a64.c |
19 | @@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op; | 18 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, |
20 | extern const GVecGen3 bif_op; | 19 | unsigned int iss_srt, |
21 | extern const GVecGen3 mla_op[4]; | 20 | bool iss_sf, bool iss_ar) |
22 | extern const GVecGen3 mls_op[4]; | 21 | { |
23 | +extern const GVecGen3 cmtst_op[4]; | 22 | - memop = finalize_memop(s, memop); |
24 | extern const GVecGen2i ssra_op[4]; | 23 | tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop); |
25 | extern const GVecGen2i usra_op[4]; | 24 | |
26 | extern const GVecGen2i sri_op[4]; | 25 | if (iss_valid) { |
27 | extern const GVecGen2i sli_op[4]; | 26 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, |
28 | +void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); | 27 | bool iss_valid, unsigned int iss_srt, |
29 | 28 | bool iss_sf, bool iss_ar) | |
30 | /* | 29 | { |
31 | * Forward to the isar_feature_* tests given a DisasContext pointer. | 30 | - memop = finalize_memop(s, memop); |
32 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 31 | tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop); |
33 | index XXXXXXX..XXXXXXX 100644 | 32 | |
34 | --- a/target/arm/translate-a64.c | 33 | if (extend && (memop & MO_SIGN)) { |
35 | +++ b/target/arm/translate-a64.c | 34 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
36 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) | 35 | int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr; |
36 | int size = extract32(insn, 30, 2); | ||
37 | TCGv_i64 clean_addr; | ||
38 | + MemOp memop; | ||
39 | |||
40 | switch (o2_L_o1_o0) { | ||
41 | case 0x0: /* STXR */ | ||
42 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
43 | gen_check_sp_alignment(s); | ||
44 | } | ||
45 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); | ||
46 | + /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
47 | + memop = finalize_memop(s, size | MO_ALIGN); | ||
48 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | ||
49 | true, rn != 31, size); | ||
50 | - /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
51 | - do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt, | ||
52 | + do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt, | ||
53 | disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | ||
54 | return; | ||
55 | |||
56 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
57 | if (rn == 31) { | ||
58 | gen_check_sp_alignment(s); | ||
59 | } | ||
60 | + /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
61 | + memop = finalize_memop(s, size | MO_ALIGN); | ||
62 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | ||
63 | false, rn != 31, size); | ||
64 | - /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
65 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true, | ||
66 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true, | ||
67 | rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | ||
68 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
69 | return; | ||
70 | @@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) | ||
71 | } else { | ||
72 | /* Only unsigned 32bit loads target 32bit registers. */ | ||
73 | bool iss_sf = opc != 0; | ||
74 | + MemOp memop = finalize_memop(s, size + is_signed * MO_SIGN); | ||
75 | |||
76 | - do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, | ||
77 | - false, true, rt, iss_sf, false); | ||
78 | + do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false); | ||
37 | } | 79 | } |
38 | } | 80 | } |
39 | 81 | ||
40 | -/* CMTST : test is "if (X & Y != 0)". */ | 82 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
41 | -static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 83 | bool post_index; |
42 | -{ | 84 | bool writeback; |
43 | - tcg_gen_and_i32(d, a, b); | 85 | int memidx; |
44 | - tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0); | 86 | - |
45 | - tcg_gen_neg_i32(d, d); | 87 | + MemOp memop; |
46 | -} | 88 | TCGv_i64 clean_addr, dirty_addr; |
47 | - | 89 | |
48 | -static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | 90 | if (is_vector) { |
49 | -{ | 91 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
50 | - tcg_gen_and_i64(d, a, b); | 92 | return; |
51 | - tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0); | 93 | } |
52 | - tcg_gen_neg_i64(d, d); | 94 | is_store = (opc == 0); |
53 | -} | 95 | - is_signed = extract32(opc, 1, 1); |
54 | - | 96 | + is_signed = !is_store && extract32(opc, 1, 1); |
55 | -static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | 97 | is_extended = (size < 3) && extract32(opc, 0, 1); |
56 | -{ | 98 | } |
57 | - tcg_gen_and_vec(vece, d, a, b); | 99 | |
58 | - tcg_gen_dupi_vec(vece, a, 0); | 100 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
59 | - tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a); | 101 | } |
60 | -} | 102 | |
61 | - | 103 | memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); |
62 | static void handle_3same_64(DisasContext *s, int opcode, bool u, | 104 | + memop = finalize_memop(s, size + is_signed * MO_SIGN); |
63 | TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm) | 105 | + |
64 | { | 106 | clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store, |
65 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) | 107 | writeback || rn != 31, |
66 | /* Integer op subgroup of C3.6.16. */ | 108 | size, is_unpriv, memidx); |
67 | static void disas_simd_3same_int(DisasContext *s, uint32_t insn) | 109 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
68 | { | 110 | bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); |
69 | - static const GVecGen3 cmtst_op[4] = { | 111 | |
70 | - { .fni4 = gen_helper_neon_tst_u8, | 112 | if (is_store) { |
71 | - .fniv = gen_cmtst_vec, | 113 | - do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx, |
72 | - .vece = MO_8 }, | 114 | + do_gpr_st_memidx(s, tcg_rt, clean_addr, memop, memidx, |
73 | - { .fni4 = gen_helper_neon_tst_u16, | 115 | iss_valid, rt, iss_sf, false); |
74 | - .fniv = gen_cmtst_vec, | 116 | } else { |
75 | - .vece = MO_16 }, | 117 | - do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, |
76 | - { .fni4 = gen_cmtst_i32, | 118 | + do_gpr_ld_memidx(s, tcg_rt, clean_addr, memop, |
77 | - .fniv = gen_cmtst_vec, | 119 | is_extended, memidx, |
78 | - .vece = MO_32 }, | 120 | iss_valid, rt, iss_sf, false); |
79 | - { .fni8 = gen_cmtst_i64, | 121 | } |
80 | - .fniv = gen_cmtst_vec, | 122 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, |
81 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | 123 | bool is_signed = false; |
82 | - .vece = MO_64 }, | 124 | bool is_store = false; |
83 | - }; | 125 | bool is_extended = false; |
84 | - | 126 | - |
85 | int is_q = extract32(insn, 30, 1); | 127 | TCGv_i64 tcg_rm, clean_addr, dirty_addr; |
86 | int u = extract32(insn, 29, 1); | 128 | + MemOp memop; |
87 | int size = extract32(insn, 22, 2); | 129 | |
88 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 130 | if (extract32(opt, 1, 1) == 0) { |
89 | index XXXXXXX..XXXXXXX 100644 | 131 | unallocated_encoding(s); |
90 | --- a/target/arm/translate.c | 132 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, |
91 | +++ b/target/arm/translate.c | 133 | return; |
92 | @@ -XXX,XX +XXX,XX @@ const GVecGen3 mls_op[4] = { | 134 | } |
93 | .vece = MO_64 }, | 135 | is_store = (opc == 0); |
94 | }; | 136 | - is_signed = extract32(opc, 1, 1); |
95 | 137 | + is_signed = !is_store && extract32(opc, 1, 1); | |
96 | +/* CMTST : test is "if (X & Y != 0)". */ | 138 | is_extended = (size < 3) && extract32(opc, 0, 1); |
97 | +static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | 139 | } |
98 | +{ | 140 | |
99 | + tcg_gen_and_i32(d, a, b); | 141 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, |
100 | + tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0); | 142 | ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0); |
101 | + tcg_gen_neg_i32(d, d); | 143 | |
102 | +} | 144 | tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm); |
103 | + | 145 | + |
104 | +void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | 146 | + memop = finalize_memop(s, size + is_signed * MO_SIGN); |
105 | +{ | 147 | clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size); |
106 | + tcg_gen_and_i64(d, a, b); | 148 | |
107 | + tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0); | 149 | if (is_vector) { |
108 | + tcg_gen_neg_i64(d, d); | 150 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, |
109 | +} | 151 | } else { |
110 | + | 152 | TCGv_i64 tcg_rt = cpu_reg(s, rt); |
111 | +static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) | 153 | bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); |
112 | +{ | 154 | + |
113 | + tcg_gen_and_vec(vece, d, a, b); | 155 | if (is_store) { |
114 | + tcg_gen_dupi_vec(vece, a, 0); | 156 | - do_gpr_st(s, tcg_rt, clean_addr, size, |
115 | + tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a); | 157 | + do_gpr_st(s, tcg_rt, clean_addr, memop, |
116 | +} | 158 | true, rt, iss_sf, false); |
117 | + | 159 | } else { |
118 | +const GVecGen3 cmtst_op[4] = { | 160 | - do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, |
119 | + { .fni4 = gen_helper_neon_tst_u8, | 161 | + do_gpr_ld(s, tcg_rt, clean_addr, memop, |
120 | + .fniv = gen_cmtst_vec, | 162 | is_extended, true, rt, iss_sf, false); |
121 | + .vece = MO_8 }, | 163 | } |
122 | + { .fni4 = gen_helper_neon_tst_u16, | 164 | } |
123 | + .fniv = gen_cmtst_vec, | 165 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, |
124 | + .vece = MO_16 }, | 166 | int rn = extract32(insn, 5, 5); |
125 | + { .fni4 = gen_cmtst_i32, | 167 | unsigned int imm12 = extract32(insn, 10, 12); |
126 | + .fniv = gen_cmtst_vec, | 168 | unsigned int offset; |
127 | + .vece = MO_32 }, | 169 | - |
128 | + { .fni8 = gen_cmtst_i64, | 170 | TCGv_i64 clean_addr, dirty_addr; |
129 | + .fniv = gen_cmtst_vec, | 171 | - |
130 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | 172 | bool is_store; |
131 | + .vece = MO_64 }, | 173 | bool is_signed = false; |
132 | +}; | 174 | bool is_extended = false; |
133 | + | 175 | + MemOp memop; |
134 | /* Translate a NEON data processing instruction. Return nonzero if the | 176 | |
135 | instruction is invalid. | 177 | if (is_vector) { |
136 | We process data in a mixture of 32-bit and 64-bit chunks. | 178 | size |= (opc & 2) << 1; |
137 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 179 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, |
138 | tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, | 180 | return; |
139 | u ? &mls_op[size] : &mla_op[size]); | 181 | } |
140 | return 0; | 182 | is_store = (opc == 0); |
141 | + | 183 | - is_signed = extract32(opc, 1, 1); |
142 | + case NEON_3R_VTST_VCEQ: | 184 | + is_signed = !is_store && extract32(opc, 1, 1); |
143 | + if (u) { /* VCEQ */ | 185 | is_extended = (size < 3) && extract32(opc, 0, 1); |
144 | + tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs, | 186 | } |
145 | + vec_size, vec_size); | 187 | |
146 | + } else { /* VTST */ | 188 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, |
147 | + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, | 189 | dirty_addr = read_cpu_reg_sp(s, rn, 1); |
148 | + vec_size, vec_size, &cmtst_op[size]); | 190 | offset = imm12 << size; |
149 | + } | 191 | tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); |
150 | + return 0; | 192 | + |
151 | + | 193 | + memop = finalize_memop(s, size + is_signed * MO_SIGN); |
152 | + case NEON_3R_VCGT: | 194 | clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size); |
153 | + tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size, | 195 | |
154 | + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); | 196 | if (is_vector) { |
155 | + return 0; | 197 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, |
156 | + | 198 | TCGv_i64 tcg_rt = cpu_reg(s, rt); |
157 | + case NEON_3R_VCGE: | 199 | bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); |
158 | + tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size, | 200 | if (is_store) { |
159 | + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); | 201 | - do_gpr_st(s, tcg_rt, clean_addr, size, |
160 | + return 0; | 202 | - true, rt, iss_sf, false); |
161 | } | 203 | + do_gpr_st(s, tcg_rt, clean_addr, memop, true, rt, iss_sf, false); |
162 | 204 | } else { | |
163 | if (size == 3) { | 205 | - do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, |
164 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 206 | + do_gpr_ld(s, tcg_rt, clean_addr, memop, |
165 | case NEON_3R_VQSUB: | 207 | is_extended, true, rt, iss_sf, false); |
166 | GEN_NEON_INTEGER_OP_ENV(qsub); | 208 | } |
167 | break; | 209 | } |
168 | - case NEON_3R_VCGT: | 210 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, |
169 | - GEN_NEON_INTEGER_OP(cgt); | 211 | bool a = extract32(insn, 23, 1); |
170 | - break; | 212 | TCGv_i64 tcg_rs, tcg_rt, clean_addr; |
171 | - case NEON_3R_VCGE: | 213 | AtomicThreeOpFn *fn = NULL; |
172 | - GEN_NEON_INTEGER_OP(cge); | 214 | - MemOp mop = s->be_data | size | MO_ALIGN; |
173 | - break; | 215 | + MemOp mop = finalize_memop(s, size | MO_ALIGN); |
174 | case NEON_3R_VSHL: | 216 | |
175 | GEN_NEON_INTEGER_OP(shl); | 217 | if (is_vector || !dc_isar_feature(aa64_atomics, s)) { |
176 | break; | 218 | unallocated_encoding(s); |
177 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 219 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, |
178 | tmp2 = neon_load_reg(rd, pass); | 220 | * full load-acquire (we only need "load-acquire processor consistent"), |
179 | gen_neon_add(size, tmp, tmp2); | 221 | * but we choose to implement them as full LDAQ. |
180 | break; | 222 | */ |
181 | - case NEON_3R_VTST_VCEQ: | 223 | - do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, |
182 | - if (!u) { /* VTST */ | 224 | + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop, false, |
183 | - switch (size) { | 225 | true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); |
184 | - case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; | 226 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); |
185 | - case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; | 227 | return; |
186 | - case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; | 228 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn, |
187 | - default: abort(); | 229 | bool use_key_a = !extract32(insn, 23, 1); |
188 | - } | 230 | int offset; |
189 | - } else { /* VCEQ */ | 231 | TCGv_i64 clean_addr, dirty_addr, tcg_rt; |
190 | - switch (size) { | 232 | + MemOp memop; |
191 | - case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; | 233 | |
192 | - case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; | 234 | if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) { |
193 | - case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; | 235 | unallocated_encoding(s); |
194 | - default: abort(); | 236 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn, |
195 | - } | 237 | offset = sextract32(offset << size, 0, 10 + size); |
196 | - } | 238 | tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); |
197 | - break; | 239 | |
198 | case NEON_3R_VMUL: | 240 | + memop = finalize_memop(s, size); |
199 | /* VMUL.P8; other cases already eliminated. */ | 241 | + |
200 | gen_helper_neon_mul_p8(tmp, tmp, tmp2); | 242 | /* Note that "clean" and "dirty" here refer to TBI not PAC. */ |
243 | clean_addr = gen_mte_check1(s, dirty_addr, false, | ||
244 | is_wback || rn != 31, size); | ||
245 | |||
246 | tcg_rt = cpu_reg(s, rt); | ||
247 | - do_gpr_ld(s, tcg_rt, clean_addr, size, | ||
248 | + do_gpr_ld(s, tcg_rt, clean_addr, memop, | ||
249 | /* extend */ false, /* iss_valid */ !is_wback, | ||
250 | /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false); | ||
251 | |||
252 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | ||
253 | } | ||
254 | |||
255 | /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
256 | - mop = size | MO_ALIGN; | ||
257 | + mop = finalize_memop(s, size | MO_ALIGN); | ||
258 | |||
259 | switch (opc) { | ||
260 | case 0: /* STLURB */ | ||
201 | -- | 261 | -- |
202 | 2.19.1 | 262 | 2.34.1 |
203 | |||
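For reference, the CMTST expanders being moved in the left-hand series above implement exactly the comparison described by the in-code comment: each element becomes all ones when the AND of the two operands is non-zero, and zero otherwise (the and/setcond/neg sequence). A minimal C sketch of the per-element semantics follows; the helper name is invented for this example and is not part of the patch.

    #include <stdint.h>

    /* All ones if (a & b) != 0, else zero -- what gen_cmtst_i32() computes
     * with tcg_gen_and_i32/setcondi/neg.  Sketch only; name is invented. */
    static uint32_t cmtst32(uint32_t a, uint32_t b)
    {
        return (a & b) != 0 ? UINT32_MAX : 0;
    }

The same pattern covers the 8-, 16- and 64-bit element sizes handled by the cmtst_op[] table.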
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 3 | We are going to need the complete memop beforehand, |
4 | so let's not compute it twice. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20181016223115.24100-8-richard.henderson@linaro.org | 9 | Message-id: 20230530191438.411344-12-richard.henderson@linaro.org |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | --- | 11 | --- |
9 | target/arm/cpu.h | 16 +++++++++++++++- | 12 | target/arm/tcg/translate-a64.c | 43 ++++++++++++++++++---------------- |
10 | linux-user/aarch64/signal.c | 4 ++-- | 13 | 1 file changed, 23 insertions(+), 20 deletions(-) |
11 | linux-user/elfload.c | 2 +- | ||
12 | linux-user/syscall.c | 10 ++++++---- | ||
13 | target/arm/cpu64.c | 5 ++++- | ||
14 | target/arm/helper.c | 9 ++++++--- | ||
15 | target/arm/machine.c | 3 +-- | ||
16 | target/arm/translate-a64.c | 4 ++-- | ||
17 | 8 files changed, 37 insertions(+), 16 deletions(-) | ||
18 | 14 | ||
19 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 15 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
20 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/arm/cpu.h | 17 | --- a/target/arm/tcg/translate-a64.c |
22 | +++ b/target/arm/cpu.h | 18 | +++ b/target/arm/tcg/translate-a64.c |
23 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) | 19 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, |
24 | FIELD(ID_AA64ISAR1, SB, 36, 4) | 20 | /* |
25 | FIELD(ID_AA64ISAR1, SPECRES, 40, 4) | 21 | * Store from FP register to memory |
26 | 22 | */ | |
27 | +FIELD(ID_AA64PFR0, EL0, 0, 4) | 23 | -static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) |
28 | +FIELD(ID_AA64PFR0, EL1, 4, 4) | 24 | +static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop) |
29 | +FIELD(ID_AA64PFR0, EL2, 8, 4) | 25 | { |
30 | +FIELD(ID_AA64PFR0, EL3, 12, 4) | 26 | /* This writes the bottom N bits of a 128 bit wide vector to memory */ |
31 | +FIELD(ID_AA64PFR0, FP, 16, 4) | 27 | TCGv_i64 tmplo = tcg_temp_new_i64(); |
32 | +FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) | 28 | - MemOp mop = finalize_memop_asimd(s, size); |
33 | +FIELD(ID_AA64PFR0, GIC, 24, 4) | 29 | |
34 | +FIELD(ID_AA64PFR0, RAS, 28, 4) | 30 | tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64)); |
35 | +FIELD(ID_AA64PFR0, SVE, 32, 4) | 31 | |
32 | - if (size < MO_128) { | ||
33 | + if ((mop & MO_SIZE) < MO_128) { | ||
34 | tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
35 | } else { | ||
36 | TCGv_i64 tmphi = tcg_temp_new_i64(); | ||
37 | @@ -XXX,XX +XXX,XX @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) | ||
38 | /* | ||
39 | * Load from memory to FP register | ||
40 | */ | ||
41 | -static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) | ||
42 | +static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop) | ||
43 | { | ||
44 | /* This always zero-extends and writes to a full 128 bit wide vector */ | ||
45 | TCGv_i64 tmplo = tcg_temp_new_i64(); | ||
46 | TCGv_i64 tmphi = NULL; | ||
47 | - MemOp mop = finalize_memop_asimd(s, size); | ||
48 | |||
49 | - if (size < MO_128) { | ||
50 | + if ((mop & MO_SIZE) < MO_128) { | ||
51 | tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop); | ||
52 | } else { | ||
53 | TCGv_i128 t16 = tcg_temp_new_i128(); | ||
54 | @@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) | ||
55 | bool is_signed = false; | ||
56 | int size = 2; | ||
57 | TCGv_i64 tcg_rt, clean_addr; | ||
58 | + MemOp memop; | ||
59 | |||
60 | if (is_vector) { | ||
61 | if (opc == 3) { | ||
62 | @@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) | ||
63 | if (!fp_access_check(s)) { | ||
64 | return; | ||
65 | } | ||
66 | + memop = finalize_memop_asimd(s, size); | ||
67 | } else { | ||
68 | if (opc == 3) { | ||
69 | /* PRFM (literal) : prefetch */ | ||
70 | @@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) | ||
71 | } | ||
72 | size = 2 + extract32(opc, 0, 1); | ||
73 | is_signed = extract32(opc, 1, 1); | ||
74 | + memop = finalize_memop(s, size + is_signed * MO_SIGN); | ||
75 | } | ||
76 | |||
77 | tcg_rt = cpu_reg(s, rt); | ||
78 | |||
79 | clean_addr = tcg_temp_new_i64(); | ||
80 | gen_pc_plus_diff(s, clean_addr, imm); | ||
36 | + | 81 | + |
37 | QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK); | 82 | if (is_vector) { |
38 | 83 | - do_fp_ld(s, rt, clean_addr, size); | |
39 | /* If adding a feature bit which corresponds to a Linux ELF | 84 | + do_fp_ld(s, rt, clean_addr, memop); |
40 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 85 | } else { |
41 | ARM_FEATURE_PMU, /* has PMU support */ | 86 | /* Only unsigned 32bit loads target 32bit registers. */ |
42 | ARM_FEATURE_VBAR, /* has cp15 VBAR */ | 87 | bool iss_sf = opc != 0; |
43 | ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ | 88 | - MemOp memop = finalize_memop(s, size + is_signed * MO_SIGN); |
44 | - ARM_FEATURE_SVE, /* has Scalable Vector Extension */ | 89 | - |
45 | ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ | 90 | do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false); |
46 | ARM_FEATURE_M_MAIN, /* M profile Main Extension */ | 91 | } |
47 | }; | ||
48 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) | ||
49 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; | ||
50 | } | 92 | } |
51 | 93 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | |
52 | +static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) | 94 | (wback || rn != 31) && !set_tag, 2 << size); |
53 | +{ | 95 | |
54 | + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; | 96 | if (is_vector) { |
55 | +} | 97 | + MemOp mop = finalize_memop_asimd(s, size); |
56 | + | 98 | + |
57 | /* | 99 | if (is_load) { |
58 | * Forward to the above feature tests given an ARMCPU pointer. | 100 | - do_fp_ld(s, rt, clean_addr, size); |
59 | */ | 101 | + do_fp_ld(s, rt, clean_addr, mop); |
60 | diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c | 102 | } else { |
61 | index XXXXXXX..XXXXXXX 100644 | 103 | - do_fp_st(s, rt, clean_addr, size); |
62 | --- a/linux-user/aarch64/signal.c | 104 | + do_fp_st(s, rt, clean_addr, mop); |
63 | +++ b/linux-user/aarch64/signal.c | 105 | } |
64 | @@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env, | 106 | tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size); |
65 | break; | 107 | if (is_load) { |
66 | 108 | - do_fp_ld(s, rt2, clean_addr, size); | |
67 | case TARGET_SVE_MAGIC: | 109 | + do_fp_ld(s, rt2, clean_addr, mop); |
68 | - if (arm_feature(env, ARM_FEATURE_SVE)) { | 110 | } else { |
69 | + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { | 111 | - do_fp_st(s, rt2, clean_addr, size); |
70 | vq = (env->vfp.zcr_el[1] & 0xf) + 1; | 112 | + do_fp_st(s, rt2, clean_addr, mop); |
71 | sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); | 113 | } |
72 | if (!sve && size == sve_size) { | 114 | } else { |
73 | @@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka, | 115 | TCGv_i64 tcg_rt = cpu_reg(s, rt); |
74 | &layout); | 116 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
75 | 117 | if (!fp_access_check(s)) { | |
76 | /* SVE state needs saving only if it exists. */ | 118 | return; |
77 | - if (arm_feature(env, ARM_FEATURE_SVE)) { | 119 | } |
78 | + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { | 120 | + memop = finalize_memop_asimd(s, size); |
79 | vq = (env->vfp.zcr_el[1] & 0xf) + 1; | 121 | } else { |
80 | sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); | 122 | if (size == 3 && opc == 2) { |
81 | sve_ofs = alloc_sigframe_space(sve_size, &layout); | 123 | /* PRFM - prefetch */ |
82 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | 124 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
83 | index XXXXXXX..XXXXXXX 100644 | 125 | is_store = (opc == 0); |
84 | --- a/linux-user/elfload.c | 126 | is_signed = !is_store && extract32(opc, 1, 1); |
85 | +++ b/linux-user/elfload.c | 127 | is_extended = (size < 3) && extract32(opc, 0, 1); |
86 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | 128 | + memop = finalize_memop(s, size + is_signed * MO_SIGN); |
87 | GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); | ||
88 | GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); | ||
89 | GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); | ||
90 | - GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE); | ||
91 | + GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); | ||
92 | |||
93 | #undef GET_FEATURE | ||
94 | #undef GET_FEATURE_ID | ||
95 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c | ||
96 | index XXXXXXX..XXXXXXX 100644 | ||
97 | --- a/linux-user/syscall.c | ||
98 | +++ b/linux-user/syscall.c | ||
99 | @@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, | ||
100 | * even though the current architectural maximum is VQ=16. | ||
101 | */ | ||
102 | ret = -TARGET_EINVAL; | ||
103 | - if (arm_feature(cpu_env, ARM_FEATURE_SVE) | ||
104 | + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env)) | ||
105 | && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { | ||
106 | CPUARMState *env = cpu_env; | ||
107 | ARMCPU *cpu = arm_env_get_cpu(env); | ||
108 | @@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, | ||
109 | return ret; | ||
110 | case TARGET_PR_SVE_GET_VL: | ||
111 | ret = -TARGET_EINVAL; | ||
112 | - if (arm_feature(cpu_env, ARM_FEATURE_SVE)) { | ||
113 | - CPUARMState *env = cpu_env; | ||
114 | - ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16; | ||
115 | + { | ||
116 | + ARMCPU *cpu = arm_env_get_cpu(cpu_env); | ||
117 | + if (cpu_isar_feature(aa64_sve, cpu)) { | ||
118 | + ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; | ||
119 | + } | ||
120 | } | ||
121 | return ret; | ||
122 | #endif /* AARCH64 */ | ||
123 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | ||
124 | index XXXXXXX..XXXXXXX 100644 | ||
125 | --- a/target/arm/cpu64.c | ||
126 | +++ b/target/arm/cpu64.c | ||
127 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
128 | t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); | ||
129 | cpu->isar.id_aa64isar1 = t; | ||
130 | |||
131 | + t = cpu->isar.id_aa64pfr0; | ||
132 | + t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); | ||
133 | + cpu->isar.id_aa64pfr0 = t; | ||
134 | + | ||
135 | /* Replicate the same data to the 32-bit id registers. */ | ||
136 | u = cpu->isar.id_isar5; | ||
137 | u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ | ||
138 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
139 | * present in either. | ||
140 | */ | ||
141 | set_feature(&cpu->env, ARM_FEATURE_V8_FP16); | ||
142 | - set_feature(&cpu->env, ARM_FEATURE_SVE); | ||
143 | /* For usermode -cpu max we can use a larger and more efficient DCZ | ||
144 | * blocksize since we don't have to follow what the hardware does. | ||
145 | */ | ||
146 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
147 | index XXXXXXX..XXXXXXX 100644 | ||
148 | --- a/target/arm/helper.c | ||
149 | +++ b/target/arm/helper.c | ||
150 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | ||
151 | define_one_arm_cp_reg(cpu, &sctlr); | ||
152 | } | 129 | } |
153 | 130 | ||
154 | - if (arm_feature(env, ARM_FEATURE_SVE)) { | 131 | switch (idx) { |
155 | + if (cpu_isar_feature(aa64_sve, cpu)) { | 132 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
156 | define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); | ||
157 | if (arm_feature(env, ARM_FEATURE_EL2)) { | ||
158 | define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); | ||
159 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
160 | uint32_t flags; | ||
161 | |||
162 | if (is_a64(env)) { | ||
163 | + ARMCPU *cpu = arm_env_get_cpu(env); | ||
164 | + | ||
165 | *pc = env->pc; | ||
166 | flags = ARM_TBFLAG_AARCH64_STATE_MASK; | ||
167 | /* Get control bits for tagged addresses */ | ||
168 | flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT); | ||
169 | flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); | ||
170 | |||
171 | - if (arm_feature(env, ARM_FEATURE_SVE)) { | ||
172 | + if (cpu_isar_feature(aa64_sve, cpu)) { | ||
173 | int sve_el = sve_exception_el(env, current_el); | ||
174 | uint32_t zcr_len; | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) | ||
177 | void aarch64_sve_change_el(CPUARMState *env, int old_el, | ||
178 | int new_el, bool el0_a64) | ||
179 | { | ||
180 | + ARMCPU *cpu = arm_env_get_cpu(env); | ||
181 | int old_len, new_len; | ||
182 | bool old_a64, new_a64; | ||
183 | |||
184 | /* Nothing to do if no SVE. */ | ||
185 | - if (!arm_feature(env, ARM_FEATURE_SVE)) { | ||
186 | + if (!cpu_isar_feature(aa64_sve, cpu)) { | ||
187 | return; | ||
188 | } | 133 | } |
189 | 134 | ||
190 | diff --git a/target/arm/machine.c b/target/arm/machine.c | 135 | memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); |
191 | index XXXXXXX..XXXXXXX 100644 | 136 | - memop = finalize_memop(s, size + is_signed * MO_SIGN); |
192 | --- a/target/arm/machine.c | 137 | |
193 | +++ b/target/arm/machine.c | 138 | clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store, |
194 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_iwmmxt = { | 139 | writeback || rn != 31, |
195 | static bool sve_needed(void *opaque) | 140 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, |
196 | { | 141 | |
197 | ARMCPU *cpu = opaque; | 142 | if (is_vector) { |
198 | - CPUARMState *env = &cpu->env; | 143 | if (is_store) { |
199 | 144 | - do_fp_st(s, rt, clean_addr, size); | |
200 | - return arm_feature(env, ARM_FEATURE_SVE); | 145 | + do_fp_st(s, rt, clean_addr, memop); |
201 | + return cpu_isar_feature(aa64_sve, cpu); | 146 | } else { |
202 | } | 147 | - do_fp_ld(s, rt, clean_addr, size); |
203 | 148 | + do_fp_ld(s, rt, clean_addr, memop); | |
204 | /* The first two words of each Zreg is stored in VFP state. */ | ||
205 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
206 | index XXXXXXX..XXXXXXX 100644 | ||
207 | --- a/target/arm/translate-a64.c | ||
208 | +++ b/target/arm/translate-a64.c | ||
209 | @@ -XXX,XX +XXX,XX @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f, | ||
210 | cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n", | ||
211 | vfp_get_fpcr(env), vfp_get_fpsr(env)); | ||
212 | |||
213 | - if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) { | ||
214 | + if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { | ||
215 | int j, zcr_len = sve_zcr_len_for_el(env, el); | ||
216 | |||
217 | for (i = 0; i <= FFR_PRED_NUM; i++) { | ||
218 | @@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) | ||
219 | unallocated_encoding(s); | ||
220 | break; | ||
221 | case 0x2: | ||
222 | - if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) { | ||
223 | + if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { | ||
224 | unallocated_encoding(s); | ||
225 | } | 149 | } |
226 | break; | 150 | } else { |
151 | TCGv_i64 tcg_rt = cpu_reg(s, rt); | ||
152 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, | ||
153 | |||
154 | if (is_vector) { | ||
155 | if (is_store) { | ||
156 | - do_fp_st(s, rt, clean_addr, size); | ||
157 | + do_fp_st(s, rt, clean_addr, memop); | ||
158 | } else { | ||
159 | - do_fp_ld(s, rt, clean_addr, size); | ||
160 | + do_fp_ld(s, rt, clean_addr, memop); | ||
161 | } | ||
162 | } else { | ||
163 | TCGv_i64 tcg_rt = cpu_reg(s, rt); | ||
164 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, | ||
165 | |||
166 | if (is_vector) { | ||
167 | if (is_store) { | ||
168 | - do_fp_st(s, rt, clean_addr, size); | ||
169 | + do_fp_st(s, rt, clean_addr, memop); | ||
170 | } else { | ||
171 | - do_fp_ld(s, rt, clean_addr, size); | ||
172 | + do_fp_ld(s, rt, clean_addr, memop); | ||
173 | } | ||
174 | } else { | ||
175 | TCGv_i64 tcg_rt = cpu_reg(s, rt); | ||
227 | -- | 176 | -- |
228 | 2.19.1 | 177 | 2.34.1 |
229 | 178 | ||
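The left-hand series above switches SVE detection from the ARM_FEATURE_SVE flag to an ID-register test, cpu_isar_feature(aa64_sve, ...). In plain C terms, such a test just extracts the 4-bit SVE field that the patch defines at bit 32 of ID_AA64PFR0 and treats any non-zero value as "implemented". A sketch follows; the helper name is invented for illustration and is not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* Equivalent of FIELD_EX64(id_aa64pfr0, ID_AA64PFR0, SVE) != 0:
     * the SVE field occupies bits [35:32] of ID_AA64PFR0.  Sketch only. */
    static bool id_aa64pfr0_has_sve(uint64_t id_aa64pfr0)
    {
        return ((id_aa64pfr0 >> 32) & 0xf) != 0;
    }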
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Having V6 alone imply jazelle was wrong for cortex-m0. | 3 | Pass the completed memop to gen_mte_check1_mmuidx. |
4 | Change to an assertion for V6 & !M. | 4 | For the moment, do nothing more than extract the size. |
5 | 5 | ||
6 | This was harmless, because the only place we tested ARM_FEATURE_JAZELLE | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
7 | was for 'bxj' in disas_arm(), which is unreachable for M-profile cores. | ||
8 | |||
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | Message-id: 20181016223115.24100-6-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-13-richard.henderson@linaro.org |
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | --- | 10 | --- |
15 | target/arm/cpu.h | 6 +++++- | 11 | target/arm/tcg/translate-a64.h | 2 +- |
16 | target/arm/cpu.c | 17 ++++++++++++++--- | 12 | target/arm/tcg/translate-a64.c | 82 ++++++++++++++++++---------------- |
17 | target/arm/translate.c | 2 +- | 13 | target/arm/tcg/translate-sve.c | 7 +-- |
18 | 3 files changed, 20 insertions(+), 5 deletions(-) | 14 | 3 files changed, 49 insertions(+), 42 deletions(-) |
19 | 15 | ||
20 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 16 | diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h |
21 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/target/arm/cpu.h | 18 | --- a/target/arm/tcg/translate-a64.h |
23 | +++ b/target/arm/cpu.h | 19 | +++ b/target/arm/tcg/translate-a64.h |
24 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 20 | @@ -XXX,XX +XXX,XX @@ static inline bool sme_smza_enabled_check(DisasContext *s) |
25 | ARM_FEATURE_PMU, /* has PMU support */ | 21 | |
26 | ARM_FEATURE_VBAR, /* has cp15 VBAR */ | 22 | TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); |
27 | ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ | 23 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, |
28 | - ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */ | 24 | - bool tag_checked, int log2_size); |
29 | ARM_FEATURE_SVE, /* has Scalable Vector Extension */ | 25 | + bool tag_checked, MemOp memop); |
30 | ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ | 26 | TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, |
31 | ARM_FEATURE_M_MAIN, /* M profile Main Extension */ | 27 | bool tag_checked, int size); |
32 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_arm_div(const ARMISARegisters *id) | 28 | |
33 | return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; | 29 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/arm/tcg/translate-a64.c | ||
32 | +++ b/target/arm/tcg/translate-a64.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static void gen_probe_access(DisasContext *s, TCGv_i64 ptr, | ||
34 | */ | ||
35 | static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, | ||
36 | bool is_write, bool tag_checked, | ||
37 | - int log2_size, bool is_unpriv, | ||
38 | + MemOp memop, bool is_unpriv, | ||
39 | int core_idx) | ||
40 | { | ||
41 | if (tag_checked && s->mte_active[is_unpriv]) { | ||
42 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, | ||
43 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
44 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
45 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | ||
46 | - desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1); | ||
47 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1); | ||
48 | |||
49 | ret = tcg_temp_new_i64(); | ||
50 | gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); | ||
51 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, | ||
34 | } | 52 | } |
35 | 53 | ||
36 | +static inline bool isar_feature_jazelle(const ARMISARegisters *id) | 54 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, |
37 | +{ | 55 | - bool tag_checked, int log2_size) |
38 | + return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; | 56 | + bool tag_checked, MemOp memop) |
39 | +} | ||
40 | + | ||
41 | static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) | ||
42 | { | 57 | { |
43 | return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; | 58 | - return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size, |
44 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | 59 | + return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop, |
45 | index XXXXXXX..XXXXXXX 100644 | 60 | false, get_mem_index(s)); |
46 | --- a/target/arm/cpu.c | 61 | } |
47 | +++ b/target/arm/cpu.c | 62 | |
48 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | 63 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, |
49 | } | 64 | int size, bool is_pair) |
50 | if (arm_feature(env, ARM_FEATURE_V6)) { | 65 | { |
51 | set_feature(env, ARM_FEATURE_V5); | 66 | int idx = get_mem_index(s); |
52 | - set_feature(env, ARM_FEATURE_JAZELLE); | 67 | - MemOp memop; |
53 | if (!arm_feature(env, ARM_FEATURE_M)) { | 68 | TCGv_i64 dirty_addr, clean_addr; |
54 | + assert(cpu_isar_feature(jazelle, cpu)); | 69 | + MemOp memop; |
55 | set_feature(env, ARM_FEATURE_AUXCR); | ||
56 | } | ||
57 | } | ||
58 | @@ -XXX,XX +XXX,XX @@ static void arm926_initfn(Object *obj) | ||
59 | set_feature(&cpu->env, ARM_FEATURE_VFP); | ||
60 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | ||
61 | set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); | ||
62 | - set_feature(&cpu->env, ARM_FEATURE_JAZELLE); | ||
63 | cpu->midr = 0x41069265; | ||
64 | cpu->reset_fpsid = 0x41011090; | ||
65 | cpu->ctr = 0x1dd20d2; | ||
66 | cpu->reset_sctlr = 0x00090078; | ||
67 | + | 70 | + |
68 | + /* | 71 | + /* |
69 | + * ARMv5 does not have the ID_ISAR registers, but we can still | 72 | + * For pairs: |
70 | + * set the field to indicate Jazelle support within QEMU. | 73 | + * if size == 2, the operation is single-copy atomic for the doubleword. |
74 | + * if size == 3, the operation is single-copy atomic for *each* doubleword, | ||
75 | + * not the entire quadword, however it must be quadword aligned. | ||
71 | + */ | 76 | + */ |
72 | + cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); | 77 | + memop = size + is_pair; |
78 | + if (memop == MO_128) { | ||
79 | + memop = finalize_memop_atom(s, MO_128 | MO_ALIGN, | ||
80 | + MO_ATOM_IFALIGN_PAIR); | ||
81 | + } else { | ||
82 | + memop = finalize_memop(s, memop | MO_ALIGN); | ||
83 | + } | ||
84 | |||
85 | s->is_ldex = true; | ||
86 | dirty_addr = cpu_reg_sp(s, rn); | ||
87 | - clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, size); | ||
88 | + clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop); | ||
89 | |||
90 | g_assert(size <= 3); | ||
91 | if (is_pair) { | ||
92 | g_assert(size >= 2); | ||
93 | if (size == 2) { | ||
94 | - /* The pair must be single-copy atomic for the doubleword. */ | ||
95 | - memop = finalize_memop(s, MO_64 | MO_ALIGN); | ||
96 | tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); | ||
97 | if (s->be_data == MO_LE) { | ||
98 | tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32); | ||
99 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, | ||
100 | tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32); | ||
101 | } | ||
102 | } else { | ||
103 | - /* | ||
104 | - * The pair must be single-copy atomic for *each* doubleword, not | ||
105 | - * the entire quadword, however it must be quadword aligned. | ||
106 | - * Expose the complete load to tcg, for ease of tlb lookup, | ||
107 | - * but indicate that only 8-byte atomicity is required. | ||
108 | - */ | ||
109 | TCGv_i128 t16 = tcg_temp_new_i128(); | ||
110 | |||
111 | - memop = finalize_memop_atom(s, MO_128 | MO_ALIGN_16, | ||
112 | - MO_ATOM_IFALIGN_PAIR); | ||
113 | tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop); | ||
114 | |||
115 | if (s->be_data == MO_LE) { | ||
116 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, | ||
117 | tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high); | ||
118 | } | ||
119 | } else { | ||
120 | - memop = finalize_memop(s, size | MO_ALIGN); | ||
121 | tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); | ||
122 | tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); | ||
123 | } | ||
124 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
125 | TCGLabel *fail_label = gen_new_label(); | ||
126 | TCGLabel *done_label = gen_new_label(); | ||
127 | TCGv_i64 tmp, dirty_addr, clean_addr; | ||
128 | + MemOp memop; | ||
129 | + | ||
130 | + memop = (size + is_pair) | MO_ALIGN; | ||
131 | + memop = finalize_memop(s, memop); | ||
132 | |||
133 | dirty_addr = cpu_reg_sp(s, rn); | ||
134 | - clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, size); | ||
135 | + clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, memop); | ||
136 | |||
137 | tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label); | ||
138 | |||
139 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
140 | } | ||
141 | tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, | ||
142 | cpu_exclusive_val, tmp, | ||
143 | - get_mem_index(s), | ||
144 | - MO_64 | MO_ALIGN | s->be_data); | ||
145 | + get_mem_index(s), memop); | ||
146 | tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); | ||
147 | } else { | ||
148 | TCGv_i128 t16 = tcg_temp_new_i128(); | ||
149 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
150 | } | ||
151 | |||
152 | tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16, | ||
153 | - get_mem_index(s), | ||
154 | - MO_128 | MO_ALIGN | s->be_data); | ||
155 | + get_mem_index(s), memop); | ||
156 | |||
157 | a = tcg_temp_new_i64(); | ||
158 | b = tcg_temp_new_i64(); | ||
159 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
160 | } | ||
161 | } else { | ||
162 | tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val, | ||
163 | - cpu_reg(s, rt), get_mem_index(s), | ||
164 | - size | MO_ALIGN | s->be_data); | ||
165 | + cpu_reg(s, rt), get_mem_index(s), memop); | ||
166 | tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); | ||
167 | } | ||
168 | tcg_gen_mov_i64(cpu_reg(s, rd), tmp); | ||
169 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt, | ||
170 | TCGv_i64 tcg_rt = cpu_reg(s, rt); | ||
171 | int memidx = get_mem_index(s); | ||
172 | TCGv_i64 clean_addr; | ||
173 | + MemOp memop; | ||
174 | |||
175 | if (rn == 31) { | ||
176 | gen_check_sp_alignment(s); | ||
177 | } | ||
178 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size); | ||
179 | - tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx, | ||
180 | - size | MO_ALIGN | s->be_data); | ||
181 | + memop = finalize_memop(s, size | MO_ALIGN); | ||
182 | + clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); | ||
183 | + tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, | ||
184 | + memidx, memop); | ||
73 | } | 185 | } |
74 | 186 | ||
75 | static void arm946_initfn(Object *obj) | 187 | static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, |
76 | @@ -XXX,XX +XXX,XX @@ static void arm1026_initfn(Object *obj) | 188 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, |
77 | set_feature(&cpu->env, ARM_FEATURE_AUXCR); | 189 | TCGv_i64 t2 = cpu_reg(s, rt + 1); |
78 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | 190 | TCGv_i64 clean_addr; |
79 | set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); | 191 | int memidx = get_mem_index(s); |
80 | - set_feature(&cpu->env, ARM_FEATURE_JAZELLE); | 192 | + MemOp memop; |
81 | cpu->midr = 0x4106a262; | 193 | |
82 | cpu->reset_fpsid = 0x410110a0; | 194 | if (rn == 31) { |
83 | cpu->ctr = 0x1dd20d2; | 195 | gen_check_sp_alignment(s); |
84 | cpu->reset_sctlr = 0x00090078; | 196 | } |
85 | cpu->reset_auxcr = 1; | 197 | |
86 | + | 198 | /* This is a single atomic access, despite the "pair". */ |
87 | + /* | 199 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1); |
88 | + * ARMv5 does not have the ID_ISAR registers, but we can still | 200 | + memop = finalize_memop(s, (size + 1) | MO_ALIGN); |
89 | + * set the field to indicate Jazelle support within QEMU. | 201 | + clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); |
90 | + */ | 202 | |
91 | + cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); | 203 | if (size == 2) { |
92 | + | 204 | TCGv_i64 cmp = tcg_temp_new_i64(); |
93 | { | 205 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, |
94 | /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ | 206 | tcg_gen_concat32_i64(cmp, s2, s1); |
95 | ARMCPRegInfo ifar = { | 207 | } |
96 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 208 | |
209 | - tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, | ||
210 | - MO_64 | MO_ALIGN | s->be_data); | ||
211 | + tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop); | ||
212 | |||
213 | if (s->be_data == MO_LE) { | ||
214 | tcg_gen_extr32_i64(s1, s2, cmp); | ||
215 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, | ||
216 | tcg_gen_concat_i64_i128(cmp, s2, s1); | ||
217 | } | ||
218 | |||
219 | - tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, | ||
220 | - MO_128 | MO_ALIGN | s->be_data); | ||
221 | + tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop); | ||
222 | |||
223 | if (s->be_data == MO_LE) { | ||
224 | tcg_gen_extr_i128_i64(s1, s2, cmp); | ||
225 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
226 | /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
227 | memop = finalize_memop(s, size | MO_ALIGN); | ||
228 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | ||
229 | - true, rn != 31, size); | ||
230 | + true, rn != 31, memop); | ||
231 | do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt, | ||
232 | disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | ||
233 | return; | ||
234 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
235 | /* TODO: ARMv8.4-LSE SCTLR.nAA */ | ||
236 | memop = finalize_memop(s, size | MO_ALIGN); | ||
237 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), | ||
238 | - false, rn != 31, size); | ||
239 | + false, rn != 31, memop); | ||
240 | do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true, | ||
241 | rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); | ||
242 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); | ||
243 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, | ||
244 | tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm); | ||
245 | |||
246 | memop = finalize_memop(s, size + is_signed * MO_SIGN); | ||
247 | - clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size); | ||
248 | + clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, memop); | ||
249 | |||
250 | if (is_vector) { | ||
251 | if (is_store) { | ||
252 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, | ||
253 | tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); | ||
254 | |||
255 | memop = finalize_memop(s, size + is_signed * MO_SIGN); | ||
256 | - clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size); | ||
257 | + clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, memop); | ||
258 | |||
259 | if (is_vector) { | ||
260 | if (is_store) { | ||
261 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, | ||
262 | if (rn == 31) { | ||
263 | gen_check_sp_alignment(s); | ||
264 | } | ||
265 | - clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size); | ||
266 | + clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, mop); | ||
267 | |||
268 | if (o3_opc == 014) { | ||
269 | /* | ||
270 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn, | ||
271 | |||
272 | /* Note that "clean" and "dirty" here refer to TBI not PAC. */ | ||
273 | clean_addr = gen_mte_check1(s, dirty_addr, false, | ||
274 | - is_wback || rn != 31, size); | ||
275 | + is_wback || rn != 31, memop); | ||
276 | |||
277 | tcg_rt = cpu_reg(s, rt); | ||
278 | do_gpr_ld(s, tcg_rt, clean_addr, memop, | ||
279 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c | ||
97 | index XXXXXXX..XXXXXXX 100644 | 280 | index XXXXXXX..XXXXXXX 100644 |
98 | --- a/target/arm/translate.c | 281 | --- a/target/arm/tcg/translate-sve.c |
99 | +++ b/target/arm/translate.c | 282 | +++ b/target/arm/tcg/translate-sve.c |
100 | @@ -XXX,XX +XXX,XX @@ | 283 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) |
101 | #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) | 284 | unsigned msz = dtype_msz(a->dtype); |
102 | /* currently all emulated v5 cores are also v5TE, so don't bother */ | 285 | TCGLabel *over; |
103 | #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) | 286 | TCGv_i64 temp, clean_addr; |
104 | -#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE) | 287 | + MemOp memop; |
105 | +#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s) | 288 | |
106 | #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) | 289 | if (!dc_isar_feature(aa64_sve, s)) { |
107 | #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) | 290 | return false; |
108 | #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) | 291 | @@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) |
292 | /* Load the data. */ | ||
293 | temp = tcg_temp_new_i64(); | ||
294 | tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz); | ||
295 | - clean_addr = gen_mte_check1(s, temp, false, true, msz); | ||
296 | |||
297 | - tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), | ||
298 | - finalize_memop(s, dtype_mop[a->dtype])); | ||
299 | + memop = finalize_memop(s, dtype_mop[a->dtype]); | ||
300 | + clean_addr = gen_mte_check1(s, temp, false, true, memop); | ||
301 | + tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), memop); | ||
302 | |||
303 | /* Broadcast to *all* elements. */ | ||
304 | tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), | ||
109 | -- | 305 | -- |
110 | 2.19.1 | 306 | 2.34.1 |
111 | |||
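A rough, self-contained illustration of the idea behind the gen_mte_check1 change above (an aside, not part of the series): a finalized memop packs both the access size and any alignment requirement into a single value, so passing it -- rather than just "size" -- lets the MTE-check path see everything about the access. The constants and helper names below are invented for the sketch and are not QEMU's real MemOp encoding.

/* memop_sketch.c -- simplified stand-in for a MemOp-style value */
#include <stdio.h>

enum {
    MO_SIZE_MASK   = 0x03,                   /* log2 of the access size in bytes */
    MO_ALIGN_SHIFT = 4,
    MO_ALIGN_MASK  = 0x7 << MO_ALIGN_SHIFT   /* log2 of required alignment, 0 = none */
};

static unsigned memop_bytes(unsigned op)
{
    return 1u << (op & MO_SIZE_MASK);
}

static unsigned memop_align_bytes(unsigned op)
{
    unsigned a = (op & MO_ALIGN_MASK) >> MO_ALIGN_SHIFT;
    return a ? 1u << a : 1;
}

int main(void)
{
    /* an 8-byte access that must also be 8-byte aligned */
    unsigned op = 3 | (3 << MO_ALIGN_SHIFT);
    printf("size=%uB align=%uB\n", memop_bytes(op), memop_align_bytes(op));
    return 0;
}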
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Pass the individual memop to gen_mte_checkN. | ||
4 | For the moment, do nothing with it. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20181011205206.3552-4-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-14-richard.henderson@linaro.org |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate-a64.c | 28 +++------------------------- | 11 | target/arm/tcg/translate-a64.h | 2 +- |
9 | 1 file changed, 3 insertions(+), 25 deletions(-) | 12 | target/arm/tcg/translate-a64.c | 31 +++++++++++++++++++------------ |
13 | target/arm/tcg/translate-sve.c | 4 ++-- | ||
14 | 3 files changed, 22 insertions(+), 15 deletions(-) | ||
10 | 15 | ||
11 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 16 | diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h |
12 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate-a64.c | 18 | --- a/target/arm/tcg/translate-a64.h |
14 | +++ b/target/arm/translate-a64.c | 19 | +++ b/target/arm/tcg/translate-a64.h |
20 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); | ||
21 | TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
22 | bool tag_checked, MemOp memop); | ||
23 | TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
24 | - bool tag_checked, int size); | ||
25 | + bool tag_checked, int total_size, MemOp memop); | ||
26 | |||
27 | /* We should have at some point before trying to access an FP register | ||
28 | * done the necessary access check, so assert that | ||
29 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/arm/tcg/translate-a64.c | ||
32 | +++ b/target/arm/tcg/translate-a64.c | ||
33 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
34 | * For MTE, check multiple logical sequential accesses. | ||
35 | */ | ||
36 | TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
37 | - bool tag_checked, int size) | ||
38 | + bool tag_checked, int total_size, MemOp single_mop) | ||
39 | { | ||
40 | if (tag_checked && s->mte_active[0]) { | ||
41 | TCGv_i64 ret; | ||
42 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, | ||
43 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
44 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
45 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); | ||
46 | - desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1); | ||
47 | + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1); | ||
48 | |||
49 | ret = tcg_temp_new_i64(); | ||
50 | gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); | ||
51 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
52 | bool is_vector = extract32(insn, 26, 1); | ||
53 | bool is_load = extract32(insn, 22, 1); | ||
54 | int opc = extract32(insn, 30, 2); | ||
55 | - | ||
56 | bool is_signed = false; | ||
57 | bool postindex = false; | ||
58 | bool wback = false; | ||
59 | bool set_tag = false; | ||
60 | - | ||
61 | TCGv_i64 clean_addr, dirty_addr; | ||
62 | - | ||
63 | + MemOp mop; | ||
64 | int size; | ||
65 | |||
66 | if (opc == 3) { | ||
67 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
68 | } | ||
69 | } | ||
70 | |||
71 | + if (is_vector) { | ||
72 | + mop = finalize_memop_asimd(s, size); | ||
73 | + } else { | ||
74 | + mop = finalize_memop(s, size); | ||
75 | + } | ||
76 | clean_addr = gen_mte_checkN(s, dirty_addr, !is_load, | ||
77 | - (wback || rn != 31) && !set_tag, 2 << size); | ||
78 | + (wback || rn != 31) && !set_tag, | ||
79 | + 2 << size, mop); | ||
80 | |||
81 | if (is_vector) { | ||
82 | - MemOp mop = finalize_memop_asimd(s, size); | ||
83 | - | ||
84 | + /* LSE2 does not merge FP pairs; leave these as separate operations. */ | ||
85 | if (is_load) { | ||
86 | do_fp_ld(s, rt, clean_addr, mop); | ||
87 | } else { | ||
88 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
89 | } else { | ||
90 | TCGv_i64 tcg_rt = cpu_reg(s, rt); | ||
91 | TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); | ||
92 | - MemOp mop = size + 1; | ||
93 | |||
94 | /* | ||
95 | + * We built mop above for the single logical access -- rebuild it | ||
96 | + * now for the paired operation. | ||
97 | + * | ||
98 | * With LSE2, non-sign-extending pairs are treated atomically if | ||
99 | * aligned, and if unaligned one of the pair will be completely | ||
100 | * within a 16-byte block and that element will be atomic. | ||
101 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) | ||
102 | * This treats sign-extending loads like zero-extending loads, | ||
103 | * since that reuses the most code below. | ||
104 | */ | ||
105 | + mop = size + 1; | ||
106 | if (s->align_mem) { | ||
107 | mop |= (size == 2 ? MO_ALIGN_4 : MO_ALIGN_8); | ||
108 | } | ||
109 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
110 | * promote consecutive little-endian elements below. | ||
111 | */ | ||
112 | clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31, | ||
113 | - total); | ||
114 | + total, finalize_memop(s, size)); | ||
115 | |||
116 | /* | ||
117 | * Consecutive little-endian elements from a single register | ||
15 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) | 118 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) |
119 | total = selem << scale; | ||
120 | tcg_rn = cpu_reg_sp(s, rn); | ||
121 | |||
122 | - clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31, | ||
123 | - total); | ||
124 | mop = finalize_memop(s, scale); | ||
125 | |||
126 | + clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31, | ||
127 | + total, mop); | ||
128 | + | ||
129 | tcg_ebytes = tcg_constant_i64(1 << scale); | ||
16 | for (xs = 0; xs < selem; xs++) { | 130 | for (xs = 0; xs < selem; xs++) { |
17 | if (replicate) { | 131 | if (replicate) { |
18 | /* Load and replicate to all elements */ | 132 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c |
19 | - uint64_t mulconst; | 133 | index XXXXXXX..XXXXXXX 100644 |
20 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | 134 | --- a/target/arm/tcg/translate-sve.c |
21 | 135 | +++ b/target/arm/tcg/translate-sve.c | |
22 | tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, | 136 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, |
23 | get_mem_index(s), s->be_data + scale); | 137 | |
24 | - switch (scale) { | 138 | dirty_addr = tcg_temp_new_i64(); |
25 | - case 0: | 139 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); |
26 | - mulconst = 0x0101010101010101ULL; | 140 | - clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); |
27 | - break; | 141 | + clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); |
28 | - case 1: | 142 | |
29 | - mulconst = 0x0001000100010001ULL; | 143 | /* |
30 | - break; | 144 | * Note that unpredicated load/store of vector/predicate registers |
31 | - case 2: | 145 | @@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, |
32 | - mulconst = 0x0000000100000001ULL; | 146 | |
33 | - break; | 147 | dirty_addr = tcg_temp_new_i64(); |
34 | - case 3: | 148 | tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); |
35 | - mulconst = 0; | 149 | - clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); |
36 | - break; | 150 | + clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); |
37 | - default: | 151 | |
38 | - g_assert_not_reached(); | 152 | /* Note that unpredicated load/store of vector/predicate registers |
39 | - } | 153 | * are defined as a stream of bytes, which equates to little-endian |
40 | - if (mulconst) { | ||
41 | - tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst); | ||
42 | - } | ||
43 | - write_vec_element(s, tcg_tmp, rt, 0, MO_64); | ||
44 | - if (is_q) { | ||
45 | - write_vec_element(s, tcg_tmp, rt, 1, MO_64); | ||
46 | - } | ||
47 | + tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt), | ||
48 | + (is_q + 1) * 8, vec_full_reg_size(s), | ||
49 | + tcg_tmp); | ||
50 | tcg_temp_free_i64(tcg_tmp); | ||
51 | - clear_vec_high(s, is_q, rt); | ||
52 | } else { | ||
53 | /* Load/store one element per register */ | ||
54 | if (is_load) { | ||
55 | -- | 154 | -- |
56 | 2.19.1 | 155 | 2.34.1 |
57 | |||
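Another aside, not part of the series: the gen_mte_checkN change above keeps two separate quantities -- the total footprint of the multi-register access (what the tag check must cover) and the memop of a single element (where any alignment requirement lives), foreshadowing how a later patch in this queue uses it. The sketch below shows that split with invented names; the field positions are illustrative only.

/* desc_sketch.c -- total size vs. per-element alignment carried separately */
#include <stdint.h>
#include <stdio.h>

#define DESC_ALIGN_SHIFT   9    /* illustrative position, 3-bit log2 alignment */
#define DESC_SIZEM1_SHIFT  12   /* illustrative position of "total size - 1" */

static uint32_t make_desc(unsigned total_size, unsigned elem_align_log2)
{
    uint32_t desc = 0;
    desc |= (uint32_t)(total_size - 1) << DESC_SIZEM1_SHIFT; /* whole range, for the tag check */
    desc |= (uint32_t)elem_align_log2 << DESC_ALIGN_SHIFT;   /* alignment of one element */
    return desc;
}

int main(void)
{
    /* e.g. a pair of 8-byte registers: 16 bytes total, each element 8-byte aligned */
    printf("desc=0x%08x\n", make_desc(16, 3));
    return 0;
}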
1 | For traps of FP/SIMD instructions to AArch32 Hyp mode, the syndrome | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | provided in HSR has more information than is reported to AArch64. | ||
3 | Specifically, there are extra fields TA and coproc which indicate | ||
4 | whether the trapped instruction was FP or SIMD. Add this extra | ||
5 | information to the syndromes we construct, and mask it out when | ||
6 | taking the exception to AArch64. | ||
7 | 2 | ||
3 | Fixes a bug in that with SCTLR.A set, we should raise any | ||
4 | alignment fault before raising any MTE check fault. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20230530191438.411344-15-richard.henderson@linaro.org | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-id: 20181012144235.19646-11-peter.maydell@linaro.org | ||
11 | --- | 10 | --- |
12 | target/arm/internals.h | 14 +++++++++++++- | 11 | target/arm/internals.h | 3 ++- |
13 | target/arm/helper.c | 9 +++++++++ | 12 | target/arm/tcg/mte_helper.c | 18 ++++++++++++++++++ |
14 | target/arm/translate.c | 8 ++++---- | 13 | target/arm/tcg/translate-a64.c | 2 ++ |
15 | 3 files changed, 26 insertions(+), 5 deletions(-) | 14 | 3 files changed, 22 insertions(+), 1 deletion(-) |
16 | 15 | ||
17 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 16 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
18 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/internals.h | 18 | --- a/target/arm/internals.h |
20 | +++ b/target/arm/internals.h | 19 | +++ b/target/arm/internals.h |
21 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_get_ec(uint32_t syn) | 20 | @@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4) |
22 | * few cases the value in HSR for exceptions taken to AArch32 Hyp | 21 | FIELD(MTEDESC, TBI, 4, 2) |
23 | * mode differs slightly, and we fix this up when populating HSR in | 22 | FIELD(MTEDESC, TCMA, 6, 2) |
24 | * arm_cpu_do_interrupt_aarch32_hyp(). | 23 | FIELD(MTEDESC, WRITE, 8, 1) |
25 | + * The exception is FP/SIMD access traps -- these report extra information | 24 | -FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */ |
26 | + * when taking an exception to AArch32. For those we include the extra coproc | 25 | +FIELD(MTEDESC, ALIGN, 9, 3) |
27 | + * and TA fields, and mask them out when taking the exception to AArch64. | 26 | +FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */ |
28 | */ | 27 | |
29 | static inline uint32_t syn_uncategorized(void) | 28 | bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); |
29 | uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); | ||
30 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/target/arm/tcg/mte_helper.c | ||
33 | +++ b/target/arm/tcg/mte_helper.c | ||
34 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra) | ||
35 | |||
36 | uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
30 | { | 37 | { |
31 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, | 38 | + /* |
32 | 39 | + * R_XCHFJ: Alignment check not caused by memory type is priority 1, | |
33 | static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit) | 40 | + * higher than any translation fault. When MTE is disabled, tcg |
34 | { | 41 | + * performs the alignment check during the code generated for the |
35 | + /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */ | 42 | + * memory access. With MTE enabled, we must check this here before |
36 | return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | 43 | + * raising any translation fault in allocation_tag_mem. |
37 | | (is_16bit ? 0 : ARM_EL_IL) | 44 | + */ |
38 | - | (cv << 24) | (cond << 20); | 45 | + unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN); |
39 | + | (cv << 24) | (cond << 20) | 0xa; | 46 | + if (unlikely(align)) { |
40 | +} | 47 | + align = (1u << align) - 1; |
48 | + if (unlikely(ptr & align)) { | ||
49 | + int idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
50 | + bool w = FIELD_EX32(desc, MTEDESC, WRITE); | ||
51 | + MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD; | ||
52 | + arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC()); | ||
53 | + } | ||
54 | + } | ||
41 | + | 55 | + |
42 | +static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit) | 56 | return mte_check(env, desc, ptr, GETPC()); |
43 | +{ | ||
44 | + /* AArch32 SIMD trap: TA == 1 coproc == 0 */ | ||
45 | + return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | ||
46 | + | (is_16bit ? 0 : ARM_EL_IL) | ||
47 | + | (cv << 24) | (cond << 20) | (1 << 5); | ||
48 | } | 57 | } |
49 | 58 | ||
50 | static inline uint32_t syn_sve_access_trap(void) | 59 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
51 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | 60 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/target/arm/helper.c | 61 | --- a/target/arm/tcg/translate-a64.c |
54 | +++ b/target/arm/helper.c | 62 | +++ b/target/arm/tcg/translate-a64.c |
55 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) | 63 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, |
56 | case EXCP_HVC: | 64 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); |
57 | case EXCP_HYP_TRAP: | 65 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); |
58 | case EXCP_SMC: | 66 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); |
59 | + if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { | 67 | + desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop)); |
60 | + /* | 68 | desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1); |
61 | + * QEMU internal FP/SIMD syndromes from AArch32 include the | 69 | |
62 | + * TA and coproc fields which are only exposed if the exception | 70 | ret = tcg_temp_new_i64(); |
63 | + * is taken to AArch32 Hyp mode. Mask them out to get a valid | 71 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, |
64 | + * AArch64 format syndrome. | 72 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); |
65 | + */ | 73 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); |
66 | + env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); | 74 | desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); |
67 | + } | 75 | + desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop)); |
68 | env->cp15.esr_el[new_el] = env->exception.syndrome; | 76 | desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1); |
69 | break; | 77 | |
70 | case EXCP_IRQ: | 78 | ret = tcg_temp_new_i64(); |
71 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
72 | index XXXXXXX..XXXXXXX 100644 | ||
73 | --- a/target/arm/translate.c | ||
74 | +++ b/target/arm/translate.c | ||
75 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | ||
76 | */ | ||
77 | if (s->fp_excp_el) { | ||
78 | gen_exception_insn(s, 4, EXCP_UDEF, | ||
79 | - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); | ||
80 | + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
85 | */ | ||
86 | if (s->fp_excp_el) { | ||
87 | gen_exception_insn(s, 4, EXCP_UDEF, | ||
88 | - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); | ||
89 | + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
94 | |||
95 | if (s->fp_excp_el) { | ||
96 | gen_exception_insn(s, 4, EXCP_UDEF, | ||
97 | - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); | ||
98 | + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); | ||
99 | return 0; | ||
100 | } | ||
101 | if (!s->vfp_enabled) { | ||
102 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) | ||
103 | |||
104 | if (s->fp_excp_el) { | ||
105 | gen_exception_insn(s, 4, EXCP_UDEF, | ||
106 | - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); | ||
107 | + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); | ||
108 | return 0; | ||
109 | } | ||
110 | if (!s->vfp_enabled) { | ||
111 | -- | 79 | -- |
112 | 2.19.1 | 80 | 2.34.1 |
113 | |||
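A last aside (not part of the series): the check added to helper_mte_check above enforces the priority the commit message describes -- an alignment fault must win over an MTE tag-check fault. Below is a minimal stand-alone model of that ordering, with invented names and a fake tag lookup; it is not the QEMU helper.

/* priority_sketch.c -- alignment fault checked before the tag check */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { ACCESS_OK, ALIGNMENT_FAULT, TAG_CHECK_FAULT } Fault;

/* stand-in for the real allocation-tag lookup */
static bool tags_match(uint64_t ptr)
{
    return (ptr >> 56) == 0;
}

static Fault checked_access(uint64_t ptr, unsigned align_log2)
{
    if (align_log2) {
        uint64_t mask = (UINT64_C(1) << align_log2) - 1;
        if (ptr & mask) {
            return ALIGNMENT_FAULT;     /* raised first, before any tag check */
        }
    }
    if (!tags_match(ptr)) {
        return TAG_CHECK_FAULT;         /* only reachable once alignment passed */
    }
    return ACCESS_OK;
}

int main(void)
{
    printf("%d\n", checked_access(0x1003, 2));  /* unaligned 4-byte access -> 1 */
    printf("%d\n", checked_access(0x1000, 2));  /* aligned, tag matches    -> 0 */
    return 0;
}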
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Most of the v8 extensions are self-contained within the ISAR | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | registers and are not implied by other feature bits, which | ||
5 | makes them the easiest to convert. | ||
6 | |||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20181016223115.24100-4-richard.henderson@linaro.org | 5 | Message-id: 20230530191438.411344-16-richard.henderson@linaro.org |
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 7 | --- |
13 | target/arm/cpu.h | 131 +++++++++++++++++++++++++++++++++---- | 8 | target/arm/cpu.h | 3 ++- |
14 | target/arm/translate.h | 7 ++ | 9 | target/arm/tcg/translate.h | 2 ++ |
15 | linux-user/elfload.c | 46 ++++++++----- | 10 | target/arm/tcg/hflags.c | 6 ++++++ |
16 | target/arm/cpu.c | 27 +++++--- | 11 | target/arm/tcg/translate-a64.c | 1 + |
17 | target/arm/cpu64.c | 57 +++++++++------- | 12 | 4 files changed, 11 insertions(+), 1 deletion(-) |
18 | target/arm/translate-a64.c | 101 ++++++++++++++-------------- | ||
19 | target/arm/translate.c | 36 +++++----- | ||
20 | 7 files changed, 273 insertions(+), 132 deletions(-) | ||
21 | 13 | ||
22 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 14 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
23 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/target/arm/cpu.h | 16 | --- a/target/arm/cpu.h |
25 | +++ b/target/arm/cpu.h | 17 | +++ b/target/arm/cpu.h |
26 | @@ -XXX,XX +XXX,XX @@ typedef enum ARMPSCIState { | 18 | @@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu); |
27 | PSCI_ON_PENDING = 2 | 19 | #define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ |
28 | } ARMPSCIState; | 20 | #define SCTLR_CP15BEN (1U << 5) /* v7 onward */ |
29 | 21 | #define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ | |
30 | +typedef struct ARMISARegisters ARMISARegisters; | 22 | -#define SCTLR_nAA (1U << 6) /* when v8.4-LSE is implemented */ |
31 | + | 23 | +#define SCTLR_nAA (1U << 6) /* when FEAT_LSE2 is implemented */ |
32 | /** | 24 | #define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ |
33 | * ARMCPU: | 25 | #define SCTLR_ITD (1U << 7) /* v8 onward */ |
34 | * @env: #CPUARMState | 26 | #define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ |
35 | @@ -XXX,XX +XXX,XX @@ enum arm_features { | 27 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SVL, 24, 4) |
36 | ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ | 28 | /* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ |
37 | ARM_FEATURE_V8, | 29 | FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) |
38 | ARM_FEATURE_AARCH64, /* supports 64 bit mode */ | 30 | FIELD(TBFLAG_A64, FGT_ERET, 29, 1) |
39 | - ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ | 31 | +FIELD(TBFLAG_A64, NAA, 30, 1) |
40 | ARM_FEATURE_CBAR, /* has cp15 CBAR */ | 32 | |
41 | ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ | 33 | /* |
42 | ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ | 34 | * Helpers for using the above. |
43 | ARM_FEATURE_EL2, /* has EL2 Virtualization support */ | 35 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h |
44 | ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ | ||
45 | - ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ | ||
46 | - ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ | ||
47 | - ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ | ||
48 | ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ | ||
49 | ARM_FEATURE_PMU, /* has PMU support */ | ||
50 | ARM_FEATURE_VBAR, /* has cp15 VBAR */ | ||
51 | ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ | ||
52 | ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */ | ||
53 | ARM_FEATURE_SVE, /* has Scalable Vector Extension */ | ||
54 | - ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */ | ||
55 | - ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */ | ||
56 | - ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */ | ||
57 | - ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */ | ||
58 | - ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */ | ||
59 | - ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */ | ||
60 | - ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */ | ||
61 | ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ | ||
62 | - ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */ | ||
63 | ARM_FEATURE_M_MAIN, /* M profile Main Extension */ | ||
64 | }; | ||
65 | |||
66 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) | ||
67 | /* Shared between translate-sve.c and sve_helper.c. */ | ||
68 | extern const uint64_t pred_esz_masks[4]; | ||
69 | |||
70 | +/* | ||
71 | + * 32-bit feature tests via id registers. | ||
72 | + */ | ||
73 | +static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) | ||
74 | +{ | ||
75 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; | ||
76 | +} | ||
77 | + | ||
78 | +static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) | ||
79 | +{ | ||
80 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; | ||
81 | +} | ||
82 | + | ||
83 | +static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) | ||
84 | +{ | ||
85 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; | ||
86 | +} | ||
87 | + | ||
88 | +static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) | ||
89 | +{ | ||
90 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; | ||
91 | +} | ||
92 | + | ||
93 | +static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) | ||
94 | +{ | ||
95 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; | ||
96 | +} | ||
97 | + | ||
98 | +static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) | ||
99 | +{ | ||
100 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; | ||
101 | +} | ||
102 | + | ||
103 | +static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) | ||
104 | +{ | ||
105 | + return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; | ||
106 | +} | ||
107 | + | ||
108 | +static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) | ||
109 | +{ | ||
110 | + return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; | ||
111 | +} | ||
112 | + | ||
113 | +/* | ||
114 | + * 64-bit feature tests via id registers. | ||
115 | + */ | ||
116 | +static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) | ||
117 | +{ | ||
118 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; | ||
119 | +} | ||
120 | + | ||
121 | +static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) | ||
122 | +{ | ||
123 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; | ||
124 | +} | ||
125 | + | ||
126 | +static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) | ||
127 | +{ | ||
128 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; | ||
129 | +} | ||
130 | + | ||
131 | +static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) | ||
132 | +{ | ||
133 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; | ||
134 | +} | ||
135 | + | ||
136 | +static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) | ||
137 | +{ | ||
138 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; | ||
139 | +} | ||
140 | + | ||
141 | +static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) | ||
142 | +{ | ||
143 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; | ||
144 | +} | ||
145 | + | ||
146 | +static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) | ||
147 | +{ | ||
148 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; | ||
149 | +} | ||
150 | + | ||
151 | +static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) | ||
152 | +{ | ||
153 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; | ||
154 | +} | ||
155 | + | ||
156 | +static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) | ||
157 | +{ | ||
158 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; | ||
159 | +} | ||
160 | + | ||
161 | +static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) | ||
162 | +{ | ||
163 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; | ||
164 | +} | ||
165 | + | ||
166 | +static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) | ||
167 | +{ | ||
168 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; | ||
169 | +} | ||
170 | + | ||
171 | +static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) | ||
172 | +{ | ||
173 | + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; | ||
174 | +} | ||
175 | + | ||
176 | +static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) | ||
177 | +{ | ||
178 | + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; | ||
179 | +} | ||
180 | + | ||
181 | +/* | ||
182 | + * Forward to the above feature tests given an ARMCPU pointer. | ||
183 | + */ | ||
184 | +#define cpu_isar_feature(name, cpu) \ | ||
185 | + ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) | ||
186 | + | ||
187 | #endif | ||
188 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
189 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
190 | --- a/target/arm/translate.h | 37 | --- a/target/arm/tcg/translate.h |
191 | +++ b/target/arm/translate.h | 38 | +++ b/target/arm/tcg/translate.h |
192 | @@ -XXX,XX +XXX,XX @@ | 39 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { |
193 | /* internal defines */ | 40 | bool fgt_eret; |
194 | typedef struct DisasContext { | 41 | /* True if fine-grained trap on SVC is enabled */ |
195 | DisasContextBase base; | 42 | bool fgt_svc; |
196 | + const ARMISARegisters *isar; | 43 | + /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */ |
197 | 44 | + bool naa; | |
198 | target_ulong pc; | 45 | /* |
199 | target_ulong page_start; | 46 | * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. |
200 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void) | 47 | * < 0, set by the current instruction. |
201 | return ret; | 48 | diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c |
202 | } | ||
203 | |||
204 | +/* | ||
205 | + * Forward to the isar_feature_* tests given a DisasContext pointer. | ||
206 | + */ | ||
207 | +#define dc_isar_feature(name, ctx) \ | ||
208 | + ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); }) | ||
209 | + | ||
210 | #endif /* TARGET_ARM_TRANSLATE_H */ | ||
211 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | ||
212 | index XXXXXXX..XXXXXXX 100644 | 49 | index XXXXXXX..XXXXXXX 100644 |
213 | --- a/linux-user/elfload.c | 50 | --- a/target/arm/tcg/hflags.c |
214 | +++ b/linux-user/elfload.c | 51 | +++ b/target/arm/tcg/hflags.c |
215 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | 52 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, |
216 | /* probe for the extra features */ | ||
217 | #define GET_FEATURE(feat, hwcap) \ | ||
218 | do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) | ||
219 | + | ||
220 | +#define GET_FEATURE_ID(feat, hwcap) \ | ||
221 | + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) | ||
222 | + | ||
223 | /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ | ||
224 | GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); | ||
225 | GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP); | ||
226 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void) | ||
227 | ARMCPU *cpu = ARM_CPU(thread_cpu); | ||
228 | uint32_t hwcaps = 0; | ||
229 | |||
230 | - GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES); | ||
231 | - GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL); | ||
232 | - GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1); | ||
233 | - GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2); | ||
234 | - GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32); | ||
235 | + GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); | ||
236 | + GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); | ||
237 | + GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); | ||
238 | + GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); | ||
239 | + GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); | ||
240 | return hwcaps; | ||
241 | } | ||
242 | |||
243 | #undef GET_FEATURE | ||
244 | +#undef GET_FEATURE_ID | ||
245 | |||
246 | #else | ||
247 | /* 64 bit ARM definitions */ | ||
248 | @@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void) | ||
249 | /* probe for the extra features */ | ||
250 | #define GET_FEATURE(feat, hwcap) \ | ||
251 | do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) | ||
252 | - GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES); | ||
253 | - GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL); | ||
254 | - GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1); | ||
255 | - GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2); | ||
256 | - GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32); | ||
257 | - GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3); | ||
258 | - GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3); | ||
259 | - GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4); | ||
260 | - GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512); | ||
261 | +#define GET_FEATURE_ID(feat, hwcap) \ | ||
262 | + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) | ||
263 | + | ||
264 | + GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); | ||
265 | + GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); | ||
266 | + GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); | ||
267 | + GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); | ||
268 | + GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); | ||
269 | + GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); | ||
270 | + GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); | ||
271 | + GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); | ||
272 | + GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); | ||
273 | GET_FEATURE(ARM_FEATURE_V8_FP16, | ||
274 | ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); | ||
275 | - GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS); | ||
276 | - GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM); | ||
277 | - GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP); | ||
278 | - GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA); | ||
279 | + GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); | ||
280 | + GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); | ||
281 | + GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); | ||
282 | + GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); | ||
283 | GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE); | ||
284 | + | ||
285 | #undef GET_FEATURE | ||
286 | +#undef GET_FEATURE_ID | ||
287 | |||
288 | return hwcaps; | ||
289 | } | ||
290 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
291 | index XXXXXXX..XXXXXXX 100644 | ||
292 | --- a/target/arm/cpu.c | ||
293 | +++ b/target/arm/cpu.c | ||
294 | @@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj) | ||
295 | cortex_a15_initfn(obj); | ||
296 | #ifdef CONFIG_USER_ONLY | ||
297 | /* We don't set these in system emulation mode for the moment, | ||
298 | - * since we don't correctly set the ID registers to advertise them, | ||
299 | + * since we don't correctly set (all of) the ID registers to | ||
300 | + * advertise them. | ||
301 | */ | ||
302 | set_feature(&cpu->env, ARM_FEATURE_V8); | ||
303 | - set_feature(&cpu->env, ARM_FEATURE_V8_AES); | ||
304 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); | ||
305 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); | ||
306 | - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); | ||
307 | - set_feature(&cpu->env, ARM_FEATURE_CRC); | ||
308 | - set_feature(&cpu->env, ARM_FEATURE_V8_RDM); | ||
309 | - set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD); | ||
310 | - set_feature(&cpu->env, ARM_FEATURE_V8_FCMA); | ||
311 | + { | ||
312 | + uint32_t t; | ||
313 | + | ||
314 | + t = cpu->isar.id_isar5; | ||
315 | + t = FIELD_DP32(t, ID_ISAR5, AES, 2); | ||
316 | + t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); | ||
317 | + t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); | ||
318 | + t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); | ||
319 | + t = FIELD_DP32(t, ID_ISAR5, RDM, 1); | ||
320 | + t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); | ||
321 | + cpu->isar.id_isar5 = t; | ||
322 | + | ||
323 | + t = cpu->isar.id_isar6; | ||
324 | + t = FIELD_DP32(t, ID_ISAR6, DP, 1); | ||
325 | + cpu->isar.id_isar6 = t; | ||
326 | + } | ||
327 | #endif | ||
328 | } | ||
329 | } | ||
330 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | ||
331 | index XXXXXXX..XXXXXXX 100644 | ||
332 | --- a/target/arm/cpu64.c | ||
333 | +++ b/target/arm/cpu64.c | ||
334 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj) | ||
335 | set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); | ||
336 | set_feature(&cpu->env, ARM_FEATURE_AARCH64); | ||
337 | set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); | ||
338 | - set_feature(&cpu->env, ARM_FEATURE_V8_AES); | ||
339 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); | ||
340 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); | ||
341 | - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); | ||
342 | - set_feature(&cpu->env, ARM_FEATURE_CRC); | ||
343 | set_feature(&cpu->env, ARM_FEATURE_EL2); | ||
344 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
345 | set_feature(&cpu->env, ARM_FEATURE_PMU); | ||
346 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj) | ||
347 | set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); | ||
348 | set_feature(&cpu->env, ARM_FEATURE_AARCH64); | ||
349 | set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); | ||
350 | - set_feature(&cpu->env, ARM_FEATURE_V8_AES); | ||
351 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); | ||
352 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); | ||
353 | - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); | ||
354 | - set_feature(&cpu->env, ARM_FEATURE_CRC); | ||
355 | set_feature(&cpu->env, ARM_FEATURE_EL2); | ||
356 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
357 | set_feature(&cpu->env, ARM_FEATURE_PMU); | ||
358 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj) | ||
359 | set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); | ||
360 | set_feature(&cpu->env, ARM_FEATURE_AARCH64); | ||
361 | set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); | ||
362 | - set_feature(&cpu->env, ARM_FEATURE_V8_AES); | ||
363 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); | ||
364 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); | ||
365 | - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); | ||
366 | - set_feature(&cpu->env, ARM_FEATURE_CRC); | ||
367 | set_feature(&cpu->env, ARM_FEATURE_EL2); | ||
368 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
369 | set_feature(&cpu->env, ARM_FEATURE_PMU); | ||
370 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
371 | if (kvm_enabled()) { | ||
372 | kvm_arm_set_cpu_features_from_host(cpu); | ||
373 | } else { | ||
374 | + uint64_t t; | ||
375 | + uint32_t u; | ||
376 | aarch64_a57_initfn(obj); | ||
377 | + | ||
378 | + t = cpu->isar.id_aa64isar0; | ||
379 | + t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */ | ||
380 | + t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); | ||
381 | + t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */ | ||
382 | + t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); | ||
383 | + t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); | ||
384 | + t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); | ||
385 | + t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); | ||
386 | + t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); | ||
387 | + t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); | ||
388 | + t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); | ||
389 | + cpu->isar.id_aa64isar0 = t; | ||
390 | + | ||
391 | + t = cpu->isar.id_aa64isar1; | ||
392 | + t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); | ||
393 | + cpu->isar.id_aa64isar1 = t; | ||
394 | + | ||
395 | + /* Replicate the same data to the 32-bit id registers. */ | ||
396 | + u = cpu->isar.id_isar5; | ||
397 | + u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ | ||
398 | + u = FIELD_DP32(u, ID_ISAR5, SHA1, 1); | ||
399 | + u = FIELD_DP32(u, ID_ISAR5, SHA2, 1); | ||
400 | + u = FIELD_DP32(u, ID_ISAR5, CRC32, 1); | ||
401 | + u = FIELD_DP32(u, ID_ISAR5, RDM, 1); | ||
402 | + u = FIELD_DP32(u, ID_ISAR5, VCMA, 1); | ||
403 | + cpu->isar.id_isar5 = u; | ||
404 | + | ||
405 | + u = cpu->isar.id_isar6; | ||
406 | + u = FIELD_DP32(u, ID_ISAR6, DP, 1); | ||
407 | + cpu->isar.id_isar6 = u; | ||
408 | + | ||
409 | #ifdef CONFIG_USER_ONLY | ||
410 | /* We don't set these in system emulation mode for the moment, | ||
411 | * since we don't correctly set the ID registers to advertise them, | ||
412 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) | ||
413 | * whereas the architecture requires them to be present in both if | ||
414 | * present in either. | ||
415 | */ | ||
416 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA512); | ||
417 | - set_feature(&cpu->env, ARM_FEATURE_V8_SHA3); | ||
418 | - set_feature(&cpu->env, ARM_FEATURE_V8_SM3); | ||
419 | - set_feature(&cpu->env, ARM_FEATURE_V8_SM4); | ||
420 | - set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS); | ||
421 | - set_feature(&cpu->env, ARM_FEATURE_V8_RDM); | ||
422 | - set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD); | ||
423 | set_feature(&cpu->env, ARM_FEATURE_V8_FP16); | ||
424 | - set_feature(&cpu->env, ARM_FEATURE_V8_FCMA); | ||
425 | set_feature(&cpu->env, ARM_FEATURE_SVE); | ||
426 | /* For usermode -cpu max we can use a larger and more efficient DCZ | ||
427 | * blocksize since we don't have to follow what the hardware does. | ||
428 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
429 | index XXXXXXX..XXXXXXX 100644 | ||
430 | --- a/target/arm/translate-a64.c | ||
431 | +++ b/target/arm/translate-a64.c | ||
432 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
433 | } | ||
434 | if (rt2 == 31 | ||
435 | && ((rt | rs) & 1) == 0 | ||
436 | - && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { | ||
437 | + && dc_isar_feature(aa64_atomics, s)) { | ||
438 | /* CASP / CASPL */ | ||
439 | gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); | ||
440 | return; | ||
441 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
442 | } | ||
443 | if (rt2 == 31 | ||
444 | && ((rt | rs) & 1) == 0 | ||
445 | - && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { | ||
446 | + && dc_isar_feature(aa64_atomics, s)) { | ||
447 | /* CASPA / CASPAL */ | ||
448 | gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); | ||
449 | return; | ||
450 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) | ||
451 | case 0xb: /* CASL */ | ||
452 | case 0xe: /* CASA */ | ||
453 | case 0xf: /* CASAL */ | ||
454 | - if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { | ||
455 | + if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) { | ||
456 | gen_compare_and_swap(s, rs, rt, rn, size); | ||
457 | return; | ||
458 | } | ||
459 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, | ||
460 | int rs = extract32(insn, 16, 5); | ||
461 | int rn = extract32(insn, 5, 5); | ||
462 | int o3_opc = extract32(insn, 12, 4); | ||
463 | - int feature = ARM_FEATURE_V8_ATOMICS; | ||
464 | TCGv_i64 tcg_rn, tcg_rs; | ||
465 | AtomicThreeOpFn *fn; | ||
466 | |||
467 | - if (is_vector) { | ||
468 | + if (is_vector || !dc_isar_feature(aa64_atomics, s)) { | ||
469 | unallocated_encoding(s); | ||
470 | return; | ||
471 | } | ||
472 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, | ||
473 | unallocated_encoding(s); | ||
474 | return; | ||
475 | } | ||
476 | - if (!arm_dc_feature(s, feature)) { | ||
477 | - unallocated_encoding(s); | ||
478 | - return; | ||
479 | - } | ||
480 | |||
481 | if (rn == 31) { | ||
482 | gen_check_sp_alignment(s); | ||
483 | @@ -XXX,XX +XXX,XX @@ static void handle_crc32(DisasContext *s, | ||
484 | TCGv_i64 tcg_acc, tcg_val; | ||
485 | TCGv_i32 tcg_bytes; | ||
486 | |||
487 | - if (!arm_dc_feature(s, ARM_FEATURE_CRC) | ||
488 | + if (!dc_isar_feature(aa64_crc32, s) | ||
489 | || (sf == 1 && sz != 3) | ||
490 | || (sf == 0 && sz == 3)) { | ||
491 | unallocated_encoding(s); | ||
492 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, | ||
493 | bool u = extract32(insn, 29, 1); | ||
494 | TCGv_i32 ele1, ele2, ele3; | ||
495 | TCGv_i64 res; | ||
496 | - int feature; | ||
497 | + bool feature; | ||
498 | |||
499 | switch (u * 16 + opcode) { | ||
500 | case 0x10: /* SQRDMLAH (vector) */ | ||
501 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, | ||
502 | unallocated_encoding(s); | ||
503 | return; | ||
504 | } | ||
505 | - feature = ARM_FEATURE_V8_RDM; | ||
506 | + feature = dc_isar_feature(aa64_rdm, s); | ||
507 | break; | ||
508 | default: | ||
509 | unallocated_encoding(s); | ||
510 | return; | ||
511 | } | ||
512 | - if (!arm_dc_feature(s, feature)) { | ||
513 | + if (!feature) { | ||
514 | unallocated_encoding(s); | ||
515 | return; | ||
516 | } | ||
517 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) | ||
518 | return; | ||
519 | } | ||
520 | if (size == 3) { | ||
521 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { | ||
522 | + if (!dc_isar_feature(aa64_pmull, s)) { | ||
523 | unallocated_encoding(s); | ||
524 | return; | ||
525 | } | ||
526 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | ||
527 | int size = extract32(insn, 22, 2); | ||
528 | bool u = extract32(insn, 29, 1); | ||
529 | bool is_q = extract32(insn, 30, 1); | ||
530 | - int feature, rot; | ||
531 | + bool feature; | ||
532 | + int rot; | ||
533 | |||
534 | switch (u * 16 + opcode) { | ||
535 | case 0x10: /* SQRDMLAH (vector) */ | ||
536 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | ||
537 | unallocated_encoding(s); | ||
538 | return; | ||
539 | } | ||
540 | - feature = ARM_FEATURE_V8_RDM; | ||
541 | + feature = dc_isar_feature(aa64_rdm, s); | ||
542 | break; | ||
543 | case 0x02: /* SDOT (vector) */ | ||
544 | case 0x12: /* UDOT (vector) */ | ||
545 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | ||
546 | unallocated_encoding(s); | ||
547 | return; | ||
548 | } | ||
549 | - feature = ARM_FEATURE_V8_DOTPROD; | ||
550 | + feature = dc_isar_feature(aa64_dp, s); | ||
551 | break; | ||
552 | case 0x18: /* FCMLA, #0 */ | ||
553 | case 0x19: /* FCMLA, #90 */ | ||
554 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) | ||
555 | unallocated_encoding(s); | ||
556 | return; | ||
557 | } | ||
558 | - feature = ARM_FEATURE_V8_FCMA; | ||
559 | + feature = dc_isar_feature(aa64_fcma, s); | ||
560 | break; | ||
561 | default: | ||
562 | unallocated_encoding(s); | ||
563 | return; | ||
564 | } | ||
565 | - if (!arm_dc_feature(s, feature)) { | ||
566 | + if (!feature) { | ||
567 | unallocated_encoding(s); | ||
568 | return; | ||
569 | } | ||
570 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
571 | break; | ||
572 | case 0x1d: /* SQRDMLAH */ | ||
573 | case 0x1f: /* SQRDMLSH */ | ||
574 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { | ||
575 | + if (!dc_isar_feature(aa64_rdm, s)) { | ||
576 | unallocated_encoding(s); | ||
577 | return; | ||
578 | } | ||
579 | break; | ||
580 | case 0x0e: /* SDOT */ | ||
581 | case 0x1e: /* UDOT */ | ||
582 | - if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { | ||
583 | + if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) { | ||
584 | unallocated_encoding(s); | ||
585 | return; | ||
586 | } | ||
587 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
588 | case 0x13: /* FCMLA #90 */ | ||
589 | case 0x15: /* FCMLA #180 */ | ||
590 | case 0x17: /* FCMLA #270 */ | ||
591 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) { | ||
592 | + if (!dc_isar_feature(aa64_fcma, s)) { | ||
593 | unallocated_encoding(s); | ||
594 | return; | ||
595 | } | ||
596 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn) | ||
597 | TCGv_i32 tcg_decrypt; | ||
598 | CryptoThreeOpIntFn *genfn; | ||
599 | |||
600 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) | ||
601 | - || size != 0) { | ||
602 | + if (!dc_isar_feature(aa64_aes, s) || size != 0) { | ||
603 | unallocated_encoding(s); | ||
604 | return; | ||
605 | } | ||
606 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) | ||
607 | int rd = extract32(insn, 0, 5); | ||
608 | CryptoThreeOpFn *genfn; | ||
609 | TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; | ||
610 | - int feature = ARM_FEATURE_V8_SHA256; | ||
611 | + bool feature; | ||
612 | |||
613 | if (size != 0) { | ||
614 | unallocated_encoding(s); | ||
615 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) | ||
616 | case 2: /* SHA1M */ | ||
617 | case 3: /* SHA1SU0 */ | ||
618 | genfn = NULL; | ||
619 | - feature = ARM_FEATURE_V8_SHA1; | ||
620 | + feature = dc_isar_feature(aa64_sha1, s); | ||
621 | break; | ||
622 | case 4: /* SHA256H */ | ||
623 | genfn = gen_helper_crypto_sha256h; | ||
624 | + feature = dc_isar_feature(aa64_sha256, s); | ||
625 | break; | ||
626 | case 5: /* SHA256H2 */ | ||
627 | genfn = gen_helper_crypto_sha256h2; | ||
628 | + feature = dc_isar_feature(aa64_sha256, s); | ||
629 | break; | ||
630 | case 6: /* SHA256SU1 */ | ||
631 | genfn = gen_helper_crypto_sha256su1; | ||
632 | + feature = dc_isar_feature(aa64_sha256, s); | ||
633 | break; | ||
634 | default: | ||
635 | unallocated_encoding(s); | ||
636 | return; | ||
637 | } | ||
638 | |||
639 | - if (!arm_dc_feature(s, feature)) { | ||
640 | + if (!feature) { | ||
641 | unallocated_encoding(s); | ||
642 | return; | ||
643 | } | ||
644 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) | ||
645 | int rn = extract32(insn, 5, 5); | ||
646 | int rd = extract32(insn, 0, 5); | ||
647 | CryptoTwoOpFn *genfn; | ||
648 | - int feature; | ||
649 | + bool feature; | ||
650 | TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; | ||
651 | |||
652 | if (size != 0) { | ||
653 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) | ||
654 | |||
655 | switch (opcode) { | ||
656 | case 0: /* SHA1H */ | ||
657 | - feature = ARM_FEATURE_V8_SHA1; | ||
658 | + feature = dc_isar_feature(aa64_sha1, s); | ||
659 | genfn = gen_helper_crypto_sha1h; | ||
660 | break; | ||
661 | case 1: /* SHA1SU1 */ | ||
662 | - feature = ARM_FEATURE_V8_SHA1; | ||
663 | + feature = dc_isar_feature(aa64_sha1, s); | ||
664 | genfn = gen_helper_crypto_sha1su1; | ||
665 | break; | ||
666 | case 2: /* SHA256SU0 */ | ||
667 | - feature = ARM_FEATURE_V8_SHA256; | ||
668 | + feature = dc_isar_feature(aa64_sha256, s); | ||
669 | genfn = gen_helper_crypto_sha256su0; | ||
670 | break; | ||
671 | default: | ||
672 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | - if (!arm_dc_feature(s, feature)) { | ||
677 | + if (!feature) { | ||
678 | unallocated_encoding(s); | ||
679 | return; | ||
680 | } | ||
681 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) | ||
682 | int rm = extract32(insn, 16, 5); | ||
683 | int rn = extract32(insn, 5, 5); | ||
684 | int rd = extract32(insn, 0, 5); | ||
685 | - int feature; | ||
686 | + bool feature; | ||
687 | CryptoThreeOpFn *genfn; | ||
688 | |||
689 | if (o == 0) { | ||
690 | switch (opcode) { | ||
691 | case 0: /* SHA512H */ | ||
692 | - feature = ARM_FEATURE_V8_SHA512; | ||
693 | + feature = dc_isar_feature(aa64_sha512, s); | ||
694 | genfn = gen_helper_crypto_sha512h; | ||
695 | break; | ||
696 | case 1: /* SHA512H2 */ | ||
697 | - feature = ARM_FEATURE_V8_SHA512; | ||
698 | + feature = dc_isar_feature(aa64_sha512, s); | ||
699 | genfn = gen_helper_crypto_sha512h2; | ||
700 | break; | ||
701 | case 2: /* SHA512SU1 */ | ||
702 | - feature = ARM_FEATURE_V8_SHA512; | ||
703 | + feature = dc_isar_feature(aa64_sha512, s); | ||
704 | genfn = gen_helper_crypto_sha512su1; | ||
705 | break; | ||
706 | case 3: /* RAX1 */ | ||
707 | - feature = ARM_FEATURE_V8_SHA3; | ||
708 | + feature = dc_isar_feature(aa64_sha3, s); | ||
709 | genfn = NULL; | ||
710 | break; | ||
711 | } | ||
712 | } else { | ||
713 | switch (opcode) { | ||
714 | case 0: /* SM3PARTW1 */ | ||
715 | - feature = ARM_FEATURE_V8_SM3; | ||
716 | + feature = dc_isar_feature(aa64_sm3, s); | ||
717 | genfn = gen_helper_crypto_sm3partw1; | ||
718 | break; | ||
719 | case 1: /* SM3PARTW2 */ | ||
720 | - feature = ARM_FEATURE_V8_SM3; | ||
721 | + feature = dc_isar_feature(aa64_sm3, s); | ||
722 | genfn = gen_helper_crypto_sm3partw2; | ||
723 | break; | ||
724 | case 2: /* SM4EKEY */ | ||
725 | - feature = ARM_FEATURE_V8_SM4; | ||
726 | + feature = dc_isar_feature(aa64_sm4, s); | ||
727 | genfn = gen_helper_crypto_sm4ekey; | ||
728 | break; | ||
729 | default: | ||
730 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) | ||
731 | } | 53 | } |
732 | } | 54 | } |
733 | 55 | ||
734 | - if (!arm_dc_feature(s, feature)) { | 56 | + if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) { |
735 | + if (!feature) { | 57 | + if (sctlr & SCTLR_nAA) { |
736 | unallocated_encoding(s); | 58 | + DP_TBFLAG_A64(flags, NAA, 1); |
737 | return; | 59 | + } |
738 | } | 60 | + } |
739 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) | 61 | + |
740 | int rn = extract32(insn, 5, 5); | 62 | /* Compute the condition for using AccType_UNPRIV for LDTR et al. */ |
741 | int rd = extract32(insn, 0, 5); | 63 | if (!(env->pstate & PSTATE_UAO)) { |
742 | TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; | 64 | switch (mmu_idx) { |
743 | - int feature; | 65 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
744 | + bool feature; | 66 | index XXXXXXX..XXXXXXX 100644 |
745 | CryptoTwoOpFn *genfn; | 67 | --- a/target/arm/tcg/translate-a64.c |
746 | 68 | +++ b/target/arm/tcg/translate-a64.c | |
747 | switch (opcode) { | ||
748 | case 0: /* SHA512SU0 */ | ||
749 | - feature = ARM_FEATURE_V8_SHA512; | ||
750 | + feature = dc_isar_feature(aa64_sha512, s); | ||
751 | genfn = gen_helper_crypto_sha512su0; | ||
752 | break; | ||
753 | case 1: /* SM4E */ | ||
754 | - feature = ARM_FEATURE_V8_SM4; | ||
755 | + feature = dc_isar_feature(aa64_sm4, s); | ||
756 | genfn = gen_helper_crypto_sm4e; | ||
757 | break; | ||
758 | default: | ||
759 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) | ||
760 | return; | ||
761 | } | ||
762 | |||
763 | - if (!arm_dc_feature(s, feature)) { | ||
764 | + if (!feature) { | ||
765 | unallocated_encoding(s); | ||
766 | return; | ||
767 | } | ||
768 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) | ||
769 | int ra = extract32(insn, 10, 5); | ||
770 | int rn = extract32(insn, 5, 5); | ||
771 | int rd = extract32(insn, 0, 5); | ||
772 | - int feature; | ||
773 | + bool feature; | ||
774 | |||
775 | switch (op0) { | ||
776 | case 0: /* EOR3 */ | ||
777 | case 1: /* BCAX */ | ||
778 | - feature = ARM_FEATURE_V8_SHA3; | ||
779 | + feature = dc_isar_feature(aa64_sha3, s); | ||
780 | break; | ||
781 | case 2: /* SM3SS1 */ | ||
782 | - feature = ARM_FEATURE_V8_SM3; | ||
783 | + feature = dc_isar_feature(aa64_sm3, s); | ||
784 | break; | ||
785 | default: | ||
786 | unallocated_encoding(s); | ||
787 | return; | ||
788 | } | ||
789 | |||
790 | - if (!arm_dc_feature(s, feature)) { | ||
791 | + if (!feature) { | ||
792 | unallocated_encoding(s); | ||
793 | return; | ||
794 | } | ||
795 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn) | ||
796 | TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; | ||
797 | int pass; | ||
798 | |||
799 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) { | ||
800 | + if (!dc_isar_feature(aa64_sha3, s)) { | ||
801 | unallocated_encoding(s); | ||
802 | return; | ||
803 | } | ||
804 | @@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn) | ||
805 | TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; | ||
806 | TCGv_i32 tcg_imm2, tcg_opcode; | ||
807 | |||
808 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) { | ||
809 | + if (!dc_isar_feature(aa64_sm3, s)) { | ||
810 | unallocated_encoding(s); | ||
811 | return; | ||
812 | } | ||
813 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | 69 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, |
814 | ARMCPU *arm_cpu = arm_env_get_cpu(env); | 70 | dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); |
815 | int bound; | 71 | dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); |
816 | 72 | dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); | |
817 | + dc->isar = &arm_cpu->isar; | 73 | + dc->naa = EX_TBFLAG_A64(tb_flags, NAA); |
818 | dc->pc = dc->base.pc_first; | 74 | dc->vec_len = 0; |
819 | dc->condjmp = 0; | 75 | dc->vec_stride = 0; |
820 | 76 | dc->cp_regs = arm_cpu->cp_regs; | |
821 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
822 | index XXXXXXX..XXXXXXX 100644 | ||
823 | --- a/target/arm/translate.c | ||
824 | +++ b/target/arm/translate.c | ||
825 | @@ -XXX,XX +XXX,XX @@ static const uint8_t neon_2rm_sizes[] = { | ||
826 | static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, | ||
827 | int q, int rd, int rn, int rm) | ||
828 | { | ||
829 | - if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { | ||
830 | + if (dc_isar_feature(aa32_rdm, s)) { | ||
831 | int opr_sz = (1 + q) * 8; | ||
832 | tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), | ||
833 | vfp_reg_offset(1, rn), | ||
834 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
835 | return 1; | ||
836 | } | ||
837 | if (!u) { /* SHA-1 */ | ||
838 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { | ||
839 | + if (!dc_isar_feature(aa32_sha1, s)) { | ||
840 | return 1; | ||
841 | } | ||
842 | ptr1 = vfp_reg_ptr(true, rd); | ||
843 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
844 | gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4); | ||
845 | tcg_temp_free_i32(tmp4); | ||
846 | } else { /* SHA-256 */ | ||
847 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) { | ||
848 | + if (!dc_isar_feature(aa32_sha2, s) || size == 3) { | ||
849 | return 1; | ||
850 | } | ||
851 | ptr1 = vfp_reg_ptr(true, rd); | ||
852 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
853 | if (op == 14 && size == 2) { | ||
854 | TCGv_i64 tcg_rn, tcg_rm, tcg_rd; | ||
855 | |||
856 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { | ||
857 | + if (!dc_isar_feature(aa32_pmull, s)) { | ||
858 | return 1; | ||
859 | } | ||
860 | tcg_rn = tcg_temp_new_i64(); | ||
861 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
862 | { | ||
863 | NeonGenThreeOpEnvFn *fn; | ||
864 | |||
865 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { | ||
866 | + if (!dc_isar_feature(aa32_rdm, s)) { | ||
867 | return 1; | ||
868 | } | ||
869 | if (u && ((rd | rn) & 1)) { | ||
870 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
871 | break; | ||
872 | } | ||
873 | case NEON_2RM_AESE: case NEON_2RM_AESMC: | ||
874 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) | ||
875 | - || ((rm | rd) & 1)) { | ||
876 | + if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) { | ||
877 | return 1; | ||
878 | } | ||
879 | ptr1 = vfp_reg_ptr(true, rd); | ||
880 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
881 | tcg_temp_free_i32(tmp3); | ||
882 | break; | ||
883 | case NEON_2RM_SHA1H: | ||
884 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1) | ||
885 | - || ((rm | rd) & 1)) { | ||
886 | + if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) { | ||
887 | return 1; | ||
888 | } | ||
889 | ptr1 = vfp_reg_ptr(true, rd); | ||
890 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
891 | } | ||
892 | /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ | ||
893 | if (q) { | ||
894 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) { | ||
895 | + if (!dc_isar_feature(aa32_sha2, s)) { | ||
896 | return 1; | ||
897 | } | ||
898 | - } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { | ||
899 | + } else if (!dc_isar_feature(aa32_sha1, s)) { | ||
900 | return 1; | ||
901 | } | ||
902 | ptr1 = vfp_reg_ptr(true, rd); | ||
903 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
904 | /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */ | ||
905 | int size = extract32(insn, 20, 1); | ||
906 | data = extract32(insn, 23, 2); /* rot */ | ||
907 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA) | ||
908 | + if (!dc_isar_feature(aa32_vcma, s) | ||
909 | || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { | ||
910 | return 1; | ||
911 | } | ||
912 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
913 | /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */ | ||
914 | int size = extract32(insn, 20, 1); | ||
915 | data = extract32(insn, 24, 1); /* rot */ | ||
916 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA) | ||
917 | + if (!dc_isar_feature(aa32_vcma, s) | ||
918 | || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { | ||
919 | return 1; | ||
920 | } | ||
921 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) | ||
922 | } else if ((insn & 0xfeb00f00) == 0xfc200d00) { | ||
923 | /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */ | ||
924 | bool u = extract32(insn, 4, 1); | ||
925 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { | ||
926 | + if (!dc_isar_feature(aa32_dp, s)) { | ||
927 | return 1; | ||
928 | } | ||
929 | fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b; | ||
930 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) | ||
931 | int size = extract32(insn, 23, 1); | ||
932 | int index; | ||
933 | |||
934 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) { | ||
935 | + if (!dc_isar_feature(aa32_vcma, s)) { | ||
936 | return 1; | ||
937 | } | ||
938 | if (size == 0) { | ||
939 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) | ||
940 | } else if ((insn & 0xffb00f00) == 0xfe200d00) { | ||
941 | /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */ | ||
942 | int u = extract32(insn, 4, 1); | ||
943 | - if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { | ||
944 | + if (!dc_isar_feature(aa32_dp, s)) { | ||
945 | return 1; | ||
946 | } | ||
947 | fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; | ||
948 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) | ||
949 | * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED. | ||
950 | * Bits 8, 10 and 11 should be zero. | ||
951 | */ | ||
952 | - if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 || | ||
953 | - (c & 0xd) != 0) { | ||
954 | + if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) { | ||
955 | goto illegal_op; | ||
956 | } | ||
957 | |||
958 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) | ||
959 | case 0x28: | ||
960 | case 0x29: | ||
961 | case 0x2a: | ||
962 | - if (!arm_dc_feature(s, ARM_FEATURE_CRC)) { | ||
963 | + if (!dc_isar_feature(aa32_crc32, s)) { | ||
964 | goto illegal_op; | ||
965 | } | ||
966 | break; | ||
967 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
968 | CPUARMState *env = cs->env_ptr; | ||
969 | ARMCPU *cpu = arm_env_get_cpu(env); | ||
970 | |||
971 | + dc->isar = &cpu->isar; | ||
972 | dc->pc = dc->base.pc_first; | ||
973 | dc->condjmp = 0; | ||
974 | |||
975 | -- | 77 | -- |
976 | 2.19.1 | 78 | 2.34.1 |
977 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Create struct ARMISARegisters, to be accessed during translation. | 3 | FEAT_LSE2 only requires that atomic operations not cross a |
4 | 16-byte boundary. Ordered operations may be completely | ||
5 | unaligned if SCTLR.nAA is set. | ||
6 | |||
7 | Because this alignment check is so special, do it by hand. | ||
8 | Make sure not to keep TCG temps live across the branch. | ||
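In plain C terms, the relaxed rule is simply "the access must stay within its 16-byte granule". A minimal sketch of that condition (illustrative only; lse2_alignment_ok is a made-up name and is not part of this patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Under FEAT_LSE2, an atomic or ordered access of 'size' bytes at
     * 'addr' needs no alignment fault as long as it does not cross a
     * 16-byte boundary. */
    static bool lse2_alignment_ok(uint64_t addr, unsigned size)
    {
        return ((addr & 15) + size) <= 16;
    }

The check_lse2_align() added below open-codes the same test in TCG and calls the new unaligned_access helper when it fails.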
4 | 9 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20181016223115.24100-2-richard.henderson@linaro.org | 11 | Message-id: 20230530191438.411344-17-richard.henderson@linaro.org |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 14 | --- |
10 | target/arm/cpu.h | 32 ++++---- | 15 | target/arm/tcg/helper-a64.h | 3 + |
11 | hw/intc/armv7m_nvic.c | 12 +-- | 16 | target/arm/tcg/helper-a64.c | 7 ++ |
12 | target/arm/cpu.c | 178 +++++++++++++++++++++--------------------- | 17 | target/arm/tcg/translate-a64.c | 120 ++++++++++++++++++++++++++------- |
13 | target/arm/cpu64.c | 70 ++++++++--------- | 18 | 3 files changed, 104 insertions(+), 26 deletions(-) |
14 | target/arm/helper.c | 28 +++---- | 19 | |
15 | 5 files changed, 162 insertions(+), 158 deletions(-) | 20 | diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h |
16 | |||
17 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/cpu.h | 22 | --- a/target/arm/tcg/helper-a64.h |
20 | +++ b/target/arm/cpu.h | 23 | +++ b/target/arm/tcg/helper-a64.h |
21 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | 24 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(st2g_stub, TCG_CALL_NO_WG, void, env, i64) |
22 | * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix | 25 | DEF_HELPER_FLAGS_2(ldgm, TCG_CALL_NO_WG, i64, env, i64) |
23 | * is used for reset values of non-constant registers; no reset_ | 26 | DEF_HELPER_FLAGS_3(stgm, TCG_CALL_NO_WG, void, env, i64, i64) |
24 | * prefix means a constant register. | 27 | DEF_HELPER_FLAGS_3(stzgm_tags, TCG_CALL_NO_WG, void, env, i64, i64) |
25 | + * Some of these registers are split out into a substructure that | 28 | + |
26 | + * is shared with the translators to control the ISA. | 29 | +DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG, |
27 | */ | 30 | + noreturn, env, i64, i32, i32) |
28 | + struct ARMISARegisters { | 31 | diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c |
29 | + uint32_t id_isar0; | ||
30 | + uint32_t id_isar1; | ||
31 | + uint32_t id_isar2; | ||
32 | + uint32_t id_isar3; | ||
33 | + uint32_t id_isar4; | ||
34 | + uint32_t id_isar5; | ||
35 | + uint32_t id_isar6; | ||
36 | + uint32_t mvfr0; | ||
37 | + uint32_t mvfr1; | ||
38 | + uint32_t mvfr2; | ||
39 | + uint64_t id_aa64isar0; | ||
40 | + uint64_t id_aa64isar1; | ||
41 | + uint64_t id_aa64pfr0; | ||
42 | + uint64_t id_aa64pfr1; | ||
43 | + } isar; | ||
44 | uint32_t midr; | ||
45 | uint32_t revidr; | ||
46 | uint32_t reset_fpsid; | ||
47 | - uint32_t mvfr0; | ||
48 | - uint32_t mvfr1; | ||
49 | - uint32_t mvfr2; | ||
50 | uint32_t ctr; | ||
51 | uint32_t reset_sctlr; | ||
52 | uint32_t id_pfr0; | ||
53 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | ||
54 | uint32_t id_mmfr2; | ||
55 | uint32_t id_mmfr3; | ||
56 | uint32_t id_mmfr4; | ||
57 | - uint32_t id_isar0; | ||
58 | - uint32_t id_isar1; | ||
59 | - uint32_t id_isar2; | ||
60 | - uint32_t id_isar3; | ||
61 | - uint32_t id_isar4; | ||
62 | - uint32_t id_isar5; | ||
63 | - uint32_t id_isar6; | ||
64 | - uint64_t id_aa64pfr0; | ||
65 | - uint64_t id_aa64pfr1; | ||
66 | uint64_t id_aa64dfr0; | ||
67 | uint64_t id_aa64dfr1; | ||
68 | uint64_t id_aa64afr0; | ||
69 | uint64_t id_aa64afr1; | ||
70 | - uint64_t id_aa64isar0; | ||
71 | - uint64_t id_aa64isar1; | ||
72 | uint64_t id_aa64mmfr0; | ||
73 | uint64_t id_aa64mmfr1; | ||
74 | uint32_t dbgdidr; | ||
75 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
77 | --- a/hw/intc/armv7m_nvic.c | 33 | --- a/target/arm/tcg/helper-a64.c |
78 | +++ b/hw/intc/armv7m_nvic.c | 34 | +++ b/target/arm/tcg/helper-a64.c |
79 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | 35 | @@ -XXX,XX +XXX,XX @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) |
80 | case 0xd5c: /* MMFR3. */ | 36 | |
81 | return cpu->id_mmfr3; | 37 | memset(mem, 0, blocklen); |
82 | case 0xd60: /* ISAR0. */ | 38 | } |
83 | - return cpu->id_isar0; | 39 | + |
84 | + return cpu->isar.id_isar0; | 40 | +void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr, |
85 | case 0xd64: /* ISAR1. */ | 41 | + uint32_t access_type, uint32_t mmu_idx) |
86 | - return cpu->id_isar1; | 42 | +{ |
87 | + return cpu->isar.id_isar1; | 43 | + arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type, |
88 | case 0xd68: /* ISAR2. */ | 44 | + mmu_idx, GETPC()); |
89 | - return cpu->id_isar2; | 45 | +} |
90 | + return cpu->isar.id_isar2; | 46 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
91 | case 0xd6c: /* ISAR3. */ | ||
92 | - return cpu->id_isar3; | ||
93 | + return cpu->isar.id_isar3; | ||
94 | case 0xd70: /* ISAR4. */ | ||
95 | - return cpu->id_isar4; | ||
96 | + return cpu->isar.id_isar4; | ||
97 | case 0xd74: /* ISAR5. */ | ||
98 | - return cpu->id_isar5; | ||
99 | + return cpu->isar.id_isar5; | ||
100 | case 0xd78: /* CLIDR */ | ||
101 | return cpu->clidr; | ||
102 | case 0xd7c: /* CTR */ | ||
103 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
104 | index XXXXXXX..XXXXXXX 100644 | 47 | index XXXXXXX..XXXXXXX 100644 |
105 | --- a/target/arm/cpu.c | 48 | --- a/target/arm/tcg/translate-a64.c |
106 | +++ b/target/arm/cpu.c | 49 | +++ b/target/arm/tcg/translate-a64.c |
107 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s) | 50 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, |
108 | g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu); | 51 | return clean_data_tbi(s, addr); |
109 | |||
110 | env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; | ||
111 | - env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; | ||
112 | - env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; | ||
113 | - env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; | ||
114 | + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; | ||
115 | + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; | ||
116 | + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; | ||
117 | |||
118 | cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; | ||
119 | s->halted = cpu->start_powered_off; | ||
120 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
121 | * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. | ||
122 | */ | ||
123 | cpu->id_pfr1 &= ~0xf0; | ||
124 | - cpu->id_aa64pfr0 &= ~0xf000; | ||
125 | + cpu->isar.id_aa64pfr0 &= ~0xf000; | ||
126 | } | ||
127 | |||
128 | if (!cpu->has_el2) { | ||
129 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
130 | * registers if we don't have EL2. These are id_pfr1[15:12] and | ||
131 | * id_aa64pfr0_el1[11:8]. | ||
132 | */ | ||
133 | - cpu->id_aa64pfr0 &= ~0xf00; | ||
134 | + cpu->isar.id_aa64pfr0 &= ~0xf00; | ||
135 | cpu->id_pfr1 &= ~0xf000; | ||
136 | } | ||
137 | |||
138 | @@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj) | ||
139 | set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); | ||
140 | cpu->midr = 0x4107b362; | ||
141 | cpu->reset_fpsid = 0x410120b4; | ||
142 | - cpu->mvfr0 = 0x11111111; | ||
143 | - cpu->mvfr1 = 0x00000000; | ||
144 | + cpu->isar.mvfr0 = 0x11111111; | ||
145 | + cpu->isar.mvfr1 = 0x00000000; | ||
146 | cpu->ctr = 0x1dd20d2; | ||
147 | cpu->reset_sctlr = 0x00050078; | ||
148 | cpu->id_pfr0 = 0x111; | ||
149 | @@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj) | ||
150 | cpu->id_mmfr0 = 0x01130003; | ||
151 | cpu->id_mmfr1 = 0x10030302; | ||
152 | cpu->id_mmfr2 = 0x01222110; | ||
153 | - cpu->id_isar0 = 0x00140011; | ||
154 | - cpu->id_isar1 = 0x12002111; | ||
155 | - cpu->id_isar2 = 0x11231111; | ||
156 | - cpu->id_isar3 = 0x01102131; | ||
157 | - cpu->id_isar4 = 0x141; | ||
158 | + cpu->isar.id_isar0 = 0x00140011; | ||
159 | + cpu->isar.id_isar1 = 0x12002111; | ||
160 | + cpu->isar.id_isar2 = 0x11231111; | ||
161 | + cpu->isar.id_isar3 = 0x01102131; | ||
162 | + cpu->isar.id_isar4 = 0x141; | ||
163 | cpu->reset_auxcr = 7; | ||
164 | } | 52 | } |
165 | 53 | ||
166 | @@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj) | 54 | +/* |
167 | set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); | 55 | + * Generate the special alignment check that applies to AccType_ATOMIC |
168 | cpu->midr = 0x4117b363; | 56 | + * and AccType_ORDERED insns under FEAT_LSE2: the access need not be |
169 | cpu->reset_fpsid = 0x410120b4; | 57 | + * naturally aligned, but it must not cross a 16-byte boundary. |
170 | - cpu->mvfr0 = 0x11111111; | 58 | + * See AArch64.CheckAlignment(). |
171 | - cpu->mvfr1 = 0x00000000; | 59 | + */ |
172 | + cpu->isar.mvfr0 = 0x11111111; | 60 | +static void check_lse2_align(DisasContext *s, int rn, int imm, |
173 | + cpu->isar.mvfr1 = 0x00000000; | 61 | + bool is_write, MemOp mop) |
174 | cpu->ctr = 0x1dd20d2; | 62 | +{ |
175 | cpu->reset_sctlr = 0x00050078; | 63 | + TCGv_i32 tmp; |
176 | cpu->id_pfr0 = 0x111; | 64 | + TCGv_i64 addr; |
177 | @@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj) | 65 | + TCGLabel *over_label; |
178 | cpu->id_mmfr0 = 0x01130003; | 66 | + MMUAccessType type; |
179 | cpu->id_mmfr1 = 0x10030302; | 67 | + int mmu_idx; |
180 | cpu->id_mmfr2 = 0x01222110; | 68 | + |
181 | - cpu->id_isar0 = 0x00140011; | 69 | + tmp = tcg_temp_new_i32(); |
182 | - cpu->id_isar1 = 0x12002111; | 70 | + tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn)); |
183 | - cpu->id_isar2 = 0x11231111; | 71 | + tcg_gen_addi_i32(tmp, tmp, imm & 15); |
184 | - cpu->id_isar3 = 0x01102131; | 72 | + tcg_gen_andi_i32(tmp, tmp, 15); |
185 | - cpu->id_isar4 = 0x141; | 73 | + tcg_gen_addi_i32(tmp, tmp, memop_size(mop)); |
186 | + cpu->isar.id_isar0 = 0x00140011; | 74 | + |
187 | + cpu->isar.id_isar1 = 0x12002111; | 75 | + over_label = gen_new_label(); |
188 | + cpu->isar.id_isar2 = 0x11231111; | 76 | + tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label); |
189 | + cpu->isar.id_isar3 = 0x01102131; | 77 | + |
190 | + cpu->isar.id_isar4 = 0x141; | 78 | + addr = tcg_temp_new_i64(); |
191 | cpu->reset_auxcr = 7; | 79 | + tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm); |
192 | } | 80 | + |
193 | 81 | + type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD, | |
194 | @@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj) | 82 | + mmu_idx = get_mem_index(s); |
195 | set_feature(&cpu->env, ARM_FEATURE_EL3); | 83 | + gen_helper_unaligned_access(cpu_env, addr, tcg_constant_i32(type), |
196 | cpu->midr = 0x410fb767; | 84 | + tcg_constant_i32(mmu_idx)); |
197 | cpu->reset_fpsid = 0x410120b5; | 85 | + |
198 | - cpu->mvfr0 = 0x11111111; | 86 | + gen_set_label(over_label); |
199 | - cpu->mvfr1 = 0x00000000; | 87 | + |
200 | + cpu->isar.mvfr0 = 0x11111111; | 88 | +} |
201 | + cpu->isar.mvfr1 = 0x00000000; | 89 | + |
202 | cpu->ctr = 0x1dd20d2; | 90 | +/* Handle the alignment check for AccType_ATOMIC instructions. */ |
203 | cpu->reset_sctlr = 0x00050078; | 91 | +static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop) |
204 | cpu->id_pfr0 = 0x111; | 92 | +{ |
205 | @@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj) | 93 | + MemOp size = mop & MO_SIZE; |
206 | cpu->id_mmfr0 = 0x01130003; | 94 | + |
207 | cpu->id_mmfr1 = 0x10030302; | 95 | + if (size == MO_8) { |
208 | cpu->id_mmfr2 = 0x01222100; | 96 | + return mop; |
209 | - cpu->id_isar0 = 0x0140011; | 97 | + } |
210 | - cpu->id_isar1 = 0x12002111; | 98 | + |
211 | - cpu->id_isar2 = 0x11231121; | 99 | + /* |
212 | - cpu->id_isar3 = 0x01102131; | 100 | + * If size == MO_128, this is a LDXP, and the operation is single-copy |
213 | - cpu->id_isar4 = 0x01141; | 101 | + * atomic for each doubleword, not the entire quadword; it still must |
214 | + cpu->isar.id_isar0 = 0x0140011; | 102 | + * be quadword aligned. |
215 | + cpu->isar.id_isar1 = 0x12002111; | 103 | + */ |
216 | + cpu->isar.id_isar2 = 0x11231121; | 104 | + if (size == MO_128) { |
217 | + cpu->isar.id_isar3 = 0x01102131; | 105 | + return finalize_memop_atom(s, MO_128 | MO_ALIGN, |
218 | + cpu->isar.id_isar4 = 0x01141; | 106 | + MO_ATOM_IFALIGN_PAIR); |
219 | cpu->reset_auxcr = 7; | 107 | + } |
220 | } | 108 | + if (dc_isar_feature(aa64_lse2, s)) { |
221 | 109 | + check_lse2_align(s, rn, 0, true, mop); | |
222 | @@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj) | 110 | + } else { |
223 | set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); | 111 | + mop |= MO_ALIGN; |
224 | cpu->midr = 0x410fb022; | 112 | + } |
225 | cpu->reset_fpsid = 0x410120b4; | 113 | + return finalize_memop(s, mop); |
226 | - cpu->mvfr0 = 0x11111111; | 114 | +} |
227 | - cpu->mvfr1 = 0x00000000; | 115 | + |
228 | + cpu->isar.mvfr0 = 0x11111111; | 116 | +/* Handle the alignment check for AccType_ORDERED instructions. */ |
229 | + cpu->isar.mvfr1 = 0x00000000; | 117 | +static MemOp check_ordered_align(DisasContext *s, int rn, int imm, |
230 | cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ | 118 | + bool is_write, MemOp mop) |
231 | cpu->id_pfr0 = 0x111; | 119 | +{ |
232 | cpu->id_pfr1 = 0x1; | 120 | + MemOp size = mop & MO_SIZE; |
233 | @@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj) | 121 | + |
234 | cpu->id_mmfr0 = 0x01100103; | 122 | + if (size == MO_8) { |
235 | cpu->id_mmfr1 = 0x10020302; | 123 | + return mop; |
236 | cpu->id_mmfr2 = 0x01222000; | 124 | + } |
237 | - cpu->id_isar0 = 0x00100011; | 125 | + if (size == MO_128) { |
238 | - cpu->id_isar1 = 0x12002111; | 126 | + return finalize_memop_atom(s, MO_128 | MO_ALIGN, |
239 | - cpu->id_isar2 = 0x11221011; | 127 | + MO_ATOM_IFALIGN_PAIR); |
240 | - cpu->id_isar3 = 0x01102131; | 128 | + } |
241 | - cpu->id_isar4 = 0x141; | 129 | + if (!dc_isar_feature(aa64_lse2, s)) { |
242 | + cpu->isar.id_isar0 = 0x00100011; | 130 | + mop |= MO_ALIGN; |
243 | + cpu->isar.id_isar1 = 0x12002111; | 131 | + } else if (!s->naa) { |
244 | + cpu->isar.id_isar2 = 0x11221011; | 132 | + check_lse2_align(s, rn, imm, is_write, mop); |
245 | + cpu->isar.id_isar3 = 0x01102131; | 133 | + } |
246 | + cpu->isar.id_isar4 = 0x141; | 134 | + return finalize_memop(s, mop); |
247 | cpu->reset_auxcr = 1; | 135 | +} |
248 | } | 136 | + |
249 | 137 | typedef struct DisasCompare64 { | |
250 | @@ -XXX,XX +XXX,XX @@ static void cortex_m3_initfn(Object *obj) | 138 | TCGCond cond; |
251 | cpu->id_mmfr1 = 0x00000000; | 139 | TCGv_i64 value; |
252 | cpu->id_mmfr2 = 0x00000000; | 140 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, |
253 | cpu->id_mmfr3 = 0x00000000; | ||
254 | - cpu->id_isar0 = 0x01141110; | ||
255 | - cpu->id_isar1 = 0x02111000; | ||
256 | - cpu->id_isar2 = 0x21112231; | ||
257 | - cpu->id_isar3 = 0x01111110; | ||
258 | - cpu->id_isar4 = 0x01310102; | ||
259 | - cpu->id_isar5 = 0x00000000; | ||
260 | - cpu->id_isar6 = 0x00000000; | ||
261 | + cpu->isar.id_isar0 = 0x01141110; | ||
262 | + cpu->isar.id_isar1 = 0x02111000; | ||
263 | + cpu->isar.id_isar2 = 0x21112231; | ||
264 | + cpu->isar.id_isar3 = 0x01111110; | ||
265 | + cpu->isar.id_isar4 = 0x01310102; | ||
266 | + cpu->isar.id_isar5 = 0x00000000; | ||
267 | + cpu->isar.id_isar6 = 0x00000000; | ||
268 | } | ||
269 | |||
270 | static void cortex_m4_initfn(Object *obj) | ||
271 | @@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj) | ||
272 | cpu->id_mmfr1 = 0x00000000; | ||
273 | cpu->id_mmfr2 = 0x00000000; | ||
274 | cpu->id_mmfr3 = 0x00000000; | ||
275 | - cpu->id_isar0 = 0x01141110; | ||
276 | - cpu->id_isar1 = 0x02111000; | ||
277 | - cpu->id_isar2 = 0x21112231; | ||
278 | - cpu->id_isar3 = 0x01111110; | ||
279 | - cpu->id_isar4 = 0x01310102; | ||
280 | - cpu->id_isar5 = 0x00000000; | ||
281 | - cpu->id_isar6 = 0x00000000; | ||
282 | + cpu->isar.id_isar0 = 0x01141110; | ||
283 | + cpu->isar.id_isar1 = 0x02111000; | ||
284 | + cpu->isar.id_isar2 = 0x21112231; | ||
285 | + cpu->isar.id_isar3 = 0x01111110; | ||
286 | + cpu->isar.id_isar4 = 0x01310102; | ||
287 | + cpu->isar.id_isar5 = 0x00000000; | ||
288 | + cpu->isar.id_isar6 = 0x00000000; | ||
289 | } | ||
290 | |||
291 | static void cortex_m33_initfn(Object *obj) | ||
292 | @@ -XXX,XX +XXX,XX @@ static void cortex_m33_initfn(Object *obj) | ||
293 | cpu->id_mmfr1 = 0x00000000; | ||
294 | cpu->id_mmfr2 = 0x01000000; | ||
295 | cpu->id_mmfr3 = 0x00000000; | ||
296 | - cpu->id_isar0 = 0x01101110; | ||
297 | - cpu->id_isar1 = 0x02212000; | ||
298 | - cpu->id_isar2 = 0x20232232; | ||
299 | - cpu->id_isar3 = 0x01111131; | ||
300 | - cpu->id_isar4 = 0x01310132; | ||
301 | - cpu->id_isar5 = 0x00000000; | ||
302 | - cpu->id_isar6 = 0x00000000; | ||
303 | + cpu->isar.id_isar0 = 0x01101110; | ||
304 | + cpu->isar.id_isar1 = 0x02212000; | ||
305 | + cpu->isar.id_isar2 = 0x20232232; | ||
306 | + cpu->isar.id_isar3 = 0x01111131; | ||
307 | + cpu->isar.id_isar4 = 0x01310132; | ||
308 | + cpu->isar.id_isar5 = 0x00000000; | ||
309 | + cpu->isar.id_isar6 = 0x00000000; | ||
310 | cpu->clidr = 0x00000000; | ||
311 | cpu->ctr = 0x8000c000; | ||
312 | } | ||
313 | @@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj) | ||
314 | cpu->id_mmfr1 = 0x00000000; | ||
315 | cpu->id_mmfr2 = 0x01200000; | ||
316 | cpu->id_mmfr3 = 0x0211; | ||
317 | - cpu->id_isar0 = 0x02101111; | ||
318 | - cpu->id_isar1 = 0x13112111; | ||
319 | - cpu->id_isar2 = 0x21232141; | ||
320 | - cpu->id_isar3 = 0x01112131; | ||
321 | - cpu->id_isar4 = 0x0010142; | ||
322 | - cpu->id_isar5 = 0x0; | ||
323 | - cpu->id_isar6 = 0x0; | ||
324 | + cpu->isar.id_isar0 = 0x02101111; | ||
325 | + cpu->isar.id_isar1 = 0x13112111; | ||
326 | + cpu->isar.id_isar2 = 0x21232141; | ||
327 | + cpu->isar.id_isar3 = 0x01112131; | ||
328 | + cpu->isar.id_isar4 = 0x0010142; | ||
329 | + cpu->isar.id_isar5 = 0x0; | ||
330 | + cpu->isar.id_isar6 = 0x0; | ||
331 | cpu->mp_is_up = true; | ||
332 | cpu->pmsav7_dregion = 16; | ||
333 | define_arm_cp_regs(cpu, cortexr5_cp_reginfo); | ||
334 | @@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj) | ||
335 | set_feature(&cpu->env, ARM_FEATURE_EL3); | ||
336 | cpu->midr = 0x410fc080; | ||
337 | cpu->reset_fpsid = 0x410330c0; | ||
338 | - cpu->mvfr0 = 0x11110222; | ||
339 | - cpu->mvfr1 = 0x00011111; | ||
340 | + cpu->isar.mvfr0 = 0x11110222; | ||
341 | + cpu->isar.mvfr1 = 0x00011111; | ||
342 | cpu->ctr = 0x82048004; | ||
343 | cpu->reset_sctlr = 0x00c50078; | ||
344 | cpu->id_pfr0 = 0x1031; | ||
345 | @@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj) | ||
346 | cpu->id_mmfr1 = 0x20000000; | ||
347 | cpu->id_mmfr2 = 0x01202000; | ||
348 | cpu->id_mmfr3 = 0x11; | ||
349 | - cpu->id_isar0 = 0x00101111; | ||
350 | - cpu->id_isar1 = 0x12112111; | ||
351 | - cpu->id_isar2 = 0x21232031; | ||
352 | - cpu->id_isar3 = 0x11112131; | ||
353 | - cpu->id_isar4 = 0x00111142; | ||
354 | + cpu->isar.id_isar0 = 0x00101111; | ||
355 | + cpu->isar.id_isar1 = 0x12112111; | ||
356 | + cpu->isar.id_isar2 = 0x21232031; | ||
357 | + cpu->isar.id_isar3 = 0x11112131; | ||
358 | + cpu->isar.id_isar4 = 0x00111142; | ||
359 | cpu->dbgdidr = 0x15141000; | ||
360 | cpu->clidr = (1 << 27) | (2 << 24) | 3; | ||
361 | cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ | ||
362 | @@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj) | ||
363 | set_feature(&cpu->env, ARM_FEATURE_CBAR); | ||
364 | cpu->midr = 0x410fc090; | ||
365 | cpu->reset_fpsid = 0x41033090; | ||
366 | - cpu->mvfr0 = 0x11110222; | ||
367 | - cpu->mvfr1 = 0x01111111; | ||
368 | + cpu->isar.mvfr0 = 0x11110222; | ||
369 | + cpu->isar.mvfr1 = 0x01111111; | ||
370 | cpu->ctr = 0x80038003; | ||
371 | cpu->reset_sctlr = 0x00c50078; | ||
372 | cpu->id_pfr0 = 0x1031; | ||
373 | @@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj) | ||
374 | cpu->id_mmfr1 = 0x20000000; | ||
375 | cpu->id_mmfr2 = 0x01230000; | ||
376 | cpu->id_mmfr3 = 0x00002111; | ||
377 | - cpu->id_isar0 = 0x00101111; | ||
378 | - cpu->id_isar1 = 0x13112111; | ||
379 | - cpu->id_isar2 = 0x21232041; | ||
380 | - cpu->id_isar3 = 0x11112131; | ||
381 | - cpu->id_isar4 = 0x00111142; | ||
382 | + cpu->isar.id_isar0 = 0x00101111; | ||
383 | + cpu->isar.id_isar1 = 0x13112111; | ||
384 | + cpu->isar.id_isar2 = 0x21232041; | ||
385 | + cpu->isar.id_isar3 = 0x11112131; | ||
386 | + cpu->isar.id_isar4 = 0x00111142; | ||
387 | cpu->dbgdidr = 0x35141000; | ||
388 | cpu->clidr = (1 << 27) | (1 << 24) | 3; | ||
389 | cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ | ||
390 | @@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj) | ||
391 | cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7; | ||
392 | cpu->midr = 0x410fc075; | ||
393 | cpu->reset_fpsid = 0x41023075; | ||
394 | - cpu->mvfr0 = 0x10110222; | ||
395 | - cpu->mvfr1 = 0x11111111; | ||
396 | + cpu->isar.mvfr0 = 0x10110222; | ||
397 | + cpu->isar.mvfr1 = 0x11111111; | ||
398 | cpu->ctr = 0x84448003; | ||
399 | cpu->reset_sctlr = 0x00c50078; | ||
400 | cpu->id_pfr0 = 0x00001131; | ||
401 | @@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj) | ||
402 | /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but | ||
403 | * table 4-41 gives 0x02101110, which includes the arm div insns. | ||
404 | */ | ||
405 | - cpu->id_isar0 = 0x02101110; | ||
406 | - cpu->id_isar1 = 0x13112111; | ||
407 | - cpu->id_isar2 = 0x21232041; | ||
408 | - cpu->id_isar3 = 0x11112131; | ||
409 | - cpu->id_isar4 = 0x10011142; | ||
410 | + cpu->isar.id_isar0 = 0x02101110; | ||
411 | + cpu->isar.id_isar1 = 0x13112111; | ||
412 | + cpu->isar.id_isar2 = 0x21232041; | ||
413 | + cpu->isar.id_isar3 = 0x11112131; | ||
414 | + cpu->isar.id_isar4 = 0x10011142; | ||
415 | cpu->dbgdidr = 0x3515f005; | ||
416 | cpu->clidr = 0x0a200023; | ||
417 | cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ | ||
418 | @@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj) | ||
419 | cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; | ||
420 | cpu->midr = 0x412fc0f1; | ||
421 | cpu->reset_fpsid = 0x410430f0; | ||
422 | - cpu->mvfr0 = 0x10110222; | ||
423 | - cpu->mvfr1 = 0x11111111; | ||
424 | + cpu->isar.mvfr0 = 0x10110222; | ||
425 | + cpu->isar.mvfr1 = 0x11111111; | ||
426 | cpu->ctr = 0x8444c004; | ||
427 | cpu->reset_sctlr = 0x00c50078; | ||
428 | cpu->id_pfr0 = 0x00001131; | ||
429 | @@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj) | ||
430 | cpu->id_mmfr1 = 0x20000000; | ||
431 | cpu->id_mmfr2 = 0x01240000; | ||
432 | cpu->id_mmfr3 = 0x02102211; | ||
433 | - cpu->id_isar0 = 0x02101110; | ||
434 | - cpu->id_isar1 = 0x13112111; | ||
435 | - cpu->id_isar2 = 0x21232041; | ||
436 | - cpu->id_isar3 = 0x11112131; | ||
437 | - cpu->id_isar4 = 0x10011142; | ||
438 | + cpu->isar.id_isar0 = 0x02101110; | ||
439 | + cpu->isar.id_isar1 = 0x13112111; | ||
440 | + cpu->isar.id_isar2 = 0x21232041; | ||
441 | + cpu->isar.id_isar3 = 0x11112131; | ||
442 | + cpu->isar.id_isar4 = 0x10011142; | ||
443 | cpu->dbgdidr = 0x3515f021; | ||
444 | cpu->clidr = 0x0a200023; | ||
445 | cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ | ||
446 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | ||
447 | index XXXXXXX..XXXXXXX 100644 | ||
448 | --- a/target/arm/cpu64.c | ||
449 | +++ b/target/arm/cpu64.c | ||
450 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj) | ||
451 | cpu->midr = 0x411fd070; | ||
452 | cpu->revidr = 0x00000000; | ||
453 | cpu->reset_fpsid = 0x41034070; | ||
454 | - cpu->mvfr0 = 0x10110222; | ||
455 | - cpu->mvfr1 = 0x12111111; | ||
456 | - cpu->mvfr2 = 0x00000043; | ||
457 | + cpu->isar.mvfr0 = 0x10110222; | ||
458 | + cpu->isar.mvfr1 = 0x12111111; | ||
459 | + cpu->isar.mvfr2 = 0x00000043; | ||
460 | cpu->ctr = 0x8444c004; | ||
461 | cpu->reset_sctlr = 0x00c50838; | ||
462 | cpu->id_pfr0 = 0x00000131; | ||
463 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj) | ||
464 | cpu->id_mmfr1 = 0x40000000; | ||
465 | cpu->id_mmfr2 = 0x01260000; | ||
466 | cpu->id_mmfr3 = 0x02102211; | ||
467 | - cpu->id_isar0 = 0x02101110; | ||
468 | - cpu->id_isar1 = 0x13112111; | ||
469 | - cpu->id_isar2 = 0x21232042; | ||
470 | - cpu->id_isar3 = 0x01112131; | ||
471 | - cpu->id_isar4 = 0x00011142; | ||
472 | - cpu->id_isar5 = 0x00011121; | ||
473 | - cpu->id_isar6 = 0; | ||
474 | - cpu->id_aa64pfr0 = 0x00002222; | ||
475 | + cpu->isar.id_isar0 = 0x02101110; | ||
476 | + cpu->isar.id_isar1 = 0x13112111; | ||
477 | + cpu->isar.id_isar2 = 0x21232042; | ||
478 | + cpu->isar.id_isar3 = 0x01112131; | ||
479 | + cpu->isar.id_isar4 = 0x00011142; | ||
480 | + cpu->isar.id_isar5 = 0x00011121; | ||
481 | + cpu->isar.id_isar6 = 0; | ||
482 | + cpu->isar.id_aa64pfr0 = 0x00002222; | ||
483 | cpu->id_aa64dfr0 = 0x10305106; | ||
484 | cpu->pmceid0 = 0x00000000; | ||
485 | cpu->pmceid1 = 0x00000000; | ||
486 | - cpu->id_aa64isar0 = 0x00011120; | ||
487 | + cpu->isar.id_aa64isar0 = 0x00011120; | ||
488 | cpu->id_aa64mmfr0 = 0x00001124; | ||
489 | cpu->dbgdidr = 0x3516d000; | ||
490 | cpu->clidr = 0x0a200023; | ||
491 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj) | ||
492 | cpu->midr = 0x410fd034; | ||
493 | cpu->revidr = 0x00000000; | ||
494 | cpu->reset_fpsid = 0x41034070; | ||
495 | - cpu->mvfr0 = 0x10110222; | ||
496 | - cpu->mvfr1 = 0x12111111; | ||
497 | - cpu->mvfr2 = 0x00000043; | ||
498 | + cpu->isar.mvfr0 = 0x10110222; | ||
499 | + cpu->isar.mvfr1 = 0x12111111; | ||
500 | + cpu->isar.mvfr2 = 0x00000043; | ||
501 | cpu->ctr = 0x84448004; /* L1Ip = VIPT */ | ||
502 | cpu->reset_sctlr = 0x00c50838; | ||
503 | cpu->id_pfr0 = 0x00000131; | ||
504 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj) | ||
505 | cpu->id_mmfr1 = 0x40000000; | ||
506 | cpu->id_mmfr2 = 0x01260000; | ||
507 | cpu->id_mmfr3 = 0x02102211; | ||
508 | - cpu->id_isar0 = 0x02101110; | ||
509 | - cpu->id_isar1 = 0x13112111; | ||
510 | - cpu->id_isar2 = 0x21232042; | ||
511 | - cpu->id_isar3 = 0x01112131; | ||
512 | - cpu->id_isar4 = 0x00011142; | ||
513 | - cpu->id_isar5 = 0x00011121; | ||
514 | - cpu->id_isar6 = 0; | ||
515 | - cpu->id_aa64pfr0 = 0x00002222; | ||
516 | + cpu->isar.id_isar0 = 0x02101110; | ||
517 | + cpu->isar.id_isar1 = 0x13112111; | ||
518 | + cpu->isar.id_isar2 = 0x21232042; | ||
519 | + cpu->isar.id_isar3 = 0x01112131; | ||
520 | + cpu->isar.id_isar4 = 0x00011142; | ||
521 | + cpu->isar.id_isar5 = 0x00011121; | ||
522 | + cpu->isar.id_isar6 = 0; | ||
523 | + cpu->isar.id_aa64pfr0 = 0x00002222; | ||
524 | cpu->id_aa64dfr0 = 0x10305106; | ||
525 | - cpu->id_aa64isar0 = 0x00011120; | ||
526 | + cpu->isar.id_aa64isar0 = 0x00011120; | ||
527 | cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ | ||
528 | cpu->dbgdidr = 0x3516d000; | ||
529 | cpu->clidr = 0x0a200023; | ||
530 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj) | ||
531 | cpu->midr = 0x410fd083; | ||
532 | cpu->revidr = 0x00000000; | ||
533 | cpu->reset_fpsid = 0x41034080; | ||
534 | - cpu->mvfr0 = 0x10110222; | ||
535 | - cpu->mvfr1 = 0x12111111; | ||
536 | - cpu->mvfr2 = 0x00000043; | ||
537 | + cpu->isar.mvfr0 = 0x10110222; | ||
538 | + cpu->isar.mvfr1 = 0x12111111; | ||
539 | + cpu->isar.mvfr2 = 0x00000043; | ||
540 | cpu->ctr = 0x8444c004; | ||
541 | cpu->reset_sctlr = 0x00c50838; | ||
542 | cpu->id_pfr0 = 0x00000131; | ||
543 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj) | ||
544 | cpu->id_mmfr1 = 0x40000000; | ||
545 | cpu->id_mmfr2 = 0x01260000; | ||
546 | cpu->id_mmfr3 = 0x02102211; | ||
547 | - cpu->id_isar0 = 0x02101110; | ||
548 | - cpu->id_isar1 = 0x13112111; | ||
549 | - cpu->id_isar2 = 0x21232042; | ||
550 | - cpu->id_isar3 = 0x01112131; | ||
551 | - cpu->id_isar4 = 0x00011142; | ||
552 | - cpu->id_isar5 = 0x00011121; | ||
553 | - cpu->id_aa64pfr0 = 0x00002222; | ||
554 | + cpu->isar.id_isar0 = 0x02101110; | ||
555 | + cpu->isar.id_isar1 = 0x13112111; | ||
556 | + cpu->isar.id_isar2 = 0x21232042; | ||
557 | + cpu->isar.id_isar3 = 0x01112131; | ||
558 | + cpu->isar.id_isar4 = 0x00011142; | ||
559 | + cpu->isar.id_isar5 = 0x00011121; | ||
560 | + cpu->isar.id_aa64pfr0 = 0x00002222; | ||
561 | cpu->id_aa64dfr0 = 0x10305106; | ||
562 | cpu->pmceid0 = 0x00000000; | ||
563 | cpu->pmceid1 = 0x00000000; | ||
564 | - cpu->id_aa64isar0 = 0x00011120; | ||
565 | + cpu->isar.id_aa64isar0 = 0x00011120; | ||
566 | cpu->id_aa64mmfr0 = 0x00001124; | ||
567 | cpu->dbgdidr = 0x3516d000; | ||
568 | cpu->clidr = 0x0a200023; | ||
569 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
570 | index XXXXXXX..XXXXXXX 100644 | ||
571 | --- a/target/arm/helper.c | ||
572 | +++ b/target/arm/helper.c | ||
573 | @@ -XXX,XX +XXX,XX @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) | ||
574 | static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) | ||
575 | { | 141 | { |
576 | ARMCPU *cpu = arm_env_get_cpu(env); | 142 | int idx = get_mem_index(s); |
577 | - uint64_t pfr0 = cpu->id_aa64pfr0; | 143 | TCGv_i64 dirty_addr, clean_addr; |
578 | + uint64_t pfr0 = cpu->isar.id_aa64pfr0; | 144 | - MemOp memop; |
579 | 145 | - | |
580 | if (env->gicv3state) { | 146 | - /* |
581 | pfr0 |= 1 << 24; | 147 | - * For pairs: |
582 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 148 | - * if size == 2, the operation is single-copy atomic for the doubleword. |
583 | { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, | 149 | - * if size == 3, the operation is single-copy atomic for *each* doubleword, |
584 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, | 150 | - * not the entire quadword, however it must be quadword aligned. |
585 | .access = PL1_R, .type = ARM_CP_CONST, | 151 | - */ |
586 | - .resetvalue = cpu->id_isar0 }, | 152 | - memop = size + is_pair; |
587 | + .resetvalue = cpu->isar.id_isar0 }, | 153 | - if (memop == MO_128) { |
588 | { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, | 154 | - memop = finalize_memop_atom(s, MO_128 | MO_ALIGN, |
589 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, | 155 | - MO_ATOM_IFALIGN_PAIR); |
590 | .access = PL1_R, .type = ARM_CP_CONST, | 156 | - } else { |
591 | - .resetvalue = cpu->id_isar1 }, | 157 | - memop = finalize_memop(s, memop | MO_ALIGN); |
592 | + .resetvalue = cpu->isar.id_isar1 }, | 158 | - } |
593 | { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, | 159 | + MemOp memop = check_atomic_align(s, rn, size + is_pair); |
594 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, | 160 | |
595 | .access = PL1_R, .type = ARM_CP_CONST, | 161 | s->is_ldex = true; |
596 | - .resetvalue = cpu->id_isar2 }, | 162 | dirty_addr = cpu_reg_sp(s, rn); |
597 | + .resetvalue = cpu->isar.id_isar2 }, | 163 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt, |
598 | { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, | 164 | if (rn == 31) { |
599 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, | 165 | gen_check_sp_alignment(s); |
600 | .access = PL1_R, .type = ARM_CP_CONST, | 166 | } |
601 | - .resetvalue = cpu->id_isar3 }, | 167 | - memop = finalize_memop(s, size | MO_ALIGN); |
602 | + .resetvalue = cpu->isar.id_isar3 }, | 168 | + memop = check_atomic_align(s, rn, size); |
603 | { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, | 169 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); |
604 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, | 170 | tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, |
605 | .access = PL1_R, .type = ARM_CP_CONST, | 171 | memidx, memop); |
606 | - .resetvalue = cpu->id_isar4 }, | 172 | @@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, |
607 | + .resetvalue = cpu->isar.id_isar4 }, | 173 | } |
608 | { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, | 174 | |
609 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, | 175 | /* This is a single atomic access, despite the "pair". */ |
610 | .access = PL1_R, .type = ARM_CP_CONST, | 176 | - memop = finalize_memop(s, (size + 1) | MO_ALIGN); |
611 | - .resetvalue = cpu->id_isar5 }, | 177 | + memop = check_atomic_align(s, rn, size + 1); |
612 | + .resetvalue = cpu->isar.id_isar5 }, | 178 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); |
613 | { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, | 179 | |
614 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, | 180 | if (size == 2) { |
615 | .access = PL1_R, .type = ARM_CP_CONST, | 181 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
616 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 182 | gen_check_sp_alignment(s); |
617 | { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, | 183 | } |
618 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, | 184 | tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); |
619 | .access = PL1_R, .type = ARM_CP_CONST, | 185 | - /* TODO: ARMv8.4-LSE SCTLR.nAA */ |
620 | - .resetvalue = cpu->id_isar6 }, | 186 | - memop = finalize_memop(s, size | MO_ALIGN); |
621 | + .resetvalue = cpu->isar.id_isar6 }, | 187 | + memop = check_ordered_align(s, rn, 0, true, size); |
622 | REGINFO_SENTINEL | 188 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
623 | }; | 189 | true, rn != 31, memop); |
624 | define_arm_cp_regs(cpu, v6_idregs); | 190 | do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt, |
625 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 191 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) |
626 | { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, | 192 | if (rn == 31) { |
627 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, | 193 | gen_check_sp_alignment(s); |
628 | .access = PL1_R, .type = ARM_CP_CONST, | 194 | } |
629 | - .resetvalue = cpu->id_aa64pfr1}, | 195 | - /* TODO: ARMv8.4-LSE SCTLR.nAA */ |
630 | + .resetvalue = cpu->isar.id_aa64pfr1}, | 196 | - memop = finalize_memop(s, size | MO_ALIGN); |
631 | { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | 197 | + memop = check_ordered_align(s, rn, 0, false, size); |
632 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, | 198 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), |
633 | .access = PL1_R, .type = ARM_CP_CONST, | 199 | false, rn != 31, memop); |
634 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 200 | do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true, |
635 | { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, | 201 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, |
636 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, | 202 | bool a = extract32(insn, 23, 1); |
637 | .access = PL1_R, .type = ARM_CP_CONST, | 203 | TCGv_i64 tcg_rs, tcg_rt, clean_addr; |
638 | - .resetvalue = cpu->id_aa64isar0 }, | 204 | AtomicThreeOpFn *fn = NULL; |
639 | + .resetvalue = cpu->isar.id_aa64isar0 }, | 205 | - MemOp mop = finalize_memop(s, size | MO_ALIGN); |
640 | { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, | 206 | + MemOp mop = size; |
641 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, | 207 | |
642 | .access = PL1_R, .type = ARM_CP_CONST, | 208 | if (is_vector || !dc_isar_feature(aa64_atomics, s)) { |
643 | - .resetvalue = cpu->id_aa64isar1 }, | 209 | unallocated_encoding(s); |
644 | + .resetvalue = cpu->isar.id_aa64isar1 }, | 210 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, |
645 | { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | 211 | if (rn == 31) { |
646 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, | 212 | gen_check_sp_alignment(s); |
647 | .access = PL1_R, .type = ARM_CP_CONST, | 213 | } |
648 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 214 | + |
649 | { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, | 215 | + mop = check_atomic_align(s, rn, mop); |
650 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, | 216 | clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, mop); |
651 | .access = PL1_R, .type = ARM_CP_CONST, | 217 | |
652 | - .resetvalue = cpu->mvfr0 }, | 218 | if (o3_opc == 014) { |
653 | + .resetvalue = cpu->isar.mvfr0 }, | 219 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) |
654 | { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, | 220 | bool is_store = false; |
655 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, | 221 | bool extend = false; |
656 | .access = PL1_R, .type = ARM_CP_CONST, | 222 | bool iss_sf; |
657 | - .resetvalue = cpu->mvfr1 }, | 223 | - MemOp mop; |
658 | + .resetvalue = cpu->isar.mvfr1 }, | 224 | + MemOp mop = size; |
659 | { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, | 225 | |
660 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, | 226 | if (!dc_isar_feature(aa64_rcpc_8_4, s)) { |
661 | .access = PL1_R, .type = ARM_CP_CONST, | 227 | unallocated_encoding(s); |
662 | - .resetvalue = cpu->mvfr2 }, | 228 | return; |
663 | + .resetvalue = cpu->isar.mvfr2 }, | 229 | } |
664 | { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | 230 | |
665 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, | 231 | - /* TODO: ARMv8.4-LSE SCTLR.nAA */ |
666 | .access = PL1_R, .type = ARM_CP_CONST, | 232 | - mop = finalize_memop(s, size | MO_ALIGN); |
233 | - | ||
234 | switch (opc) { | ||
235 | case 0: /* STLURB */ | ||
236 | is_store = true; | ||
237 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) | ||
238 | gen_check_sp_alignment(s); | ||
239 | } | ||
240 | |||
241 | + mop = check_ordered_align(s, rn, offset, is_store, mop); | ||
242 | + | ||
243 | dirty_addr = read_cpu_reg_sp(s, rn, 1); | ||
244 | tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); | ||
245 | clean_addr = clean_data_tbi(s, dirty_addr); | ||
667 | -- | 246 | -- |
668 | 2.19.1 | 247 | 2.34.1 |
669 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Push the mte check behind the exclusive_addr check. | ||
4 | Document the several ways that we are still out of spec | ||
5 | with this implementation. | ||
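Roughly, in plain C (a sketch with made-up helper names, not the TCG emitted by this patch), the resulting order for a store-exclusive is:

    #include <stdint.h>

    extern void mte_and_alignment_checks(uint64_t addr);  /* may fault */
    extern void do_store(uint64_t addr);

    static void store_exclusive(uint64_t addr, uint64_t exclusive_addr)
    {
        if (addr != exclusive_addr) {
            return;  /* monitor miss: the store is not attempted, so no faults */
        }
        mte_and_alignment_checks(addr);  /* checked only on the path that stores */
        do_store(addr);
    }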
6 | |||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20181011205206.3552-11-richard.henderson@linaro.org | 9 | Message-id: 20230530191438.411344-18-richard.henderson@linaro.org |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 11 | --- |
8 | target/arm/translate.c | 16 ++++++++-------- | 12 | target/arm/tcg/translate-a64.c | 42 +++++++++++++++++++++++++++++----- |
9 | 1 file changed, 8 insertions(+), 8 deletions(-) | 13 | 1 file changed, 36 insertions(+), 6 deletions(-) |
10 | 14 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 15 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 17 | --- a/target/arm/tcg/translate-a64.c |
14 | +++ b/target/arm/translate.c | 18 | +++ b/target/arm/tcg/translate-a64.c |
15 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 19 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, |
16 | tcg_temp_free_ptr(ptr1); | 20 | */ |
17 | tcg_temp_free_ptr(ptr2); | 21 | TCGLabel *fail_label = gen_new_label(); |
18 | break; | 22 | TCGLabel *done_label = gen_new_label(); |
23 | - TCGv_i64 tmp, dirty_addr, clean_addr; | ||
24 | + TCGv_i64 tmp, clean_addr; | ||
25 | MemOp memop; | ||
26 | |||
27 | - memop = (size + is_pair) | MO_ALIGN; | ||
28 | - memop = finalize_memop(s, memop); | ||
29 | - | ||
30 | - dirty_addr = cpu_reg_sp(s, rn); | ||
31 | - clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, memop); | ||
32 | + /* | ||
33 | + * FIXME: We are out of spec here. We have recorded only the address | ||
34 | + * from load_exclusive, not the entire range, and we assume that the | ||
35 | + * size of the access on both sides match. The architecture allows the | ||
36 | + * store to be smaller than the load, so long as the stored bytes are | ||
37 | + * within the range recorded by the load. | ||
38 | + */ | ||
39 | |||
40 | + /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */ | ||
41 | + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); | ||
42 | tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label); | ||
43 | |||
44 | + /* | ||
45 | + * The write, and any associated faults, only happen if the virtual | ||
46 | + * and physical addresses pass the exclusive monitor check. These | ||
47 | + * faults are exceedingly unlikely, because normally the guest uses | ||
48 | + * the exact same address register for the load_exclusive, and we | ||
49 | + * would have recognized these faults there. | ||
50 | + * | ||
51 | + * It is possible to trigger an alignment fault pre-LSE2, e.g. with an | ||
52 | + * unaligned 4-byte write within the range of an aligned 8-byte load. | ||
53 | + * With LSE2, the store would need to cross a 16-byte boundary when the | ||
54 | + * load did not, which would mean the store is outside the range | ||
55 | + * recorded for the monitor, which would have failed a corrected monitor | ||
56 | + * check above. For now, we assume no size change and retain the | ||
57 | + * MO_ALIGN to let tcg know what we checked in the load_exclusive. | ||
58 | + * | ||
59 | + * It is possible to trigger an MTE fault, by performing the load with | ||
60 | + * a virtual address with a valid tag and performing the store with the | ||
61 | + * same virtual address and a different invalid tag. | ||
62 | + */ | ||
63 | + memop = size + is_pair; | ||
64 | + if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) { | ||
65 | + memop |= MO_ALIGN; | ||
66 | + } | ||
67 | + memop = finalize_memop(s, memop); | ||
68 | + gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); | ||
19 | + | 69 | + |
20 | + case NEON_2RM_VMVN: | 70 | tmp = tcg_temp_new_i64(); |
21 | + tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size); | 71 | if (is_pair) { |
22 | + break; | 72 | if (size == 2) { |
23 | + case NEON_2RM_VNEG: | ||
24 | + tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size); | ||
25 | + break; | ||
26 | + | ||
27 | default: | ||
28 | elementwise: | ||
29 | for (pass = 0; pass < (q ? 4 : 2); pass++) { | ||
30 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
31 | case NEON_2RM_VCNT: | ||
32 | gen_helper_neon_cnt_u8(tmp, tmp); | ||
33 | break; | ||
34 | - case NEON_2RM_VMVN: | ||
35 | - tcg_gen_not_i32(tmp, tmp); | ||
36 | - break; | ||
37 | case NEON_2RM_VQABS: | ||
38 | switch (size) { | ||
39 | case 0: | ||
40 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
41 | default: abort(); | ||
42 | } | ||
43 | break; | ||
44 | - case NEON_2RM_VNEG: | ||
45 | - tmp2 = tcg_const_i32(0); | ||
46 | - gen_neon_rsb(size, tmp, tmp2); | ||
47 | - tcg_temp_free_i32(tmp2); | ||
48 | - break; | ||
49 | case NEON_2RM_VCGT0_F: | ||
50 | { | ||
51 | TCGv_ptr fpstatus = get_fpstatus_ptr(1); | ||
52 | -- | 73 | -- |
53 | 2.19.1 | 74 | 2.34.1 |
54 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | We have many other instances of stg in the testsuite; | ||
4 | change these to provide an instance of stz2g. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Message-id: 20181011205206.3552-10-richard.henderson@linaro.org | 8 | Message-id: 20230530191438.411344-19-richard.henderson@linaro.org |
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 10 | --- |
8 | target/arm/translate.c | 29 ++++++++++------------------- | 11 | tests/tcg/aarch64/mte-7.c | 3 +-- |
9 | 1 file changed, 10 insertions(+), 19 deletions(-) | 12 | 1 file changed, 1 insertion(+), 2 deletions(-) |
10 | 13 | ||
11 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 14 | diff --git a/tests/tcg/aarch64/mte-7.c b/tests/tcg/aarch64/mte-7.c |
12 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/target/arm/translate.c | 16 | --- a/tests/tcg/aarch64/mte-7.c |
14 | +++ b/target/arm/translate.c | 17 | +++ b/tests/tcg/aarch64/mte-7.c |
15 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 18 | @@ -XXX,XX +XXX,XX @@ int main(int ac, char **av) |
16 | break; | 19 | p = (void *)((unsigned long)p | (1ul << 56)); |
17 | } | 20 | |
18 | return 0; | 21 | /* Store tag in sequential granules. */ |
19 | + | 22 | - asm("stg %0, [%0]" : : "r"(p + 0x0ff0)); |
20 | + case NEON_3R_VADD_VSUB: | 23 | - asm("stg %0, [%0]" : : "r"(p + 0x1000)); |
21 | + if (u) { | 24 | + asm("stz2g %0, [%0]" : : "r"(p + 0x0ff0)); |
22 | + tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs, | 25 | |
23 | + vec_size, vec_size); | 26 | /* |
24 | + } else { | 27 | * Perform an unaligned store with tag 1 crossing the pages. |
25 | + tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs, | ||
26 | + vec_size, vec_size); | ||
27 | + } | ||
28 | + return 0; | ||
29 | } | ||
30 | if (size == 3) { | ||
31 | /* 64-bit element instructions. */ | ||
32 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
33 | cpu_V1, cpu_V0); | ||
34 | } | ||
35 | break; | ||
36 | - case NEON_3R_VADD_VSUB: | ||
37 | - if (u) { | ||
38 | - tcg_gen_sub_i64(CPU_V001); | ||
39 | - } else { | ||
40 | - tcg_gen_add_i64(CPU_V001); | ||
41 | - } | ||
42 | - break; | ||
43 | default: | ||
44 | abort(); | ||
45 | } | ||
46 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
47 | tmp2 = neon_load_reg(rd, pass); | ||
48 | gen_neon_add(size, tmp, tmp2); | ||
49 | break; | ||
50 | - case NEON_3R_VADD_VSUB: | ||
51 | - if (!u) { /* VADD */ | ||
52 | - gen_neon_add(size, tmp, tmp2); | ||
53 | - } else { /* VSUB */ | ||
54 | - switch (size) { | ||
55 | - case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; | ||
56 | - case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; | ||
57 | - case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; | ||
58 | - default: abort(); | ||
59 | - } | ||
60 | - } | ||
61 | - break; | ||
62 | case NEON_3R_VTST_VCEQ: | ||
63 | if (!u) { /* VTST */ | ||
64 | switch (size) { | ||
65 | -- | 28 | -- |
66 | 2.19.1 | 29 | 2.34.1 |
67 | |||
68 | diff view generated by jsdifflib |
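A minimal sketch (not part of the patch) of why the single stz2g suffices in mte-7.c above: stg tags one 16-byte granule, while stz2g tags two consecutive granules and zeroes their 32 data bytes, so one instruction at p + 0x0ff0 covers both granules that the two stg instructions used to tag separately.

    /* illustrative only; offsets taken from the test above */
    asm("stg %0, [%0]"   : : "r"(p + 0x0ff0));  /* tags [p+0x0ff0, p+0x1000) */
    asm("stg %0, [%0]"   : : "r"(p + 0x1000));  /* tags [p+0x1000, p+0x1010) */
    /* same tag coverage from one instruction, plus zeroing of the data: */
    asm("stz2g %0, [%0]" : : "r"(p + 0x0ff0));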
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | With -cpu max and FEAT_LSE2, the __aarch64__ section will only raise | ||
4 | an alignment exception when the load crosses a 16-byte boundary. | ||
5 | |||
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 8 | Message-id: 20230530191438.411344-20-richard.henderson@linaro.org |
5 | Message-id: 20181011205206.3552-6-richard.henderson@linaro.org | ||
6 | [PMM: drop change to now-deleted cpu_mode_names array] | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 10 | --- |
10 | target/arm/translate.c | 4 ++-- | 11 | tests/tcg/multiarch/sigbus.c | 13 +++++++++---- |
11 | 1 file changed, 2 insertions(+), 2 deletions(-) | 12 | 1 file changed, 9 insertions(+), 4 deletions(-) |
12 | 13 | ||
13 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 14 | diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c |
14 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/translate.c | 16 | --- a/tests/tcg/multiarch/sigbus.c |
16 | +++ b/target/arm/translate.c | 17 | +++ b/tests/tcg/multiarch/sigbus.c |
17 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 cpu_F0d, cpu_F1d; | 18 | @@ -XXX,XX +XXX,XX @@ |
18 | 19 | #include <endian.h> | |
19 | #include "exec/gen-icount.h" | 20 | |
20 | 21 | ||
21 | -static const char *regnames[] = | 22 | -unsigned long long x = 0x8877665544332211ull; |
22 | +static const char * const regnames[] = | 23 | -void * volatile p = (void *)&x + 1; |
23 | { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", | 24 | +char x[32] __attribute__((aligned(16))) = { |
24 | "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; | 25 | + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, |
25 | 26 | + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, | |
26 | @@ -XXX,XX +XXX,XX @@ static struct { | 27 | + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, |
27 | int nregs; | 28 | + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, |
28 | int interleave; | 29 | +}; |
29 | int spacing; | 30 | +void * volatile p = (void *)&x + 15; |
30 | -} neon_ls_element_type[11] = { | 31 | |
31 | +} const neon_ls_element_type[11] = { | 32 | void sigbus(int sig, siginfo_t *info, void *uc) |
32 | {4, 4, 1}, | 33 | { |
33 | {4, 4, 2}, | 34 | @@ -XXX,XX +XXX,XX @@ int main() |
34 | {4, 1, 1}, | 35 | * We might as well validate the unaligned load worked. |
36 | */ | ||
37 | if (BYTE_ORDER == LITTLE_ENDIAN) { | ||
38 | - assert(tmp == 0x55443322); | ||
39 | + assert(tmp == 0x13121110); | ||
40 | } else { | ||
41 | - assert(tmp == 0x77665544); | ||
42 | + assert(tmp == 0x10111213); | ||
43 | } | ||
44 | return EXIT_SUCCESS; | ||
45 | } | ||
35 | -- | 46 | -- |
36 | 2.19.1 | 47 | 2.34.1 |
37 | |||
38 | diff view generated by jsdifflib |
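A minimal sketch of the boundary arithmetic behind the new p = (void *)&x + 15 in sigbus.c above (assuming x stays 16-byte aligned, as the patch declares): under FEAT_LSE2 an unaligned access only raises an alignment exception when it crosses a 16-byte boundary, so the test pointer must straddle one. The helper below is illustrative, not from the patch.

    static int crosses_16byte_boundary(uintptr_t addr, size_t len)
    {
        return (addr / 16) != ((addr + len - 1) / 16);
    }
    /* crosses_16byte_boundary((uintptr_t)x + 1, 4)  -> 0: no fault under LSE2          */
    /* crosses_16byte_boundary((uintptr_t)x + 15, 4) -> 1: still crosses, so the test   */
    /*   can still exercise the alignment SIGBUS; bytes 15..18 are 0x10..0x13, matching */
    /*   the updated assertions (0x13121110 little-endian, 0x10111213 big-endian)       */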
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | This is done generically in translator_loop. | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | |||
5 | Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 5 | Message-id: 20230530191438.411344-21-richard.henderson@linaro.org |
8 | Message-id: 20181011205206.3552-3-richard.henderson@linaro.org | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 7 | --- |
12 | target/arm/translate-a64.c | 1 - | 8 | docs/system/arm/emulation.rst | 1 + |
13 | target/arm/translate.c | 1 - | 9 | target/arm/tcg/cpu64.c | 1 + |
14 | 2 files changed, 2 deletions(-) | 10 | 2 files changed, 2 insertions(+) |
15 | 11 | ||
16 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 12 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst |
17 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/translate-a64.c | 14 | --- a/docs/system/arm/emulation.rst |
19 | +++ b/target/arm/translate-a64.c | 15 | +++ b/docs/system/arm/emulation.rst |
20 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | 16 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: |
21 | 17 | - FEAT_LRCPC (Load-acquire RCpc instructions) | |
22 | static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) | 18 | - FEAT_LRCPC2 (Load-acquire RCpc instructions v2) |
23 | { | 19 | - FEAT_LSE (Large System Extensions) |
24 | - tcg_clear_temp_count(); | 20 | +- FEAT_LSE2 (Large System Extensions v2) |
25 | } | 21 | - FEAT_LVA (Large Virtual Address space) |
26 | 22 | - FEAT_MTE (Memory Tagging Extension) | |
27 | static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) | 23 | - FEAT_MTE2 (Memory Tagging Extension) |
28 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 24 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c |
29 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/translate.c | 26 | --- a/target/arm/tcg/cpu64.c |
31 | +++ b/target/arm/translate.c | 27 | +++ b/target/arm/tcg/cpu64.c |
32 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) | 28 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) |
33 | tcg_gen_movi_i32(tmp, 0); | 29 | t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ |
34 | store_cpu_field(tmp, condexec_bits); | 30 | t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ |
35 | } | 31 | t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ |
36 | - tcg_clear_temp_count(); | 32 | + t = FIELD_DP64(t, ID_AA64MMFR2, AT, 1); /* FEAT_LSE2 */ |
37 | } | 33 | t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ |
38 | 34 | t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */ | |
39 | static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) | 35 | t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */ |
40 | -- | 36 | -- |
41 | 2.19.1 | 37 | 2.34.1 |
42 | |||
43 | diff view generated by jsdifflib |
1 | The HCR_EL2 VI and VF bits are supposed to track whether there is | 1 | From: Zhuojia Shen <chaosdefinition@hotmail.com> |
---|---|---|---|
2 | a pending virtual IRQ or virtual FIQ. For QEMU we store the | ||
3 | pending VIRQ/VFIQ status in cs->interrupt_request, so this means: | ||
4 | * if the register is read we must get these bit values from | ||
5 | cs->interrupt_request | ||
6 | * if the register is written then we must write the bit | ||
7 | values back into cs->interrupt_request | ||
8 | 2 | ||
3 | DC CVAP and DC CVADP instructions can be executed in EL0 on Linux, | ||
4 | either directly when SCTLR_EL1.UCI == 1 or emulated by the kernel (see | ||
5 | user_cache_maint_handler() in arch/arm64/kernel/traps.c). | ||
6 | |||
7 | This patch enables execution of the two instructions in user mode | ||
8 | emulation. | ||
9 | |||
10 | Signed-off-by: Zhuojia Shen <chaosdefinition@hotmail.com> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Message-id: 20181012144235.19646-7-peter.maydell@linaro.org | ||
12 | --- | 14 | --- |
13 | target/arm/helper.c | 47 +++++++++++++++++++++++++++++++++++++++++---- | 15 | target/arm/helper.c | 6 ++---- |
14 | 1 file changed, 43 insertions(+), 4 deletions(-) | 16 | 1 file changed, 2 insertions(+), 4 deletions(-) |
15 | 17 | ||
16 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 18 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
17 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/helper.c | 20 | --- a/target/arm/helper.c |
19 | +++ b/target/arm/helper.c | 21 | +++ b/target/arm/helper.c |
20 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { | 22 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo rndr_reginfo[] = { |
21 | static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | 23 | .access = PL0_R, .readfn = rndr_readfn }, |
24 | }; | ||
25 | |||
26 | -#ifndef CONFIG_USER_ONLY | ||
27 | static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, | ||
28 | uint64_t value) | ||
22 | { | 29 | { |
23 | ARMCPU *cpu = arm_env_get_cpu(env); | 30 | @@ -XXX,XX +XXX,XX @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, |
24 | + CPUState *cs = ENV_GET_CPU(env); | 31 | /* This won't be crossing page boundaries */ |
25 | uint64_t valid_mask = HCR_MASK; | 32 | haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); |
26 | 33 | if (haddr) { | |
27 | if (arm_feature(env, ARM_FEATURE_EL3)) { | 34 | +#ifndef CONFIG_USER_ONLY |
28 | @@ -XXX,XX +XXX,XX @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | 35 | |
29 | /* Clear RES0 bits. */ | 36 | ram_addr_t offset; |
30 | value &= valid_mask; | 37 | MemoryRegion *mr; |
31 | 38 | @@ -XXX,XX +XXX,XX @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, | |
32 | + /* | 39 | if (mr) { |
33 | + * VI and VF are kept in cs->interrupt_request. Modifying that | 40 | memory_region_writeback(mr, offset, dline_size); |
34 | + * requires that we have the iothread lock, which is done by | 41 | } |
35 | + * marking the reginfo structs as ARM_CP_IO. | 42 | +#endif /*CONFIG_USER_ONLY*/ |
36 | + * Note that if a write to HCR pends a VIRQ or VFIQ it is never | 43 | } |
37 | + * possible for it to be taken immediately, because VIRQ and | ||
38 | + * VFIQ are masked unless running at EL0 or EL1, and HCR | ||
39 | + * can only be written at EL2. | ||
40 | + */ | ||
41 | + g_assert(qemu_mutex_iothread_locked()); | ||
42 | + if (value & HCR_VI) { | ||
43 | + cs->interrupt_request |= CPU_INTERRUPT_VIRQ; | ||
44 | + } else { | ||
45 | + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; | ||
46 | + } | ||
47 | + if (value & HCR_VF) { | ||
48 | + cs->interrupt_request |= CPU_INTERRUPT_VFIQ; | ||
49 | + } else { | ||
50 | + cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ; | ||
51 | + } | ||
52 | + value &= ~(HCR_VI | HCR_VF); | ||
53 | + | ||
54 | /* These bits change the MMU setup: | ||
55 | * HCR_VM enables stage 2 translation | ||
56 | * HCR_PTW forbids certain page-table setups | ||
57 | @@ -XXX,XX +XXX,XX @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, | ||
58 | hcr_write(env, NULL, value); | ||
59 | } | 44 | } |
60 | 45 | ||
61 | +static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) | 46 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo dcpodp_reg[] = { |
62 | +{ | 47 | .fgt = FGT_DCCVADP, |
63 | + /* The VI and VF bits live in cs->interrupt_request */ | 48 | .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, |
64 | + uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF); | 49 | }; |
65 | + CPUState *cs = ENV_GET_CPU(env); | 50 | -#endif /*CONFIG_USER_ONLY*/ |
66 | + | 51 | |
67 | + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { | 52 | static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri, |
68 | + ret |= HCR_VI; | 53 | bool isread) |
69 | + } | 54 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) |
70 | + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { | 55 | if (cpu_isar_feature(aa64_tlbios, cpu)) { |
71 | + ret |= HCR_VF; | 56 | define_arm_cp_regs(cpu, tlbios_reginfo); |
72 | + } | 57 | } |
73 | + return ret; | 58 | -#ifndef CONFIG_USER_ONLY |
74 | +} | 59 | /* Data Cache clean instructions up to PoP */ |
75 | + | 60 | if (cpu_isar_feature(aa64_dcpop, cpu)) { |
76 | static const ARMCPRegInfo el2_cp_reginfo[] = { | 61 | define_one_arm_cp_reg(cpu, dcpop_reg); |
77 | { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, | 62 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) |
78 | + .type = ARM_CP_IO, | 63 | define_one_arm_cp_reg(cpu, dcpodp_reg); |
79 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, | 64 | } |
80 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), | 65 | } |
81 | - .writefn = hcr_write }, | 66 | -#endif /*CONFIG_USER_ONLY*/ |
82 | + .writefn = hcr_write, .readfn = hcr_read }, | 67 | |
83 | { .name = "HCR", .state = ARM_CP_STATE_AA32, | 68 | /* |
84 | - .type = ARM_CP_ALIAS, | 69 | * If full MTE is enabled, add all of the system registers. |
85 | + .type = ARM_CP_ALIAS | ARM_CP_IO, | ||
86 | .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, | ||
87 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), | ||
88 | - .writefn = hcr_writelow }, | ||
89 | + .writefn = hcr_writelow, .readfn = hcr_read }, | ||
90 | { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, | ||
91 | .type = ARM_CP_ALIAS, | ||
92 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, | ||
93 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | ||
94 | |||
95 | static const ARMCPRegInfo el2_v8_cp_reginfo[] = { | ||
96 | { .name = "HCR2", .state = ARM_CP_STATE_AA32, | ||
97 | - .type = ARM_CP_ALIAS, | ||
98 | + .type = ARM_CP_ALIAS | ARM_CP_IO, | ||
99 | .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, | ||
100 | .access = PL2_RW, | ||
101 | .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), | ||
102 | -- | 70 | -- |
103 | 2.19.1 | 71 | 2.34.1 |
104 | |||
105 | diff view generated by jsdifflib |
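A minimal sketch (hypothetical EL2 guest code, not from the patch) of the behaviour the HCR_EL2 VI/VF change above has to preserve: a virtual IRQ pended by writing VI (bit 7) must be visible on a subsequent read, even though QEMU now keeps that state in cs->interrupt_request rather than in cp15.hcr_el2.

    uint64_t hcr;
    asm volatile("mrs %0, hcr_el2" : "=r"(hcr));
    asm volatile("msr hcr_el2, %0" : : "r"(hcr | (1ULL << 7)));  /* pend vIRQ via HCR_EL2.VI  */
    asm volatile("mrs %0, hcr_el2" : "=r"(hcr));                 /* read must report VI == 1  */
    asm volatile("msr hcr_el2, %0" : : "r"(hcr & ~(1ULL << 7))); /* retract the virtual IRQ   */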
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Zhuojia Shen <chaosdefinition@hotmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Move shi_op and sli_op expanders from translate-a64.c. | 3 | Test execution of DC CVAP and DC CVADP instructions under user mode |
4 | emulation. | ||
4 | 5 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Zhuojia Shen <chaosdefinition@hotmail.com> |
6 | Message-id: 20181011205206.3552-15-richard.henderson@linaro.org | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 10 | --- |
10 | target/arm/translate.h | 2 + | 11 | tests/tcg/aarch64/dcpodp.c | 63 +++++++++++++++++++++++++++++++ |
11 | target/arm/translate-a64.c | 152 +---------------------- | 12 | tests/tcg/aarch64/dcpop.c | 63 +++++++++++++++++++++++++++++++ |
12 | target/arm/translate.c | 244 ++++++++++++++++++++++++++----------- | 13 | tests/tcg/aarch64/Makefile.target | 11 ++++++ |
13 | 3 files changed, 179 insertions(+), 219 deletions(-) | 14 | 3 files changed, 137 insertions(+) |
15 | create mode 100644 tests/tcg/aarch64/dcpodp.c | ||
16 | create mode 100644 tests/tcg/aarch64/dcpop.c | ||
14 | 17 | ||
15 | diff --git a/target/arm/translate.h b/target/arm/translate.h | 18 | diff --git a/tests/tcg/aarch64/dcpodp.c b/tests/tcg/aarch64/dcpodp.c |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | new file mode 100644 |
17 | --- a/target/arm/translate.h | 20 | index XXXXXXX..XXXXXXX |
18 | +++ b/target/arm/translate.h | 21 | --- /dev/null |
19 | @@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op; | 22 | +++ b/tests/tcg/aarch64/dcpodp.c |
20 | extern const GVecGen3 bif_op; | 23 | @@ -XXX,XX +XXX,XX @@ |
21 | extern const GVecGen2i ssra_op[4]; | 24 | +/* |
22 | extern const GVecGen2i usra_op[4]; | 25 | + * Test execution of DC CVADP instruction. |
23 | +extern const GVecGen2i sri_op[4]; | 26 | + * |
24 | +extern const GVecGen2i sli_op[4]; | 27 | + * Copyright (c) 2023 Zhuojia Shen <chaosdefinition@hotmail.com> |
25 | 28 | + * SPDX-License-Identifier: GPL-2.0-or-later | |
26 | /* | 29 | + */ |
27 | * Forward to the isar_feature_* tests given a DisasContext pointer. | 30 | + |
28 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | 31 | +#include <asm/hwcap.h> |
29 | index XXXXXXX..XXXXXXX 100644 | 32 | +#include <sys/auxv.h> |
30 | --- a/target/arm/translate-a64.c | 33 | + |
31 | +++ b/target/arm/translate-a64.c | 34 | +#include <signal.h> |
32 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) | 35 | +#include <stdbool.h> |
33 | } | 36 | +#include <stdio.h> |
34 | } | 37 | +#include <stdlib.h> |
35 | 38 | + | |
36 | -static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 39 | +#ifndef HWCAP2_DCPODP |
37 | -{ | 40 | +#define HWCAP2_DCPODP (1 << 0) |
38 | - uint64_t mask = dup_const(MO_8, 0xff >> shift); | 41 | +#endif |
39 | - TCGv_i64 t = tcg_temp_new_i64(); | 42 | + |
40 | - | 43 | +bool should_fail = false; |
41 | - tcg_gen_shri_i64(t, a, shift); | 44 | + |
42 | - tcg_gen_andi_i64(t, t, mask); | 45 | +static void signal_handler(int sig, siginfo_t *si, void *data) |
43 | - tcg_gen_andi_i64(d, d, ~mask); | ||
44 | - tcg_gen_or_i64(d, d, t); | ||
45 | - tcg_temp_free_i64(t); | ||
46 | -} | ||
47 | - | ||
48 | -static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
49 | -{ | ||
50 | - uint64_t mask = dup_const(MO_16, 0xffff >> shift); | ||
51 | - TCGv_i64 t = tcg_temp_new_i64(); | ||
52 | - | ||
53 | - tcg_gen_shri_i64(t, a, shift); | ||
54 | - tcg_gen_andi_i64(t, t, mask); | ||
55 | - tcg_gen_andi_i64(d, d, ~mask); | ||
56 | - tcg_gen_or_i64(d, d, t); | ||
57 | - tcg_temp_free_i64(t); | ||
58 | -} | ||
59 | - | ||
60 | -static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | ||
61 | -{ | ||
62 | - tcg_gen_shri_i32(a, a, shift); | ||
63 | - tcg_gen_deposit_i32(d, d, a, 0, 32 - shift); | ||
64 | -} | ||
65 | - | ||
66 | -static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
67 | -{ | ||
68 | - tcg_gen_shri_i64(a, a, shift); | ||
69 | - tcg_gen_deposit_i64(d, d, a, 0, 64 - shift); | ||
70 | -} | ||
71 | - | ||
72 | -static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
73 | -{ | ||
74 | - uint64_t mask = (2ull << ((8 << vece) - 1)) - 1; | ||
75 | - TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
76 | - TCGv_vec m = tcg_temp_new_vec_matching(d); | ||
77 | - | ||
78 | - tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh)); | ||
79 | - tcg_gen_shri_vec(vece, t, a, sh); | ||
80 | - tcg_gen_and_vec(vece, d, d, m); | ||
81 | - tcg_gen_or_vec(vece, d, d, t); | ||
82 | - | ||
83 | - tcg_temp_free_vec(t); | ||
84 | - tcg_temp_free_vec(m); | ||
85 | -} | ||
86 | - | ||
87 | /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ | ||
88 | static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, | ||
89 | int immh, int immb, int opcode, int rn, int rd) | ||
90 | { | ||
91 | - static const GVecGen2i sri_op[4] = { | ||
92 | - { .fni8 = gen_shr8_ins_i64, | ||
93 | - .fniv = gen_shr_ins_vec, | ||
94 | - .load_dest = true, | ||
95 | - .opc = INDEX_op_shri_vec, | ||
96 | - .vece = MO_8 }, | ||
97 | - { .fni8 = gen_shr16_ins_i64, | ||
98 | - .fniv = gen_shr_ins_vec, | ||
99 | - .load_dest = true, | ||
100 | - .opc = INDEX_op_shri_vec, | ||
101 | - .vece = MO_16 }, | ||
102 | - { .fni4 = gen_shr32_ins_i32, | ||
103 | - .fniv = gen_shr_ins_vec, | ||
104 | - .load_dest = true, | ||
105 | - .opc = INDEX_op_shri_vec, | ||
106 | - .vece = MO_32 }, | ||
107 | - { .fni8 = gen_shr64_ins_i64, | ||
108 | - .fniv = gen_shr_ins_vec, | ||
109 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
110 | - .load_dest = true, | ||
111 | - .opc = INDEX_op_shri_vec, | ||
112 | - .vece = MO_64 }, | ||
113 | - }; | ||
114 | - | ||
115 | int size = 32 - clz32(immh) - 1; | ||
116 | int immhb = immh << 3 | immb; | ||
117 | int shift = 2 * (8 << size) - immhb; | ||
118 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, | ||
119 | clear_vec_high(s, is_q, rd); | ||
120 | } | ||
121 | |||
122 | -static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
123 | -{ | ||
124 | - uint64_t mask = dup_const(MO_8, 0xff << shift); | ||
125 | - TCGv_i64 t = tcg_temp_new_i64(); | ||
126 | - | ||
127 | - tcg_gen_shli_i64(t, a, shift); | ||
128 | - tcg_gen_andi_i64(t, t, mask); | ||
129 | - tcg_gen_andi_i64(d, d, ~mask); | ||
130 | - tcg_gen_or_i64(d, d, t); | ||
131 | - tcg_temp_free_i64(t); | ||
132 | -} | ||
133 | - | ||
134 | -static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
135 | -{ | ||
136 | - uint64_t mask = dup_const(MO_16, 0xffff << shift); | ||
137 | - TCGv_i64 t = tcg_temp_new_i64(); | ||
138 | - | ||
139 | - tcg_gen_shli_i64(t, a, shift); | ||
140 | - tcg_gen_andi_i64(t, t, mask); | ||
141 | - tcg_gen_andi_i64(d, d, ~mask); | ||
142 | - tcg_gen_or_i64(d, d, t); | ||
143 | - tcg_temp_free_i64(t); | ||
144 | -} | ||
145 | - | ||
146 | -static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | ||
147 | -{ | ||
148 | - tcg_gen_deposit_i32(d, d, a, shift, 32 - shift); | ||
149 | -} | ||
150 | - | ||
151 | -static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
152 | -{ | ||
153 | - tcg_gen_deposit_i64(d, d, a, shift, 64 - shift); | ||
154 | -} | ||
155 | - | ||
156 | -static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
157 | -{ | ||
158 | - uint64_t mask = (1ull << sh) - 1; | ||
159 | - TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
160 | - TCGv_vec m = tcg_temp_new_vec_matching(d); | ||
161 | - | ||
162 | - tcg_gen_dupi_vec(vece, m, mask); | ||
163 | - tcg_gen_shli_vec(vece, t, a, sh); | ||
164 | - tcg_gen_and_vec(vece, d, d, m); | ||
165 | - tcg_gen_or_vec(vece, d, d, t); | ||
166 | - | ||
167 | - tcg_temp_free_vec(t); | ||
168 | - tcg_temp_free_vec(m); | ||
169 | -} | ||
170 | - | ||
171 | /* SHL/SLI - Vector shift left */ | ||
172 | static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, | ||
173 | int immh, int immb, int opcode, int rn, int rd) | ||
174 | { | ||
175 | - static const GVecGen2i shi_op[4] = { | ||
176 | - { .fni8 = gen_shl8_ins_i64, | ||
177 | - .fniv = gen_shl_ins_vec, | ||
178 | - .opc = INDEX_op_shli_vec, | ||
179 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
180 | - .load_dest = true, | ||
181 | - .vece = MO_8 }, | ||
182 | - { .fni8 = gen_shl16_ins_i64, | ||
183 | - .fniv = gen_shl_ins_vec, | ||
184 | - .opc = INDEX_op_shli_vec, | ||
185 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
186 | - .load_dest = true, | ||
187 | - .vece = MO_16 }, | ||
188 | - { .fni4 = gen_shl32_ins_i32, | ||
189 | - .fniv = gen_shl_ins_vec, | ||
190 | - .opc = INDEX_op_shli_vec, | ||
191 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
192 | - .load_dest = true, | ||
193 | - .vece = MO_32 }, | ||
194 | - { .fni8 = gen_shl64_ins_i64, | ||
195 | - .fniv = gen_shl_ins_vec, | ||
196 | - .opc = INDEX_op_shli_vec, | ||
197 | - .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
198 | - .load_dest = true, | ||
199 | - .vece = MO_64 }, | ||
200 | - }; | ||
201 | int size = 32 - clz32(immh) - 1; | ||
202 | int immhb = immh << 3 | immb; | ||
203 | int shift = immhb - (8 << size); | ||
204 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, | ||
205 | } | ||
206 | |||
207 | if (insert) { | ||
208 | - gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]); | ||
209 | + gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]); | ||
210 | } else { | ||
211 | gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); | ||
212 | } | ||
213 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
214 | index XXXXXXX..XXXXXXX 100644 | ||
215 | --- a/target/arm/translate.c | ||
216 | +++ b/target/arm/translate.c | ||
217 | @@ -XXX,XX +XXX,XX @@ const GVecGen2i usra_op[4] = { | ||
218 | .vece = MO_64, }, | ||
219 | }; | ||
220 | |||
221 | +static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
222 | +{ | 46 | +{ |
223 | + uint64_t mask = dup_const(MO_8, 0xff >> shift); | 47 | + ucontext_t *uc = (ucontext_t *)data; |
224 | + TCGv_i64 t = tcg_temp_new_i64(); | ||
225 | + | 48 | + |
226 | + tcg_gen_shri_i64(t, a, shift); | 49 | + if (should_fail) { |
227 | + tcg_gen_andi_i64(t, t, mask); | 50 | + uc->uc_mcontext.pc += 4; |
228 | + tcg_gen_andi_i64(d, d, ~mask); | ||
229 | + tcg_gen_or_i64(d, d, t); | ||
230 | + tcg_temp_free_i64(t); | ||
231 | +} | ||
232 | + | ||
233 | +static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
234 | +{ | ||
235 | + uint64_t mask = dup_const(MO_16, 0xffff >> shift); | ||
236 | + TCGv_i64 t = tcg_temp_new_i64(); | ||
237 | + | ||
238 | + tcg_gen_shri_i64(t, a, shift); | ||
239 | + tcg_gen_andi_i64(t, t, mask); | ||
240 | + tcg_gen_andi_i64(d, d, ~mask); | ||
241 | + tcg_gen_or_i64(d, d, t); | ||
242 | + tcg_temp_free_i64(t); | ||
243 | +} | ||
244 | + | ||
245 | +static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | ||
246 | +{ | ||
247 | + tcg_gen_shri_i32(a, a, shift); | ||
248 | + tcg_gen_deposit_i32(d, d, a, 0, 32 - shift); | ||
249 | +} | ||
250 | + | ||
251 | +static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | ||
252 | +{ | ||
253 | + tcg_gen_shri_i64(a, a, shift); | ||
254 | + tcg_gen_deposit_i64(d, d, a, 0, 64 - shift); | ||
255 | +} | ||
256 | + | ||
257 | +static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
258 | +{ | ||
259 | + if (sh == 0) { | ||
260 | + tcg_gen_mov_vec(d, a); | ||
261 | + } else { | 51 | + } else { |
262 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 52 | + exit(EXIT_FAILURE); |
263 | + TCGv_vec m = tcg_temp_new_vec_matching(d); | ||
264 | + | ||
265 | + tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh)); | ||
266 | + tcg_gen_shri_vec(vece, t, a, sh); | ||
267 | + tcg_gen_and_vec(vece, d, d, m); | ||
268 | + tcg_gen_or_vec(vece, d, d, t); | ||
269 | + | ||
270 | + tcg_temp_free_vec(t); | ||
271 | + tcg_temp_free_vec(m); | ||
272 | + } | 53 | + } |
273 | +} | 54 | +} |
274 | + | 55 | + |
275 | +const GVecGen2i sri_op[4] = { | 56 | +static int do_dc_cvadp(void) |
276 | + { .fni8 = gen_shr8_ins_i64, | 57 | +{ |
277 | + .fniv = gen_shr_ins_vec, | 58 | + struct sigaction sa = { |
278 | + .load_dest = true, | 59 | + .sa_flags = SA_SIGINFO, |
279 | + .opc = INDEX_op_shri_vec, | 60 | + .sa_sigaction = signal_handler, |
280 | + .vece = MO_8 }, | 61 | + }; |
281 | + { .fni8 = gen_shr16_ins_i64, | ||
282 | + .fniv = gen_shr_ins_vec, | ||
283 | + .load_dest = true, | ||
284 | + .opc = INDEX_op_shri_vec, | ||
285 | + .vece = MO_16 }, | ||
286 | + { .fni4 = gen_shr32_ins_i32, | ||
287 | + .fniv = gen_shr_ins_vec, | ||
288 | + .load_dest = true, | ||
289 | + .opc = INDEX_op_shri_vec, | ||
290 | + .vece = MO_32 }, | ||
291 | + { .fni8 = gen_shr64_ins_i64, | ||
292 | + .fniv = gen_shr_ins_vec, | ||
293 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
294 | + .load_dest = true, | ||
295 | + .opc = INDEX_op_shri_vec, | ||
296 | + .vece = MO_64 }, | ||
297 | +}; | ||
298 | + | 62 | + |
299 | +static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 63 | + sigemptyset(&sa.sa_mask); |
300 | +{ | 64 | + if (sigaction(SIGSEGV, &sa, NULL) < 0) { |
301 | + uint64_t mask = dup_const(MO_8, 0xff << shift); | 65 | + perror("sigaction"); |
302 | + TCGv_i64 t = tcg_temp_new_i64(); | 66 | + return EXIT_FAILURE; |
67 | + } | ||
303 | + | 68 | + |
304 | + tcg_gen_shli_i64(t, a, shift); | 69 | + asm volatile("dc cvadp, %0\n\t" :: "r"(&sa)); |
305 | + tcg_gen_andi_i64(t, t, mask); | 70 | + |
306 | + tcg_gen_andi_i64(d, d, ~mask); | 71 | + should_fail = true; |
307 | + tcg_gen_or_i64(d, d, t); | 72 | + asm volatile("dc cvadp, %0\n\t" :: "r"(NULL)); |
308 | + tcg_temp_free_i64(t); | 73 | + should_fail = false; |
74 | + | ||
75 | + return EXIT_SUCCESS; | ||
309 | +} | 76 | +} |
310 | + | 77 | + |
311 | +static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 78 | +int main(void) |
312 | +{ | 79 | +{ |
313 | + uint64_t mask = dup_const(MO_16, 0xffff << shift); | 80 | + if (getauxval(AT_HWCAP2) & HWCAP2_DCPODP) { |
314 | + TCGv_i64 t = tcg_temp_new_i64(); | 81 | + return do_dc_cvadp(); |
82 | + } else { | ||
83 | + printf("SKIP: no HWCAP2_DCPODP on this system\n"); | ||
84 | + return EXIT_SUCCESS; | ||
85 | + } | ||
86 | +} | ||
87 | diff --git a/tests/tcg/aarch64/dcpop.c b/tests/tcg/aarch64/dcpop.c | ||
88 | new file mode 100644 | ||
89 | index XXXXXXX..XXXXXXX | ||
90 | --- /dev/null | ||
91 | +++ b/tests/tcg/aarch64/dcpop.c | ||
92 | @@ -XXX,XX +XXX,XX @@ | ||
93 | +/* | ||
94 | + * Test execution of DC CVAP instruction. | ||
95 | + * | ||
96 | + * Copyright (c) 2023 Zhuojia Shen <chaosdefinition@hotmail.com> | ||
97 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
98 | + */ | ||
315 | + | 99 | + |
316 | + tcg_gen_shli_i64(t, a, shift); | 100 | +#include <asm/hwcap.h> |
317 | + tcg_gen_andi_i64(t, t, mask); | 101 | +#include <sys/auxv.h> |
318 | + tcg_gen_andi_i64(d, d, ~mask); | ||
319 | + tcg_gen_or_i64(d, d, t); | ||
320 | + tcg_temp_free_i64(t); | ||
321 | +} | ||
322 | + | 102 | + |
323 | +static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) | 103 | +#include <signal.h> |
104 | +#include <stdbool.h> | ||
105 | +#include <stdio.h> | ||
106 | +#include <stdlib.h> | ||
107 | + | ||
108 | +#ifndef HWCAP_DCPOP | ||
109 | +#define HWCAP_DCPOP (1 << 16) | ||
110 | +#endif | ||
111 | + | ||
112 | +bool should_fail = false; | ||
113 | + | ||
114 | +static void signal_handler(int sig, siginfo_t *si, void *data) | ||
324 | +{ | 115 | +{ |
325 | + tcg_gen_deposit_i32(d, d, a, shift, 32 - shift); | 116 | + ucontext_t *uc = (ucontext_t *)data; |
326 | +} | ||
327 | + | 117 | + |
328 | +static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) | 118 | + if (should_fail) { |
329 | +{ | 119 | + uc->uc_mcontext.pc += 4; |
330 | + tcg_gen_deposit_i64(d, d, a, shift, 64 - shift); | ||
331 | +} | ||
332 | + | ||
333 | +static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) | ||
334 | +{ | ||
335 | + if (sh == 0) { | ||
336 | + tcg_gen_mov_vec(d, a); | ||
337 | + } else { | 120 | + } else { |
338 | + TCGv_vec t = tcg_temp_new_vec_matching(d); | 121 | + exit(EXIT_FAILURE); |
339 | + TCGv_vec m = tcg_temp_new_vec_matching(d); | ||
340 | + | ||
341 | + tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh)); | ||
342 | + tcg_gen_shli_vec(vece, t, a, sh); | ||
343 | + tcg_gen_and_vec(vece, d, d, m); | ||
344 | + tcg_gen_or_vec(vece, d, d, t); | ||
345 | + | ||
346 | + tcg_temp_free_vec(t); | ||
347 | + tcg_temp_free_vec(m); | ||
348 | + } | 122 | + } |
349 | +} | 123 | +} |
350 | + | 124 | + |
351 | +const GVecGen2i sli_op[4] = { | 125 | +static int do_dc_cvap(void) |
352 | + { .fni8 = gen_shl8_ins_i64, | 126 | +{ |
353 | + .fniv = gen_shl_ins_vec, | 127 | + struct sigaction sa = { |
354 | + .load_dest = true, | 128 | + .sa_flags = SA_SIGINFO, |
355 | + .opc = INDEX_op_shli_vec, | 129 | + .sa_sigaction = signal_handler, |
356 | + .vece = MO_8 }, | 130 | + }; |
357 | + { .fni8 = gen_shl16_ins_i64, | ||
358 | + .fniv = gen_shl_ins_vec, | ||
359 | + .load_dest = true, | ||
360 | + .opc = INDEX_op_shli_vec, | ||
361 | + .vece = MO_16 }, | ||
362 | + { .fni4 = gen_shl32_ins_i32, | ||
363 | + .fniv = gen_shl_ins_vec, | ||
364 | + .load_dest = true, | ||
365 | + .opc = INDEX_op_shli_vec, | ||
366 | + .vece = MO_32 }, | ||
367 | + { .fni8 = gen_shl64_ins_i64, | ||
368 | + .fniv = gen_shl_ins_vec, | ||
369 | + .prefer_i64 = TCG_TARGET_REG_BITS == 64, | ||
370 | + .load_dest = true, | ||
371 | + .opc = INDEX_op_shli_vec, | ||
372 | + .vece = MO_64 }, | ||
373 | +}; | ||
374 | + | 131 | + |
375 | /* Translate a NEON data processing instruction. Return nonzero if the | 132 | + sigemptyset(&sa.sa_mask); |
376 | instruction is invalid. | 133 | + if (sigaction(SIGSEGV, &sa, NULL) < 0) { |
377 | We process data in a mixture of 32-bit and 64-bit chunks. | 134 | + perror("sigaction"); |
378 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 135 | + return EXIT_FAILURE; |
379 | int pairwise; | 136 | + } |
380 | int u; | ||
381 | int vec_size; | ||
382 | - uint32_t imm, mask; | ||
383 | + uint32_t imm; | ||
384 | TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; | ||
385 | TCGv_ptr ptr1, ptr2, ptr3; | ||
386 | TCGv_i64 tmp64; | ||
387 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
388 | } | ||
389 | return 0; | ||
390 | |||
391 | + case 4: /* VSRI */ | ||
392 | + if (!u) { | ||
393 | + return 1; | ||
394 | + } | ||
395 | + /* Right shift comes here negative. */ | ||
396 | + shift = -shift; | ||
397 | + /* Shift out of range leaves destination unchanged. */ | ||
398 | + if (shift < 8 << size) { | ||
399 | + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, | ||
400 | + shift, &sri_op[size]); | ||
401 | + } | ||
402 | + return 0; | ||
403 | + | 137 | + |
404 | case 5: /* VSHL, VSLI */ | 138 | + asm volatile("dc cvap, %0\n\t" :: "r"(&sa)); |
405 | - if (!u) { /* VSHL */ | 139 | + |
406 | + if (u) { /* VSLI */ | 140 | + should_fail = true; |
407 | + /* Shift out of range leaves destination unchanged. */ | 141 | + asm volatile("dc cvap, %0\n\t" :: "r"(NULL)); |
408 | + if (shift < 8 << size) { | 142 | + should_fail = false; |
409 | + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, | 143 | + |
410 | + vec_size, shift, &sli_op[size]); | 144 | + return EXIT_SUCCESS; |
411 | + } | 145 | +} |
412 | + } else { /* VSHL */ | 146 | + |
413 | /* Shifts larger than the element size are | 147 | +int main(void) |
414 | * architecturally valid and results in zero. | 148 | +{ |
415 | */ | 149 | + if (getauxval(AT_HWCAP) & HWCAP_DCPOP) { |
416 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 150 | + return do_dc_cvap(); |
417 | tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift, | 151 | + } else { |
418 | vec_size, vec_size); | 152 | + printf("SKIP: no HWCAP_DCPOP on this system\n"); |
419 | } | 153 | + return EXIT_SUCCESS; |
420 | - return 0; | 154 | + } |
421 | } | 155 | +} |
422 | - break; | 156 | diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target |
423 | + return 0; | 157 | index XXXXXXX..XXXXXXX 100644 |
424 | } | 158 | --- a/tests/tcg/aarch64/Makefile.target |
425 | 159 | +++ b/tests/tcg/aarch64/Makefile.target | |
426 | if (size == 3) { | 160 | @@ -XXX,XX +XXX,XX @@ config-cc.mak: Makefile |
427 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 161 | $(quiet-@)( \ |
428 | else | 162 | $(call cc-option,-march=armv8.1-a+sve, CROSS_CC_HAS_SVE); \ |
429 | gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); | 163 | $(call cc-option,-march=armv8.1-a+sve2, CROSS_CC_HAS_SVE2); \ |
430 | break; | 164 | + $(call cc-option,-march=armv8.2-a, CROSS_CC_HAS_ARMV8_2); \ |
431 | - case 4: /* VSRI */ | 165 | $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \ |
432 | - case 5: /* VSHL, VSLI */ | 166 | + $(call cc-option,-march=armv8.5-a, CROSS_CC_HAS_ARMV8_5); \ |
433 | - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); | 167 | $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \ |
434 | - break; | 168 | $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE); \ |
435 | case 6: /* VQSHLU */ | 169 | $(call cc-option,-march=armv9-a+sme, CROSS_CC_HAS_ARMV9_SME)) 3> config-cc.mak |
436 | gen_helper_neon_qshlu_s64(cpu_V0, cpu_env, | 170 | -include config-cc.mak |
437 | cpu_V0, cpu_V1); | 171 | |
438 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | 172 | +ifneq ($(CROSS_CC_HAS_ARMV8_2),) |
439 | /* Accumulate. */ | 173 | +AARCH64_TESTS += dcpop |
440 | neon_load_reg64(cpu_V1, rd + pass); | 174 | +dcpop: CFLAGS += -march=armv8.2-a |
441 | tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1); | 175 | +endif |
442 | - } else if (op == 4 || (op == 5 && u)) { | 176 | +ifneq ($(CROSS_CC_HAS_ARMV8_5),) |
443 | - /* Insert */ | 177 | +AARCH64_TESTS += dcpodp |
444 | - neon_load_reg64(cpu_V1, rd + pass); | 178 | +dcpodp: CFLAGS += -march=armv8.5-a |
445 | - uint64_t mask; | 179 | +endif |
446 | - if (shift < -63 || shift > 63) { | 180 | + |
447 | - mask = 0; | 181 | # Pauth Tests |
448 | - } else { | 182 | ifneq ($(CROSS_CC_HAS_ARMV8_3),) |
449 | - if (op == 4) { | 183 | AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5 |
450 | - mask = 0xffffffffffffffffull >> -shift; | ||
451 | - } else { | ||
452 | - mask = 0xffffffffffffffffull << shift; | ||
453 | - } | ||
454 | - } | ||
455 | - tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask); | ||
456 | - tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); | ||
457 | } | ||
458 | neon_store_reg64(cpu_V0, rd + pass); | ||
459 | } else { /* size < 3 */ | ||
460 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
461 | case 3: /* VRSRA */ | ||
462 | GEN_NEON_INTEGER_OP(rshl); | ||
463 | break; | ||
464 | - case 4: /* VSRI */ | ||
465 | - case 5: /* VSHL, VSLI */ | ||
466 | - switch (size) { | ||
467 | - case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; | ||
468 | - case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; | ||
469 | - case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; | ||
470 | - default: abort(); | ||
471 | - } | ||
472 | - break; | ||
473 | case 6: /* VQSHLU */ | ||
474 | switch (size) { | ||
475 | case 0: | ||
476 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
477 | tmp2 = neon_load_reg(rd, pass); | ||
478 | gen_neon_add(size, tmp, tmp2); | ||
479 | tcg_temp_free_i32(tmp2); | ||
480 | - } else if (op == 4 || (op == 5 && u)) { | ||
481 | - /* Insert */ | ||
482 | - switch (size) { | ||
483 | - case 0: | ||
484 | - if (op == 4) | ||
485 | - mask = 0xff >> -shift; | ||
486 | - else | ||
487 | - mask = (uint8_t)(0xff << shift); | ||
488 | - mask |= mask << 8; | ||
489 | - mask |= mask << 16; | ||
490 | - break; | ||
491 | - case 1: | ||
492 | - if (op == 4) | ||
493 | - mask = 0xffff >> -shift; | ||
494 | - else | ||
495 | - mask = (uint16_t)(0xffff << shift); | ||
496 | - mask |= mask << 16; | ||
497 | - break; | ||
498 | - case 2: | ||
499 | - if (shift < -31 || shift > 31) { | ||
500 | - mask = 0; | ||
501 | - } else { | ||
502 | - if (op == 4) | ||
503 | - mask = 0xffffffffu >> -shift; | ||
504 | - else | ||
505 | - mask = 0xffffffffu << shift; | ||
506 | - } | ||
507 | - break; | ||
508 | - default: | ||
509 | - abort(); | ||
510 | - } | ||
511 | - tmp2 = neon_load_reg(rd, pass); | ||
512 | - tcg_gen_andi_i32(tmp, tmp, mask); | ||
513 | - tcg_gen_andi_i32(tmp2, tmp2, ~mask); | ||
514 | - tcg_gen_or_i32(tmp, tmp, tmp2); | ||
515 | - tcg_temp_free_i32(tmp2); | ||
516 | } | ||
517 | neon_store_reg(rd, pass, tmp); | ||
518 | } | ||
519 | -- | 184 | -- |
520 | 2.19.1 | 185 | 2.34.1 |
521 | |||
522 | diff view generated by jsdifflib |
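For reference, a scalar per-lane sketch of the insert semantics the sri_op/sli_op expanders above implement (8-bit lane shown; the function names are illustrative, not from the patch):

    static uint8_t vsri8(uint8_t d, uint8_t m, unsigned sh)   /* shift-right insert */
    {
        uint8_t mask = 0xff >> sh;                 /* bits that receive the shifted value */
        return (d & ~mask) | ((m >> sh) & mask);   /* top sh bits of d are preserved      */
    }

    static uint8_t vsli8(uint8_t d, uint8_t m, unsigned sh)   /* shift-left insert */
    {
        uint8_t mask = 0xff << sh;
        return (d & ~mask) | ((m << sh) & mask);   /* low sh bits of d are preserved */
    }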
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Zhuojia Shen <chaosdefinition@hotmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Instantiating mps2-an505 (cortex-m33) will fail make check when | 3 | Accessing EL0-accessible Debug Communication Channel (DCC) registers in |
4 | V7VE asserts that ID_ISAR0.Divide includes ARM division. It is | 4 | user mode emulation is currently enabled. However, it does not match |
5 | also wrong to include ARM_FEATURE_LPAE. | 5 | Linux behavior as Linux sets MDSCR_EL1.TDCC on startup to disable EL0 |
6 | access to DCC (see __cpu_setup() in arch/arm64/mm/proc.S). | ||
6 | 7 | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | This patch fixes access_tdcc() to check MDSCR_EL1.TDCC for EL0 and sets |
8 | Message-id: 20181016223115.24100-3-richard.henderson@linaro.org | 9 | MDSCR_EL1.TDCC for user mode emulation to match Linux. |
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 10 | |
11 | Signed-off-by: Zhuojia Shen <chaosdefinition@hotmail.com> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Message-id: DS7PR12MB630905198DD8E69F6817544CAC4EA@DS7PR12MB6309.namprd12.prod.outlook.com | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | --- | 15 | --- |
12 | target/arm/cpu.c | 6 +++++- | 16 | target/arm/cpu.c | 2 ++ |
13 | 1 file changed, 5 insertions(+), 1 deletion(-) | 17 | target/arm/debug_helper.c | 5 +++++ |
18 | 2 files changed, 7 insertions(+) | ||
14 | 19 | ||
15 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | 20 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c |
16 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/cpu.c | 22 | --- a/target/arm/cpu.c |
18 | +++ b/target/arm/cpu.c | 23 | +++ b/target/arm/cpu.c |
19 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | 24 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset_hold(Object *obj) |
20 | 25 | * This is not yet exposed from the Linux kernel in any way. | |
21 | /* Some features automatically imply others: */ | 26 | */ |
22 | if (arm_feature(env, ARM_FEATURE_V8)) { | 27 | env->cp15.sctlr_el[1] |= SCTLR_TSCXT; |
23 | - set_feature(env, ARM_FEATURE_V7VE); | 28 | + /* Disable access to Debug Communication Channel (DCC). */ |
24 | + if (arm_feature(env, ARM_FEATURE_M)) { | 29 | + env->cp15.mdscr_el1 |= 1 << 12; |
25 | + set_feature(env, ARM_FEATURE_V7); | 30 | #else |
26 | + } else { | 31 | /* Reset into the highest available EL */ |
27 | + set_feature(env, ARM_FEATURE_V7VE); | 32 | if (arm_feature(env, ARM_FEATURE_EL3)) { |
28 | + } | 33 | diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c |
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/target/arm/debug_helper.c | ||
36 | +++ b/target/arm/debug_helper.c | ||
37 | @@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, | ||
38 | * is implemented then these are controlled by MDCR_EL2.TDCC for | ||
39 | * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by | ||
40 | * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA. | ||
41 | + * For EL0, they are also controlled by MDSCR_EL1.TDCC. | ||
42 | */ | ||
43 | static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri, | ||
44 | bool isread) | ||
45 | { | ||
46 | int el = arm_current_el(env); | ||
47 | uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); | ||
48 | + bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1); | ||
49 | bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || | ||
50 | (arm_hcr_el2_eff(env) & HCR_TGE); | ||
51 | bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && | ||
52 | @@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri, | ||
53 | bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && | ||
54 | (env->cp15.mdcr_el3 & MDCR_TDCC); | ||
55 | |||
56 | + if (el < 1 && mdscr_el1_tdcc) { | ||
57 | + return CP_ACCESS_TRAP; | ||
58 | + } | ||
59 | if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) { | ||
60 | return CP_ACCESS_TRAP_EL2; | ||
29 | } | 61 | } |
30 | if (arm_feature(env, ARM_FEATURE_V7VE)) { | ||
31 | /* v7 Virtualization Extensions. In real hardware this implies | ||
32 | -- | 62 | -- |
33 | 2.19.1 | 63 | 2.34.1 |
34 | |||
35 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | For AArch32, exception return happens through certain kinds | ||
2 | of CPSR write. We don't currently have any CPU_LOG_INT logging | ||
3 | of these events (unlike AArch64, where we log in the ERET | ||
4 | instruction). Add some suitable logging. | ||
5 | 1 | ||
6 | This will log exception returns like this: | ||
7 | Exception return from AArch32 hyp to usr PC 0x80100374 | ||
8 | |||
9 | paralleling the existing logging in the exception_return | ||
10 | helper for AArch64 exception returns: | ||
11 | Exception return from AArch64 EL2 to AArch64 EL0 PC 0x8003045c | ||
12 | Exception return from AArch64 EL2 to AArch32 EL0 PC 0x8003045c | ||
13 | |||
14 | (Note that an AArch32 exception return can only be | ||
15 | AArch32->AArch32, never to AArch64.) | ||
16 | |||
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
18 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | Message-id: 20181012144235.19646-2-peter.maydell@linaro.org | ||
20 | --- | ||
21 | target/arm/internals.h | 18 ++++++++++++++++++ | ||
22 | target/arm/helper.c | 10 ++++++++++ | ||
23 | target/arm/translate.c | 7 +------ | ||
24 | 3 files changed, 29 insertions(+), 6 deletions(-) | ||
25 | |||
26 | diff --git a/target/arm/internals.h b/target/arm/internals.h | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/target/arm/internals.h | ||
29 | +++ b/target/arm/internals.h | ||
30 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t v7m_sp_limit(CPUARMState *env) | ||
31 | } | ||
32 | } | ||
33 | |||
34 | +/** | ||
35 | + * aarch32_mode_name(): Return name of the AArch32 CPU mode | ||
36 | + * @psr: Program Status Register indicating CPU mode | ||
37 | + * | ||
38 | + * Returns, for debug logging purposes, a printable representation | ||
39 | + * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by | ||
40 | + * the low bits of the specified PSR. | ||
41 | + */ | ||
42 | +static inline const char *aarch32_mode_name(uint32_t psr) | ||
43 | +{ | ||
44 | + static const char cpu_mode_names[16][4] = { | ||
45 | + "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", | ||
46 | + "???", "???", "hyp", "und", "???", "???", "???", "sys" | ||
47 | + }; | ||
48 | + | ||
49 | + return cpu_mode_names[psr & 0xf]; | ||
50 | +} | ||
51 | + | ||
52 | #endif | ||
53 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/target/arm/helper.c | ||
56 | +++ b/target/arm/helper.c | ||
57 | @@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, | ||
58 | mask |= CPSR_IL; | ||
59 | val |= CPSR_IL; | ||
60 | } | ||
61 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
62 | + "Illegal AArch32 mode switch attempt from %s to %s\n", | ||
63 | + aarch32_mode_name(env->uncached_cpsr), | ||
64 | + aarch32_mode_name(val)); | ||
65 | } else { | ||
66 | + qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", | ||
67 | + write_type == CPSRWriteExceptionReturn ? | ||
68 | + "Exception return from AArch32" : | ||
69 | + "AArch32 mode switch from", | ||
70 | + aarch32_mode_name(env->uncached_cpsr), | ||
71 | + aarch32_mode_name(val), env->regs[15]); | ||
72 | switch_mode(env, val & CPSR_M); | ||
73 | } | ||
74 | } | ||
75 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | ||
77 | --- a/target/arm/translate.c | ||
78 | +++ b/target/arm/translate.c | ||
79 | @@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb) | ||
80 | translator_loop(ops, &dc.base, cpu, tb); | ||
81 | } | ||
82 | |||
83 | -static const char *cpu_mode_names[16] = { | ||
84 | - "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", | ||
85 | - "???", "???", "hyp", "und", "???", "???", "???", "sys" | ||
86 | -}; | ||
87 | - | ||
88 | void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, | ||
89 | int flags) | ||
90 | { | ||
91 | @@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, | ||
92 | psr & CPSR_V ? 'V' : '-', | ||
93 | psr & CPSR_T ? 'T' : 'A', | ||
94 | ns_status, | ||
95 | - cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); | ||
96 | + aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26); | ||
97 | } | ||
98 | |||
99 | if (flags & CPU_DUMP_FPU) { | ||
100 | -- | ||
101 | 2.19.1 | ||
102 | |||
103 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | If the HCR_EL2 PTW virtualization configuration register bit | ||
2 | is set, then this means that a stage 2 Permission fault must | ||
3 | be generated if a stage 1 translation table access is made | ||
4 | to an address that is mapped as Device memory in stage 2. | ||
5 | Implement this. | ||
6 | 1 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Message-id: 20181012144235.19646-8-peter.maydell@linaro.org | ||
10 | --- | ||
11 | target/arm/helper.c | 21 ++++++++++++++++++++- | ||
12 | 1 file changed, 20 insertions(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/helper.c | ||
17 | +++ b/target/arm/helper.c | ||
18 | @@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, | ||
19 | hwaddr s2pa; | ||
20 | int s2prot; | ||
21 | int ret; | ||
22 | + ARMCacheAttrs cacheattrs = {}; | ||
23 | + ARMCacheAttrs *pcacheattrs = NULL; | ||
24 | + | ||
25 | + if (env->cp15.hcr_el2 & HCR_PTW) { | ||
26 | + /* | ||
27 | + * PTW means we must fault if this S1 walk touches S2 Device | ||
28 | + * memory; otherwise we don't care about the attributes and can | ||
29 | + * save the S2 translation the effort of computing them. | ||
30 | + */ | ||
31 | + pcacheattrs = &cacheattrs; | ||
32 | + } | ||
33 | |||
34 | ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, | ||
35 | - &txattrs, &s2prot, &s2size, fi, NULL); | ||
36 | + &txattrs, &s2prot, &s2size, fi, pcacheattrs); | ||
37 | if (ret) { | ||
38 | assert(fi->type != ARMFault_None); | ||
39 | fi->s2addr = addr; | ||
40 | @@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, | ||
41 | fi->s1ptw = true; | ||
42 | return ~0; | ||
43 | } | ||
44 | + if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { | ||
45 | + /* Access was to Device memory: generate Permission fault */ | ||
46 | + fi->type = ARMFault_Permission; | ||
47 | + fi->s2addr = addr; | ||
48 | + fi->stage2 = true; | ||
49 | + fi->s1ptw = true; | ||
50 | + return ~0; | ||
51 | + } | ||
52 | addr = s2pa; | ||
53 | } | ||
54 | return addr; | ||
55 | -- | ||
56 | 2.19.1 | ||
57 | |||
58 | diff view generated by jsdifflib |
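A one-line sketch of the attribute test the HCR_PTW patch above relies on: in the MAIR-style attrs that get_phys_addr_lpae() reports through ARMCacheAttrs, a zero high nibble marks Device memory, which is what the new (pcacheattrs->attrs & 0xf0) == 0 check detects. The helper name is illustrative, not from the patch.

    static bool s2_walk_hit_device_memory(uint8_t attrs)
    {
        return (attrs & 0xf0) == 0;   /* MAIR-format attrs: high nibble 0 => Device memory */
    }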
Deleted patch | |||
---|---|---|---|
1 | Create and use a utility function to extract the EC field | ||
2 | from a syndrome, rather than open-coding the shift. | ||
3 | 1 | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20181012144235.19646-9-peter.maydell@linaro.org | ||
7 | --- | ||
8 | target/arm/internals.h | 5 +++++ | ||
9 | target/arm/helper.c | 4 ++-- | ||
10 | target/arm/kvm64.c | 2 +- | ||
11 | target/arm/op_helper.c | 2 +- | ||
12 | 4 files changed, 9 insertions(+), 4 deletions(-) | ||
13 | |||
14 | diff --git a/target/arm/internals.h b/target/arm/internals.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/internals.h | ||
17 | +++ b/target/arm/internals.h | ||
18 | @@ -XXX,XX +XXX,XX @@ enum arm_exception_class { | ||
19 | #define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) | ||
20 | #define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) | ||
21 | |||
22 | +static inline uint32_t syn_get_ec(uint32_t syn) | ||
23 | +{ | ||
24 | + return syn >> ARM_EL_EC_SHIFT; | ||
25 | +} | ||
26 | + | ||
27 | /* Utility functions for constructing various kinds of syndrome value. | ||
28 | * Note that in general we follow the AArch64 syndrome values; in a | ||
29 | * few cases the value in HSR for exceptions taken to AArch32 Hyp | ||
30 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/target/arm/helper.c | ||
33 | +++ b/target/arm/helper.c | ||
34 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) | ||
35 | uint32_t moe; | ||
36 | |||
37 | /* If this is a debug exception we must update the DBGDSCR.MOE bits */ | ||
38 | - switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { | ||
39 | + switch (syn_get_ec(env->exception.syndrome)) { | ||
40 | case EC_BREAKPOINT: | ||
41 | case EC_BREAKPOINT_SAME_EL: | ||
42 | moe = 1; | ||
43 | @@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs) | ||
44 | if (qemu_loglevel_mask(CPU_LOG_INT) | ||
45 | && !excp_is_internal(cs->exception_index)) { | ||
46 | qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", | ||
47 | - env->exception.syndrome >> ARM_EL_EC_SHIFT, | ||
48 | + syn_get_ec(env->exception.syndrome), | ||
49 | env->exception.syndrome); | ||
50 | } | ||
51 | |||
52 | diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/target/arm/kvm64.c | ||
55 | +++ b/target/arm/kvm64.c | ||
56 | @@ -XXX,XX +XXX,XX @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) | ||
57 | |||
58 | bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) | ||
59 | { | ||
60 | - int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT; | ||
61 | + int hsr_ec = syn_get_ec(debug_exit->hsr); | ||
62 | ARMCPU *cpu = ARM_CPU(cs); | ||
63 | CPUClass *cc = CPU_GET_CLASS(cs); | ||
64 | CPUARMState *env = &cpu->env; | ||
65 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c | ||
66 | index XXXXXXX..XXXXXXX 100644 | ||
67 | --- a/target/arm/op_helper.c | ||
68 | +++ b/target/arm/op_helper.c | ||
69 | @@ -XXX,XX +XXX,XX @@ void raise_exception(CPUARMState *env, uint32_t excp, | ||
70 | * (see DDI0478C.a D1.10.4) | ||
71 | */ | ||
72 | target_el = 2; | ||
73 | - if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) { | ||
74 | + if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) { | ||
75 | syndrome = syn_uncategorized(); | ||
76 | } | ||
77 | } | ||
78 | -- | ||
79 | 2.19.1 | ||
80 | |||
81 | diff view generated by jsdifflib |