1 | Hi; here's the latest round of arm patches. I have also included | 1 | Hi; here's this week's arm pullreq. Mostly this is my |
---|---|---|---|
2 | my patchset for the RTC devices to avoid keeping time_t and | 2 | work on FEAT_MOPS and FEAT_HBC, but there are some |
3 | time_t diffs in 32-bit variables. | 3 | other bits and pieces in there too, including a recent |
4 | set of elf2dmp patches. | ||
4 | 5 | ||
5 | thanks | 6 | thanks |
6 | -- PMM | 7 | -- PMM |
7 | 8 | ||
8 | The following changes since commit 156618d9ea67f2f2e31d9dedd97f2dcccbe6808c: | 9 | The following changes since commit 55394dcbec8f0c29c30e792c102a0edd50a52bf4: |
9 | 10 | ||
10 | Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging (2023-08-30 09:20:27 -0400) | 11 | Merge tag 'pull-loongarch-20230920' of https://gitlab.com/gaosong/qemu into staging (2023-09-20 13:56:18 -0400) |
11 | 12 | ||
12 | are available in the Git repository at: | 13 | are available in the Git repository at: |
13 | 14 | ||
14 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230831 | 15 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230921 |
15 | 16 | ||
16 | for you to fetch changes up to e73b8bb8a3e9a162f70e9ffbf922d4fafc96bbfb: | 17 | for you to fetch changes up to 231f6a7d66254a58bedbee458591b780e0a507b1: |
17 | 18 | ||
18 | hw/arm: Set number of MPU regions correctly for an505, an521, an524 (2023-08-31 11:07:02 +0100) | 19 | elf2dmp: rework PDB_STREAM_INDEXES::segments obtaining (2023-09-21 16:13:54 +0100) |
19 | 20 | ||
20 | ---------------------------------------------------------------- | 21 | ---------------------------------------------------------------- |
21 | target-arm queue: | 22 | target-arm queue: |
22 | * Some of the preliminary patches for Cortex-A710 support | 23 | * target/m68k: Add URL to semihosting spec |
23 | * i.MX7 and i.MX6UL refactoring | 24 | * docs/devel/loads-stores: Fix git grep regexes |
24 | * Implement SRC device for i.MX7 | 25 | * hw/arm/boot: Set SCR_EL3.FGTEn when booting kernel |
25 | * Catch illegal-exception-return from EL3 with bad NSE/NS | 26 | * linux-user: Correct SME feature names reported in cpuinfo |
26 | * Use 64-bit offsets for holding time_t differences in RTC devices | 27 | * linux-user: Add missing arm32 hwcaps |
27 | * Model correct number of MPU regions for an505, an521, an524 boards | 28 | * Don't skip MTE checks for LDRT/STRT at EL0 |
29 | * Implement FEAT_HBC | ||
30 | * Implement FEAT_MOPS | ||
31 | * audio/jackaudio: Avoid dynamic stack allocation | ||
32 | * sbsa-ref: add non-secure EL2 virtual timer | ||
33 | * elf2dmp: improve Win2022, Win11 and large dumps | ||
28 | 34 | ||
29 | ---------------------------------------------------------------- | 35 | ---------------------------------------------------------------- |
30 | Alex Bennée (1): | 36 | Fabian Vogt (1): |
31 | target/arm: properly document FEAT_CRC32 | 37 | hw/arm/boot: Set SCR_EL3.FGTEn when booting kernel |
32 | 38 | ||
33 | Jean-Christophe Dubois (6): | 39 | Marcin Juszkiewicz (1): |
34 | Remove i.MX7 IOMUX GPR device from i.MX6UL | 40 | sbsa-ref: add non-secure EL2 virtual timer |
35 | Refactor i.MX6UL processor code | ||
36 | Add i.MX6UL missing devices. | ||
37 | Refactor i.MX7 processor code | ||
38 | Add i.MX7 missing TZ devices and memory regions | ||
39 | Add i.MX7 SRC device implementation | ||
40 | 41 | ||
41 | Peter Maydell (8): | 42 | Peter Maydell (23): |
42 | target/arm: Catch illegal-exception-return from EL3 with bad NSE/NS | 43 | target/m68k: Add URL to semihosting spec |
43 | hw/rtc/m48t59: Use 64-bit arithmetic in set_alarm() | 44 | docs/devel/loads-stores: Fix git grep regexes |
44 | hw/rtc/twl92230: Use int64_t for sec_offset and alm_sec | 45 | linux-user/elfload.c: Correct SME feature names reported in cpuinfo |
45 | hw/rtc/aspeed_rtc: Use 64-bit offset for holding time_t difference | 46 | linux-user/elfload.c: Add missing arm and arm64 hwcap values |
46 | rtc: Use time_t for passing and returning time offsets | 47 | linux-user/elfload.c: Report previously missing arm32 hwcaps |
47 | target/arm: Do all "ARM_FEATURE_X implies Y" checks in post_init | 48 | target/arm: Update AArch64 ID register field definitions |
48 | hw/arm/armv7m: Add mpu-ns-regions and mpu-s-regions properties | 49 | target/arm: Update user-mode ID reg mask values |
49 | hw/arm: Set number of MPU regions correctly for an505, an521, an524 | 50 | target/arm: Implement FEAT_HBC |
51 | target/arm: Remove unused allocation_tag_mem() argument | ||
52 | target/arm: Don't skip MTE checks for LDRT/STRT at EL0 | ||
53 | target/arm: Implement FEAT_MOPS enable bits | ||
54 | target/arm: Pass unpriv bool to get_a64_user_mem_index() | ||
55 | target/arm: Define syndrome function for MOPS exceptions | ||
56 | target/arm: New function allocation_tag_mem_probe() | ||
57 | target/arm: Implement MTE tag-checking functions for FEAT_MOPS | ||
58 | target/arm: Implement the SET* instructions | ||
59 | target/arm: Define new TB flag for ATA0 | ||
60 | target/arm: Implement the SETG* instructions | ||
61 | target/arm: Implement MTE tag-checking functions for FEAT_MOPS copies | ||
62 | target/arm: Implement the CPY* instructions | ||
63 | target/arm: Enable FEAT_MOPS for CPU 'max' | ||
64 | audio/jackaudio: Avoid dynamic stack allocation in qjack_client_init | ||
65 | audio/jackaudio: Avoid dynamic stack allocation in qjack_process() | ||
50 | 66 | ||
51 | Richard Henderson (9): | 67 | Viktor Prutyanov (5): |
52 | target/arm: Reduce dcz_blocksize to uint8_t | 68 | elf2dmp: replace PE export name check with PDB name check |
53 | target/arm: Allow cpu to configure GM blocksize | 69 | elf2dmp: introduce physical block alignment |
54 | target/arm: Support more GM blocksizes | 70 | elf2dmp: introduce merging of physical memory runs |
55 | target/arm: When tag memory is not present, set MTE=1 | 71 | elf2dmp: use Linux mmap with MAP_NORESERVE when possible |
56 | target/arm: Introduce make_ccsidr64 | 72 | elf2dmp: rework PDB_STREAM_INDEXES::segments obtaining |
57 | target/arm: Apply access checks to neoverse-n1 special registers | ||
58 | target/arm: Apply access checks to neoverse-v1 special registers | ||
59 | target/arm: Suppress FEAT_TRBE (Trace Buffer Extension) | ||
60 | target/arm: Implement FEAT_HPDS2 as a no-op | ||
61 | 73 | ||
74 | docs/devel/loads-stores.rst | 40 +- | ||
62 | docs/system/arm/emulation.rst | 2 + | 75 | docs/system/arm/emulation.rst | 2 + |
63 | include/hw/arm/armsse.h | 5 + | 76 | contrib/elf2dmp/addrspace.h | 1 + |
64 | include/hw/arm/armv7m.h | 8 + | 77 | contrib/elf2dmp/pdb.h | 2 +- |
65 | include/hw/arm/fsl-imx6ul.h | 158 ++++++++++++++++--- | 78 | contrib/elf2dmp/qemu_elf.h | 2 + |
66 | include/hw/arm/fsl-imx7.h | 338 ++++++++++++++++++++++++++++++----------- | 79 | target/arm/cpu.h | 35 ++ |
67 | include/hw/misc/imx7_src.h | 66 ++++++++ | 80 | target/arm/internals.h | 55 +++ |
68 | include/hw/rtc/aspeed_rtc.h | 2 +- | 81 | target/arm/syndrome.h | 12 + |
69 | include/sysemu/rtc.h | 4 +- | 82 | target/arm/tcg/helper-a64.h | 14 + |
70 | target/arm/cpregs.h | 2 + | 83 | target/arm/tcg/translate.h | 4 +- |
71 | target/arm/cpu.h | 5 +- | 84 | target/arm/tcg/a64.decode | 38 +- |
72 | target/arm/internals.h | 6 - | 85 | audio/jackaudio.c | 21 +- |
73 | target/arm/tcg/translate.h | 2 + | 86 | contrib/elf2dmp/addrspace.c | 31 +- |
74 | hw/arm/armsse.c | 16 ++ | 87 | contrib/elf2dmp/main.c | 154 ++++---- |
75 | hw/arm/armv7m.c | 21 +++ | 88 | contrib/elf2dmp/pdb.c | 15 +- |
76 | hw/arm/fsl-imx6ul.c | 174 +++++++++++++-------- | 89 | contrib/elf2dmp/qemu_elf.c | 68 +++- |
77 | hw/arm/fsl-imx7.c | 201 +++++++++++++++++++----- | 90 | hw/arm/boot.c | 4 + |
78 | hw/arm/mps2-tz.c | 29 ++++ | 91 | hw/arm/sbsa-ref.c | 2 + |
79 | hw/misc/imx7_src.c | 276 +++++++++++++++++++++++++++++++++ | 92 | linux-user/elfload.c | 72 +++- |
80 | hw/rtc/aspeed_rtc.c | 5 +- | 93 | target/arm/helper.c | 39 +- |
81 | hw/rtc/m48t59.c | 2 +- | 94 | target/arm/tcg/cpu64.c | 5 + |
82 | hw/rtc/twl92230.c | 4 +- | 95 | target/arm/tcg/helper-a64.c | 878 +++++++++++++++++++++++++++++++++++++++++ |
83 | softmmu/rtc.c | 4 +- | 96 | target/arm/tcg/hflags.c | 21 + |
84 | target/arm/cpu.c | 207 ++++++++++++++----------- | 97 | target/arm/tcg/mte_helper.c | 281 +++++++++++-- |
85 | target/arm/helper.c | 15 +- | 98 | target/arm/tcg/translate-a64.c | 164 +++++++- |
86 | target/arm/tcg/cpu32.c | 2 +- | 99 | target/m68k/m68k-semi.c | 4 + |
87 | target/arm/tcg/cpu64.c | 102 +++++++++---- | 100 | tests/tcg/aarch64/sysregs.c | 4 +- |
88 | target/arm/tcg/helper-a64.c | 9 ++ | 101 | 27 files changed, 1768 insertions(+), 200 deletions(-) |
89 | target/arm/tcg/mte_helper.c | 90 ++++++++--- | ||
90 | target/arm/tcg/translate-a64.c | 5 +- | ||
91 | hw/misc/meson.build | 1 + | ||
92 | hw/misc/trace-events | 4 + | ||
93 | 31 files changed, 1393 insertions(+), 372 deletions(-) | ||
94 | create mode 100644 include/hw/misc/imx7_src.h | ||
95 | create mode 100644 hw/misc/imx7_src.c | ||
96 | ||
1 | The IoTKit, SSE200 and SSE300 all default to 8 MPU regions. The | 1 | The spec for m68k semihosting is documented in the libgloss |
---|---|---|---|
2 | MPS2/MPS3 FPGA images don't override these except in the case of | 2 | sources. Add a comment with the URL for it, as we already |
3 | AN547, which uses 16 MPU regions. | 3 | have for nios2 semihosting. |
4 | 4 | ||
5 | Define properties on the ARMSSE object for the MPU regions (using the | ||
6 | same names as the documented RTL configuration settings, and | ||
7 | following the pattern we already have for this device of using | ||
8 | all-caps names as the RTL does), and set them in the board code. | ||
9 | |||
10 | We don't actually need to override the default except on AN547, | ||
11 | but it's simpler code to have the board code set them always | ||
12 | rather than tracking which board subtypes want to set them to | ||
13 | a non-default value separately from what that value is. | ||
14 | |||
15 | The overall effect is that for mps2-an505, mps2-an521 and mps3-an524 | ||
16 | we now correctly use 8 MPU regions, while mps3-an547 stays at its | ||
17 | current 16 regions. | ||
18 | |||
19 | It's possible some guest code wrongly depended on the previous | ||
20 | incorrectly modeled number of memory regions. (Such guest code | ||
21 | should ideally check the number of regions via the MPU_TYPE | ||
22 | register.) The old behaviour can be obtained with additional | ||
23 | -global arguments to QEMU: | ||
24 | |||
25 | For mps2-an521 and mps3-an524: | ||
26 | -global sse-200.CPU0_MPU_NS=16 -global sse-200.CPU0_MPU_S=16 -global sse-200.CPU1_MPU_NS=16 -global sse-200.CPU1_MPU_S=16 | ||
27 | |||
28 | For mps2-an505: | ||
29 | -global sse-200.CPU0_MPU_NS=16 -global sse-200.CPU0_MPU_S=16 | ||
30 | |||
31 | NB that the way the implementation allows this use of -global | ||
32 | is slightly fragile: if the board code explicitly sets the | ||
33 | properties on the sse-200 object, this overrides the -global | ||
34 | command line option. So we rely on: | ||
35 | - the boards that need fixing all happen to use the SSE defaults | ||
36 | - we can write the board code to only set the property if it | ||
37 | is different from the default, rather than having all boards | ||
38 | explicitly set the property | ||
39 | - the board that does need to use a non-default value happens | ||
40 | to need to set it to the same value (16) we previously used | ||
41 | This works, but there are some kinds of refactoring of the | ||
42 | mps2-tz.c code that would break the support for -global here. | ||
43 | |||
44 | Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1772 | ||
45 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
46 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
47 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
48 | Message-id: 20230724174335.2150499-4-peter.maydell@linaro.org | 9 | Message-id: 20230801154451.3505492-1-peter.maydell@linaro.org |
49 | --- | 10 | --- |
50 | include/hw/arm/armsse.h | 5 +++++ | 11 | target/m68k/m68k-semi.c | 4 ++++ |
51 | hw/arm/armsse.c | 16 ++++++++++++++++ | 12 | 1 file changed, 4 insertions(+) |
52 | hw/arm/mps2-tz.c | 29 +++++++++++++++++++++++++++++ | ||
53 | 3 files changed, 50 insertions(+) | ||
54 | 13 | ||
55 | diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h | 14 | diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c |
56 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
57 | --- a/include/hw/arm/armsse.h | 16 | --- a/target/m68k/m68k-semi.c |
58 | +++ b/include/hw/arm/armsse.h | 17 | +++ b/target/m68k/m68k-semi.c |
59 | @@ -XXX,XX +XXX,XX @@ | 18 | @@ -XXX,XX +XXX,XX @@ |
60 | * (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an | 19 | * |
61 | * SSE-200 both are present; CPU0 in an SSE-200 has neither. | 20 | * You should have received a copy of the GNU General Public License |
62 | * Since the IoTKit has only one CPU, it does not have the CPU1_* properties. | 21 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
63 | + * + QOM properties "CPU0_MPU_NS", "CPU0_MPU_S", "CPU1_MPU_NS" and "CPU1_MPU_S" | 22 | + * |
64 | + * which set the number of MPU regions on the CPUs. If there is only one | 23 | + * The semihosting protocol implemented here is described in the |
65 | + * CPU the CPU1 properties are not present. | 24 | + * libgloss sources: |
66 | * + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0, | 25 | + * https://sourceware.org/git/?p=newlib-cygwin.git;a=blob;f=libgloss/m68k/m68k-semi.txt;hb=HEAD |
67 | * which are wired to its NVIC lines 32 .. n+32 | 26 | */ |
68 | * + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for | 27 | |
69 | @@ -XXX,XX +XXX,XX @@ struct ARMSSE { | 28 | #include "qemu/osdep.h" |
70 | uint32_t exp_numirq; | ||
71 | uint32_t sram_addr_width; | ||
72 | uint32_t init_svtor; | ||
73 | + uint32_t cpu_mpu_ns[SSE_MAX_CPUS]; | ||
74 | + uint32_t cpu_mpu_s[SSE_MAX_CPUS]; | ||
75 | bool cpu_fpu[SSE_MAX_CPUS]; | ||
76 | bool cpu_dsp[SSE_MAX_CPUS]; | ||
77 | }; | ||
78 | diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/hw/arm/armsse.c | ||
81 | +++ b/hw/arm/armsse.c | ||
82 | @@ -XXX,XX +XXX,XX @@ static Property iotkit_properties[] = { | ||
83 | DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000), | ||
84 | DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true), | ||
85 | DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), | ||
86 | + DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8), | ||
87 | + DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), | ||
88 | DEFINE_PROP_END_OF_LIST() | ||
89 | }; | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ static Property sse200_properties[] = { | ||
92 | DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false), | ||
93 | DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true), | ||
94 | DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true), | ||
95 | + DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8), | ||
96 | + DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), | ||
97 | + DEFINE_PROP_UINT32("CPU1_MPU_NS", ARMSSE, cpu_mpu_ns[1], 8), | ||
98 | + DEFINE_PROP_UINT32("CPU1_MPU_S", ARMSSE, cpu_mpu_s[1], 8), | ||
99 | DEFINE_PROP_END_OF_LIST() | ||
100 | }; | ||
101 | |||
102 | @@ -XXX,XX +XXX,XX @@ static Property sse300_properties[] = { | ||
103 | DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000), | ||
104 | DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true), | ||
105 | DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), | ||
106 | + DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8), | ||
107 | + DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), | ||
108 | DEFINE_PROP_END_OF_LIST() | ||
109 | }; | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp) | ||
112 | return; | ||
113 | } | ||
114 | } | ||
115 | + if (!object_property_set_uint(cpuobj, "mpu-ns-regions", | ||
116 | + s->cpu_mpu_ns[i], errp)) { | ||
117 | + return; | ||
118 | + } | ||
119 | + if (!object_property_set_uint(cpuobj, "mpu-s-regions", | ||
120 | + s->cpu_mpu_s[i], errp)) { | ||
121 | + return; | ||
122 | + } | ||
123 | |||
124 | if (i > 0) { | ||
125 | memory_region_add_subregion_overlap(&s->cpu_container[i], 0, | ||
126 | diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c | ||
127 | index XXXXXXX..XXXXXXX 100644 | ||
128 | --- a/hw/arm/mps2-tz.c | ||
129 | +++ b/hw/arm/mps2-tz.c | ||
130 | @@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass { | ||
131 | int uart_overflow_irq; /* number of the combined UART overflow IRQ */ | ||
132 | uint32_t init_svtor; /* init-svtor setting for SSE */ | ||
133 | uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */ | ||
134 | + uint32_t cpu0_mpu_ns; /* CPU0_MPU_NS setting for SSE */ | ||
135 | + uint32_t cpu0_mpu_s; /* CPU0_MPU_S setting for SSE */ | ||
136 | + uint32_t cpu1_mpu_ns; /* CPU1_MPU_NS setting for SSE */ | ||
137 | + uint32_t cpu1_mpu_s; /* CPU1_MPU_S setting for SSE */ | ||
138 | const RAMInfo *raminfo; | ||
139 | const char *armsse_type; | ||
140 | uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */ | ||
141 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_TYPE(MPS2TZMachineState, MPS2TZMachineClass, MPS2TZ_MACHINE) | ||
142 | #define MPS3_DDR_SIZE (2 * GiB) | ||
143 | #endif | ||
144 | |||
145 | +/* For cpu{0,1}_mpu_{ns,s}, means "leave at SSE's default value" */ | ||
146 | +#define MPU_REGION_DEFAULT UINT32_MAX | ||
147 | + | ||
148 | static const uint32_t an505_oscclk[] = { | ||
149 | 40000000, | ||
150 | 24580000, | ||
151 | @@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine) | ||
152 | OBJECT(system_memory), &error_abort); | ||
153 | qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq); | ||
154 | qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor); | ||
155 | + if (mmc->cpu0_mpu_ns != MPU_REGION_DEFAULT) { | ||
156 | + qdev_prop_set_uint32(iotkitdev, "CPU0_MPU_NS", mmc->cpu0_mpu_ns); | ||
157 | + } | ||
158 | + if (mmc->cpu0_mpu_s != MPU_REGION_DEFAULT) { | ||
159 | + qdev_prop_set_uint32(iotkitdev, "CPU0_MPU_S", mmc->cpu0_mpu_s); | ||
160 | + } | ||
161 | + if (object_property_find(OBJECT(iotkitdev), "CPU1_MPU_NS")) { | ||
162 | + if (mmc->cpu1_mpu_ns != MPU_REGION_DEFAULT) { | ||
163 | + qdev_prop_set_uint32(iotkitdev, "CPU1_MPU_NS", mmc->cpu1_mpu_ns); | ||
164 | + } | ||
165 | + if (mmc->cpu1_mpu_s != MPU_REGION_DEFAULT) { | ||
166 | + qdev_prop_set_uint32(iotkitdev, "CPU1_MPU_S", mmc->cpu1_mpu_s); | ||
167 | + } | ||
168 | + } | ||
169 | qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width); | ||
170 | qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk); | ||
171 | qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk); | ||
172 | @@ -XXX,XX +XXX,XX @@ static void mps2tz_class_init(ObjectClass *oc, void *data) | ||
173 | { | ||
174 | MachineClass *mc = MACHINE_CLASS(oc); | ||
175 | IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(oc); | ||
176 | + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); | ||
177 | |||
178 | mc->init = mps2tz_common_init; | ||
179 | mc->reset = mps2_machine_reset; | ||
180 | iic->check = mps2_tz_idau_check; | ||
181 | + | ||
182 | + /* Most machines leave these at the SSE defaults */ | ||
183 | + mmc->cpu0_mpu_ns = MPU_REGION_DEFAULT; | ||
184 | + mmc->cpu0_mpu_s = MPU_REGION_DEFAULT; | ||
185 | + mmc->cpu1_mpu_ns = MPU_REGION_DEFAULT; | ||
186 | + mmc->cpu1_mpu_s = MPU_REGION_DEFAULT; | ||
187 | } | ||
188 | |||
189 | static void mps2tz_set_default_ram_info(MPS2TZMachineClass *mmc) | ||
190 | @@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data) | ||
191 | mmc->numirq = 96; | ||
192 | mmc->uart_overflow_irq = 48; | ||
193 | mmc->init_svtor = 0x00000000; | ||
194 | + mmc->cpu0_mpu_s = mmc->cpu0_mpu_ns = 16; | ||
195 | mmc->sram_addr_width = 21; | ||
196 | mmc->raminfo = an547_raminfo; | ||
197 | mmc->armsse_type = TYPE_SSE300; | ||
198 | -- | 29 | -- |
199 | 2.34.1 | 30 | 2.34.1 |
200 | 31 | ||
201 | 32 | ||
1 | In the twl92230 device, use int64_t for the two state fields | 1 | The loads-and-stores documentation includes git grep regexes to find |
---|---|---|---|
2 | sec_offset and alm_sec, because we set these to values that | 2 | occurrences of the various functions. Some of these regexes have |
3 | are either time_t or differences between two time_t values. | 3 | errors, typically failing to escape the '?', '(' and ')' when they |
4 | should be metacharacters (since these are POSIX basic REs). We also | ||
5 | weren't consistent about whether to have a ':' on the end of the | ||
6 | line introducing the list of regexes in each section. | ||
4 | 7 | ||
5 | These fields aren't saved in vmstate anywhere, so we can | 8 | Fix the errors. |
6 | safely widen them. | 9 | |
10 | The following shell rune will complain about any REs in the | ||
11 | file which don't have any matches in the codebase: | ||
12 | for re in $(sed -ne 's/ - ``\(\\<.*\)``/\1/p' docs/devel/loads-stores.rst); do git grep -q "$re" || echo "no matches for re $re"; done | ||
7 | 13 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
16 | Message-id: 20230904161703.3996734-1-peter.maydell@linaro.org | ||
10 | --- | 17 | --- |
11 | hw/rtc/twl92230.c | 4 ++-- | 18 | docs/devel/loads-stores.rst | 40 ++++++++++++++++++------------------- |
12 | 1 file changed, 2 insertions(+), 2 deletions(-) | 19 | 1 file changed, 20 insertions(+), 20 deletions(-) |
13 | 20 | ||
14 | diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c | 21 | diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst |
15 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/rtc/twl92230.c | 23 | --- a/docs/devel/loads-stores.rst |
17 | +++ b/hw/rtc/twl92230.c | 24 | +++ b/docs/devel/loads-stores.rst |
18 | @@ -XXX,XX +XXX,XX @@ struct MenelausState { | 25 | @@ -XXX,XX +XXX,XX @@ which stores ``val`` to ``ptr`` as an ``{endian}`` order value |
19 | struct tm tm; | 26 | of size ``sz`` bytes. |
20 | struct tm new; | 27 | |
21 | struct tm alm; | 28 | |
22 | - int sec_offset; | 29 | -Regexes for git grep |
23 | - int alm_sec; | 30 | +Regexes for git grep: |
24 | + int64_t sec_offset; | 31 | - ``\<ld[us]\?[bwlq]\(_[hbl]e\)\?_p\>`` |
25 | + int64_t alm_sec; | 32 | - ``\<st[bwlq]\(_[hbl]e\)\?_p\>`` |
26 | int next_comp; | 33 | - ``\<st24\(_[hbl]e\)\?_p\>`` |
27 | } rtc; | 34 | - - ``\<ldn_\([hbl]e\)?_p\>`` |
28 | uint16_t rtc_next_vmstate; | 35 | - - ``\<stn_\([hbl]e\)?_p\>`` |
36 | + - ``\<ldn_\([hbl]e\)\?_p\>`` | ||
37 | + - ``\<stn_\([hbl]e\)\?_p\>`` | ||
38 | |||
39 | ``cpu_{ld,st}*_mmu`` | ||
40 | ~~~~~~~~~~~~~~~~~~~~ | ||
41 | @@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)`` | ||
42 | - ``_le`` : little endian | ||
43 | |||
44 | Regexes for git grep: | ||
45 | - - ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>`` | ||
46 | - - ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>`` | ||
47 | + - ``\<cpu_ld[bwlq]\(_[bl]e\)\?_mmu\>`` | ||
48 | + - ``\<cpu_st[bwlq]\(_[bl]e\)\?_mmu\>`` | ||
49 | |||
50 | |||
51 | ``cpu_{ld,st}*_mmuidx_ra`` | ||
52 | @@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)`` | ||
53 | - ``_le`` : little endian | ||
54 | |||
55 | Regexes for git grep: | ||
56 | - - ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_mmuidx_ra\>`` | ||
57 | - - ``\<cpu_st[bwlq](_[bl]e)\?_mmuidx_ra\>`` | ||
58 | + - ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_mmuidx_ra\>`` | ||
59 | + - ``\<cpu_st[bwlq]\(_[bl]e\)\?_mmuidx_ra\>`` | ||
60 | |||
61 | ``cpu_{ld,st}*_data_ra`` | ||
62 | ~~~~~~~~~~~~~~~~~~~~~~~~ | ||
63 | @@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}{end}_data_ra(env, ptr, val, ra)`` | ||
64 | - ``_le`` : little endian | ||
65 | |||
66 | Regexes for git grep: | ||
67 | - - ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_data_ra\>`` | ||
68 | - - ``\<cpu_st[bwlq](_[bl]e)\?_data_ra\>`` | ||
69 | + - ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_data_ra\>`` | ||
70 | + - ``\<cpu_st[bwlq]\(_[bl]e\)\?_data_ra\>`` | ||
71 | |||
72 | ``cpu_{ld,st}*_data`` | ||
73 | ~~~~~~~~~~~~~~~~~~~~~ | ||
74 | @@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}{end}_data(env, ptr, val)`` | ||
75 | - ``_be`` : big endian | ||
76 | - ``_le`` : little endian | ||
77 | |||
78 | -Regexes for git grep | ||
79 | - - ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_data\>`` | ||
80 | - - ``\<cpu_st[bwlq](_[bl]e)\?_data\+\>`` | ||
81 | +Regexes for git grep: | ||
82 | + - ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_data\>`` | ||
83 | + - ``\<cpu_st[bwlq]\(_[bl]e\)\?_data\+\>`` | ||
84 | |||
85 | ``cpu_ld*_code`` | ||
86 | ~~~~~~~~~~~~~~~~ | ||
87 | @@ -XXX,XX +XXX,XX @@ swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)`` | ||
88 | - ``l`` : 32 bits | ||
89 | - ``q`` : 64 bits | ||
90 | |||
91 | -Regexes for git grep | ||
92 | +Regexes for git grep: | ||
93 | - ``\<translator_ld[us]\?[bwlq]\(_swap\)\?\>`` | ||
94 | |||
95 | ``helper_{ld,st}*_mmu`` | ||
96 | @@ -XXX,XX +XXX,XX @@ store: ``helper_{size}_mmu(env, addr, val, opindex, retaddr)`` | ||
97 | - ``l`` : 32 bits | ||
98 | - ``q`` : 64 bits | ||
99 | |||
100 | -Regexes for git grep | ||
101 | +Regexes for git grep: | ||
102 | - ``\<helper_ld[us]\?[bwlq]_mmu\>`` | ||
103 | - ``\<helper_st[bwlq]_mmu\>`` | ||
104 | |||
105 | @@ -XXX,XX +XXX,XX @@ succeeded using a MemTxResult return code. | ||
106 | |||
107 | The ``_{endian}`` suffix is omitted for byte accesses. | ||
108 | |||
109 | -Regexes for git grep | ||
110 | +Regexes for git grep: | ||
111 | - ``\<address_space_\(read\|write\|rw\)\>`` | ||
112 | - ``\<address_space_ldu\?[bwql]\(_[lb]e\)\?\>`` | ||
113 | - ``\<address_space_st[bwql]\(_[lb]e\)\?\>`` | ||
114 | @@ -XXX,XX +XXX,XX @@ Note that portions of the write which attempt to write data to a | ||
115 | device will be silently ignored -- only real RAM and ROM will | ||
116 | be written to. | ||
117 | |||
118 | -Regexes for git grep | ||
119 | +Regexes for git grep: | ||
120 | - ``address_space_write_rom`` | ||
121 | |||
122 | ``{ld,st}*_phys`` | ||
123 | @@ -XXX,XX +XXX,XX @@ device doing the access has no way to report such an error. | ||
124 | |||
125 | The ``_{endian}_`` infix is omitted for byte accesses. | ||
126 | |||
127 | -Regexes for git grep | ||
128 | +Regexes for git grep: | ||
129 | - ``\<ldu\?[bwlq]\(_[bl]e\)\?_phys\>`` | ||
130 | - ``\<st[bwlq]\(_[bl]e\)\?_phys\>`` | ||
131 | |||
132 | @@ -XXX,XX +XXX,XX @@ For new code they are better avoided: | ||
133 | |||
134 | ``cpu_physical_memory_rw`` | ||
135 | |||
136 | -Regexes for git grep | ||
137 | +Regexes for git grep: | ||
138 | - ``\<cpu_physical_memory_\(read\|write\|rw\)\>`` | ||
139 | |||
140 | ``cpu_memory_rw_debug`` | ||
141 | @@ -XXX,XX +XXX,XX @@ make sure our existing code is doing things correctly. | ||
142 | |||
143 | ``dma_memory_rw`` | ||
144 | |||
145 | -Regexes for git grep | ||
146 | +Regexes for git grep: | ||
147 | - ``\<dma_memory_\(read\|write\|rw\)\>`` | ||
148 | - ``\<ldu\?[bwlq]\(_[bl]e\)\?_dma\>`` | ||
149 | - ``\<st[bwlq]\(_[bl]e\)\?_dma\>`` | ||
150 | @@ -XXX,XX +XXX,XX @@ correct address space for that device. | ||
151 | |||
152 | The ``_{endian}_`` infix is omitted for byte accesses. | ||
153 | |||
154 | -Regexes for git grep | ||
155 | +Regexes for git grep: | ||
156 | - ``\<pci_dma_\(read\|write\|rw\)\>`` | ||
157 | - ``\<ldu\?[bwlq]\(_[bl]e\)\?_pci_dma\>`` | ||
158 | - ``\<st[bwlq]\(_[bl]e\)\?_pci_dma\>`` | ||
29 | -- | 159 | -- |
30 | 2.34.1 | 160 | 2.34.1 |
31 | 161 | ||
32 | 162 | ||
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | From: Fabian Vogt <fvogt@suse.de> |
---|---|---|---|
2 | 2 | ||
3 | * Add Addr and size definition for all i.MX7 devices in i.MX7 header file. | 3 | Just like d7ef5e16a17c sets SCR_EL3.HXEn for FEAT_HCX, this commit |
4 | * Use those newly defined named constants whenever possible. | 4 | handles SCR_EL3.FGTEn for FEAT_FGT: |
5 | * Standardize the way we init a family of unimplemented devices | ||
6 | - SAI | ||
7 | - PWM | ||
8 | - CAN | ||
9 | * Add/rework few comments | ||
10 | 5 | ||
11 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | 6 | When we direct boot a kernel on a CPU which emulates EL3, we need to |
12 | Message-id: 59e195d33e4d486a8d131392acd46633c8c10ed7.1692964892.git.jcd@tribudubois.net | 7 | set up the EL3 system registers as the Linux kernel documentation |
8 | specifies: | ||
9 | https://www.kernel.org/doc/Documentation/arm64/booting.rst | ||
10 | |||
11 | > For CPUs with the Fine Grained Traps (FEAT_FGT) extension present: | ||
12 | > - If EL3 is present and the kernel is entered at EL2: | ||
13 | > - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1. | ||
14 | |||
15 | Cc: qemu-stable@nongnu.org | ||
16 | Signed-off-by: Fabian Vogt <fvogt@suse.de> | ||
17 | Message-id: 4831384.GXAFRqVoOG@linux-e202.suse.de | ||
13 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 18 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 19 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 20 | --- |
16 | include/hw/arm/fsl-imx7.h | 330 ++++++++++++++++++++++++++++---------- | 21 | hw/arm/boot.c | 4 ++++ |
17 | hw/arm/fsl-imx7.c | 130 ++++++++++----- | 22 | 1 file changed, 4 insertions(+) |
18 | 2 files changed, 335 insertions(+), 125 deletions(-) | ||
19 | 23 | ||
20 | diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h | 24 | diff --git a/hw/arm/boot.c b/hw/arm/boot.c |
21 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/hw/arm/fsl-imx7.h | 26 | --- a/hw/arm/boot.c |
23 | +++ b/include/hw/arm/fsl-imx7.h | 27 | +++ b/hw/arm/boot.c |
24 | @@ -XXX,XX +XXX,XX @@ | 28 | @@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque) |
25 | #include "hw/misc/imx7_ccm.h" | 29 | if (cpu_isar_feature(aa64_hcx, cpu)) { |
26 | #include "hw/misc/imx7_snvs.h" | 30 | env->cp15.scr_el3 |= SCR_HXEN; |
27 | #include "hw/misc/imx7_gpr.h" | 31 | } |
28 | -#include "hw/misc/imx6_src.h" | 32 | + if (cpu_isar_feature(aa64_fgt, cpu)) { |
29 | #include "hw/watchdog/wdt_imx2.h" | 33 | + env->cp15.scr_el3 |= SCR_FGTEN; |
30 | #include "hw/gpio/imx_gpio.h" | 34 | + } |
31 | #include "hw/char/imx_serial.h" | ||
32 | @@ -XXX,XX +XXX,XX @@ | ||
33 | #include "hw/usb/chipidea.h" | ||
34 | #include "cpu.h" | ||
35 | #include "qom/object.h" | ||
36 | +#include "qemu/units.h" | ||
37 | |||
38 | #define TYPE_FSL_IMX7 "fsl-imx7" | ||
39 | OBJECT_DECLARE_SIMPLE_TYPE(FslIMX7State, FSL_IMX7) | ||
40 | @@ -XXX,XX +XXX,XX @@ enum FslIMX7Configuration { | ||
41 | FSL_IMX7_NUM_ECSPIS = 4, | ||
42 | FSL_IMX7_NUM_USBS = 3, | ||
43 | FSL_IMX7_NUM_ADCS = 2, | ||
44 | + FSL_IMX7_NUM_SAIS = 3, | ||
45 | + FSL_IMX7_NUM_CANS = 2, | ||
46 | + FSL_IMX7_NUM_PWMS = 4, | ||
47 | }; | ||
48 | |||
49 | struct FslIMX7State { | ||
50 | @@ -XXX,XX +XXX,XX @@ struct FslIMX7State { | ||
51 | |||
52 | enum FslIMX7MemoryMap { | ||
53 | FSL_IMX7_MMDC_ADDR = 0x80000000, | ||
54 | - FSL_IMX7_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL, | ||
55 | + FSL_IMX7_MMDC_SIZE = (2 * GiB), | ||
56 | |||
57 | - FSL_IMX7_GPIO1_ADDR = 0x30200000, | ||
58 | - FSL_IMX7_GPIO2_ADDR = 0x30210000, | ||
59 | - FSL_IMX7_GPIO3_ADDR = 0x30220000, | ||
60 | - FSL_IMX7_GPIO4_ADDR = 0x30230000, | ||
61 | - FSL_IMX7_GPIO5_ADDR = 0x30240000, | ||
62 | - FSL_IMX7_GPIO6_ADDR = 0x30250000, | ||
63 | - FSL_IMX7_GPIO7_ADDR = 0x30260000, | ||
64 | + FSL_IMX7_QSPI1_MEM_ADDR = 0x60000000, | ||
65 | + FSL_IMX7_QSPI1_MEM_SIZE = (256 * MiB), | ||
66 | |||
67 | - FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000, | ||
68 | + FSL_IMX7_PCIE1_MEM_ADDR = 0x40000000, | ||
69 | + FSL_IMX7_PCIE1_MEM_SIZE = (256 * MiB), | ||
70 | |||
71 | - FSL_IMX7_WDOG1_ADDR = 0x30280000, | ||
72 | - FSL_IMX7_WDOG2_ADDR = 0x30290000, | ||
73 | - FSL_IMX7_WDOG3_ADDR = 0x302A0000, | ||
74 | - FSL_IMX7_WDOG4_ADDR = 0x302B0000, | ||
75 | + FSL_IMX7_QSPI1_RX_BUF_ADDR = 0x34000000, | ||
76 | + FSL_IMX7_QSPI1_RX_BUF_SIZE = (32 * MiB), | ||
77 | |||
78 | - FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000, | ||
79 | + /* PCIe Peripherals */ | ||
80 | + FSL_IMX7_PCIE_REG_ADDR = 0x33800000, | ||
81 | |||
82 | - FSL_IMX7_GPT1_ADDR = 0x302D0000, | ||
83 | - FSL_IMX7_GPT2_ADDR = 0x302E0000, | ||
84 | - FSL_IMX7_GPT3_ADDR = 0x302F0000, | ||
85 | - FSL_IMX7_GPT4_ADDR = 0x30300000, | ||
86 | + /* MMAP Peripherals */ | ||
87 | + FSL_IMX7_DMA_APBH_ADDR = 0x33000000, | ||
88 | + FSL_IMX7_DMA_APBH_SIZE = 0x8000, | ||
89 | |||
90 | - FSL_IMX7_IOMUXC_ADDR = 0x30330000, | ||
91 | - FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000, | ||
92 | - FSL_IMX7_IOMUXCn_SIZE = 0x1000, | ||
93 | + /* GPV configuration */ | ||
94 | + FSL_IMX7_GPV6_ADDR = 0x32600000, | ||
95 | + FSL_IMX7_GPV5_ADDR = 0x32500000, | ||
96 | + FSL_IMX7_GPV4_ADDR = 0x32400000, | ||
97 | + FSL_IMX7_GPV3_ADDR = 0x32300000, | ||
98 | + FSL_IMX7_GPV2_ADDR = 0x32200000, | ||
99 | + FSL_IMX7_GPV1_ADDR = 0x32100000, | ||
100 | + FSL_IMX7_GPV0_ADDR = 0x32000000, | ||
101 | + FSL_IMX7_GPVn_SIZE = (1 * MiB), | ||
102 | |||
103 | - FSL_IMX7_OCOTP_ADDR = 0x30350000, | ||
104 | - FSL_IMX7_OCOTP_SIZE = 0x10000, | ||
105 | + /* Arm Peripherals */ | ||
106 | + FSL_IMX7_A7MPCORE_ADDR = 0x31000000, | ||
107 | |||
108 | - FSL_IMX7_ANALOG_ADDR = 0x30360000, | ||
109 | - FSL_IMX7_SNVS_ADDR = 0x30370000, | ||
110 | - FSL_IMX7_CCM_ADDR = 0x30380000, | ||
111 | + /* AIPS-3 Begin */ | ||
112 | |||
113 | - FSL_IMX7_SRC_ADDR = 0x30390000, | ||
114 | - FSL_IMX7_SRC_SIZE = 0x1000, | ||
115 | + FSL_IMX7_ENET2_ADDR = 0x30BF0000, | ||
116 | + FSL_IMX7_ENET1_ADDR = 0x30BE0000, | ||
117 | |||
118 | - FSL_IMX7_ADC1_ADDR = 0x30610000, | ||
119 | - FSL_IMX7_ADC2_ADDR = 0x30620000, | ||
120 | - FSL_IMX7_ADCn_SIZE = 0x1000, | ||
121 | + FSL_IMX7_SDMA_ADDR = 0x30BD0000, | ||
122 | + FSL_IMX7_SDMA_SIZE = (4 * KiB), | ||
123 | |||
124 | - FSL_IMX7_PWM1_ADDR = 0x30660000, | ||
125 | - FSL_IMX7_PWM2_ADDR = 0x30670000, | ||
126 | - FSL_IMX7_PWM3_ADDR = 0x30680000, | ||
127 | - FSL_IMX7_PWM4_ADDR = 0x30690000, | ||
128 | - FSL_IMX7_PWMn_SIZE = 0x10000, | ||
129 | + FSL_IMX7_EIM_ADDR = 0x30BC0000, | ||
130 | + FSL_IMX7_EIM_SIZE = (4 * KiB), | ||
131 | |||
132 | - FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000, | ||
133 | - FSL_IMX7_PCIE_PHY_SIZE = 0x10000, | ||
134 | + FSL_IMX7_QSPI_ADDR = 0x30BB0000, | ||
135 | + FSL_IMX7_QSPI_SIZE = 0x8000, | ||
136 | |||
137 | - FSL_IMX7_GPC_ADDR = 0x303A0000, | ||
138 | + FSL_IMX7_SIM2_ADDR = 0x30BA0000, | ||
139 | + FSL_IMX7_SIM1_ADDR = 0x30B90000, | ||
140 | + FSL_IMX7_SIMn_SIZE = (4 * KiB), | ||
141 | + | 35 | + |
142 | + FSL_IMX7_USDHC3_ADDR = 0x30B60000, | 36 | /* AArch64 kernels never boot in secure mode */ |
143 | + FSL_IMX7_USDHC2_ADDR = 0x30B50000, | 37 | assert(!info->secure_boot); |
144 | + FSL_IMX7_USDHC1_ADDR = 0x30B40000, | 38 | /* This hook is only supported for AArch32 currently: |
145 | + | ||
146 | + FSL_IMX7_USB3_ADDR = 0x30B30000, | ||
147 | + FSL_IMX7_USBMISC3_ADDR = 0x30B30200, | ||
148 | + FSL_IMX7_USB2_ADDR = 0x30B20000, | ||
149 | + FSL_IMX7_USBMISC2_ADDR = 0x30B20200, | ||
150 | + FSL_IMX7_USB1_ADDR = 0x30B10000, | ||
151 | + FSL_IMX7_USBMISC1_ADDR = 0x30B10200, | ||
152 | + FSL_IMX7_USBMISCn_SIZE = 0x200, | ||
153 | + | ||
154 | + FSL_IMX7_USB_PL301_ADDR = 0x30AD0000, | ||
155 | + FSL_IMX7_USB_PL301_SIZE = (64 * KiB), | ||
156 | + | ||
157 | + FSL_IMX7_SEMAPHORE_HS_ADDR = 0x30AC0000, | ||
158 | + FSL_IMX7_SEMAPHORE_HS_SIZE = (64 * KiB), | ||
159 | + | ||
160 | + FSL_IMX7_MUB_ADDR = 0x30AB0000, | ||
161 | + FSL_IMX7_MUA_ADDR = 0x30AA0000, | ||
162 | + FSL_IMX7_MUn_SIZE = (KiB), | ||
163 | + | ||
164 | + FSL_IMX7_UART7_ADDR = 0x30A90000, | ||
165 | + FSL_IMX7_UART6_ADDR = 0x30A80000, | ||
166 | + FSL_IMX7_UART5_ADDR = 0x30A70000, | ||
167 | + FSL_IMX7_UART4_ADDR = 0x30A60000, | ||
168 | + | ||
169 | + FSL_IMX7_I2C4_ADDR = 0x30A50000, | ||
170 | + FSL_IMX7_I2C3_ADDR = 0x30A40000, | ||
171 | + FSL_IMX7_I2C2_ADDR = 0x30A30000, | ||
172 | + FSL_IMX7_I2C1_ADDR = 0x30A20000, | ||
173 | + | ||
174 | + FSL_IMX7_CAN2_ADDR = 0x30A10000, | ||
175 | + FSL_IMX7_CAN1_ADDR = 0x30A00000, | ||
176 | + FSL_IMX7_CANn_SIZE = (4 * KiB), | ||
177 | + | ||
178 | + FSL_IMX7_AIPS3_CONF_ADDR = 0x309F0000, | ||
179 | + FSL_IMX7_AIPS3_CONF_SIZE = (64 * KiB), | ||
180 | |||
181 | FSL_IMX7_CAAM_ADDR = 0x30900000, | ||
182 | - FSL_IMX7_CAAM_SIZE = 0x40000, | ||
183 | + FSL_IMX7_CAAM_SIZE = (256 * KiB), | ||
184 | |||
185 | - FSL_IMX7_CAN1_ADDR = 0x30A00000, | ||
186 | - FSL_IMX7_CAN2_ADDR = 0x30A10000, | ||
187 | - FSL_IMX7_CANn_SIZE = 0x10000, | ||
188 | + FSL_IMX7_SPBA_ADDR = 0x308F0000, | ||
189 | + FSL_IMX7_SPBA_SIZE = (4 * KiB), | ||
190 | |||
191 | - FSL_IMX7_I2C1_ADDR = 0x30A20000, | ||
192 | - FSL_IMX7_I2C2_ADDR = 0x30A30000, | ||
193 | - FSL_IMX7_I2C3_ADDR = 0x30A40000, | ||
194 | - FSL_IMX7_I2C4_ADDR = 0x30A50000, | ||
195 | + FSL_IMX7_SAI3_ADDR = 0x308C0000, | ||
196 | + FSL_IMX7_SAI2_ADDR = 0x308B0000, | ||
197 | + FSL_IMX7_SAI1_ADDR = 0x308A0000, | ||
198 | + FSL_IMX7_SAIn_SIZE = (4 * KiB), | ||
199 | |||
200 | - FSL_IMX7_ECSPI1_ADDR = 0x30820000, | ||
201 | - FSL_IMX7_ECSPI2_ADDR = 0x30830000, | ||
202 | - FSL_IMX7_ECSPI3_ADDR = 0x30840000, | ||
203 | - FSL_IMX7_ECSPI4_ADDR = 0x30630000, | ||
204 | - | ||
205 | - FSL_IMX7_LCDIF_ADDR = 0x30730000, | ||
206 | - FSL_IMX7_LCDIF_SIZE = 0x1000, | ||
207 | - | ||
208 | - FSL_IMX7_UART1_ADDR = 0x30860000, | ||
209 | + FSL_IMX7_UART3_ADDR = 0x30880000, | ||
210 | /* | ||
211 | * Some versions of the reference manual claim that UART2 is @ | ||
212 | * 0x30870000, but experiments with HW + DT files in upstream | ||
213 | @@ -XXX,XX +XXX,XX @@ enum FslIMX7MemoryMap { | ||
214 | * actually located @ 0x30890000 | ||
215 | */ | ||
216 | FSL_IMX7_UART2_ADDR = 0x30890000, | ||
217 | - FSL_IMX7_UART3_ADDR = 0x30880000, | ||
218 | - FSL_IMX7_UART4_ADDR = 0x30A60000, | ||
219 | - FSL_IMX7_UART5_ADDR = 0x30A70000, | ||
220 | - FSL_IMX7_UART6_ADDR = 0x30A80000, | ||
221 | - FSL_IMX7_UART7_ADDR = 0x30A90000, | ||
222 | + FSL_IMX7_UART1_ADDR = 0x30860000, | ||
223 | |||
224 | - FSL_IMX7_SAI1_ADDR = 0x308A0000, | ||
225 | - FSL_IMX7_SAI2_ADDR = 0x308B0000, | ||
226 | - FSL_IMX7_SAI3_ADDR = 0x308C0000, | ||
227 | - FSL_IMX7_SAIn_SIZE = 0x10000, | ||
228 | + FSL_IMX7_ECSPI3_ADDR = 0x30840000, | ||
229 | + FSL_IMX7_ECSPI2_ADDR = 0x30830000, | ||
230 | + FSL_IMX7_ECSPI1_ADDR = 0x30820000, | ||
231 | + FSL_IMX7_ECSPIn_SIZE = (4 * KiB), | ||
232 | |||
233 | - FSL_IMX7_ENET1_ADDR = 0x30BE0000, | ||
234 | - FSL_IMX7_ENET2_ADDR = 0x30BF0000, | ||
235 | + /* AIPS-3 End */ | ||
236 | |||
237 | - FSL_IMX7_USB1_ADDR = 0x30B10000, | ||
238 | - FSL_IMX7_USBMISC1_ADDR = 0x30B10200, | ||
239 | - FSL_IMX7_USB2_ADDR = 0x30B20000, | ||
240 | - FSL_IMX7_USBMISC2_ADDR = 0x30B20200, | ||
241 | - FSL_IMX7_USB3_ADDR = 0x30B30000, | ||
242 | - FSL_IMX7_USBMISC3_ADDR = 0x30B30200, | ||
243 | - FSL_IMX7_USBMISCn_SIZE = 0x200, | ||
244 | + /* AIPS-2 Begin */ | ||
245 | |||
246 | - FSL_IMX7_USDHC1_ADDR = 0x30B40000, | ||
247 | - FSL_IMX7_USDHC2_ADDR = 0x30B50000, | ||
248 | - FSL_IMX7_USDHC3_ADDR = 0x30B60000, | ||
249 | + FSL_IMX7_AXI_DEBUG_MON_ADDR = 0x307E0000, | ||
250 | + FSL_IMX7_AXI_DEBUG_MON_SIZE = (64 * KiB), | ||
251 | |||
252 | - FSL_IMX7_SDMA_ADDR = 0x30BD0000, | ||
253 | - FSL_IMX7_SDMA_SIZE = 0x1000, | ||
254 | + FSL_IMX7_PERFMON2_ADDR = 0x307D0000, | ||
255 | + FSL_IMX7_PERFMON1_ADDR = 0x307C0000, | ||
256 | + FSL_IMX7_PERFMONn_SIZE = (64 * KiB), | ||
257 | + | ||
258 | + FSL_IMX7_DDRC_ADDR = 0x307A0000, | ||
259 | + FSL_IMX7_DDRC_SIZE = (4 * KiB), | ||
260 | + | ||
261 | + FSL_IMX7_DDRC_PHY_ADDR = 0x30790000, | ||
262 | + FSL_IMX7_DDRC_PHY_SIZE = (4 * KiB), | ||
263 | + | ||
264 | + FSL_IMX7_TZASC_ADDR = 0x30780000, | ||
265 | + FSL_IMX7_TZASC_SIZE = (64 * KiB), | ||
266 | + | ||
267 | + FSL_IMX7_MIPI_DSI_ADDR = 0x30760000, | ||
268 | + FSL_IMX7_MIPI_DSI_SIZE = (4 * KiB), | ||
269 | + | ||
270 | + FSL_IMX7_MIPI_CSI_ADDR = 0x30750000, | ||
271 | + FSL_IMX7_MIPI_CSI_SIZE = 0x4000, | ||
272 | + | ||
273 | + FSL_IMX7_LCDIF_ADDR = 0x30730000, | ||
274 | + FSL_IMX7_LCDIF_SIZE = 0x8000, | ||
275 | + | ||
276 | + FSL_IMX7_CSI_ADDR = 0x30710000, | ||
277 | + FSL_IMX7_CSI_SIZE = (4 * KiB), | ||
278 | + | ||
279 | + FSL_IMX7_PXP_ADDR = 0x30700000, | ||
280 | + FSL_IMX7_PXP_SIZE = 0x4000, | ||
281 | + | ||
282 | + FSL_IMX7_EPDC_ADDR = 0x306F0000, | ||
283 | + FSL_IMX7_EPDC_SIZE = (4 * KiB), | ||
284 | + | ||
285 | + FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000, | ||
286 | + FSL_IMX7_PCIE_PHY_SIZE = (4 * KiB), | ||
287 | + | ||
288 | + FSL_IMX7_SYSCNT_CTRL_ADDR = 0x306C0000, | ||
289 | + FSL_IMX7_SYSCNT_CMP_ADDR = 0x306B0000, | ||
290 | + FSL_IMX7_SYSCNT_RD_ADDR = 0x306A0000, | ||
291 | + | ||
292 | + FSL_IMX7_PWM4_ADDR = 0x30690000, | ||
293 | + FSL_IMX7_PWM3_ADDR = 0x30680000, | ||
294 | + FSL_IMX7_PWM2_ADDR = 0x30670000, | ||
295 | + FSL_IMX7_PWM1_ADDR = 0x30660000, | ||
296 | + FSL_IMX7_PWMn_SIZE = (4 * KiB), | ||
297 | + | ||
298 | + FSL_IMX7_FlEXTIMER2_ADDR = 0x30650000, | ||
299 | + FSL_IMX7_FlEXTIMER1_ADDR = 0x30640000, | ||
300 | + FSL_IMX7_FLEXTIMERn_SIZE = (4 * KiB), | ||
301 | + | ||
302 | + FSL_IMX7_ECSPI4_ADDR = 0x30630000, | ||
303 | + | ||
304 | + FSL_IMX7_ADC2_ADDR = 0x30620000, | ||
305 | + FSL_IMX7_ADC1_ADDR = 0x30610000, | ||
306 | + FSL_IMX7_ADCn_SIZE = (4 * KiB), | ||
307 | + | ||
308 | + FSL_IMX7_AIPS2_CONF_ADDR = 0x305F0000, | ||
309 | + FSL_IMX7_AIPS2_CONF_SIZE = (64 * KiB), | ||
310 | + | ||
311 | + /* AIPS-2 End */ | ||
312 | + | ||
313 | + /* AIPS-1 Begin */ | ||
314 | + | ||
315 | + FSL_IMX7_CSU_ADDR = 0x303E0000, | ||
316 | + FSL_IMX7_CSU_SIZE = (64 * KiB), | ||
317 | + | ||
318 | + FSL_IMX7_RDC_ADDR = 0x303D0000, | ||
319 | + FSL_IMX7_RDC_SIZE = (4 * KiB), | ||
320 | + | ||
321 | + FSL_IMX7_SEMAPHORE2_ADDR = 0x303C0000, | ||
322 | + FSL_IMX7_SEMAPHORE1_ADDR = 0x303B0000, | ||
323 | + FSL_IMX7_SEMAPHOREn_SIZE = (4 * KiB), | ||
324 | + | ||
325 | + FSL_IMX7_GPC_ADDR = 0x303A0000, | ||
326 | + | ||
327 | + FSL_IMX7_SRC_ADDR = 0x30390000, | ||
328 | + FSL_IMX7_SRC_SIZE = (4 * KiB), | ||
329 | + | ||
330 | + FSL_IMX7_CCM_ADDR = 0x30380000, | ||
331 | + | ||
332 | + FSL_IMX7_SNVS_HP_ADDR = 0x30370000, | ||
333 | + | ||
334 | + FSL_IMX7_ANALOG_ADDR = 0x30360000, | ||
335 | + | ||
336 | + FSL_IMX7_OCOTP_ADDR = 0x30350000, | ||
337 | + FSL_IMX7_OCOTP_SIZE = 0x10000, | ||
338 | + | ||
339 | + FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000, | ||
340 | + FSL_IMX7_IOMUXC_GPR_SIZE = (4 * KiB), | ||
341 | + | ||
342 | + FSL_IMX7_IOMUXC_ADDR = 0x30330000, | ||
343 | + FSL_IMX7_IOMUXC_SIZE = (4 * KiB), | ||
344 | + | ||
345 | + FSL_IMX7_KPP_ADDR = 0x30320000, | ||
346 | + FSL_IMX7_KPP_SIZE = (4 * KiB), | ||
347 | + | ||
348 | + FSL_IMX7_ROMCP_ADDR = 0x30310000, | ||
349 | + FSL_IMX7_ROMCP_SIZE = (4 * KiB), | ||
350 | + | ||
351 | + FSL_IMX7_GPT4_ADDR = 0x30300000, | ||
352 | + FSL_IMX7_GPT3_ADDR = 0x302F0000, | ||
353 | + FSL_IMX7_GPT2_ADDR = 0x302E0000, | ||
354 | + FSL_IMX7_GPT1_ADDR = 0x302D0000, | ||
355 | + | ||
356 | + FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000, | ||
357 | + FSL_IMX7_IOMUXC_LPSR_SIZE = (4 * KiB), | ||
358 | + | ||
359 | + FSL_IMX7_WDOG4_ADDR = 0x302B0000, | ||
360 | + FSL_IMX7_WDOG3_ADDR = 0x302A0000, | ||
361 | + FSL_IMX7_WDOG2_ADDR = 0x30290000, | ||
362 | + FSL_IMX7_WDOG1_ADDR = 0x30280000, | ||
363 | + | ||
364 | + FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000, | ||
365 | + | ||
366 | + FSL_IMX7_GPIO7_ADDR = 0x30260000, | ||
367 | + FSL_IMX7_GPIO6_ADDR = 0x30250000, | ||
368 | + FSL_IMX7_GPIO5_ADDR = 0x30240000, | ||
369 | + FSL_IMX7_GPIO4_ADDR = 0x30230000, | ||
370 | + FSL_IMX7_GPIO3_ADDR = 0x30220000, | ||
371 | + FSL_IMX7_GPIO2_ADDR = 0x30210000, | ||
372 | + FSL_IMX7_GPIO1_ADDR = 0x30200000, | ||
373 | + | ||
374 | + FSL_IMX7_AIPS1_CONF_ADDR = 0x301F0000, | ||
375 | + FSL_IMX7_AIPS1_CONF_SIZE = (64 * KiB), | ||
376 | |||
377 | - FSL_IMX7_A7MPCORE_ADDR = 0x31000000, | ||
378 | FSL_IMX7_A7MPCORE_DAP_ADDR = 0x30000000, | ||
379 | + FSL_IMX7_A7MPCORE_DAP_SIZE = (1 * MiB), | ||
380 | |||
381 | - FSL_IMX7_PCIE_REG_ADDR = 0x33800000, | ||
382 | - FSL_IMX7_PCIE_REG_SIZE = 16 * 1024, | ||
383 | + /* AIPS-1 End */ | ||
384 | |||
385 | - FSL_IMX7_GPR_ADDR = 0x30340000, | ||
386 | + FSL_IMX7_EIM_CS0_ADDR = 0x28000000, | ||
387 | + FSL_IMX7_EIM_CS0_SIZE = (128 * MiB), | ||
388 | |||
389 | - FSL_IMX7_DMA_APBH_ADDR = 0x33000000, | ||
390 | - FSL_IMX7_DMA_APBH_SIZE = 0x2000, | ||
391 | + FSL_IMX7_OCRAM_PXP_ADDR = 0x00940000, | ||
392 | + FSL_IMX7_OCRAM_PXP_SIZE = (32 * KiB), | ||
393 | + | ||
394 | + FSL_IMX7_OCRAM_EPDC_ADDR = 0x00920000, | ||
395 | + FSL_IMX7_OCRAM_EPDC_SIZE = (128 * KiB), | ||
396 | + | ||
397 | + FSL_IMX7_OCRAM_MEM_ADDR = 0x00900000, | ||
398 | + FSL_IMX7_OCRAM_MEM_SIZE = (128 * KiB), | ||
399 | + | ||
400 | + FSL_IMX7_TCMU_ADDR = 0x00800000, | ||
401 | + FSL_IMX7_TCMU_SIZE = (32 * KiB), | ||
402 | + | ||
403 | + FSL_IMX7_TCML_ADDR = 0x007F8000, | ||
404 | + FSL_IMX7_TCML_SIZE = (32 * KiB), | ||
405 | + | ||
406 | + FSL_IMX7_OCRAM_S_ADDR = 0x00180000, | ||
407 | + FSL_IMX7_OCRAM_S_SIZE = (32 * KiB), | ||
408 | + | ||
409 | + FSL_IMX7_CAAM_MEM_ADDR = 0x00100000, | ||
410 | + FSL_IMX7_CAAM_MEM_SIZE = (32 * KiB), | ||
411 | + | ||
412 | + FSL_IMX7_ROM_ADDR = 0x00000000, | ||
413 | + FSL_IMX7_ROM_SIZE = (96 * KiB), | ||
414 | }; | ||
415 | |||
416 | enum FslIMX7IRQs { | ||
417 | diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c | ||
418 | index XXXXXXX..XXXXXXX 100644 | ||
419 | --- a/hw/arm/fsl-imx7.c | ||
420 | +++ b/hw/arm/fsl-imx7.c | ||
421 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
422 | char name[NAME_SIZE]; | ||
423 | int i; | ||
424 | |||
425 | + /* | ||
426 | + * CPUs | ||
427 | + */ | ||
428 | for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX7_NUM_CPUS); i++) { | ||
429 | snprintf(name, NAME_SIZE, "cpu%d", i); | ||
430 | object_initialize_child(obj, name, &s->cpu[i], | ||
431 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
432 | TYPE_A15MPCORE_PRIV); | ||
433 | |||
434 | /* | ||
435 | - * GPIOs 1 to 7 | ||
436 | + * GPIOs | ||
437 | */ | ||
438 | for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) { | ||
439 | snprintf(name, NAME_SIZE, "gpio%d", i); | ||
440 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | - * GPT1, 2, 3, 4 | ||
445 | + * GPTs | ||
446 | */ | ||
447 | for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) { | ||
448 | snprintf(name, NAME_SIZE, "gpt%d", i); | ||
449 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
450 | */ | ||
451 | object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2); | ||
452 | |||
453 | + /* | ||
454 | + * ECSPIs | ||
455 | + */ | ||
456 | for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) { | ||
457 | snprintf(name, NAME_SIZE, "spi%d", i + 1); | ||
458 | object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI); | ||
459 | } | ||
460 | |||
461 | - | ||
462 | + /* | ||
463 | + * I2Cs | ||
464 | + */ | ||
465 | for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) { | ||
466 | snprintf(name, NAME_SIZE, "i2c%d", i + 1); | ||
467 | object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C); | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | - * UART | ||
472 | + * UARTs | ||
473 | */ | ||
474 | for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) { | ||
475 | snprintf(name, NAME_SIZE, "uart%d", i); | ||
476 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | - * Ethernet | ||
481 | + * Ethernets | ||
482 | */ | ||
483 | for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) { | ||
484 | snprintf(name, NAME_SIZE, "eth%d", i); | ||
485 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | - * SDHCI | ||
490 | + * SDHCIs | ||
491 | */ | ||
492 | for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) { | ||
493 | snprintf(name, NAME_SIZE, "usdhc%d", i); | ||
494 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
495 | object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); | ||
496 | |||
497 | /* | ||
498 | - * Watchdog | ||
499 | + * Watchdogs | ||
500 | */ | ||
501 | for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) { | ||
502 | snprintf(name, NAME_SIZE, "wdt%d", i); | ||
503 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
504 | */ | ||
505 | object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR); | ||
506 | |||
507 | + /* | ||
508 | + * PCIE | ||
509 | + */ | ||
510 | object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); | ||
511 | |||
512 | + /* | ||
513 | + * USBs | ||
514 | + */ | ||
515 | for (i = 0; i < FSL_IMX7_NUM_USBS; i++) { | ||
516 | snprintf(name, NAME_SIZE, "usb%d", i); | ||
517 | object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA); | ||
518 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
519 | return; | ||
520 | } | ||
521 | |||
522 | + /* | ||
523 | + * CPUs | ||
524 | + */ | ||
525 | for (i = 0; i < smp_cpus; i++) { | ||
526 | o = OBJECT(&s->cpu[i]); | ||
527 | |||
528 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
529 | * A7MPCORE DAP | ||
530 | */ | ||
531 | create_unimplemented_device("a7mpcore-dap", FSL_IMX7_A7MPCORE_DAP_ADDR, | ||
532 | - 0x100000); | ||
533 | + FSL_IMX7_A7MPCORE_DAP_SIZE); | ||
534 | |||
535 | /* | ||
536 | - * GPT1, 2, 3, 4 | ||
537 | + * GPTs | ||
538 | */ | ||
539 | for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) { | ||
540 | static const hwaddr FSL_IMX7_GPTn_ADDR[FSL_IMX7_NUM_GPTS] = { | ||
541 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
542 | FSL_IMX7_GPTn_IRQ[i])); | ||
543 | } | ||
544 | |||
545 | + /* | ||
546 | + * GPIOs | ||
547 | + */ | ||
548 | for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) { | ||
549 | static const hwaddr FSL_IMX7_GPIOn_ADDR[FSL_IMX7_NUM_GPIOS] = { | ||
550 | FSL_IMX7_GPIO1_ADDR, | ||
551 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
552 | /* | ||
553 | * IOMUXC and IOMUXC_LPSR | ||
554 | */ | ||
555 | - for (i = 0; i < FSL_IMX7_NUM_IOMUXCS; i++) { | ||
556 | - static const hwaddr FSL_IMX7_IOMUXCn_ADDR[FSL_IMX7_NUM_IOMUXCS] = { | ||
557 | - FSL_IMX7_IOMUXC_ADDR, | ||
558 | - FSL_IMX7_IOMUXC_LPSR_ADDR, | ||
559 | - }; | ||
560 | - | ||
561 | - snprintf(name, NAME_SIZE, "iomuxc%d", i); | ||
562 | - create_unimplemented_device(name, FSL_IMX7_IOMUXCn_ADDR[i], | ||
563 | - FSL_IMX7_IOMUXCn_SIZE); | ||
564 | - } | ||
565 | + create_unimplemented_device("iomuxc", FSL_IMX7_IOMUXC_ADDR, | ||
566 | + FSL_IMX7_IOMUXC_SIZE); | ||
567 | + create_unimplemented_device("iomuxc_lspr", FSL_IMX7_IOMUXC_LPSR_ADDR, | ||
568 | + FSL_IMX7_IOMUXC_LPSR_SIZE); | ||
569 | |||
570 | /* | ||
571 | * CCM | ||
572 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
573 | sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort); | ||
574 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX7_GPC_ADDR); | ||
575 | |||
576 | - /* Initialize all ECSPI */ | ||
577 | + /* | ||
578 | + * ECSPIs | ||
579 | + */ | ||
580 | for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) { | ||
581 | static const hwaddr FSL_IMX7_SPIn_ADDR[FSL_IMX7_NUM_ECSPIS] = { | ||
582 | FSL_IMX7_ECSPI1_ADDR, | ||
583 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
584 | FSL_IMX7_SPIn_IRQ[i])); | ||
585 | } | ||
586 | |||
587 | + /* | ||
588 | + * I2Cs | ||
589 | + */ | ||
590 | for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) { | ||
591 | static const hwaddr FSL_IMX7_I2Cn_ADDR[FSL_IMX7_NUM_I2CS] = { | ||
592 | FSL_IMX7_I2C1_ADDR, | ||
593 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | - * UART | ||
598 | + * UARTs | ||
599 | */ | ||
600 | for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) { | ||
601 | static const hwaddr FSL_IMX7_UARTn_ADDR[FSL_IMX7_NUM_UARTS] = { | ||
602 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | - * Ethernet | ||
607 | + * Ethernets | ||
608 | * | ||
609 | * We must use two loops since phy_connected affects the other interface | ||
610 | * and we have to set all properties before calling sysbus_realize(). | ||
611 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | - * USDHC | ||
616 | + * USDHCs | ||
617 | */ | ||
618 | for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) { | ||
619 | static const hwaddr FSL_IMX7_USDHCn_ADDR[FSL_IMX7_NUM_USDHCS] = { | ||
620 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
621 | * SNVS | ||
622 | */ | ||
623 | sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); | ||
624 | - sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_ADDR); | ||
625 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_HP_ADDR); | ||
626 | |||
627 | /* | ||
628 | * SRC | ||
629 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
630 | create_unimplemented_device("src", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE); | ||
631 | |||
632 | /* | ||
633 | - * Watchdog | ||
634 | + * Watchdogs | ||
635 | */ | ||
636 | for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) { | ||
637 | static const hwaddr FSL_IMX7_WDOGn_ADDR[FSL_IMX7_NUM_WDTS] = { | ||
638 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
639 | create_unimplemented_device("caam", FSL_IMX7_CAAM_ADDR, FSL_IMX7_CAAM_SIZE); | ||
640 | |||
641 | /* | ||
642 | - * PWM | ||
643 | + * PWMs | ||
644 | */ | ||
645 | - create_unimplemented_device("pwm1", FSL_IMX7_PWM1_ADDR, FSL_IMX7_PWMn_SIZE); | ||
646 | - create_unimplemented_device("pwm2", FSL_IMX7_PWM2_ADDR, FSL_IMX7_PWMn_SIZE); | ||
647 | - create_unimplemented_device("pwm3", FSL_IMX7_PWM3_ADDR, FSL_IMX7_PWMn_SIZE); | ||
648 | - create_unimplemented_device("pwm4", FSL_IMX7_PWM4_ADDR, FSL_IMX7_PWMn_SIZE); | ||
649 | + for (i = 0; i < FSL_IMX7_NUM_PWMS; i++) { | ||
650 | + static const hwaddr FSL_IMX7_PWMn_ADDR[FSL_IMX7_NUM_PWMS] = { | ||
651 | + FSL_IMX7_PWM1_ADDR, | ||
652 | + FSL_IMX7_PWM2_ADDR, | ||
653 | + FSL_IMX7_PWM3_ADDR, | ||
654 | + FSL_IMX7_PWM4_ADDR, | ||
655 | + }; | ||
656 | + | ||
657 | + snprintf(name, NAME_SIZE, "pwm%d", i); | ||
658 | + create_unimplemented_device(name, FSL_IMX7_PWMn_ADDR[i], | ||
659 | + FSL_IMX7_PWMn_SIZE); | ||
660 | + } | ||
661 | |||
662 | /* | ||
663 | - * CAN | ||
664 | + * CANs | ||
665 | */ | ||
666 | - create_unimplemented_device("can1", FSL_IMX7_CAN1_ADDR, FSL_IMX7_CANn_SIZE); | ||
667 | - create_unimplemented_device("can2", FSL_IMX7_CAN2_ADDR, FSL_IMX7_CANn_SIZE); | ||
668 | + for (i = 0; i < FSL_IMX7_NUM_CANS; i++) { | ||
669 | + static const hwaddr FSL_IMX7_CANn_ADDR[FSL_IMX7_NUM_CANS] = { | ||
670 | + FSL_IMX7_CAN1_ADDR, | ||
671 | + FSL_IMX7_CAN2_ADDR, | ||
672 | + }; | ||
673 | + | ||
674 | + snprintf(name, NAME_SIZE, "can%d", i); | ||
675 | + create_unimplemented_device(name, FSL_IMX7_CANn_ADDR[i], | ||
676 | + FSL_IMX7_CANn_SIZE); | ||
677 | + } | ||
678 | |||
679 | /* | ||
680 | - * SAI (Audio SSI (Synchronous Serial Interface)) | ||
681 | + * SAIs (Audio SSI (Synchronous Serial Interface)) | ||
682 | */ | ||
683 | - create_unimplemented_device("sai1", FSL_IMX7_SAI1_ADDR, FSL_IMX7_SAIn_SIZE); | ||
684 | - create_unimplemented_device("sai2", FSL_IMX7_SAI2_ADDR, FSL_IMX7_SAIn_SIZE); | ||
685 | - create_unimplemented_device("sai2", FSL_IMX7_SAI3_ADDR, FSL_IMX7_SAIn_SIZE); | ||
686 | + for (i = 0; i < FSL_IMX7_NUM_SAIS; i++) { | ||
687 | + static const hwaddr FSL_IMX7_SAIn_ADDR[FSL_IMX7_NUM_SAIS] = { | ||
688 | + FSL_IMX7_SAI1_ADDR, | ||
689 | + FSL_IMX7_SAI2_ADDR, | ||
690 | + FSL_IMX7_SAI3_ADDR, | ||
691 | + }; | ||
692 | + | ||
693 | + snprintf(name, NAME_SIZE, "sai%d", i); | ||
694 | + create_unimplemented_device(name, FSL_IMX7_SAIn_ADDR[i], | ||
695 | + FSL_IMX7_SAIn_SIZE); | ||
696 | + } | ||
697 | |||
698 | /* | ||
699 | * OCOTP | ||
700 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
701 | create_unimplemented_device("ocotp", FSL_IMX7_OCOTP_ADDR, | ||
702 | FSL_IMX7_OCOTP_SIZE); | ||
703 | |||
704 | + /* | ||
705 | + * GPR | ||
706 | + */ | ||
707 | sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort); | ||
708 | - sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_GPR_ADDR); | ||
709 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_IOMUXC_GPR_ADDR); | ||
710 | |||
711 | + /* | ||
712 | + * PCIE | ||
713 | + */ | ||
714 | sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort); | ||
715 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR); | ||
716 | |||
717 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
718 | irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ); | ||
719 | sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq); | ||
720 | |||
721 | - | ||
722 | + /* | ||
723 | + * USBs | ||
724 | + */ | ||
725 | for (i = 0; i < FSL_IMX7_NUM_USBS; i++) { | ||
726 | static const hwaddr FSL_IMX7_USBMISCn_ADDR[FSL_IMX7_NUM_USBS] = { | ||
727 | FSL_IMX7_USBMISC1_ADDR, | ||
728 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
729 | */ | ||
730 | create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR, | ||
731 | FSL_IMX7_PCIE_PHY_SIZE); | ||
732 | + | ||
733 | } | ||
734 | |||
735 | static Property fsl_imx7_properties[] = { | ||
736 | -- | 39 | -- |
737 | 2.34.1 | 40 | 2.34.1 |
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | Some of the names we use for CPU features in linux-user's dummy |
---|---|---|---|
2 | /proc/cpuinfo don't match the strings in the real kernel in | ||
3 | arch/arm64/kernel/cpuinfo.c. Specifically, the SME related | ||
4 | features have an underscore in the HWCAP_FOO define name, | ||
5 | but (like the SVE ones) they do not have an underscore in the | ||
6 | string in cpuinfo. Correct the errors. | ||
2 | 7 | ||
3 | * Add TZASC as unimplemented device. | 8 | Fixes: a55b9e7226708 ("linux-user: Emulate /proc/cpuinfo on aarch64 and arm") |
4 | - Allow bare metal application to access this (unimplemented) device | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | * Add CSU as unimplemented device. | 10 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | - Allow bare metal application to access this (unimplemented) device | 11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | * Add various memory segments | 12 | --- |
8 | - OCRAM | 13 | linux-user/elfload.c | 14 +++++++------- |
9 | - OCRAM EPDC | 14 | 1 file changed, 7 insertions(+), 7 deletions(-) |
10 | - OCRAM PXP | ||
11 | - OCRAM S | ||
12 | - ROM | ||
13 | - CAAM | ||
14 | 15 | ||
15 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | 16 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c |
16 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
17 | Message-id: f887a3483996ba06d40bd62ffdfb0ecf68621987.1692964892.git.jcd@tribudubois.net | ||
18 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
19 | --- | ||
20 | include/hw/arm/fsl-imx7.h | 7 +++++ | ||
21 | hw/arm/fsl-imx7.c | 63 +++++++++++++++++++++++++++++++++++++++ | ||
22 | 2 files changed, 70 insertions(+) | ||
23 | |||
24 | diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/hw/arm/fsl-imx7.h | 18 | --- a/linux-user/elfload.c |
27 | +++ b/include/hw/arm/fsl-imx7.h | 19 | +++ b/linux-user/elfload.c |
28 | @@ -XXX,XX +XXX,XX @@ struct FslIMX7State { | 20 | @@ -XXX,XX +XXX,XX @@ const char *elf_hwcap2_str(uint32_t bit) |
29 | IMX7GPRState gpr; | 21 | [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres", |
30 | ChipideaState usb[FSL_IMX7_NUM_USBS]; | 22 | [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3", |
31 | DesignwarePCIEHost pcie; | 23 | [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme", |
32 | + MemoryRegion rom; | 24 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "sme_i16i64", |
33 | + MemoryRegion caam; | 25 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "sme_f64f64", |
34 | + MemoryRegion ocram; | 26 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "sme_i8i32", |
35 | + MemoryRegion ocram_epdc; | 27 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "sme_f16f32", |
36 | + MemoryRegion ocram_pxp; | 28 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "sme_b16f32", |
37 | + MemoryRegion ocram_s; | 29 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "sme_f32f32", |
38 | + | 30 | - [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "sme_fa64", |
39 | uint32_t phy_num[FSL_IMX7_NUM_ETHS]; | 31 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64", |
40 | bool phy_connected[FSL_IMX7_NUM_ETHS]; | 32 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64", |
41 | }; | 33 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32", |
42 | diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c | 34 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32", |
43 | index XXXXXXX..XXXXXXX 100644 | 35 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32", |
44 | --- a/hw/arm/fsl-imx7.c | 36 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32", |
45 | +++ b/hw/arm/fsl-imx7.c | 37 | + [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64", |
46 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | 38 | }; |
47 | create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR, | 39 | |
48 | FSL_IMX7_PCIE_PHY_SIZE); | 40 | return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; |
49 | |||
50 | + /* | ||
51 | + * CSU | ||
52 | + */ | ||
53 | + create_unimplemented_device("csu", FSL_IMX7_CSU_ADDR, | ||
54 | + FSL_IMX7_CSU_SIZE); | ||
55 | + | ||
56 | + /* | ||
57 | + * TZASC | ||
58 | + */ | ||
59 | + create_unimplemented_device("tzasc", FSL_IMX7_TZASC_ADDR, | ||
60 | + FSL_IMX7_TZASC_SIZE); | ||
61 | + | ||
62 | + /* | ||
63 | + * OCRAM memory | ||
64 | + */ | ||
65 | + memory_region_init_ram(&s->ocram, NULL, "imx7.ocram", | ||
66 | + FSL_IMX7_OCRAM_MEM_SIZE, | ||
67 | + &error_abort); | ||
68 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_MEM_ADDR, | ||
69 | + &s->ocram); | ||
70 | + | ||
71 | + /* | ||
72 | + * OCRAM EPDC memory | ||
73 | + */ | ||
74 | + memory_region_init_ram(&s->ocram_epdc, NULL, "imx7.ocram_epdc", | ||
75 | + FSL_IMX7_OCRAM_EPDC_SIZE, | ||
76 | + &error_abort); | ||
77 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_EPDC_ADDR, | ||
78 | + &s->ocram_epdc); | ||
79 | + | ||
80 | + /* | ||
81 | + * OCRAM PXP memory | ||
82 | + */ | ||
83 | + memory_region_init_ram(&s->ocram_pxp, NULL, "imx7.ocram_pxp", | ||
84 | + FSL_IMX7_OCRAM_PXP_SIZE, | ||
85 | + &error_abort); | ||
86 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_PXP_ADDR, | ||
87 | + &s->ocram_pxp); | ||
88 | + | ||
89 | + /* | ||
90 | + * OCRAM_S memory | ||
91 | + */ | ||
92 | + memory_region_init_ram(&s->ocram_s, NULL, "imx7.ocram_s", | ||
93 | + FSL_IMX7_OCRAM_S_SIZE, | ||
94 | + &error_abort); | ||
95 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_S_ADDR, | ||
96 | + &s->ocram_s); | ||
97 | + | ||
98 | + /* | ||
99 | + * ROM memory | ||
100 | + */ | ||
101 | + memory_region_init_rom(&s->rom, OBJECT(dev), "imx7.rom", | ||
102 | + FSL_IMX7_ROM_SIZE, &error_abort); | ||
103 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_ROM_ADDR, | ||
104 | + &s->rom); | ||
105 | + | ||
106 | + /* | ||
107 | + * CAAM memory | ||
108 | + */ | ||
109 | + memory_region_init_rom(&s->caam, OBJECT(dev), "imx7.caam", | ||
110 | + FSL_IMX7_CAAM_MEM_SIZE, &error_abort); | ||
111 | + memory_region_add_subregion(get_system_memory(), FSL_IMX7_CAAM_MEM_ADDR, | ||
112 | + &s->caam); | ||
113 | } | ||
114 | |||
115 | static Property fsl_imx7_properties[] = { | ||
116 | -- | 41 | -- |
117 | 2.34.1 | 42 | 2.34.1 |
118 | 43 | ||
119 | 44 |
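
The string tables patched above rely on a designated-initializer idiom: each slot is indexed by the bit position of its hwcap flag, computed at compile time with __builtin_ctz(). A minimal standalone sketch of the same idiom, with example bit positions and a print loop that are illustrative only (this is neither QEMU nor kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define HWCAP2_SME        (1u << 23)   /* example bit positions, only for this sketch */
    #define HWCAP2_SME_I16I64 (1u << 24)

    static const char *hwcap2_str[] = {
        [__builtin_ctz(HWCAP2_SME)]        = "sme",
        [__builtin_ctz(HWCAP2_SME_I16I64)] = "smei16i64",  /* no underscore, as in cpuinfo */
    };

    /* Emit the names of all set bits, the way a cpuinfo "Features" line is built. */
    static void print_features(uint32_t hwcap2)
    {
        for (unsigned bit = 0; bit < 32; bit++) {
            if ((hwcap2 & (1u << bit)) &&
                bit < sizeof(hwcap2_str) / sizeof(hwcap2_str[0]) &&
                hwcap2_str[bit]) {
                printf("%s ", hwcap2_str[bit]);
            }
        }
        printf("\n");
    }

    int main(void)
    {
        print_features(HWCAP2_SME | HWCAP2_SME_I16I64);   /* prints "sme smei16i64" */
        return 0;
    }
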
1 | In the aspeed_rtc device we store a difference between two time_t | 1 | Our lists of Arm 32 and 64 bit hwcap values have lagged behind |
---|---|---|---|
2 | values in an 'int'. This is not really correct when time_t could | 2 | the Linux kernel. Update them to include all the bits defined |
3 | be 64 bits. Enlarge the field to 'int64_t'. | 3 | as of upstream Linux git commit a48fa7efaf1161c1 (in the middle |
4 | of the kernel 6.6 dev cycle). | ||
4 | 5 | ||
5 | This is a migration compatibility break for the aspeed boards. | 6 | For 64-bit, we don't yet implement any of the features reported via |
6 | While we are changing the vmstate, remove the accidental | 7 | these hwcap bits. For 32-bit we do in fact already implement them |
7 | duplicate of the offset field. | 8 | all; we'll add the code to set them in a subsequent commit. |
8 | 9 | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Reviewed-by: Cédric Le Goater <clg@kaod.org> | 11 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | 13 | --- |
12 | include/hw/rtc/aspeed_rtc.h | 2 +- | 14 | linux-user/elfload.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ |
13 | hw/rtc/aspeed_rtc.c | 5 ++--- | 15 | 1 file changed, 44 insertions(+) |
14 | 2 files changed, 3 insertions(+), 4 deletions(-) | ||
15 | 16 | ||
16 | diff --git a/include/hw/rtc/aspeed_rtc.h b/include/hw/rtc/aspeed_rtc.h | 17 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c |
17 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/hw/rtc/aspeed_rtc.h | 19 | --- a/linux-user/elfload.c |
19 | +++ b/include/hw/rtc/aspeed_rtc.h | 20 | +++ b/linux-user/elfload.c |
20 | @@ -XXX,XX +XXX,XX @@ struct AspeedRtcState { | 21 | @@ -XXX,XX +XXX,XX @@ enum |
21 | qemu_irq irq; | 22 | ARM_HWCAP_ARM_VFPD32 = 1 << 19, |
22 | 23 | ARM_HWCAP_ARM_LPAE = 1 << 20, | |
23 | uint32_t reg[0x18]; | 24 | ARM_HWCAP_ARM_EVTSTRM = 1 << 21, |
24 | - int offset; | 25 | + ARM_HWCAP_ARM_FPHP = 1 << 22, |
25 | + int64_t offset; | 26 | + ARM_HWCAP_ARM_ASIMDHP = 1 << 23, |
26 | 27 | + ARM_HWCAP_ARM_ASIMDDP = 1 << 24, | |
28 | + ARM_HWCAP_ARM_ASIMDFHM = 1 << 25, | ||
29 | + ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26, | ||
30 | + ARM_HWCAP_ARM_I8MM = 1 << 27, | ||
27 | }; | 31 | }; |
28 | 32 | ||
29 | diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c | 33 | enum { |
30 | index XXXXXXX..XXXXXXX 100644 | 34 | @@ -XXX,XX +XXX,XX @@ enum { |
31 | --- a/hw/rtc/aspeed_rtc.c | 35 | ARM_HWCAP2_ARM_SHA1 = 1 << 2, |
32 | +++ b/hw/rtc/aspeed_rtc.c | 36 | ARM_HWCAP2_ARM_SHA2 = 1 << 3, |
33 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps aspeed_rtc_ops = { | 37 | ARM_HWCAP2_ARM_CRC32 = 1 << 4, |
34 | 38 | + ARM_HWCAP2_ARM_SB = 1 << 5, | |
35 | static const VMStateDescription vmstate_aspeed_rtc = { | 39 | + ARM_HWCAP2_ARM_SSBS = 1 << 6, |
36 | .name = TYPE_ASPEED_RTC, | ||
37 | - .version_id = 1, | ||
38 | + .version_id = 2, | ||
39 | .fields = (VMStateField[]) { | ||
40 | VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18), | ||
41 | - VMSTATE_INT32(offset, AspeedRtcState), | ||
42 | - VMSTATE_INT32(offset, AspeedRtcState), | ||
43 | + VMSTATE_INT64(offset, AspeedRtcState), | ||
44 | VMSTATE_END_OF_LIST() | ||
45 | } | ||
46 | }; | 40 | }; |
41 | |||
42 | /* The commpage only exists for 32 bit kernels */ | ||
43 | @@ -XXX,XX +XXX,XX @@ const char *elf_hwcap_str(uint32_t bit) | ||
44 | [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32", | ||
45 | [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae", | ||
46 | [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm", | ||
47 | + [__builtin_ctz(ARM_HWCAP_ARM_FPHP )] = "fphp", | ||
48 | + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP )] = "asimdhp", | ||
49 | + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP )] = "asimddp", | ||
50 | + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm", | ||
51 | + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16", | ||
52 | + [__builtin_ctz(ARM_HWCAP_ARM_I8MM )] = "i8mm", | ||
53 | }; | ||
54 | |||
55 | return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; | ||
56 | @@ -XXX,XX +XXX,XX @@ const char *elf_hwcap2_str(uint32_t bit) | ||
57 | [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1", | ||
58 | [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2", | ||
59 | [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32", | ||
60 | + [__builtin_ctz(ARM_HWCAP2_ARM_SB )] = "sb", | ||
61 | + [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs", | ||
62 | }; | ||
63 | |||
64 | return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; | ||
65 | @@ -XXX,XX +XXX,XX @@ enum { | ||
66 | ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, | ||
67 | ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, | ||
68 | ARM_HWCAP2_A64_SME_FA64 = 1 << 30, | ||
69 | + ARM_HWCAP2_A64_WFXT = 1ULL << 31, | ||
70 | + ARM_HWCAP2_A64_EBF16 = 1ULL << 32, | ||
71 | + ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33, | ||
72 | + ARM_HWCAP2_A64_CSSC = 1ULL << 34, | ||
73 | + ARM_HWCAP2_A64_RPRFM = 1ULL << 35, | ||
74 | + ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36, | ||
75 | + ARM_HWCAP2_A64_SME2 = 1ULL << 37, | ||
76 | + ARM_HWCAP2_A64_SME2P1 = 1ULL << 38, | ||
77 | + ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39, | ||
78 | + ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40, | ||
79 | + ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41, | ||
80 | + ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42, | ||
81 | + ARM_HWCAP2_A64_MOPS = 1ULL << 43, | ||
82 | + ARM_HWCAP2_A64_HBC = 1ULL << 44, | ||
83 | }; | ||
84 | |||
85 | #define ELF_HWCAP get_elf_hwcap() | ||
86 | @@ -XXX,XX +XXX,XX @@ const char *elf_hwcap2_str(uint32_t bit) | ||
87 | [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32", | ||
88 | [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32", | ||
89 | [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64", | ||
90 | + [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt", | ||
91 | + [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16", | ||
92 | + [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16", | ||
93 | + [__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc", | ||
94 | + [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm", | ||
95 | + [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1", | ||
96 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2", | ||
97 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1", | ||
98 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32", | ||
99 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32", | ||
100 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16", | ||
101 | + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16", | ||
102 | + [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops", | ||
103 | + [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc", | ||
104 | }; | ||
105 | |||
106 | return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; | ||
47 | -- | 107 | -- |
48 | 2.34.1 | 108 | 2.34.1 |
49 | 109 | ||
50 | 110 |
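
A detail worth calling out in the hwcap additions above: the enum switches to 1ULL shifts once it reaches bit 31 (WFXT), and the name-table entries for bits 32 and up switch from __builtin_ctz() to __builtin_ctzll(). A small illustration of why, assuming the usual 32-bit int (not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /*
         * With a plain int constant, shifting a 1 into bit 31 already
         * overflows a 32-bit signed int (formally undefined behaviour),
         * and bits 32..63 cannot be expressed at all; in practice the
         * value also sign-extends when widened to 64 bits.
         */
        uint64_t with_int = (uint64_t)(1 << 31);   /* typically 0xffffffff80000000 */
        uint64_t with_ull = 1ULL << 31;            /* 0x0000000080000000 */
        printf("%#llx vs %#llx\n",
               (unsigned long long)with_int, (unsigned long long)with_ull);

        /*
         * __builtin_ctz() takes an unsigned int, so a 64-bit flag such as
         * ARM_HWCAP2_A64_HBC (1ULL << 44) needs the ll variant to index
         * the string table.
         */
        printf("%d\n", __builtin_ctzll(1ULL << 44));   /* prints 44 */
        return 0;
    }
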
New patch | |||
---|---|---|---|
1 | Add the code to report the arm32 hwcaps we were previously missing: | ||
2 | sb, ssbs, fphp, asimdhp, asimddp, asimdfhm, asimdbf16, i8mm |
1 | 3 | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | linux-user/elfload.c | 12 ++++++++++++ | ||
8 | 1 file changed, 12 insertions(+) | ||
9 | |||
10 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/linux-user/elfload.c | ||
13 | +++ b/linux-user/elfload.c | ||
14 | @@ -XXX,XX +XXX,XX @@ uint32_t get_elf_hwcap(void) | ||
15 | } | ||
16 | } | ||
17 | GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4); | ||
18 | + /* | ||
19 | + * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same | ||
20 | + * isar_feature function for both. The kernel reports them as two hwcaps. | ||
21 | + */ | ||
22 | + GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP); | ||
23 | + GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP); | ||
24 | + GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP); | ||
25 | + GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM); | ||
26 | + GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16); | ||
27 | + GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM); | ||
28 | |||
29 | return hwcaps; | ||
30 | } | ||
31 | @@ -XXX,XX +XXX,XX @@ uint32_t get_elf_hwcap2(void) | ||
32 | GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); | ||
33 | GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); | ||
34 | GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); | ||
35 | + GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB); | ||
36 | + GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS); | ||
37 | return hwcaps; | ||
38 | } | ||
39 | |||
40 | -- | ||
41 | 2.34.1 |
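
For context, a sketch of how 32-bit guest code would normally consume the hwcaps this patch starts reporting. The bit values match the ARM_HWCAP_ARM_*/ARM_HWCAP2_ARM_* enums added earlier in the series; getauxval() and AT_HWCAP2 are the standard glibc interface, not anything QEMU-specific:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_ASIMDDP (1 << 24)   /* same value as ARM_HWCAP_ARM_ASIMDDP */
    #define HWCAP2_SB     (1 << 5)    /* same value as ARM_HWCAP2_ARM_SB */

    int main(void)
    {
        unsigned long hwcap  = getauxval(AT_HWCAP);
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("asimddp: %s\n", (hwcap  & HWCAP_ASIMDDP) ? "yes" : "no");
        printf("sb:      %s\n", (hwcap2 & HWCAP2_SB)     ? "yes" : "no");
        return 0;
    }
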
New patch | |||
---|---|---|---|
1 | Update our AArch64 ID register field definitions from the 2023-06 | ||
2 | system register XML release: | ||
3 | https://developer.arm.com/documentation/ddi0601/2023-06/ | ||
1 | 4 | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/arm/cpu.h | 23 +++++++++++++++++++++++ | ||
9 | 1 file changed, 23 insertions(+) | ||
10 | |||
11 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/arm/cpu.h | ||
14 | +++ b/target/arm/cpu.h | ||
15 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64ISAR0, SHA1, 8, 4) | ||
16 | FIELD(ID_AA64ISAR0, SHA2, 12, 4) | ||
17 | FIELD(ID_AA64ISAR0, CRC32, 16, 4) | ||
18 | FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) | ||
19 | +FIELD(ID_AA64ISAR0, TME, 24, 4) | ||
20 | FIELD(ID_AA64ISAR0, RDM, 28, 4) | ||
21 | FIELD(ID_AA64ISAR0, SHA3, 32, 4) | ||
22 | FIELD(ID_AA64ISAR0, SM3, 36, 4) | ||
23 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64ISAR2, APA3, 12, 4) | ||
24 | FIELD(ID_AA64ISAR2, MOPS, 16, 4) | ||
25 | FIELD(ID_AA64ISAR2, BC, 20, 4) | ||
26 | FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4) | ||
27 | +FIELD(ID_AA64ISAR2, CLRBHB, 28, 4) | ||
28 | +FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4) | ||
29 | +FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4) | ||
30 | +FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4) | ||
31 | +FIELD(ID_AA64ISAR2, RPRFM, 48, 4) | ||
32 | +FIELD(ID_AA64ISAR2, CSSC, 52, 4) | ||
33 | +FIELD(ID_AA64ISAR2, ATS1A, 60, 4) | ||
34 | |||
35 | FIELD(ID_AA64PFR0, EL0, 0, 4) | ||
36 | FIELD(ID_AA64PFR0, EL1, 4, 4) | ||
37 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64PFR1, SME, 24, 4) | ||
38 | FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4) | ||
39 | FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4) | ||
40 | FIELD(ID_AA64PFR1, NMI, 36, 4) | ||
41 | +FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4) | ||
42 | +FIELD(ID_AA64PFR1, GCS, 44, 4) | ||
43 | +FIELD(ID_AA64PFR1, THE, 48, 4) | ||
44 | +FIELD(ID_AA64PFR1, MTEX, 52, 4) | ||
45 | +FIELD(ID_AA64PFR1, DF2, 56, 4) | ||
46 | +FIELD(ID_AA64PFR1, PFAR, 60, 4) | ||
47 | |||
48 | FIELD(ID_AA64MMFR0, PARANGE, 0, 4) | ||
49 | FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) | ||
50 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64MMFR1, AFP, 44, 4) | ||
51 | FIELD(ID_AA64MMFR1, NTLBPA, 48, 4) | ||
52 | FIELD(ID_AA64MMFR1, TIDCP1, 52, 4) | ||
53 | FIELD(ID_AA64MMFR1, CMOW, 56, 4) | ||
54 | +FIELD(ID_AA64MMFR1, ECBHB, 60, 4) | ||
55 | |||
56 | FIELD(ID_AA64MMFR2, CNP, 0, 4) | ||
57 | FIELD(ID_AA64MMFR2, UAO, 4, 4) | ||
58 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) | ||
59 | FIELD(ID_AA64DFR0, TRACEVER, 4, 4) | ||
60 | FIELD(ID_AA64DFR0, PMUVER, 8, 4) | ||
61 | FIELD(ID_AA64DFR0, BRPS, 12, 4) | ||
62 | +FIELD(ID_AA64DFR0, PMSS, 16, 4) | ||
63 | FIELD(ID_AA64DFR0, WRPS, 20, 4) | ||
64 | +FIELD(ID_AA64DFR0, SEBEP, 24, 4) | ||
65 | FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) | ||
66 | FIELD(ID_AA64DFR0, PMSVER, 32, 4) | ||
67 | FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) | ||
68 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) | ||
69 | FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4) | ||
70 | FIELD(ID_AA64DFR0, MTPMU, 48, 4) | ||
71 | FIELD(ID_AA64DFR0, BRBE, 52, 4) | ||
72 | +FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4) | ||
73 | FIELD(ID_AA64DFR0, HPMN0, 60, 4) | ||
74 | |||
75 | FIELD(ID_AA64ZFR0, SVEVER, 0, 4) | ||
76 | FIELD(ID_AA64ZFR0, AES, 4, 4) | ||
77 | FIELD(ID_AA64ZFR0, BITPERM, 16, 4) | ||
78 | FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4) | ||
79 | +FIELD(ID_AA64ZFR0, B16B16, 24, 4) | ||
80 | FIELD(ID_AA64ZFR0, SHA3, 32, 4) | ||
81 | FIELD(ID_AA64ZFR0, SM4, 40, 4) | ||
82 | FIELD(ID_AA64ZFR0, I8MM, 44, 4) | ||
83 | @@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64ZFR0, F32MM, 52, 4) | ||
84 | FIELD(ID_AA64ZFR0, F64MM, 56, 4) | ||
85 | |||
86 | FIELD(ID_AA64SMFR0, F32F32, 32, 1) | ||
87 | +FIELD(ID_AA64SMFR0, BI32I32, 33, 1) | ||
88 | FIELD(ID_AA64SMFR0, B16F32, 34, 1) | ||
89 | FIELD(ID_AA64SMFR0, F16F32, 35, 1) | ||
90 | FIELD(ID_AA64SMFR0, I8I32, 36, 4) | ||
91 | +FIELD(ID_AA64SMFR0, F16F16, 42, 1) | ||
92 | +FIELD(ID_AA64SMFR0, B16B16, 43, 1) | ||
93 | +FIELD(ID_AA64SMFR0, I16I32, 44, 4) | ||
94 | FIELD(ID_AA64SMFR0, F64F64, 48, 1) | ||
95 | FIELD(ID_AA64SMFR0, I16I64, 52, 4) | ||
96 | FIELD(ID_AA64SMFR0, SMEVER, 56, 4) | ||
97 | -- | ||
98 | 2.34.1 |
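
These FIELD() definitions feed QEMU's registerfields.h helpers, and later patches in this series read them back with FIELD_EX64(). Below is a simplified stand-in for those macros, only to make concrete what a line such as FIELD(ID_AA64ISAR2, MOPS, 16, 4) provides; the real macros also generate _MASK constants and deposit helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Cut-down imitation of hw/registerfields.h, for illustration only. */
    #define FIELD(reg, fld, shift, length) \
        enum { R_##reg##_##fld##_SHIFT = (shift), R_##reg##_##fld##_LENGTH = (length) };
    #define FIELD_EX64(value, reg, fld) \
        (((value) >> R_##reg##_##fld##_SHIFT) & ((1ULL << R_##reg##_##fld##_LENGTH) - 1))

    FIELD(ID_AA64ISAR2, MOPS, 16, 4)
    FIELD(ID_AA64ISAR2, BC, 20, 4)

    int main(void)
    {
        uint64_t id_aa64isar2 = 0x0000000000110000ULL;   /* MOPS = 1, BC = 1 */
        printf("MOPS=%llu BC=%llu\n",
               (unsigned long long)FIELD_EX64(id_aa64isar2, ID_AA64ISAR2, MOPS),
               (unsigned long long)FIELD_EX64(id_aa64isar2, ID_AA64ISAR2, BC));
        return 0;
    }
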
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | For user-only mode we reveal a subset of the AArch64 ID registers |
---|---|---|---|
2 | to the guest, to emulate the kernel's trap-and-emulate-ID-regs | ||
3 | handling. Update the feature bit masks to match upstream kernel | ||
4 | commit a48fa7efaf1161c1c. | ||
2 | 5 | ||
3 | Access to many of the special registers is enabled or disabled | 6 | None of these features are yet implemented by QEMU, so this |
4 | by ACTLR_EL[23], which we implement as constant 0, which means | 7 | doesn't yet have a behavioural change, but implementation of |
5 | that all writes outside EL3 should trap. | 8 | FEAT_MOPS and FEAT_HBC is imminent. |
6 | 9 | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Message-id: 20230811214031.171020-7-richard.henderson@linaro.org | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | 12 | --- |
12 | target/arm/cpregs.h | 2 ++ | 13 | target/arm/helper.c | 11 ++++++++++- |
13 | target/arm/helper.c | 4 ++-- | 14 | tests/tcg/aarch64/sysregs.c | 4 ++-- |
14 | target/arm/tcg/cpu64.c | 46 +++++++++++++++++++++++++++++++++--------- | 15 | 2 files changed, 12 insertions(+), 3 deletions(-) |
15 | 3 files changed, 41 insertions(+), 11 deletions(-) | ||
16 | 16 | ||
17 | diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/cpregs.h | ||
20 | +++ b/target/arm/cpregs.h | ||
21 | @@ -XXX,XX +XXX,XX @@ static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { } | ||
22 | void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); | ||
23 | #endif | ||
24 | |||
25 | +CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool); | ||
26 | + | ||
27 | #endif /* TARGET_ARM_CPREGS_H */ | ||
28 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 17 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
29 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/helper.c | 19 | --- a/target/arm/helper.c |
31 | +++ b/target/arm/helper.c | 20 | +++ b/target/arm/helper.c |
32 | @@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, | 21 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) |
33 | } | 22 | R_ID_AA64ZFR0_F64MM_MASK }, |
34 | 23 | { .name = "ID_AA64SMFR0_EL1", | |
35 | /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ | 24 | .exported_bits = R_ID_AA64SMFR0_F32F32_MASK | |
36 | -static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, | 25 | + R_ID_AA64SMFR0_BI32I32_MASK | |
37 | - bool isread) | 26 | R_ID_AA64SMFR0_B16F32_MASK | |
38 | +CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, | 27 | R_ID_AA64SMFR0_F16F32_MASK | |
39 | + bool isread) | 28 | R_ID_AA64SMFR0_I8I32_MASK | |
40 | { | 29 | + R_ID_AA64SMFR0_F16F16_MASK | |
41 | if (arm_current_el(env) == 1) { | 30 | + R_ID_AA64SMFR0_B16B16_MASK | |
42 | uint64_t trap = isread ? HCR_TRVM : HCR_TVM; | 31 | + R_ID_AA64SMFR0_I16I32_MASK | |
43 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | 32 | R_ID_AA64SMFR0_F64F64_MASK | |
33 | R_ID_AA64SMFR0_I16I64_MASK | | ||
34 | + R_ID_AA64SMFR0_SMEVER_MASK | | ||
35 | R_ID_AA64SMFR0_FA64_MASK }, | ||
36 | { .name = "ID_AA64MMFR0_EL1", | ||
37 | .exported_bits = R_ID_AA64MMFR0_ECV_MASK, | ||
38 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | ||
39 | .exported_bits = R_ID_AA64ISAR2_WFXT_MASK | | ||
40 | R_ID_AA64ISAR2_RPRES_MASK | | ||
41 | R_ID_AA64ISAR2_GPA3_MASK | | ||
42 | - R_ID_AA64ISAR2_APA3_MASK }, | ||
43 | + R_ID_AA64ISAR2_APA3_MASK | | ||
44 | + R_ID_AA64ISAR2_MOPS_MASK | | ||
45 | + R_ID_AA64ISAR2_BC_MASK | | ||
46 | + R_ID_AA64ISAR2_RPRFM_MASK | | ||
47 | + R_ID_AA64ISAR2_CSSC_MASK }, | ||
48 | { .name = "ID_AA64ISAR*_EL1_RESERVED", | ||
49 | .is_glob = true }, | ||
50 | }; | ||
51 | diff --git a/tests/tcg/aarch64/sysregs.c b/tests/tcg/aarch64/sysregs.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | 52 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/target/arm/tcg/cpu64.c | 53 | --- a/tests/tcg/aarch64/sysregs.c |
46 | +++ b/target/arm/tcg/cpu64.c | 54 | +++ b/tests/tcg/aarch64/sysregs.c |
47 | @@ -XXX,XX +XXX,XX @@ static void aarch64_a64fx_initfn(Object *obj) | 55 | @@ -XXX,XX +XXX,XX @@ int main(void) |
48 | /* TODO: Add A64FX specific HPC extension registers */ | 56 | */ |
49 | } | 57 | get_cpu_reg_check_mask(id_aa64isar0_el1, _m(f0ff,ffff,f0ff,fff0)); |
50 | 58 | get_cpu_reg_check_mask(id_aa64isar1_el1, _m(00ff,f0ff,ffff,ffff)); | |
51 | +static CPAccessResult access_actlr_w(CPUARMState *env, const ARMCPRegInfo *r, | 59 | - get_cpu_reg_check_mask(SYS_ID_AA64ISAR2_EL1, _m(0000,0000,0000,ffff)); |
52 | + bool read) | 60 | + get_cpu_reg_check_mask(SYS_ID_AA64ISAR2_EL1, _m(00ff,0000,00ff,ffff)); |
53 | +{ | 61 | /* TGran4 & TGran64 as pegged to -1 */ |
54 | + if (!read) { | 62 | get_cpu_reg_check_mask(id_aa64mmfr0_el1, _m(f000,0000,ff00,0000)); |
55 | + int el = arm_current_el(env); | 63 | get_cpu_reg_check_mask(id_aa64mmfr1_el1, _m(0000,f000,0000,0000)); |
56 | + | 64 | @@ -XXX,XX +XXX,XX @@ int main(void) |
57 | + /* Because ACTLR_EL2 is constant 0, writes below EL2 trap to EL2. */ | 65 | get_cpu_reg_check_mask(id_aa64dfr0_el1, _m(0000,0000,0000,0006)); |
58 | + if (el < 2 && arm_is_el2_enabled(env)) { | 66 | get_cpu_reg_check_zero(id_aa64dfr1_el1); |
59 | + return CP_ACCESS_TRAP_EL2; | 67 | get_cpu_reg_check_mask(SYS_ID_AA64ZFR0_EL1, _m(0ff0,ff0f,00ff,00ff)); |
60 | + } | 68 | - get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(80f1,00fd,0000,0000)); |
61 | + /* Because ACTLR_EL3 is constant 0, writes below EL3 trap to EL3. */ | 69 | + get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(8ff1,fcff,0000,0000)); |
62 | + if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { | 70 | |
63 | + return CP_ACCESS_TRAP_EL3; | 71 | get_cpu_reg_check_zero(id_aa64afr0_el1); |
64 | + } | 72 | get_cpu_reg_check_zero(id_aa64afr1_el1); |
65 | + } | ||
66 | + return CP_ACCESS_OK; | ||
67 | +} | ||
68 | + | ||
69 | static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = { | ||
70 | { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64, | ||
71 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0, | ||
72 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
73 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
74 | + /* Traps and enables are the same as for TCR_EL1. */ | ||
75 | + .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, }, | ||
76 | { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64, | ||
77 | .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0, | ||
78 | .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
79 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = { | ||
80 | .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
81 | { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, | ||
82 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0, | ||
83 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
84 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
85 | + .accessfn = access_actlr_w }, | ||
86 | { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64, | ||
87 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1, | ||
88 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
89 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
90 | + .accessfn = access_actlr_w }, | ||
91 | { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64, | ||
92 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2, | ||
93 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
94 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
95 | + .accessfn = access_actlr_w }, | ||
96 | /* | ||
97 | * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU | ||
98 | * (and in particular its system registers). | ||
99 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = { | ||
100 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 }, | ||
101 | { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, | ||
102 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4, | ||
103 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 }, | ||
104 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010, | ||
105 | + .accessfn = access_actlr_w }, | ||
106 | { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64, | ||
107 | .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1, | ||
108 | .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
109 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = { | ||
110 | .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
111 | { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64, | ||
112 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7, | ||
113 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
114 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
115 | + .accessfn = access_actlr_w }, | ||
116 | { .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64, | ||
117 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2, | ||
118 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
119 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
120 | + .accessfn = access_actlr_w }, | ||
121 | { .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64, | ||
122 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1, | ||
123 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
124 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
125 | + .accessfn = access_actlr_w }, | ||
126 | { .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64, | ||
127 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0, | ||
128 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | ||
129 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | ||
130 | + .accessfn = access_actlr_w }, | ||
131 | }; | ||
132 | |||
133 | static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu) | ||
134 | -- | 73 | -- |
135 | 2.34.1 | 74 | 2.34.1 |
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | FEAT_HBC (Hinted conditional branches) provides a new instruction |
---|---|---|---|
2 | BC.cond, which behaves exactly like the existing B.cond except | ||
3 | that it provides a hint to the branch predictor about the | ||
4 | likely behaviour of the branch. | ||
2 | 5 | ||
3 | This is a mandatory feature for Armv8.1 architectures but we don't | 6 | Since QEMU does not implement branch prediction, we can treat |
4 | state the feature clearly in our emulation list. Also include | 7 | this identically to B.cond. |
5 | FEAT_CRC32 comment in aarch64_max_tcg_initfn for ease of grepping. | ||
6 | 8 | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 10 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | 11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20230824075406.1515566-1-alex.bennee@linaro.org | ||
10 | Cc: qemu-stable@nongnu.org | ||
11 | Message-Id: <20230222110104.3996971-1-alex.bennee@linaro.org> | ||
12 | [PMM: pluralize 'instructions' in docs] | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | --- | 12 | --- |
15 | docs/system/arm/emulation.rst | 1 + | 13 | docs/system/arm/emulation.rst | 1 + |
16 | target/arm/tcg/cpu64.c | 2 +- | 14 | target/arm/cpu.h | 5 +++++ |
17 | 2 files changed, 2 insertions(+), 1 deletion(-) | 15 | target/arm/tcg/a64.decode | 3 ++- |
16 | linux-user/elfload.c | 1 + | ||
17 | target/arm/tcg/cpu64.c | 4 ++++ | ||
18 | target/arm/tcg/translate-a64.c | 4 ++++ | ||
19 | 6 files changed, 17 insertions(+), 1 deletion(-) | ||
18 | 20 | ||
19 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst | 21 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst |
20 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/docs/system/arm/emulation.rst | 23 | --- a/docs/system/arm/emulation.rst |
22 | +++ b/docs/system/arm/emulation.rst | 24 | +++ b/docs/system/arm/emulation.rst |
23 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: | 25 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: |
24 | - FEAT_BBM at level 2 (Translation table break-before-make levels) | 26 | - FEAT_FlagM2 (Enhancements to flag manipulation instructions) |
25 | - FEAT_BF16 (AArch64 BFloat16 instructions) | 27 | - FEAT_GTG (Guest translation granule size) |
26 | - FEAT_BTI (Branch Target Identification) | 28 | - FEAT_HAFDBS (Hardware management of the access flag and dirty bit state) |
27 | +- FEAT_CRC32 (CRC32 instructions) | 29 | +- FEAT_HBC (Hinted conditional branches) |
28 | - FEAT_CSV2 (Cache speculation variant 2) | 30 | - FEAT_HCX (Support for the HCRX_EL2 register) |
29 | - FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1) | 31 | - FEAT_HPDS (Hierarchical permission disables) |
30 | - FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2) | 32 | - FEAT_HPDS2 (Translation table page-based hardware attributes) |
33 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/target/arm/cpu.h | ||
36 | +++ b/target/arm/cpu.h | ||
37 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) | ||
38 | return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; | ||
39 | } | ||
40 | |||
41 | +static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id) | ||
42 | +{ | ||
43 | + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0; | ||
44 | +} | ||
45 | + | ||
46 | static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) | ||
47 | { | ||
48 | return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; | ||
49 | diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/tcg/a64.decode | ||
52 | +++ b/target/arm/tcg/a64.decode | ||
53 | @@ -XXX,XX +XXX,XX @@ CBZ sf:1 011010 nz:1 ................... rt:5 &cbz imm=%imm19 | ||
54 | |||
55 | TBZ . 011011 nz:1 ..... .............. rt:5 &tbz imm=%imm14 bitpos=%imm31_19 | ||
56 | |||
57 | -B_cond 0101010 0 ................... 0 cond:4 imm=%imm19 | ||
58 | +# B.cond and BC.cond | ||
59 | +B_cond 0101010 0 ................... c:1 cond:4 imm=%imm19 | ||
60 | |||
61 | BR 1101011 0000 11111 000000 rn:5 00000 &r | ||
62 | BLR 1101011 0001 11111 000000 rn:5 00000 &r | ||
63 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/linux-user/elfload.c | ||
66 | +++ b/linux-user/elfload.c | ||
67 | @@ -XXX,XX +XXX,XX @@ uint32_t get_elf_hwcap2(void) | ||
68 | GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); | ||
69 | GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); | ||
70 | GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); | ||
71 | + GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); | ||
72 | |||
73 | return hwcaps; | ||
74 | } | ||
31 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | 75 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c |
32 | index XXXXXXX..XXXXXXX 100644 | 76 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/target/arm/tcg/cpu64.c | 77 | --- a/target/arm/tcg/cpu64.c |
34 | +++ b/target/arm/tcg/cpu64.c | 78 | +++ b/target/arm/tcg/cpu64.c |
35 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) | 79 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) |
36 | t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ | 80 | t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ |
37 | t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ | 81 | cpu->isar.id_aa64isar1 = t; |
38 | t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ | 82 | |
39 | - t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); | 83 | + t = cpu->isar.id_aa64isar2; |
40 | + t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); /* FEAT_CRC32 */ | 84 | + t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ |
41 | t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */ | 85 | + cpu->isar.id_aa64isar2 = t; |
42 | t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */ | 86 | + |
43 | t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */ | 87 | t = cpu->isar.id_aa64pfr0; |
88 | t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ | ||
89 | t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ | ||
90 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/target/arm/tcg/translate-a64.c | ||
93 | +++ b/target/arm/tcg/translate-a64.c | ||
94 | @@ -XXX,XX +XXX,XX @@ static bool trans_TBZ(DisasContext *s, arg_tbz *a) | ||
95 | |||
96 | static bool trans_B_cond(DisasContext *s, arg_B_cond *a) | ||
97 | { | ||
98 | + /* BC.cond is only present with FEAT_HBC */ | ||
99 | + if (a->c && !dc_isar_feature(aa64_hbc, s)) { | ||
100 | + return false; | ||
101 | + } | ||
102 | reset_btype(s); | ||
103 | if (a->cond < 0x0e) { | ||
104 | /* genuinely conditional branches */ | ||
44 | -- | 105 | -- |
45 | 2.34.1 | 106 | 2.34.1 |
46 | 107 | ||
47 | 108 |
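
The one-line change to a64.decode above folds B.cond and BC.cond into a single pattern: bit 4, fixed at 0 in the long-standing B.cond encoding, becomes the 'c' flag. QEMU's generated decoder extracts the fields automatically; the hand-written sketch below only makes the encoding concrete and is not taken from QEMU:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool decode_cond_branch(uint32_t insn, bool *is_bc,
                                   unsigned *cond, int64_t *offset)
    {
        if ((insn & 0xff000000u) != 0x54000000u) {
            return false;                        /* not B.cond / BC.cond */
        }
        *is_bc = (insn >> 4) & 1;                /* c == 1 means BC.cond (FEAT_HBC) */
        *cond = insn & 0xf;
        uint32_t imm19 = (insn >> 5) & 0x7ffff;  /* signed word offset in bits [23:5] */
        *offset = ((int32_t)(imm19 << 13) >> 13) * 4;
        return true;
    }

    int main(void)
    {
        bool is_bc; unsigned cond; int64_t off;
        /* 0x54000061: cond = 0b0001 (NE), imm19 = 3, bit 4 clear, i.e. B.NE #+12 */
        if (decode_cond_branch(0x54000061u, &is_bc, &cond, &off)) {
            printf("%s cond=%u offset=%lld\n", is_bc ? "BC" : "B", cond, (long long)off);
        }
        return 0;
    }
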
1 | In the m48t59 device we almost always use 64-bit arithmetic when | 1 | The allocation_tag_mem() function takes an argument tag_size, |
---|---|---|---|
2 | dealing with time_t deltas. The one exception is in set_alarm(), | 2 | but it never uses it. Remove the argument. In mte_probe_int() |
3 | which currently uses a plain 'int' to hold the difference between two | 3 | in particular this also lets us delete the code computing |
4 | time_t values. Switch to int64_t instead to avoid any possible | 4 | the value we were passing in. |
5 | overflow issues. | ||
6 | 5 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
9 | --- | 9 | --- |
10 | hw/rtc/m48t59.c | 2 +- | 10 | target/arm/tcg/mte_helper.c | 42 +++++++++++++------------------------ |
11 | 1 file changed, 1 insertion(+), 1 deletion(-) | 11 | 1 file changed, 14 insertions(+), 28 deletions(-) |
12 | 12 | ||
13 | diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c | 13 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c |
14 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/hw/rtc/m48t59.c | 15 | --- a/target/arm/tcg/mte_helper.c |
16 | +++ b/hw/rtc/m48t59.c | 16 | +++ b/target/arm/tcg/mte_helper.c |
17 | @@ -XXX,XX +XXX,XX @@ static void alarm_cb (void *opaque) | 17 | @@ -XXX,XX +XXX,XX @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) |
18 | 18 | * @ptr_access: the access to use for the virtual address | |
19 | static void set_alarm(M48t59State *NVRAM) | 19 | * @ptr_size: the number of bytes in the normal memory access |
20 | * @tag_access: the access to use for the tag memory | ||
21 | - * @tag_size: the number of bytes in the tag memory access | ||
22 | * @ra: the return address for exception handling | ||
23 | * | ||
24 | * Our tag memory is formatted as a sequence of little-endian nibbles. | ||
25 | @@ -XXX,XX +XXX,XX @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) | ||
26 | * a pointer to the corresponding tag byte. Exit with exception if the | ||
27 | * virtual address is not accessible for @ptr_access. | ||
28 | * | ||
29 | - * The @ptr_size and @tag_size values may not have an obvious relation | ||
30 | - * due to the alignment of @ptr, and the number of tag checks required. | ||
31 | - * | ||
32 | * If there is no tag storage corresponding to @ptr, return NULL. | ||
33 | */ | ||
34 | static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
35 | uint64_t ptr, MMUAccessType ptr_access, | ||
36 | int ptr_size, MMUAccessType tag_access, | ||
37 | - int tag_size, uintptr_t ra) | ||
38 | + uintptr_t ra) | ||
20 | { | 39 | { |
21 | - int diff; | 40 | #ifdef CONFIG_USER_ONLY |
22 | + int64_t diff; | 41 | uint64_t clean_ptr = useronly_clean_ptr(ptr); |
23 | if (NVRAM->alrm_timer != NULL) { | 42 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt) |
24 | timer_del(NVRAM->alrm_timer); | 43 | |
25 | diff = qemu_timedate_diff(&NVRAM->alarm) - NVRAM->time_offset; | 44 | /* Trap if accessing an invalid page. */ |
45 | mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1, | ||
46 | - MMU_DATA_LOAD, 1, GETPC()); | ||
47 | + MMU_DATA_LOAD, GETPC()); | ||
48 | |||
49 | /* Load if page supports tags. */ | ||
50 | if (mem) { | ||
51 | @@ -XXX,XX +XXX,XX @@ static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt, | ||
52 | |||
53 | /* Trap if accessing an invalid page. */ | ||
54 | mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE, | ||
55 | - MMU_DATA_STORE, 1, ra); | ||
56 | + MMU_DATA_STORE, ra); | ||
57 | |||
58 | /* Store if page supports tags. */ | ||
59 | if (mem) { | ||
60 | @@ -XXX,XX +XXX,XX @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, | ||
61 | if (ptr & TAG_GRANULE) { | ||
62 | /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */ | ||
63 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, | ||
64 | - TAG_GRANULE, MMU_DATA_STORE, 1, ra); | ||
65 | + TAG_GRANULE, MMU_DATA_STORE, ra); | ||
66 | mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE, | ||
67 | MMU_DATA_STORE, TAG_GRANULE, | ||
68 | - MMU_DATA_STORE, 1, ra); | ||
69 | + MMU_DATA_STORE, ra); | ||
70 | |||
71 | /* Store if page(s) support tags. */ | ||
72 | if (mem1) { | ||
73 | @@ -XXX,XX +XXX,XX @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, | ||
74 | } else { | ||
75 | /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */ | ||
76 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, | ||
77 | - 2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra); | ||
78 | + 2 * TAG_GRANULE, MMU_DATA_STORE, ra); | ||
79 | if (mem1) { | ||
80 | tag |= tag << 4; | ||
81 | qatomic_set(mem1, tag); | ||
82 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) | ||
83 | |||
84 | /* Trap if accessing an invalid page. */ | ||
85 | tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, | ||
86 | - gm_bs_bytes, MMU_DATA_LOAD, | ||
87 | - gm_bs_bytes / (2 * TAG_GRANULE), ra); | ||
88 | + gm_bs_bytes, MMU_DATA_LOAD, ra); | ||
89 | |||
90 | /* The tag is squashed to zero if the page does not support tags. */ | ||
91 | if (!tag_mem) { | ||
92 | @@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | ||
93 | |||
94 | /* Trap if accessing an invalid page. */ | ||
95 | tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, | ||
96 | - gm_bs_bytes, MMU_DATA_LOAD, | ||
97 | - gm_bs_bytes / (2 * TAG_GRANULE), ra); | ||
98 | + gm_bs_bytes, MMU_DATA_LOAD, ra); | ||
99 | |||
100 | /* | ||
101 | * Tag store only happens if the page support tags, | ||
102 | @@ -XXX,XX +XXX,XX @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val) | ||
103 | ptr &= -dcz_bytes; | ||
104 | |||
105 | mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes, | ||
106 | - MMU_DATA_STORE, tag_bytes, ra); | ||
107 | + MMU_DATA_STORE, ra); | ||
108 | if (mem) { | ||
109 | int tag_pair = (val & 0xf) * 0x11; | ||
110 | memset(mem, tag_pair, tag_bytes); | ||
111 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | ||
112 | int mmu_idx, ptr_tag, bit55; | ||
113 | uint64_t ptr_last, prev_page, next_page; | ||
114 | uint64_t tag_first, tag_last; | ||
115 | - uint64_t tag_byte_first, tag_byte_last; | ||
116 | - uint32_t sizem1, tag_count, tag_size, n, c; | ||
117 | + uint32_t sizem1, tag_count, n, c; | ||
118 | uint8_t *mem1, *mem2; | ||
119 | MMUAccessType type; | ||
120 | |||
121 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | ||
122 | tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE); | ||
123 | tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; | ||
124 | |||
125 | - /* Round the bounds to twice the tag granule, and compute the bytes. */ | ||
126 | - tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE); | ||
127 | - tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE); | ||
128 | - | ||
129 | /* Locate the page boundaries. */ | ||
130 | prev_page = ptr & TARGET_PAGE_MASK; | ||
131 | next_page = prev_page + TARGET_PAGE_SIZE; | ||
132 | |||
133 | if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) { | ||
134 | /* Memory access stays on one page. */ | ||
135 | - tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1; | ||
136 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1, | ||
137 | - MMU_DATA_LOAD, tag_size, ra); | ||
138 | + MMU_DATA_LOAD, ra); | ||
139 | if (!mem1) { | ||
140 | return 1; | ||
141 | } | ||
142 | @@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, | ||
143 | n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count); | ||
144 | } else { | ||
145 | /* Memory access crosses to next page. */ | ||
146 | - tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE); | ||
147 | mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr, | ||
148 | - MMU_DATA_LOAD, tag_size, ra); | ||
149 | + MMU_DATA_LOAD, ra); | ||
150 | |||
151 | - tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1; | ||
152 | mem2 = allocation_tag_mem(env, mmu_idx, next_page, type, | ||
153 | ptr_last - next_page + 1, | ||
154 | - MMU_DATA_LOAD, tag_size, ra); | ||
155 | + MMU_DATA_LOAD, ra); | ||
156 | |||
157 | /* | ||
158 | * Perform all of the comparisons. | ||
159 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr) | ||
160 | mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
161 | (void) probe_write(env, ptr, 1, mmu_idx, ra); | ||
162 | mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE, | ||
163 | - dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra); | ||
164 | + dcz_bytes, MMU_DATA_LOAD, ra); | ||
165 | if (!mem) { | ||
166 | goto done; | ||
167 | } | ||
26 | -- | 168 | -- |
27 | 2.34.1 | 169 | 2.34.1 |
28 | 170 | ||
29 | 171 |
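
The left-hand patch here, like the aspeed_rtc change earlier in the series, closes the same arithmetic hole: the difference between two time_t values can exceed what a 32-bit int holds. A standalone illustration, assuming a platform with 64-bit time_t (illustrative only, not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        /* An alarm set roughly 70 years after 'now': the delta of about
         * 2.2e9 seconds no longer fits in a 32-bit int. */
        time_t now = 0;
        time_t alarm = 70LL * 365 * 24 * 3600;

        int     diff32 = (int)(alarm - now);      /* truncated (implementation-defined) */
        int64_t diff64 = (int64_t)(alarm - now);

        printf("as int: %d, as int64_t: %" PRId64 "\n", diff32, diff64);
        return 0;
    }
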
New patch | |||
---|---|---|---|
1 | The LDRT/STRT "unprivileged load/store" instructions behave like | ||
2 | normal ones if executed at EL0. We handle this correctly for | ||
3 | the load/store semantics, but get the MTE checking wrong. | ||
1 | 4 | ||
5 | We always look at s->mte_active[is_unpriv] to see whether we should | ||
6 | be doing MTE checks, but in hflags.c when we set the TB flags that | ||
7 | will be used to fill the mte_active[] array we only set the | ||
8 | MTE0_ACTIVE bit if UNPRIV is true (i.e. we are not at EL0). | ||
9 | |||
10 | This means that a LDRT at EL0 will see s->mte_active[1] as 0, | ||
11 | and will not do MTE checks even when MTE is enabled. | ||
12 | |||
13 | To avoid the translate-time code having to do an explicit check on | ||
14 | s->unpriv to see if it is OK to index into the mte_active[] array, | ||
15 | duplicate MTE_ACTIVE into MTE0_ACTIVE when UNPRIV is false. | ||
16 | |||
17 | (This isn't a very serious bug because generally nobody executes | ||
18 | LDRT/STRT at EL0, since they have no use there.) |
19 | |||
20 | Cc: qemu-stable@nongnu.org | ||
21 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
22 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
23 | Message-id: 20230912140434.1333369-2-peter.maydell@linaro.org | ||
24 | --- | ||
25 | target/arm/tcg/hflags.c | 9 +++++++++ | ||
26 | 1 file changed, 9 insertions(+) | ||
27 | |||
28 | diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/target/arm/tcg/hflags.c | ||
31 | +++ b/target/arm/tcg/hflags.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, | ||
33 | && !(env->pstate & PSTATE_TCO) | ||
34 | && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) { | ||
35 | DP_TBFLAG_A64(flags, MTE_ACTIVE, 1); | ||
36 | + if (!EX_TBFLAG_A64(flags, UNPRIV)) { | ||
37 | + /* | ||
38 | + * In non-unpriv contexts (eg EL0), unpriv load/stores | ||
39 | + * act like normal ones; duplicate the MTE info to | ||
40 | + * avoid translate-a64.c having to check UNPRIV to see | ||
41 | + * whether it is OK to index into MTE_ACTIVE[]. | ||
42 | + */ | ||
43 | + DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1); | ||
44 | + } | ||
45 | } | ||
46 | } | ||
47 | /* And again for unprivileged accesses, if required. */ | ||
48 | -- | ||
49 | 2.34.1 |
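
A condensed model of what the hunk above changes, with all of the hflags machinery stripped away. The struct, parameter names and helper below are invented for the sketch; only the shape of the fix (the unpriv slot mirroring the normal one when there is no real unpriv state) corresponds to the patch:

    #include <stdbool.h>

    struct tbflags {
        bool unpriv;            /* true when LDRT/STRT really target EL0 from EL1 */
        bool mte_active[2];     /* [0] = normal accesses, [1] = unpriv accesses */
    };

    static struct tbflags rebuild_flags(bool at_el0, bool mte_enabled,
                                        bool mte_el0_enabled)
    {
        struct tbflags f = { .unpriv = !at_el0 };

        f.mte_active[0] = mte_enabled;
        if (f.unpriv) {
            f.mte_active[1] = mte_el0_enabled;   /* genuine unpriv access from EL1 */
        } else {
            /* The fix: at EL0 the unpriv slot must mirror the normal one
             * instead of staying at its default of 0. */
            f.mte_active[1] = mte_enabled;
        }
        return f;
    }

    int main(void)
    {
        struct tbflags f = rebuild_flags(true /* at EL0 */, true, false);
        return f.mte_active[1] ? 0 : 1;   /* 0 with the fix applied */
    }
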
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | FEAT_MOPS defines a handful of new enable bits: |
---|---|---|---|
2 | * HCRX_EL2.MSCEn, SCTLR_EL1.MSCEn, SCTLR_EL2.MSCEn: |
3 | define whether the new insns should UNDEF or not | ||
4 | * HCRX_EL2.MCE2: defines whether memops exceptions from | ||
5 | EL1 should be taken to EL1 or EL2 | ||
2 | 6 | ||
3 | This value is only 4 bits wide. | 7 | Since we don't sanitise what bits can be written for the SCTLR |
8 | registers, we only need to handle the new bits in HCRX_EL2, and | ||
9 | define SCTLR_MSCEN for the new SCTLR bit value. | ||
4 | 10 | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 11 | The precedence of "HCRX bits acts as 0 if SCR_EL3.HXEn is 0" versus |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 12 | "bit acts as 1 if EL2 disabled" is not clear from the register |
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 13 | definition text, but it is clear in the CheckMOPSEnabled() |
8 | Message-id: 20230811214031.171020-2-richard.henderson@linaro.org | 14 | pseudocode(), so we follow that. We'll have to check whether other |
15 | bits we need to implement in future follow the same logic or not. | ||
16 | |||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
18 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | Message-id: 20230912140434.1333369-3-peter.maydell@linaro.org | ||
10 | --- | 20 | --- |
11 | target/arm/cpu.h | 3 ++- | 21 | target/arm/cpu.h | 6 ++++++ |
12 | 1 file changed, 2 insertions(+), 1 deletion(-) | 22 | target/arm/helper.c | 28 +++++++++++++++++++++------- |
23 | 2 files changed, 27 insertions(+), 7 deletions(-) | ||
13 | 24 | ||
14 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 25 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
15 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/cpu.h | 27 | --- a/target/arm/cpu.h |
17 | +++ b/target/arm/cpu.h | 28 | +++ b/target/arm/cpu.h |
18 | @@ -XXX,XX +XXX,XX @@ struct ArchCPU { | 29 | @@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu); |
19 | bool prop_lpa2; | 30 | #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ |
20 | 31 | #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ | |
21 | /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ | 32 | #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */ |
22 | - uint32_t dcz_blocksize; | 33 | +#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */ |
23 | + uint8_t dcz_blocksize; | 34 | #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ |
35 | #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ | ||
36 | #define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */ | ||
37 | @@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) | ||
38 | return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; | ||
39 | } | ||
40 | |||
41 | +static inline bool isar_feature_aa64_mops(const ARMISARegisters *id) | ||
42 | +{ | ||
43 | + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); | ||
44 | +} | ||
24 | + | 45 | + |
25 | uint64_t rvbar_prop; /* Property/input signals. */ | 46 | /* |
26 | 47 | * Feature tests for "does this exist in either 32-bit or 64-bit?" | |
27 | /* Configurable aspects of GIC cpu interface (which is part of the CPU) */ | 48 | */ |
49 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/target/arm/helper.c | ||
52 | +++ b/target/arm/helper.c | ||
53 | @@ -XXX,XX +XXX,XX @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, | ||
54 | { | ||
55 | uint64_t valid_mask = 0; | ||
56 | |||
57 | - /* No features adding bits to HCRX are implemented. */ | ||
58 | + /* FEAT_MOPS adds MSCEn and MCE2 */ | ||
59 | + if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { | ||
60 | + valid_mask |= HCRX_MSCEN | HCRX_MCE2; | ||
61 | + } | ||
62 | |||
63 | /* Clear RES0 bits. */ | ||
64 | env->cp15.hcrx_el2 = value & valid_mask; | ||
65 | @@ -XXX,XX +XXX,XX @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env) | ||
66 | { | ||
67 | /* | ||
68 | * The bits in this register behave as 0 for all purposes other than | ||
69 | - * direct reads of the register if: | ||
70 | - * - EL2 is not enabled in the current security state, | ||
71 | - * - SCR_EL3.HXEn is 0. | ||
72 | + * direct reads of the register if SCR_EL3.HXEn is 0. | ||
73 | + * If EL2 is not enabled in the current security state, then the | ||
74 | + * bit may behave as if 0, or as if 1, depending on the bit. | ||
75 | + * For the moment, we treat the EL2-disabled case as taking | ||
76 | + * priority over the HXEn-disabled case. This is true for the only | ||
77 | + * bit for a feature which we implement where the answer is different | ||
78 | + * for the two cases (MSCEn for FEAT_MOPS). | ||
79 | + * This may need to be revisited for future bits. | ||
80 | */ | ||
81 | - if (!arm_is_el2_enabled(env) | ||
82 | - || (arm_feature(env, ARM_FEATURE_EL3) | ||
83 | - && !(env->cp15.scr_el3 & SCR_HXEN))) { | ||
84 | + if (!arm_is_el2_enabled(env)) { | ||
85 | + uint64_t hcrx = 0; | ||
86 | + if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { | ||
87 | + /* MSCEn behaves as 1 if EL2 is not enabled */ | ||
88 | + hcrx |= HCRX_MSCEN; | ||
89 | + } | ||
90 | + return hcrx; | ||
91 | + } | ||
92 | + if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { | ||
93 | return 0; | ||
94 | } | ||
95 | return env->cp15.hcrx_el2; | ||
28 | -- | 96 | -- |
29 | 2.34.1 | 97 | 2.34.1 |
30 | |||
31 |
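
The enable rules described above boil down to a small predicate. The following is a condensed sketch of the CheckMOPSEnabled() logic in plain C, not code taken from QEMU, and it deliberately ignores the HCR_EL2.E2H/TGE special cases:

    #include <stdbool.h>

    /* Effective value of HCRX_EL2.MSCEn, following the precedence the
     * commit message describes: the EL2-disabled case wins over SCR_EL3.HXEn. */
    static bool hcrx_mscen_effective(bool el2_enabled, bool scr_hxen, bool hcrx_mscen)
    {
        if (!el2_enabled) {
            return true;            /* bit behaves as 1 when EL2 is disabled */
        }
        if (!scr_hxen) {
            return false;           /* whole register behaves as 0 */
        }
        return hcrx_mscen;
    }

    static bool mops_enabled(int el, bool el2_enabled, bool scr_hxen,
                             bool hcrx_mscen, bool sctlr_el1_mscen,
                             bool sctlr_el2_mscen)
    {
        bool mscen = hcrx_mscen_effective(el2_enabled, scr_hxen, hcrx_mscen);

        switch (el) {
        case 0:
        case 1:
            return sctlr_el1_mscen && mscen;
        case 2:
            return sctlr_el2_mscen;
        default:
            return true;            /* EL3: no additional enable bit */
        }
    }

    int main(void)
    {
        /* EL1 with EL2 disabled: only SCTLR_EL1.MSCEn matters. */
        return mops_enabled(1, false, false, false, true, false) ? 0 : 1;
    }
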
New patch | |||
---|---|---|---|
1 | In every place that we call the get_a64_user_mem_index() function | ||
2 | we do it like this: | ||
3 | memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); | ||
4 | Refactor so the caller passes in the bool that says whether they | ||
5 | want the 'unpriv' or 'normal' mem_index rather than having to | ||
6 | do the ?: themselves. | ||
1 | 7 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Message-id: 20230912140434.1333369-4-peter.maydell@linaro.org | ||
10 | --- | ||
11 | target/arm/tcg/translate-a64.c | 20 ++++++++++++++------ | ||
12 | 1 file changed, 14 insertions(+), 6 deletions(-) | ||
13 | |||
14 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/tcg/translate-a64.c | ||
17 | +++ b/target/arm/tcg/translate-a64.c | ||
18 | @@ -XXX,XX +XXX,XX @@ void a64_translate_init(void) | ||
19 | } | ||
20 | |||
21 | /* | ||
22 | - * Return the core mmu_idx to use for A64 "unprivileged load/store" insns | ||
23 | + * Return the core mmu_idx to use for A64 load/store insns which | ||
24 | + * have an "unprivileged load/store" variant. Those insns access | ||
25 | + * EL0 if executed from an EL which has control over EL0 (usually | ||
26 | + * EL1) but behave like normal loads and stores if executed from | ||
27 | + * elsewhere (eg EL3). | ||
28 | + * | ||
29 | + * @unpriv : true for the unprivileged encoding; false for the | ||
30 | + * normal encoding (in which case we will return the same | ||
31 | + * thing as get_mem_index()). | ||
32 | */ | ||
33 | -static int get_a64_user_mem_index(DisasContext *s) | ||
34 | +static int get_a64_user_mem_index(DisasContext *s, bool unpriv) | ||
35 | { | ||
36 | /* | ||
37 | * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL, | ||
38 | @@ -XXX,XX +XXX,XX @@ static int get_a64_user_mem_index(DisasContext *s) | ||
39 | */ | ||
40 | ARMMMUIdx useridx = s->mmu_idx; | ||
41 | |||
42 | - if (s->unpriv) { | ||
43 | + if (unpriv && s->unpriv) { | ||
44 | /* | ||
45 | * We have pre-computed the condition for AccType_UNPRIV. | ||
46 | * Therefore we should never get here with a mmu_idx for | ||
47 | @@ -XXX,XX +XXX,XX @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a, | ||
48 | if (!a->p) { | ||
49 | tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset); | ||
50 | } | ||
51 | - memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); | ||
52 | + memidx = get_a64_user_mem_index(s, a->unpriv); | ||
53 | *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store, | ||
54 | a->w || a->rn != 31, | ||
55 | mop, a->unpriv, memidx); | ||
56 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a) | ||
57 | { | ||
58 | bool iss_sf, iss_valid = !a->w; | ||
59 | TCGv_i64 clean_addr, dirty_addr, tcg_rt; | ||
60 | - int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); | ||
61 | + int memidx = get_a64_user_mem_index(s, a->unpriv); | ||
62 | MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); | ||
63 | |||
64 | op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop); | ||
65 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a) | ||
66 | { | ||
67 | bool iss_sf, iss_valid = !a->w; | ||
68 | TCGv_i64 clean_addr, dirty_addr, tcg_rt; | ||
69 | - int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); | ||
70 | + int memidx = get_a64_user_mem_index(s, a->unpriv); | ||
71 | MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); | ||
72 | |||
73 | op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop); | ||
74 | -- | ||
75 | 2.34.1 |
New patch | |||
---|---|---|---|
1 | The FEAT_MOPS memory operations can raise a Memory Copy or Memory Set | ||
2 | exception if a copy or set instruction is executed when the CPU | ||
3 | register state is not correct for that instruction. Define the | ||
4 | usual syn_* function that constructs the syndrome register value | ||
5 | for these exceptions. | ||
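As a hedged aside (not part of this patch), the register numbers packed into such a syndrome can be pulled back out with extract32(); the later SET* patch does exactly this with its mops_destreg()/mops_srcreg()/mops_sizereg() helpers. A minimal sketch, with field positions taken from the syn_mop() encoding below:

    /* Sketch: destreg at [14:10], srcreg at [9:5], sizereg at [4:0]. */
    static int mop_destreg(uint32_t syndrome) { return extract32(syndrome, 10, 5); }
    static int mop_srcreg(uint32_t syndrome)  { return extract32(syndrome, 5, 5); }
    static int mop_sizereg(uint32_t syndrome) { return extract32(syndrome, 0, 5); }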
1 | 6 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Message-id: 20230912140434.1333369-5-peter.maydell@linaro.org | ||
10 | --- | ||
11 | target/arm/syndrome.h | 12 ++++++++++++ | ||
12 | 1 file changed, 12 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/arm/syndrome.h | ||
17 | +++ b/target/arm/syndrome.h | ||
18 | @@ -XXX,XX +XXX,XX @@ enum arm_exception_class { | ||
19 | EC_DATAABORT = 0x24, | ||
20 | EC_DATAABORT_SAME_EL = 0x25, | ||
21 | EC_SPALIGNMENT = 0x26, | ||
22 | + EC_MOP = 0x27, | ||
23 | EC_AA32_FPTRAP = 0x28, | ||
24 | EC_AA64_FPTRAP = 0x2c, | ||
25 | EC_SERROR = 0x2f, | ||
26 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_serror(uint32_t extra) | ||
27 | return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra; | ||
28 | } | ||
29 | |||
30 | +static inline uint32_t syn_mop(bool is_set, bool is_setg, int options, | ||
31 | + bool epilogue, bool wrong_option, bool option_a, | ||
32 | + int destreg, int srcreg, int sizereg) | ||
33 | +{ | ||
34 | + return (EC_MOP << ARM_EL_EC_SHIFT) | ARM_EL_IL | | ||
35 | + (is_set << 24) | (is_setg << 23) | (options << 19) | | ||
36 | + (epilogue << 18) | (wrong_option << 17) | (option_a << 16) | | ||
37 | + (destreg << 10) | (srcreg << 5) | sizereg; | ||
38 | +} | ||
39 | + | ||
40 | + | ||
41 | #endif /* TARGET_ARM_SYNDROME_H */ | ||
42 | -- | ||
43 | 2.34.1 |
New patch | |||
---|---|---|---|
1 | For the FEAT_MOPS operations, the existing allocation_tag_mem() | ||
2 | function almost does what we want, but it will take a watchpoint | ||
3 | exception even for an ra == 0 probe request, and it requires that the | ||
4 | caller guarantee that the memory is accessible. For FEAT_MOPS we | ||
5 | want a function that will not take any kind of exception, and will | ||
6 | return NULL for the not-accessible case. | ||
1 | 7 | ||
8 | Rename allocation_tag_mem() to allocation_tag_mem_probe() and add an | ||
9 | extra 'probe' argument that lets us distinguish these cases; | ||
10 | allocation_tag_mem() is now a wrapper that always passes 'false'. | ||
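For orientation, a minimal sketch of the "pure probe" calling pattern this enables; it mirrors how the later mte_mops_probe() patch uses it, and the surrounding variables are assumed context rather than new API:

    /* probe=true, ra=0: never faults and never takes a watchpoint trap. */
    uint8_t *mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE,
                                            size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        /* page not accessible, or no tag storage: caller handles this itself */
    }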
11 | |||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Message-id: 20230912140434.1333369-6-peter.maydell@linaro.org | ||
15 | --- | ||
16 | target/arm/tcg/mte_helper.c | 48 ++++++++++++++++++++++++++++--------- | ||
17 | 1 file changed, 37 insertions(+), 11 deletions(-) | ||
18 | |||
19 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/target/arm/tcg/mte_helper.c | ||
22 | +++ b/target/arm/tcg/mte_helper.c | ||
23 | @@ -XXX,XX +XXX,XX @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) | ||
24 | } | ||
25 | |||
26 | /** | ||
27 | - * allocation_tag_mem: | ||
28 | + * allocation_tag_mem_probe: | ||
29 | * @env: the cpu environment | ||
30 | * @ptr_mmu_idx: the addressing regime to use for the virtual address | ||
31 | * @ptr: the virtual address for which to look up tag memory | ||
32 | * @ptr_access: the access to use for the virtual address | ||
33 | * @ptr_size: the number of bytes in the normal memory access | ||
34 | * @tag_access: the access to use for the tag memory | ||
35 | + * @probe: true to merely probe, never taking an exception | ||
36 | * @ra: the return address for exception handling | ||
37 | * | ||
38 | * Our tag memory is formatted as a sequence of little-endian nibbles. | ||
39 | @@ -XXX,XX +XXX,XX @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) | ||
40 | * for the higher addr. | ||
41 | * | ||
42 | * Here, resolve the physical address from the virtual address, and return | ||
43 | - * a pointer to the corresponding tag byte. Exit with exception if the | ||
44 | - * virtual address is not accessible for @ptr_access. | ||
45 | + * a pointer to the corresponding tag byte. | ||
46 | * | ||
47 | * If there is no tag storage corresponding to @ptr, return NULL. | ||
48 | + * | ||
49 | + * If the page is inaccessible for @ptr_access, or has a watchpoint, there are | ||
50 | + * three options: | ||
51 | + * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not | ||
52 | + * accessible, and do not take watchpoint traps. The calling code must | ||
53 | + * handle those cases in the right priority compared to MTE traps. | ||
54 | + * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees | ||
55 | + * that the page is going to be accessible. We will take watchpoint traps. | ||
56 | + * (3) probe = false, ra != 0 : non-probe -- we will take both memory access | ||
57 | + * traps and watchpoint traps. | ||
58 | + * (probe = true, ra != 0 is invalid and will assert.) | ||
59 | */ | ||
60 | -static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
61 | - uint64_t ptr, MMUAccessType ptr_access, | ||
62 | - int ptr_size, MMUAccessType tag_access, | ||
63 | - uintptr_t ra) | ||
64 | +static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx, | ||
65 | + uint64_t ptr, MMUAccessType ptr_access, | ||
66 | + int ptr_size, MMUAccessType tag_access, | ||
67 | + bool probe, uintptr_t ra) | ||
68 | { | ||
69 | #ifdef CONFIG_USER_ONLY | ||
70 | uint64_t clean_ptr = useronly_clean_ptr(ptr); | ||
71 | @@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
72 | uint8_t *tags; | ||
73 | uintptr_t index; | ||
74 | |||
75 | + assert(!(probe && ra)); | ||
76 | + | ||
77 | if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) { | ||
78 | cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access, | ||
79 | !(flags & PAGE_VALID), ra); | ||
80 | @@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
81 | * exception for inaccessible pages, and resolves the virtual address | ||
82 | * into the softmmu tlb. | ||
83 | * | ||
84 | - * When RA == 0, this is for mte_probe. The page is expected to be | ||
85 | - * valid. Indicate to probe_access_flags no-fault, then assert that | ||
86 | - * we received a valid page. | ||
87 | + * When RA == 0, this is either a pure probe or a no-fault-expected probe. | ||
88 | + * Indicate to probe_access_flags no-fault, then either return NULL | ||
89 | + * for the pure probe, or assert that we received a valid page for the | ||
90 | + * no-fault-expected probe. | ||
91 | */ | ||
92 | flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx, | ||
93 | ra == 0, &host, &full, ra); | ||
94 | + if (probe && (flags & TLB_INVALID_MASK)) { | ||
95 | + return NULL; | ||
96 | + } | ||
97 | assert(!(flags & TLB_INVALID_MASK)); | ||
98 | |||
99 | /* If the virtual page MemAttr != Tagged, access unchecked. */ | ||
100 | @@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
101 | } | ||
102 | |||
103 | /* Any debug exception has priority over a tag check exception. */ | ||
104 | - if (unlikely(flags & TLB_WATCHPOINT)) { | ||
105 | + if (!probe && unlikely(flags & TLB_WATCHPOINT)) { | ||
106 | int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE; | ||
107 | assert(ra != 0); | ||
108 | cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra); | ||
109 | @@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
110 | #endif | ||
111 | } | ||
112 | |||
113 | +static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, | ||
114 | + uint64_t ptr, MMUAccessType ptr_access, | ||
115 | + int ptr_size, MMUAccessType tag_access, | ||
116 | + uintptr_t ra) | ||
117 | +{ | ||
118 | + return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access, | ||
119 | + ptr_size, tag_access, false, ra); | ||
120 | +} | ||
121 | + | ||
122 | uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm) | ||
123 | { | ||
124 | uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16); | ||
125 | -- | ||
126 | 2.34.1 |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | The FEAT_MOPS instructions need a couple of helper routines that |
---|---|---|---|
2 | check for MTE tag failures: | ||
3 | * mte_mops_probe() checks whether there is going to be a tag | ||
4 | error in the next up-to-a-page worth of data | ||
5 | * mte_check_fail() is an existing function to record the fact | ||
6 | of a tag failure, which we need to make global so we can | ||
7 | call it from helper-a64.c | ||
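A short usage sketch (assumed context only; this is the shape the SET* helper patch later in this series follows in its set_step() function):

    /* Clamp one step of a FEAT_MOPS operation at the first tag mismatch. */
    if (mtedesc) {
        uint64_t mtesize = mte_mops_probe(env, toaddr, stepsize, mtedesc);
        if (mtesize == 0) {
            /* mismatch on the very first granule: report it (may not return) */
            mte_check_fail(env, mtedesc, toaddr, ra);
            mtedesc = 0;    /* if it does return, no further MTE checks */
        } else {
            stepsize = MIN(stepsize, mtesize);   /* stop short of the mismatch */
        }
    }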
2 | 8 | ||
3 | Support all of the easy GM block sizes. | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Use direct memory operations, since the pointers are aligned. | 10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
11 | Message-id: 20230912140434.1333369-7-peter.maydell@linaro.org | ||
12 | --- | ||
13 | target/arm/internals.h | 28 +++++++++++++++++++ | ||
14 | target/arm/tcg/mte_helper.c | 54 +++++++++++++++++++++++++++++++++++-- | ||
15 | 2 files changed, 80 insertions(+), 2 deletions(-) | ||
5 | 16 | ||
6 | While BS=2 (16 bytes, 1 tag) is a legal setting, that requires | 17 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
7 | an atomic store of one nibble. This is not difficult, but there | ||
8 | is also no point in supporting it until required. | ||
9 | |||
10 | Note that cortex-a710 sets GM blocksize to match its cacheline | ||
11 | size of 64 bytes. I expect many implementations will also | ||
12 | match the cacheline, which makes 16 bytes very unlikely. | ||
13 | |||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
15 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
16 | Message-id: 20230811214031.171020-4-richard.henderson@linaro.org | ||
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
18 | --- | ||
19 | target/arm/cpu.c | 18 +++++++++--- | ||
20 | target/arm/tcg/mte_helper.c | 56 +++++++++++++++++++++++++++++++------ | ||
21 | 2 files changed, 62 insertions(+), 12 deletions(-) | ||
22 | |||
23 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/target/arm/cpu.c | 19 | --- a/target/arm/internals.h |
26 | +++ b/target/arm/cpu.c | 20 | +++ b/target/arm/internals.h |
27 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | 21 | @@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */ |
28 | ID_PFR1, VIRTUALIZATION, 0); | 22 | bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); |
29 | } | 23 | uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); |
30 | 24 | ||
31 | + if (cpu_isar_feature(aa64_mte, cpu)) { | 25 | +/** |
32 | + /* | 26 | + * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation |
33 | + * The architectural range of GM blocksize is 2-6, however qemu | 27 | + * @env: CPU env |
34 | + * doesn't support blocksize of 2 (see HELPER(ldgm)). | 28 | + * @ptr: start address of memory region (dirty pointer) |
35 | + */ | 29 | + * @size: length of region (guaranteed not to cross a page boundary) |
36 | + if (tcg_enabled()) { | 30 | + * @desc: MTEDESC descriptor word (0 means no MTE checks) |
37 | + assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6); | 31 | + * Returns: the size of the region that can be copied without hitting |
38 | + } | 32 | + * an MTE tag failure |
33 | + * | ||
34 | + * Note that we assume that the caller has already checked the TBI | ||
35 | + * and TCMA bits with mte_checks_needed() and an MTE check is definitely | ||
36 | + * required. | ||
37 | + */ | ||
38 | +uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
39 | + uint32_t desc); | ||
39 | + | 40 | + |
40 | #ifndef CONFIG_USER_ONLY | 41 | +/** |
41 | - if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) { | 42 | + * mte_check_fail: Record an MTE tag check failure |
42 | /* | 43 | + * @env: CPU env |
43 | * Disable the MTE feature bits if we do not have tag-memory | 44 | + * @desc: MTEDESC descriptor word |
44 | * provided by the machine. | 45 | + * @dirty_ptr: Failing dirty address |
45 | */ | 46 | + * @ra: TCG retaddr |
46 | - cpu->isar.id_aa64pfr1 = | 47 | + * |
47 | - FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0); | 48 | + * This may never return (if the MTE tag checks are configured to fault). |
48 | - } | 49 | + */ |
49 | + if (cpu->tag_memory == NULL) { | 50 | +void mte_check_fail(CPUARMState *env, uint32_t desc, |
50 | + cpu->isar.id_aa64pfr1 = | 51 | + uint64_t dirty_ptr, uintptr_t ra); |
51 | + FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0); | 52 | + |
52 | + } | 53 | static inline int allocation_tag_from_addr(uint64_t ptr) |
53 | #endif | 54 | { |
54 | + } | 55 | return extract64(ptr, 56, 4); |
55 | |||
56 | if (tcg_enabled()) { | ||
57 | /* | ||
58 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | 56 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c |
59 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
60 | --- a/target/arm/tcg/mte_helper.c | 58 | --- a/target/arm/tcg/mte_helper.c |
61 | +++ b/target/arm/tcg/mte_helper.c | 59 | +++ b/target/arm/tcg/mte_helper.c |
62 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) | 60 | @@ -XXX,XX +XXX,XX @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr, |
63 | int gm_bs = env_archcpu(env)->gm_blocksize; | ||
64 | int gm_bs_bytes = 4 << gm_bs; | ||
65 | void *tag_mem; | ||
66 | + uint64_t ret; | ||
67 | + int shift; | ||
68 | |||
69 | ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes); | ||
70 | |||
71 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) | ||
72 | |||
73 | /* | ||
74 | * The ordering of elements within the word corresponds to | ||
75 | - * a little-endian operation. | ||
76 | + * a little-endian operation. Computation of shift comes from | ||
77 | + * | ||
78 | + * index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE> | ||
79 | + * data<index*4+3:index*4> = tag | ||
80 | + * | ||
81 | + * Because of the alignment of ptr above, BS=6 has shift=0. | ||
82 | + * All memory operations are aligned. Defer support for BS=2, | ||
83 | + * requiring insertion or extraction of a nibble, until we | ||
84 | + * support a cpu that requires it. | ||
85 | */ | ||
86 | switch (gm_bs) { | ||
87 | + case 3: | ||
88 | + /* 32 bytes -> 2 tags -> 8 result bits */ | ||
89 | + ret = *(uint8_t *)tag_mem; | ||
90 | + break; | ||
91 | + case 4: | ||
92 | + /* 64 bytes -> 4 tags -> 16 result bits */ | ||
93 | + ret = cpu_to_le16(*(uint16_t *)tag_mem); | ||
94 | + break; | ||
95 | + case 5: | ||
96 | + /* 128 bytes -> 8 tags -> 32 result bits */ | ||
97 | + ret = cpu_to_le32(*(uint32_t *)tag_mem); | ||
98 | + break; | ||
99 | case 6: | ||
100 | /* 256 bytes -> 16 tags -> 64 result bits */ | ||
101 | - return ldq_le_p(tag_mem); | ||
102 | + return cpu_to_le64(*(uint64_t *)tag_mem); | ||
103 | default: | ||
104 | - /* cpu configured with unsupported gm blocksize. */ | ||
105 | + /* | ||
106 | + * CPU configured with unsupported/invalid gm blocksize. | ||
107 | + * This is detected early in arm_cpu_realizefn. | ||
108 | + */ | ||
109 | g_assert_not_reached(); | ||
110 | } | ||
111 | + shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4; | ||
112 | + return ret << shift; | ||
113 | } | 61 | } |
114 | 62 | ||
115 | void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | 63 | /* Record a tag check failure. */ |
116 | @@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | 64 | -static void mte_check_fail(CPUARMState *env, uint32_t desc, |
117 | int gm_bs = env_archcpu(env)->gm_blocksize; | 65 | - uint64_t dirty_ptr, uintptr_t ra) |
118 | int gm_bs_bytes = 4 << gm_bs; | 66 | +void mte_check_fail(CPUARMState *env, uint32_t desc, |
119 | void *tag_mem; | 67 | + uint64_t dirty_ptr, uintptr_t ra) |
120 | + int shift; | 68 | { |
121 | 69 | int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | |
122 | ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes); | 70 | ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx); |
123 | 71 | @@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr) | |
124 | @@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | 72 | done: |
125 | return; | 73 | return useronly_clean_ptr(ptr); |
126 | } | 74 | } |
127 | 75 | + | |
128 | - /* | 76 | +uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, |
129 | - * The ordering of elements within the word corresponds to | 77 | + uint32_t desc) |
130 | - * a little-endian operation. | 78 | +{ |
131 | - */ | 79 | + int mmu_idx, tag_count; |
132 | + /* See LDGM for comments on BS and on shift. */ | 80 | + uint64_t ptr_tag, tag_first, tag_last; |
133 | + shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4; | 81 | + void *mem; |
134 | + val >>= shift; | 82 | + bool w = FIELD_EX32(desc, MTEDESC, WRITE); |
135 | switch (gm_bs) { | 83 | + uint32_t n; |
136 | + case 3: | 84 | + |
137 | + /* 32 bytes -> 2 tags -> 8 result bits */ | 85 | + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); |
138 | + *(uint8_t *)tag_mem = val; | 86 | + /* True probe; this will never fault */ |
139 | + break; | 87 | + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, |
140 | + case 4: | 88 | + w ? MMU_DATA_STORE : MMU_DATA_LOAD, |
141 | + /* 64 bytes -> 4 tags -> 16 result bits */ | 89 | + size, MMU_DATA_LOAD, true, 0); |
142 | + *(uint16_t *)tag_mem = cpu_to_le16(val); | 90 | + if (!mem) { |
143 | + break; | 91 | + return size; |
144 | + case 5: | 92 | + } |
145 | + /* 128 bytes -> 8 tags -> 32 result bits */ | 93 | + |
146 | + *(uint32_t *)tag_mem = cpu_to_le32(val); | 94 | + /* |
147 | + break; | 95 | + * TODO: checkN() is not designed for checks of the size we expect |
148 | case 6: | 96 | + * for FEAT_MOPS operations, so we should implement this differently. |
149 | - stq_le_p(tag_mem, val); | 97 | + * Maybe we should do something like |
150 | + /* 256 bytes -> 16 tags -> 64 result bits */ | 98 | + * if (region start and size are aligned nicely) { |
151 | + *(uint64_t *)tag_mem = cpu_to_le64(val); | 99 | + * do direct loads of 64 tag bits at a time; |
152 | break; | 100 | + * } else { |
153 | default: | 101 | + * call checkN() |
154 | /* cpu configured with unsupported gm blocksize. */ | 102 | + * } |
103 | + */ | ||
104 | + /* Round the bounds to the tag granule, and compute the number of tags. */ | ||
105 | + ptr_tag = allocation_tag_from_addr(ptr); | ||
106 | + tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); | ||
107 | + tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE); | ||
108 | + tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; | ||
109 | + n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count); | ||
110 | + if (likely(n == tag_count)) { | ||
111 | + return size; | ||
112 | + } | ||
113 | + | ||
114 | + /* | ||
115 | + * Failure; for the first granule, it's at @ptr. Otherwise | ||
116 | + * it's at the first byte of the nth granule. Calculate how | ||
117 | + * many bytes we can access without hitting that failure. | ||
118 | + */ | ||
119 | + if (n == 0) { | ||
120 | + return 0; | ||
121 | + } else { | ||
122 | + return n * TAG_GRANULE - (ptr - tag_first); | ||
123 | + } | ||
124 | +} | ||
155 | -- | 125 | -- |
156 | 2.34.1 | 126 | 2.34.1 |
1 | The architecture requires (R_TYTWB) that an attempt to return from EL3 | 1 | Implement the SET* instructions which collectively implement a |
---|---|---|---|
2 | when SCR_EL3.{NSE,NS} are {1,0} is an illegal exception return. (This | 2 | "memset" operation. These come in a set of three, eg SETP |
3 | enforces that the CPU can't ever be executing below EL3 with the | 3 | (prologue), SETM (main), SETE (epilogue), and each of those has |
4 | NSE,NS bits indicating an invalid security state.) | 4 | different flavours to indicate whether memory accesses should be |
5 | unpriv or non-temporal. | ||
5 | 6 | ||
6 | We were missing this check; add it. | 7 | This commit does not include the "memset with tag setting" |
8 | SETG* instructions. | ||
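As a hedged sketch (not part of the patch), the guest-visible "option A" register contract that the do_setp()/do_setm()/do_sete() helpers below maintain can be summarised like this:

    /* After SETP: Xd = start + size, Xn = -size, NZCV = 0000 (C clear => option A).
     * SETM and SETE only move Xn towards zero, so at any point:
     */
    static inline uint64_t set_cursor(uint64_t xd, uint64_t xn)
    {
        return xd + xn;     /* address of the next byte to be set */
    }
    static inline uint64_t set_remaining(uint64_t xn)
    {
        return -xn;         /* bytes still to set (Xn holds a negative count) */
    }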
7 | 9 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Message-id: 20230807150618.101357-1-peter.maydell@linaro.org | 12 | Message-id: 20230912140434.1333369-8-peter.maydell@linaro.org |
11 | --- | 13 | --- |
12 | target/arm/tcg/helper-a64.c | 9 +++++++++ | 14 | target/arm/tcg/helper-a64.h | 4 + |
13 | 1 file changed, 9 insertions(+) | 15 | target/arm/tcg/a64.decode | 16 ++ |
16 | target/arm/tcg/helper-a64.c | 344 +++++++++++++++++++++++++++++++++ | ||
17 | target/arm/tcg/translate-a64.c | 49 +++++ | ||
18 | 4 files changed, 413 insertions(+) | ||
14 | 19 | ||
20 | diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/target/arm/tcg/helper-a64.h | ||
23 | +++ b/target/arm/tcg/helper-a64.h | ||
24 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(stzgm_tags, TCG_CALL_NO_WG, void, env, i64, i64) | ||
25 | |||
26 | DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG, | ||
27 | noreturn, env, i64, i32, i32) | ||
28 | + | ||
29 | +DEF_HELPER_3(setp, void, env, i32, i32) | ||
30 | +DEF_HELPER_3(setm, void, env, i32, i32) | ||
31 | +DEF_HELPER_3(sete, void, env, i32, i32) | ||
32 | diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/target/arm/tcg/a64.decode | ||
35 | +++ b/target/arm/tcg/a64.decode | ||
36 | @@ -XXX,XX +XXX,XX @@ LDGM 11011001 11 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0 | ||
37 | STZ2G 11011001 11 1 ......... 01 ..... ..... @ldst_tag p=1 w=1 | ||
38 | STZ2G 11011001 11 1 ......... 10 ..... ..... @ldst_tag p=0 w=0 | ||
39 | STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1 | ||
40 | + | ||
41 | +# Memory operations (memset, memcpy, memmove) | ||
42 | +# Each of these comes in a set of three, eg SETP (prologue), SETM (main), | ||
43 | +# SETE (epilogue), and each of those has different flavours to | ||
44 | +# indicate whether memory accesses should be unpriv or non-temporal. | ||
45 | +# We don't distinguish temporal and non-temporal accesses, but we | ||
46 | +# do need to report it in syndrome register values. | ||
47 | + | ||
48 | +# Memset | ||
49 | +&set rs rn rd unpriv nontemp | ||
50 | +# op2 bit 1 is nontemporal bit | ||
51 | +@set .. ......... rs:5 .. nontemp:1 unpriv:1 .. rn:5 rd:5 &set | ||
52 | + | ||
53 | +SETP 00 011001110 ..... 00 . . 01 ..... ..... @set | ||
54 | +SETM 00 011001110 ..... 01 . . 01 ..... ..... @set | ||
55 | +SETE 00 011001110 ..... 10 . . 01 ..... ..... @set | ||
15 | diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c | 56 | diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c |
16 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/tcg/helper-a64.c | 58 | --- a/target/arm/tcg/helper-a64.c |
18 | +++ b/target/arm/tcg/helper-a64.c | 59 | +++ b/target/arm/tcg/helper-a64.c |
19 | @@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) | 60 | @@ -XXX,XX +XXX,XX @@ void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr, |
20 | spsr &= ~PSTATE_SS; | 61 | arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type, |
21 | } | 62 | mmu_idx, GETPC()); |
22 | 63 | } | |
23 | + /* | 64 | + |
24 | + * FEAT_RME forbids return from EL3 with an invalid security state. | 65 | +/* Memory operations (memset, memmove, memcpy) */ |
25 | + * We don't need an explicit check for FEAT_RME here because we enforce | 66 | + |
26 | + * in scr_write() that you can't set the NSE bit without it. | 67 | +/* |
27 | + */ | 68 | + * Return true if the CPY* and SET* insns can execute; compare |
28 | + if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) { | 69 | + * pseudocode CheckMOPSEnabled(), though we refactor it a little. |
29 | + goto illegal_return; | 70 | + */ |
30 | + } | 71 | +static bool mops_enabled(CPUARMState *env) |
31 | + | 72 | +{ |
32 | new_el = el_from_spsr(spsr); | 73 | + int el = arm_current_el(env); |
33 | if (new_el == -1) { | 74 | + |
34 | goto illegal_return; | 75 | + if (el < 2 && |
76 | + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) && | ||
77 | + !(arm_hcrx_el2_eff(env) & HCRX_MSCEN)) { | ||
78 | + return false; | ||
79 | + } | ||
80 | + | ||
81 | + if (el == 0) { | ||
82 | + if (!el_is_in_host(env, 0)) { | ||
83 | + return env->cp15.sctlr_el[1] & SCTLR_MSCEN; | ||
84 | + } else { | ||
85 | + return env->cp15.sctlr_el[2] & SCTLR_MSCEN; | ||
86 | + } | ||
87 | + } | ||
88 | + return true; | ||
89 | +} | ||
90 | + | ||
91 | +static void check_mops_enabled(CPUARMState *env, uintptr_t ra) | ||
92 | +{ | ||
93 | + if (!mops_enabled(env)) { | ||
94 | + raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(), | ||
95 | + exception_target_el(env), ra); | ||
96 | + } | ||
97 | +} | ||
98 | + | ||
99 | +/* | ||
100 | + * Return the target exception level for an exception due | ||
101 | + * to mismatched arguments in a FEAT_MOPS copy or set. | ||
102 | + * Compare pseudocode MismatchedCpySetTargetEL() | ||
103 | + */ | ||
104 | +static int mops_mismatch_exception_target_el(CPUARMState *env) | ||
105 | +{ | ||
106 | + int el = arm_current_el(env); | ||
107 | + | ||
108 | + if (el > 1) { | ||
109 | + return el; | ||
110 | + } | ||
111 | + if (el == 0 && (arm_hcr_el2_eff(env) & HCR_TGE)) { | ||
112 | + return 2; | ||
113 | + } | ||
114 | + if (el == 1 && (arm_hcrx_el2_eff(env) & HCRX_MCE2)) { | ||
115 | + return 2; | ||
116 | + } | ||
117 | + return 1; | ||
118 | +} | ||
119 | + | ||
120 | +/* | ||
121 | + * Check whether an M or E instruction was executed with a CF value | ||
122 | + * indicating the wrong option for this implementation. | ||
123 | + * Assumes we are always Option A. | ||
124 | + */ | ||
125 | +static void check_mops_wrong_option(CPUARMState *env, uint32_t syndrome, | ||
126 | + uintptr_t ra) | ||
127 | +{ | ||
128 | + if (env->CF != 0) { | ||
129 | + syndrome |= 1 << 17; /* Set the wrong-option bit */ | ||
130 | + raise_exception_ra(env, EXCP_UDEF, syndrome, | ||
131 | + mops_mismatch_exception_target_el(env), ra); | ||
132 | + } | ||
133 | +} | ||
134 | + | ||
135 | +/* | ||
136 | + * Return the maximum number of bytes we can transfer starting at addr | ||
137 | + * without crossing a page boundary. | ||
138 | + */ | ||
139 | +static uint64_t page_limit(uint64_t addr) | ||
140 | +{ | ||
141 | + return TARGET_PAGE_ALIGN(addr + 1) - addr; | ||
142 | +} | ||
143 | + | ||
144 | +/* | ||
145 | + * Perform part of a memory set on an area of guest memory starting at | ||
146 | + * toaddr (a dirty address) and extending for setsize bytes. | ||
147 | + * | ||
148 | + * Returns the number of bytes actually set, which might be less than | ||
149 | + * setsize; the caller should loop until the whole set has been done. | ||
150 | + * The caller should ensure that the guest registers are correct | ||
151 | + * for the possibility that the first byte of the set encounters | ||
152 | + * an exception or watchpoint. We guarantee not to take any faults | ||
153 | + * for bytes other than the first. | ||
154 | + */ | ||
155 | +static uint64_t set_step(CPUARMState *env, uint64_t toaddr, | ||
156 | + uint64_t setsize, uint32_t data, int memidx, | ||
157 | + uint32_t *mtedesc, uintptr_t ra) | ||
158 | +{ | ||
159 | + void *mem; | ||
160 | + | ||
161 | + setsize = MIN(setsize, page_limit(toaddr)); | ||
162 | + if (*mtedesc) { | ||
163 | + uint64_t mtesize = mte_mops_probe(env, toaddr, setsize, *mtedesc); | ||
164 | + if (mtesize == 0) { | ||
165 | + /* Trap, or not. All CPU state is up to date */ | ||
166 | + mte_check_fail(env, *mtedesc, toaddr, ra); | ||
167 | + /* Continue, with no further MTE checks required */ | ||
168 | + *mtedesc = 0; | ||
169 | + } else { | ||
170 | + /* Advance to the end, or to the tag mismatch */ | ||
171 | + setsize = MIN(setsize, mtesize); | ||
172 | + } | ||
173 | + } | ||
174 | + | ||
175 | + toaddr = useronly_clean_ptr(toaddr); | ||
176 | + /* | ||
177 | + * Trapless lookup: returns NULL for invalid page, I/O, | ||
178 | + * watchpoints, clean pages, etc. | ||
179 | + */ | ||
180 | + mem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, memidx); | ||
181 | + | ||
182 | +#ifndef CONFIG_USER_ONLY | ||
183 | + if (unlikely(!mem)) { | ||
184 | + /* | ||
185 | + * Slow-path: just do one byte write. This will handle the | ||
186 | + * watchpoint, invalid page, etc handling correctly. | ||
187 | + * For clean code pages, the next iteration will see | ||
188 | + * the page dirty and will use the fast path. | ||
189 | + */ | ||
190 | + cpu_stb_mmuidx_ra(env, toaddr, data, memidx, ra); | ||
191 | + return 1; | ||
192 | + } | ||
193 | +#endif | ||
194 | + /* Easy case: just memset the host memory */ | ||
195 | + memset(mem, data, setsize); | ||
196 | + return setsize; | ||
197 | +} | ||
198 | + | ||
199 | +typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr, | ||
200 | + uint64_t setsize, uint32_t data, | ||
201 | + int memidx, uint32_t *mtedesc, uintptr_t ra); | ||
202 | + | ||
203 | +/* Extract register numbers from a MOPS exception syndrome value */ | ||
204 | +static int mops_destreg(uint32_t syndrome) | ||
205 | +{ | ||
206 | + return extract32(syndrome, 10, 5); | ||
207 | +} | ||
208 | + | ||
209 | +static int mops_srcreg(uint32_t syndrome) | ||
210 | +{ | ||
211 | + return extract32(syndrome, 5, 5); | ||
212 | +} | ||
213 | + | ||
214 | +static int mops_sizereg(uint32_t syndrome) | ||
215 | +{ | ||
216 | + return extract32(syndrome, 0, 5); | ||
217 | +} | ||
218 | + | ||
219 | +/* | ||
220 | + * Return true if TCMA and TBI bits mean we need to do MTE checks. | ||
221 | + * We only need to do this once per MOPS insn, not for every page. | ||
222 | + */ | ||
223 | +static bool mte_checks_needed(uint64_t ptr, uint32_t desc) | ||
224 | +{ | ||
225 | + int bit55 = extract64(ptr, 55, 1); | ||
226 | + | ||
227 | + /* | ||
228 | + * Note that tbi_check() returns true for "access checked" but | ||
229 | + * tcma_check() returns true for "access unchecked". | ||
230 | + */ | ||
231 | + if (!tbi_check(desc, bit55)) { | ||
232 | + return false; | ||
233 | + } | ||
234 | + return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr)); | ||
235 | +} | ||
236 | + | ||
237 | +/* | ||
238 | + * For the Memory Set operation, our implementation chooses | ||
239 | + * always to use "option A", where we update Xd to the final | ||
240 | + * address in the SETP insn, and set Xn to be -(bytes remaining). | ||
241 | + * On SETM and SETE insns we only need update Xn. | ||
242 | + * | ||
243 | + * @env: CPU | ||
244 | + * @syndrome: syndrome value for mismatch exceptions | ||
245 | + * (also contains the register numbers we need to use) | ||
246 | + * @mtedesc: MTE descriptor word | ||
247 | + * @stepfn: function which does a single part of the set operation | ||
248 | + * @is_setg: true if this is the tag-setting SETG variant | ||
249 | + */ | ||
250 | +static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
251 | + StepFn *stepfn, bool is_setg, uintptr_t ra) | ||
252 | +{ | ||
253 | + /* Prologue: we choose to do up to the next page boundary */ | ||
254 | + int rd = mops_destreg(syndrome); | ||
255 | + int rs = mops_srcreg(syndrome); | ||
256 | + int rn = mops_sizereg(syndrome); | ||
257 | + uint8_t data = env->xregs[rs]; | ||
258 | + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); | ||
259 | + uint64_t toaddr = env->xregs[rd]; | ||
260 | + uint64_t setsize = env->xregs[rn]; | ||
261 | + uint64_t stagesetsize, step; | ||
262 | + | ||
263 | + check_mops_enabled(env, ra); | ||
264 | + | ||
265 | + if (setsize > INT64_MAX) { | ||
266 | + setsize = INT64_MAX; | ||
267 | + } | ||
268 | + | ||
269 | + if (!mte_checks_needed(toaddr, mtedesc)) { | ||
270 | + mtedesc = 0; | ||
271 | + } | ||
272 | + | ||
273 | + stagesetsize = MIN(setsize, page_limit(toaddr)); | ||
274 | + while (stagesetsize) { | ||
275 | + env->xregs[rd] = toaddr; | ||
276 | + env->xregs[rn] = setsize; | ||
277 | + step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra); | ||
278 | + toaddr += step; | ||
279 | + setsize -= step; | ||
280 | + stagesetsize -= step; | ||
281 | + } | ||
282 | + /* Insn completed, so update registers to the Option A format */ | ||
283 | + env->xregs[rd] = toaddr + setsize; | ||
284 | + env->xregs[rn] = -setsize; | ||
285 | + | ||
286 | + /* Set NZCV = 0000 to indicate we are an Option A implementation */ | ||
287 | + env->NF = 0; | ||
288 | + env->ZF = 1; /* our env->ZF encoding is inverted */ | ||
289 | + env->CF = 0; | ||
290 | + env->VF = 0; | ||
291 | + return; | ||
292 | +} | ||
293 | + | ||
294 | +void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
295 | +{ | ||
296 | + do_setp(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
297 | +} | ||
298 | + | ||
299 | +static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
300 | + StepFn *stepfn, bool is_setg, uintptr_t ra) | ||
301 | +{ | ||
302 | + /* Main: we choose to do all the full-page chunks */ | ||
303 | + CPUState *cs = env_cpu(env); | ||
304 | + int rd = mops_destreg(syndrome); | ||
305 | + int rs = mops_srcreg(syndrome); | ||
306 | + int rn = mops_sizereg(syndrome); | ||
307 | + uint8_t data = env->xregs[rs]; | ||
308 | + uint64_t toaddr = env->xregs[rd] + env->xregs[rn]; | ||
309 | + uint64_t setsize = -env->xregs[rn]; | ||
310 | + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); | ||
311 | + uint64_t step, stagesetsize; | ||
312 | + | ||
313 | + check_mops_enabled(env, ra); | ||
314 | + | ||
315 | + /* | ||
316 | + * We're allowed to NOP out "no data to copy" before the consistency | ||
317 | + * checks; we choose to do so. | ||
318 | + */ | ||
319 | + if (env->xregs[rn] == 0) { | ||
320 | + return; | ||
321 | + } | ||
322 | + | ||
323 | + check_mops_wrong_option(env, syndrome, ra); | ||
324 | + | ||
325 | + /* | ||
326 | + * Our implementation will work fine even if we have an unaligned | ||
327 | + * destination address, and because we update Xn every time around | ||
328 | + * the loop below and the return value from stepfn() may be less | ||
329 | + * than requested, we might find toaddr is unaligned. So we don't | ||
330 | + * have an IMPDEF check for alignment here. | ||
331 | + */ | ||
332 | + | ||
333 | + if (!mte_checks_needed(toaddr, mtedesc)) { | ||
334 | + mtedesc = 0; | ||
335 | + } | ||
336 | + | ||
337 | + /* Do the actual memset: we leave the last partial page to SETE */ | ||
338 | + stagesetsize = setsize & TARGET_PAGE_MASK; | ||
339 | + while (stagesetsize > 0) { | ||
340 | + step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); | ||
341 | + toaddr += step; | ||
342 | + setsize -= step; | ||
343 | + stagesetsize -= step; | ||
344 | + env->xregs[rn] = -setsize; | ||
345 | + if (stagesetsize > 0 && unlikely(cpu_loop_exit_requested(cs))) { | ||
346 | + cpu_loop_exit_restore(cs, ra); | ||
347 | + } | ||
348 | + } | ||
349 | +} | ||
350 | + | ||
351 | +void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
352 | +{ | ||
353 | + do_setm(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
354 | +} | ||
355 | + | ||
356 | +static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
357 | + StepFn *stepfn, bool is_setg, uintptr_t ra) | ||
358 | +{ | ||
359 | + /* Epilogue: do the last partial page */ | ||
360 | + int rd = mops_destreg(syndrome); | ||
361 | + int rs = mops_srcreg(syndrome); | ||
362 | + int rn = mops_sizereg(syndrome); | ||
363 | + uint8_t data = env->xregs[rs]; | ||
364 | + uint64_t toaddr = env->xregs[rd] + env->xregs[rn]; | ||
365 | + uint64_t setsize = -env->xregs[rn]; | ||
366 | + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); | ||
367 | + uint64_t step; | ||
368 | + | ||
369 | + check_mops_enabled(env, ra); | ||
370 | + | ||
371 | + /* | ||
372 | + * We're allowed to NOP out "no data to copy" before the consistency | ||
373 | + * checks; we choose to do so. | ||
374 | + */ | ||
375 | + if (setsize == 0) { | ||
376 | + return; | ||
377 | + } | ||
378 | + | ||
379 | + check_mops_wrong_option(env, syndrome, ra); | ||
380 | + | ||
381 | + /* | ||
382 | + * Our implementation has no address alignment requirements, but | ||
383 | + * we do want to enforce the "less than a page" size requirement, | ||
384 | + * so we don't need to have the "check for interrupts" here. | ||
385 | + */ | ||
386 | + if (setsize >= TARGET_PAGE_SIZE) { | ||
387 | + raise_exception_ra(env, EXCP_UDEF, syndrome, | ||
388 | + mops_mismatch_exception_target_el(env), ra); | ||
389 | + } | ||
390 | + | ||
391 | + if (!mte_checks_needed(toaddr, mtedesc)) { | ||
392 | + mtedesc = 0; | ||
393 | + } | ||
394 | + | ||
395 | + /* Do the actual memset */ | ||
396 | + while (setsize > 0) { | ||
397 | + step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); | ||
398 | + toaddr += step; | ||
399 | + setsize -= step; | ||
400 | + env->xregs[rn] = -setsize; | ||
401 | + } | ||
402 | +} | ||
403 | + | ||
404 | +void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
405 | +{ | ||
406 | + do_sete(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
407 | +} | ||
408 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
409 | index XXXXXXX..XXXXXXX 100644 | ||
410 | --- a/target/arm/tcg/translate-a64.c | ||
411 | +++ b/target/arm/tcg/translate-a64.c | ||
412 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false) | ||
413 | TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true) | ||
414 | TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true) | ||
415 | |||
416 | +typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32); | ||
417 | + | ||
418 | +static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn) | ||
419 | +{ | ||
420 | + int memidx; | ||
421 | + uint32_t syndrome, desc = 0; | ||
422 | + | ||
423 | + /* | ||
424 | + * UNPREDICTABLE cases: we choose to UNDEF, which allows | ||
425 | + * us to pull this check before the CheckMOPSEnabled() test | ||
426 | + * (which we do in the helper function) | ||
427 | + */ | ||
428 | + if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || | ||
429 | + a->rd == 31 || a->rn == 31) { | ||
430 | + return false; | ||
431 | + } | ||
432 | + | ||
433 | + memidx = get_a64_user_mem_index(s, a->unpriv); | ||
434 | + | ||
435 | + /* | ||
436 | + * We pass option_a == true, matching our implementation; | ||
437 | + * we pass wrong_option == false: helper function may set that bit. | ||
438 | + */ | ||
439 | + syndrome = syn_mop(true, false, (a->nontemp << 1) | a->unpriv, | ||
440 | + is_epilogue, false, true, a->rd, a->rs, a->rn); | ||
441 | + | ||
442 | + if (s->mte_active[a->unpriv]) { | ||
443 | + /* We may need to do MTE tag checking, so assemble the descriptor */ | ||
444 | + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
445 | + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
446 | + desc = FIELD_DP32(desc, MTEDESC, WRITE, true); | ||
447 | + /* SIZEM1 and ALIGN we leave 0 (byte write) */ | ||
448 | + } | ||
449 | + /* The helper function always needs the memidx even with MTE disabled */ | ||
450 | + desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx); | ||
451 | + | ||
452 | + /* | ||
453 | + * The helper needs the register numbers, but since they're in | ||
454 | + * the syndrome anyway, we let it extract them from there rather | ||
455 | + * than passing in an extra three integer arguments. | ||
456 | + */ | ||
457 | + fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc)); | ||
458 | + return true; | ||
459 | +} | ||
460 | + | ||
461 | +TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, gen_helper_setp) | ||
462 | +TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, gen_helper_setm) | ||
463 | +TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, gen_helper_sete) | ||
464 | + | ||
465 | typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); | ||
466 | |||
467 | static bool gen_rri(DisasContext *s, arg_rri_sf *a, | ||
35 | -- | 468 | -- |
36 | 2.34.1 | 469 | 2.34.1 |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | Currently the tag-setting instructions we have always set tags in the |
---|---|---|---|
2 | context of the current EL, and so we only need one ATA bit in the TB | ||
3 | flags. The FEAT_MOPS SETG instructions include ones which set tags | ||
4 | for a non-privileged access, so we now also need the equivalent "are | ||
5 | tags enabled?" information for EL0. | ||
2 | 6 | ||
3 | Previously we hard-coded the blocksize with GMID_EL1_BS. | 7 | Add the new TB flag, and convert the existing 'bool ata' field in |
4 | But the value we choose for -cpu max does not match the | 8 | DisasContext to a 'bool ata[2]' that can be indexed by the is_unpriv |
5 | value that cortex-a710 uses. | 9 | bit in an instruction, similarly to mte[2]. |
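As a quick hedged sketch (assumed context, not part of this patch), translate-time code for an insn with an unpriv form can then index the pair directly, just as the existing mte_active[] pair is indexed:

    /* s is the DisasContext; a->unpriv is the instruction's unpriv bit. */
    if (s->ata[a->unpriv]) {
        /* allocation tag access is enabled for the EL this access will use */
    }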
6 | 10 | ||
7 | Mirror the way we handle dcz_blocksize. | ||
8 | |||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Message-id: 20230811214031.171020-3-richard.henderson@linaro.org | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Message-id: 20230912140434.1333369-9-peter.maydell@linaro.org | ||
13 | --- | 14 | --- |
14 | target/arm/cpu.h | 2 ++ | 15 | target/arm/cpu.h | 1 + |
15 | target/arm/internals.h | 6 ----- | 16 | target/arm/tcg/translate.h | 4 ++-- |
16 | target/arm/tcg/translate.h | 2 ++ | 17 | target/arm/tcg/hflags.c | 12 ++++++++++++ |
17 | target/arm/helper.c | 11 +++++--- | 18 | target/arm/tcg/translate-a64.c | 23 ++++++++++++----------- |
18 | target/arm/tcg/cpu64.c | 1 + | 19 | 4 files changed, 27 insertions(+), 13 deletions(-) |
19 | target/arm/tcg/mte_helper.c | 46 ++++++++++++++++++++++------------ | ||
20 | target/arm/tcg/translate-a64.c | 5 ++-- | ||
21 | 7 files changed, 45 insertions(+), 28 deletions(-) | ||
22 | 20 | ||
23 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 21 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
24 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/target/arm/cpu.h | 23 | --- a/target/arm/cpu.h |
26 | +++ b/target/arm/cpu.h | 24 | +++ b/target/arm/cpu.h |
27 | @@ -XXX,XX +XXX,XX @@ struct ArchCPU { | 25 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SVL, 24, 4) |
28 | 26 | FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) | |
29 | /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ | 27 | FIELD(TBFLAG_A64, FGT_ERET, 29, 1) |
30 | uint8_t dcz_blocksize; | 28 | FIELD(TBFLAG_A64, NAA, 30, 1) |
31 | + /* GM blocksize, in log_2(words), ie low 4 bits of GMID_EL0 */ | 29 | +FIELD(TBFLAG_A64, ATA0, 31, 1) |
32 | + uint8_t gm_blocksize; | 30 | |
33 | |||
34 | uint64_t rvbar_prop; /* Property/input signals. */ | ||
35 | |||
36 | diff --git a/target/arm/internals.h b/target/arm/internals.h | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/target/arm/internals.h | ||
39 | +++ b/target/arm/internals.h | ||
40 | @@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs); | ||
41 | |||
42 | #endif /* !CONFIG_USER_ONLY */ | ||
43 | |||
44 | -/* | ||
45 | - * The log2 of the words in the tag block, for GMID_EL1.BS. | ||
46 | - * The is the maximum, 256 bytes, which manipulates 64-bits of tags. | ||
47 | - */ | ||
48 | -#define GMID_EL1_BS 6 | ||
49 | - | ||
50 | /* | 31 | /* |
51 | * SVE predicates are 1/8 the size of SVE vectors, and cannot use | 32 | * Helpers for using the above. |
52 | * the same simd_desc() encoding due to restrictions on size. | ||
53 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h | 33 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h |
54 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
55 | --- a/target/arm/tcg/translate.h | 35 | --- a/target/arm/tcg/translate.h |
56 | +++ b/target/arm/tcg/translate.h | 36 | +++ b/target/arm/tcg/translate.h |
57 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | 37 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { |
58 | int8_t btype; | 38 | bool unpriv; |
59 | /* A copy of cpu->dcz_blocksize. */ | 39 | /* True if v8.3-PAuth is active. */ |
60 | uint8_t dcz_blocksize; | 40 | bool pauth_active; |
61 | + /* A copy of cpu->gm_blocksize. */ | 41 | - /* True if v8.5-MTE access to tags is enabled. */ |
62 | + uint8_t gm_blocksize; | 42 | - bool ata; |
63 | /* True if this page is guarded. */ | 43 | + /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */ |
64 | bool guarded_page; | 44 | + bool ata[2]; |
65 | /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ | 45 | /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */ |
66 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 46 | bool mte_active[2]; |
47 | /* True with v8.5-BTI and SCTLR_ELx.BT* set. */ | ||
48 | diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | 49 | index XXXXXXX..XXXXXXX 100644 |
68 | --- a/target/arm/helper.c | 50 | --- a/target/arm/tcg/hflags.c |
69 | +++ b/target/arm/helper.c | 51 | +++ b/target/arm/tcg/hflags.c |
70 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_reginfo[] = { | 52 | @@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, |
71 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6, | 53 | && allocation_tag_access_enabled(env, 0, sctlr)) { |
72 | .access = PL1_RW, .accessfn = access_mte, | 54 | DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1); |
73 | .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) }, | 55 | } |
74 | - { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, | 56 | + /* |
75 | - .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, | 57 | + * For unpriv tag-setting accesses we alse need ATA0. Again, in |
76 | - .access = PL1_R, .accessfn = access_aa64_tid5, | 58 | + * contexts where unpriv and normal insns are the same we |
77 | - .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS }, | 59 | + * duplicate the ATA bit to save effort for translate-a64.c. |
78 | { .name = "TCO", .state = ARM_CP_STATE_AA64, | 60 | + */ |
79 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, | 61 | + if (EX_TBFLAG_A64(flags, UNPRIV)) { |
80 | .type = ARM_CP_NO_RAW, | 62 | + if (allocation_tag_access_enabled(env, 0, sctlr)) { |
81 | @@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu) | 63 | + DP_TBFLAG_A64(flags, ATA0, 1); |
82 | * then define only a RAZ/WI version of PSTATE.TCO. | 64 | + } |
83 | */ | 65 | + } else { |
84 | if (cpu_isar_feature(aa64_mte, cpu)) { | 66 | + DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA)); |
85 | + ARMCPRegInfo gmid_reginfo = { | 67 | + } |
86 | + .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, | 68 | /* Cache TCMA as well as TBI. */ |
87 | + .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, | 69 | DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); |
88 | + .access = PL1_R, .accessfn = access_aa64_tid5, | ||
89 | + .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize, | ||
90 | + }; | ||
91 | + define_one_arm_cp_reg(cpu, &gmid_reginfo); | ||
92 | define_arm_cp_regs(cpu, mte_reginfo); | ||
93 | define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); | ||
94 | } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) { | ||
95 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | ||
96 | index XXXXXXX..XXXXXXX 100644 | ||
97 | --- a/target/arm/tcg/cpu64.c | ||
98 | +++ b/target/arm/tcg/cpu64.c | ||
99 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) | ||
100 | cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ | ||
101 | cpu->dcz_blocksize = 7; /* 512 bytes */ | ||
102 | #endif | ||
103 | + cpu->gm_blocksize = 6; /* 256 bytes */ | ||
104 | |||
105 | cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ); | ||
106 | cpu->sme_vq.supported = SVE_VQ_POW2_MAP; | ||
107 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | ||
108 | index XXXXXXX..XXXXXXX 100644 | ||
109 | --- a/target/arm/tcg/mte_helper.c | ||
110 | +++ b/target/arm/tcg/mte_helper.c | ||
111 | @@ -XXX,XX +XXX,XX @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr) | ||
112 | } | 70 | } |
113 | } | ||
114 | |||
115 | -#define LDGM_STGM_SIZE (4 << GMID_EL1_BS) | ||
116 | - | ||
117 | uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) | ||
118 | { | ||
119 | int mmu_idx = cpu_mmu_index(env, false); | ||
120 | uintptr_t ra = GETPC(); | ||
121 | + int gm_bs = env_archcpu(env)->gm_blocksize; | ||
122 | + int gm_bs_bytes = 4 << gm_bs; | ||
123 | void *tag_mem; | ||
124 | |||
125 | - ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE); | ||
126 | + ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes); | ||
127 | |||
128 | /* Trap if accessing an invalid page. */ | ||
129 | tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, | ||
130 | - LDGM_STGM_SIZE, MMU_DATA_LOAD, | ||
131 | - LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra); | ||
132 | + gm_bs_bytes, MMU_DATA_LOAD, | ||
133 | + gm_bs_bytes / (2 * TAG_GRANULE), ra); | ||
134 | |||
135 | /* The tag is squashed to zero if the page does not support tags. */ | ||
136 | if (!tag_mem) { | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | - QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6); | ||
141 | /* | ||
142 | - * We are loading 64-bits worth of tags. The ordering of elements | ||
143 | - * within the word corresponds to a 64-bit little-endian operation. | ||
144 | + * The ordering of elements within the word corresponds to | ||
145 | + * a little-endian operation. | ||
146 | */ | ||
147 | - return ldq_le_p(tag_mem); | ||
148 | + switch (gm_bs) { | ||
149 | + case 6: | ||
150 | + /* 256 bytes -> 16 tags -> 64 result bits */ | ||
151 | + return ldq_le_p(tag_mem); | ||
152 | + default: | ||
153 | + /* cpu configured with unsupported gm blocksize. */ | ||
154 | + g_assert_not_reached(); | ||
155 | + } | ||
156 | } | ||
157 | |||
158 | void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | ||
159 | { | ||
160 | int mmu_idx = cpu_mmu_index(env, false); | ||
161 | uintptr_t ra = GETPC(); | ||
162 | + int gm_bs = env_archcpu(env)->gm_blocksize; | ||
163 | + int gm_bs_bytes = 4 << gm_bs; | ||
164 | void *tag_mem; | ||
165 | |||
166 | - ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE); | ||
167 | + ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes); | ||
168 | |||
169 | /* Trap if accessing an invalid page. */ | ||
170 | tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, | ||
171 | - LDGM_STGM_SIZE, MMU_DATA_LOAD, | ||
172 | - LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra); | ||
173 | + gm_bs_bytes, MMU_DATA_LOAD, | ||
174 | + gm_bs_bytes / (2 * TAG_GRANULE), ra); | ||
175 | |||
176 | /* | ||
177 | * Tag store only happens if the page support tags, | ||
178 | @@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) | ||
179 | return; | ||
180 | } | ||
181 | |||
182 | - QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6); | ||
183 | /* | ||
184 | - * We are storing 64-bits worth of tags. The ordering of elements | ||
185 | - * within the word corresponds to a 64-bit little-endian operation. | ||
186 | + * The ordering of elements within the word corresponds to | ||
187 | + * a little-endian operation. | ||
188 | */ | ||
189 | - stq_le_p(tag_mem, val); | ||
190 | + switch (gm_bs) { | ||
191 | + case 6: | ||
192 | + stq_le_p(tag_mem, val); | ||
193 | + break; | ||
194 | + default: | ||
195 | + /* cpu configured with unsupported gm blocksize. */ | ||
196 | + g_assert_not_reached(); | ||
197 | + } | ||
198 | } | ||
199 | |||
200 | void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val) | ||
201 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | 71 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
202 | index XXXXXXX..XXXXXXX 100644 | 72 | index XXXXXXX..XXXXXXX 100644 |
203 | --- a/target/arm/tcg/translate-a64.c | 73 | --- a/target/arm/tcg/translate-a64.c |
204 | +++ b/target/arm/tcg/translate-a64.c | 74 | +++ b/target/arm/tcg/translate-a64.c |
75 | @@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, bool isread, | ||
76 | clean_addr = clean_data_tbi(s, tcg_rt); | ||
77 | gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8); | ||
78 | |||
79 | - if (s->ata) { | ||
80 | + if (s->ata[0]) { | ||
81 | /* Extract the tag from the register to match STZGM. */ | ||
82 | tag = tcg_temp_new_i64(); | ||
83 | tcg_gen_shri_i64(tag, tcg_rt, 56); | ||
84 | @@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, bool isread, | ||
85 | clean_addr = clean_data_tbi(s, tcg_rt); | ||
86 | gen_helper_dc_zva(cpu_env, clean_addr); | ||
87 | |||
88 | - if (s->ata) { | ||
89 | + if (s->ata[0]) { | ||
90 | /* Extract the tag from the register to match STZGM. */ | ||
91 | tag = tcg_temp_new_i64(); | ||
92 | tcg_gen_shri_i64(tag, tcg_rt, 56); | ||
93 | @@ -XXX,XX +XXX,XX @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a) | ||
94 | tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop); | ||
95 | |||
96 | /* Perform the tag store, if tag access enabled. */ | ||
97 | - if (s->ata) { | ||
98 | + if (s->ata[0]) { | ||
99 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { | ||
100 | gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr); | ||
101 | } else { | ||
102 | @@ -XXX,XX +XXX,XX @@ static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a) | ||
103 | tcg_gen_addi_i64(addr, addr, a->imm); | ||
104 | tcg_rt = cpu_reg(s, a->rt); | ||
105 | |||
106 | - if (s->ata) { | ||
107 | + if (s->ata[0]) { | ||
108 | gen_helper_stzgm_tags(cpu_env, addr, tcg_rt); | ||
109 | } | ||
110 | /* | ||
205 | @@ -XXX,XX +XXX,XX @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a) | 111 | @@ -XXX,XX +XXX,XX @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a) |
112 | tcg_gen_addi_i64(addr, addr, a->imm); | ||
113 | tcg_rt = cpu_reg(s, a->rt); | ||
114 | |||
115 | - if (s->ata) { | ||
116 | + if (s->ata[0]) { | ||
206 | gen_helper_stgm(cpu_env, addr, tcg_rt); | 117 | gen_helper_stgm(cpu_env, addr, tcg_rt); |
207 | } else { | 118 | } else { |
208 | MMUAccessType acc = MMU_DATA_STORE; | 119 | MMUAccessType acc = MMU_DATA_STORE; |
209 | - int size = 4 << GMID_EL1_BS; | ||
210 | + int size = 4 << s->gm_blocksize; | ||
211 | |||
212 | clean_addr = clean_data_tbi(s, addr); | ||
213 | tcg_gen_andi_i64(clean_addr, clean_addr, -size); | ||
214 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a) | 120 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a) |
121 | tcg_gen_addi_i64(addr, addr, a->imm); | ||
122 | tcg_rt = cpu_reg(s, a->rt); | ||
123 | |||
124 | - if (s->ata) { | ||
125 | + if (s->ata[0]) { | ||
215 | gen_helper_ldgm(tcg_rt, cpu_env, addr); | 126 | gen_helper_ldgm(tcg_rt, cpu_env, addr); |
216 | } else { | 127 | } else { |
217 | MMUAccessType acc = MMU_DATA_LOAD; | 128 | MMUAccessType acc = MMU_DATA_LOAD; |
218 | - int size = 4 << GMID_EL1_BS; | 129 | @@ -XXX,XX +XXX,XX @@ static bool trans_LDG(DisasContext *s, arg_ldst_tag *a) |
219 | + int size = 4 << s->gm_blocksize; | 130 | |
220 | 131 | tcg_gen_andi_i64(addr, addr, -TAG_GRANULE); | |
221 | clean_addr = clean_data_tbi(s, addr); | 132 | tcg_rt = cpu_reg(s, a->rt); |
222 | tcg_gen_andi_i64(clean_addr, clean_addr, -size); | 133 | - if (s->ata) { |
134 | + if (s->ata[0]) { | ||
135 | gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt); | ||
136 | } else { | ||
137 | /* | ||
138 | @@ -XXX,XX +XXX,XX @@ static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair) | ||
139 | tcg_gen_addi_i64(addr, addr, a->imm); | ||
140 | } | ||
141 | tcg_rt = cpu_reg_sp(s, a->rt); | ||
142 | - if (!s->ata) { | ||
143 | + if (!s->ata[0]) { | ||
144 | /* | ||
145 | * For STG and ST2G, we need to check alignment and probe memory. | ||
146 | * TODO: For STZG and STZ2G, we could rely on the stores below, | ||
147 | @@ -XXX,XX +XXX,XX @@ static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a, | ||
148 | tcg_rn = cpu_reg_sp(s, a->rn); | ||
149 | tcg_rd = cpu_reg_sp(s, a->rd); | ||
150 | |||
151 | - if (s->ata) { | ||
152 | + if (s->ata[0]) { | ||
153 | gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, | ||
154 | tcg_constant_i32(imm), | ||
155 | tcg_constant_i32(a->uimm4)); | ||
156 | @@ -XXX,XX +XXX,XX @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) | ||
157 | if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { | ||
158 | goto do_unallocated; | ||
159 | } | ||
160 | - if (s->ata) { | ||
161 | + if (s->ata[0]) { | ||
162 | gen_helper_irg(cpu_reg_sp(s, rd), cpu_env, | ||
163 | cpu_reg_sp(s, rn), cpu_reg(s, rm)); | ||
164 | } else { | ||
223 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, | 165 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, |
224 | dc->cp_regs = arm_cpu->cp_regs; | 166 | dc->bt = EX_TBFLAG_A64(tb_flags, BT); |
225 | dc->features = env->features; | 167 | dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); |
226 | dc->dcz_blocksize = arm_cpu->dcz_blocksize; | 168 | dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV); |
227 | + dc->gm_blocksize = arm_cpu->gm_blocksize; | 169 | - dc->ata = EX_TBFLAG_A64(tb_flags, ATA); |
228 | 170 | + dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA); | |
229 | #ifdef CONFIG_USER_ONLY | 171 | + dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0); |
230 | /* In sve_probe_page, we assume TBI is enabled. */ | 172 | dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE); |
173 | dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); | ||
174 | dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); | ||
231 | -- | 175 | -- |
232 | 2.34.1 | 176 | 2.34.1 |
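[Aside, not part of the pull request: the "case 6" fast path in the LDGM/STGM change above depends on a small piece of arithmetic. A minimal standalone sketch of it, assuming MTE's 16-byte tag granule and 4 bits per tag:]

    #include <assert.h>

    /* Bytes covered by one LDGM/STGM for a given GMID_EL1.BS value. */
    static int gm_block_bytes(int gm_bs)
    {
        return 4 << gm_bs;              /* gm_bs == 6 -> 256 bytes */
    }

    static void ldgm_case6_example(void)
    {
        int bytes = gm_block_bytes(6);  /* 256 bytes per operation */
        int tags  = bytes / 16;         /* one 4-bit tag per 16-byte granule -> 16 tags */
        int bits  = tags * 4;           /* 64 bits: exactly one ldq_le_p()/stq_le_p() */
        assert(bits == 64);
    }

[Any other gm_blocksize value falls into the g_assert_not_reached() default case.]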
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | The FEAT_MOPS SETG* instructions are very similar to the SET* |
---|---|---|---|
2 | instructions, but as well as setting memory contents they also | ||
3 | set the MTE tags. They are architecturally required to operate | ||
4 | on tag-granule aligned regions only. | ||
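[Aside, not part of the patch: a rough standalone sketch of what the tag-granule requirement above amounts to, assuming MTE's 16-byte TAG_GRANULE. The patch itself enforces this via the check_setg_alignment() helper added below, which raises an alignment fault rather than returning a bool.]

    #include <stdbool.h>
    #include <stdint.h>

    #define TAG_GRANULE 16u   /* MTE tag granule size in bytes */

    /* SETG* operands are acceptable only if the length is granule aligned
     * and, for a non-zero length, the start address is too. */
    static bool setg_operands_aligned(uint64_t addr, uint64_t size)
    {
        return (size % TAG_GRANULE) == 0 &&
               (size == 0 || (addr % TAG_GRANULE) == 0);
    }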
2 | 5 | ||
3 | * Add TZASC as unimplemented device. | 6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
4 | - Allow bare metal application to access this (unimplemented) device | 7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
5 | * Add CSU as unimplemented device. | 8 | Message-id: 20230912140434.1333369-10-peter.maydell@linaro.org |
6 | - Allow bare metal application to access this (unimplemented) device | 9 | --- |
7 | * Add 4 missing PWM devices | 10 | target/arm/internals.h | 10 ++++ |
11 | target/arm/tcg/helper-a64.h | 3 ++ | ||
12 | target/arm/tcg/a64.decode | 5 ++ | ||
13 | target/arm/tcg/helper-a64.c | 86 ++++++++++++++++++++++++++++++++-- | ||
14 | target/arm/tcg/mte_helper.c | 40 ++++++++++++++++ | ||
15 | target/arm/tcg/translate-a64.c | 20 +++++--- | ||
16 | 6 files changed, 155 insertions(+), 9 deletions(-) | ||
8 | 17 | ||
9 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | 18 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
10 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 19 | index XXXXXXX..XXXXXXX 100644 |
11 | Message-id: 59e4dc56e14eccfefd379275ec19048dff9c10b3.1692964892.git.jcd@tribudubois.net | 20 | --- a/target/arm/internals.h |
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 21 | +++ b/target/arm/internals.h |
13 | --- | 22 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, |
14 | include/hw/arm/fsl-imx6ul.h | 2 +- | 23 | void mte_check_fail(CPUARMState *env, uint32_t desc, |
15 | hw/arm/fsl-imx6ul.c | 16 ++++++++++++++++ | 24 | uint64_t dirty_ptr, uintptr_t ra); |
16 | 2 files changed, 17 insertions(+), 1 deletion(-) | 25 | |
17 | 26 | +/** | |
18 | diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h | 27 | + * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation |
19 | index XXXXXXX..XXXXXXX 100644 | 28 | + * @env: CPU env |
20 | --- a/include/hw/arm/fsl-imx6ul.h | 29 | + * @dirty_ptr: Start address of memory region (dirty pointer) |
21 | +++ b/include/hw/arm/fsl-imx6ul.h | 30 | + * @size: length of region (guaranteed not to cross page boundary) |
22 | @@ -XXX,XX +XXX,XX @@ enum FslIMX6ULConfiguration { | 31 | + * @desc: MTEDESC descriptor word |
23 | FSL_IMX6UL_NUM_USBS = 2, | 32 | + */ |
24 | FSL_IMX6UL_NUM_SAIS = 3, | 33 | +void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size, |
25 | FSL_IMX6UL_NUM_CANS = 2, | 34 | + uint32_t desc); |
26 | - FSL_IMX6UL_NUM_PWMS = 4, | 35 | + |
27 | + FSL_IMX6UL_NUM_PWMS = 8, | 36 | static inline int allocation_tag_from_addr(uint64_t ptr) |
28 | }; | 37 | { |
29 | 38 | return extract64(ptr, 56, 4); | |
30 | struct FslIMX6ULState { | 39 | diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h |
31 | diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c | 40 | index XXXXXXX..XXXXXXX 100644 |
32 | index XXXXXXX..XXXXXXX 100644 | 41 | --- a/target/arm/tcg/helper-a64.h |
33 | --- a/hw/arm/fsl-imx6ul.c | 42 | +++ b/target/arm/tcg/helper-a64.h |
34 | +++ b/hw/arm/fsl-imx6ul.c | 43 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG, |
35 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | 44 | DEF_HELPER_3(setp, void, env, i32, i32) |
36 | FSL_IMX6UL_PWM2_ADDR, | 45 | DEF_HELPER_3(setm, void, env, i32, i32) |
37 | FSL_IMX6UL_PWM3_ADDR, | 46 | DEF_HELPER_3(sete, void, env, i32, i32) |
38 | FSL_IMX6UL_PWM4_ADDR, | 47 | +DEF_HELPER_3(setgp, void, env, i32, i32) |
39 | + FSL_IMX6UL_PWM5_ADDR, | 48 | +DEF_HELPER_3(setgm, void, env, i32, i32) |
40 | + FSL_IMX6UL_PWM6_ADDR, | 49 | +DEF_HELPER_3(setge, void, env, i32, i32) |
41 | + FSL_IMX6UL_PWM7_ADDR, | 50 | diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode |
42 | + FSL_IMX6UL_PWM8_ADDR, | 51 | index XXXXXXX..XXXXXXX 100644 |
43 | }; | 52 | --- a/target/arm/tcg/a64.decode |
44 | 53 | +++ b/target/arm/tcg/a64.decode | |
45 | snprintf(name, NAME_SIZE, "pwm%d", i); | 54 | @@ -XXX,XX +XXX,XX @@ STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1 |
46 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | 55 | SETP 00 011001110 ..... 00 . . 01 ..... ..... @set |
47 | create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, | 56 | SETM 00 011001110 ..... 01 . . 01 ..... ..... @set |
48 | FSL_IMX6UL_LCDIF_SIZE); | 57 | SETE 00 011001110 ..... 10 . . 01 ..... ..... @set |
49 | 58 | + | |
59 | +# Like SET, but also setting MTE tags | ||
60 | +SETGP 00 011101110 ..... 00 . . 01 ..... ..... @set | ||
61 | +SETGM 00 011101110 ..... 01 . . 01 ..... ..... @set | ||
62 | +SETGE 00 011101110 ..... 10 . . 01 ..... ..... @set | ||
63 | diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/target/arm/tcg/helper-a64.c | ||
66 | +++ b/target/arm/tcg/helper-a64.c | ||
67 | @@ -XXX,XX +XXX,XX @@ static uint64_t set_step(CPUARMState *env, uint64_t toaddr, | ||
68 | return setsize; | ||
69 | } | ||
70 | |||
71 | +/* | ||
72 | + * Similar, but setting tags. The architecture requires us to do this | ||
73 | + * in 16-byte chunks. SETP accesses are not tag checked; they set | ||
74 | + * the tags. | ||
75 | + */ | ||
76 | +static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr, | ||
77 | + uint64_t setsize, uint32_t data, int memidx, | ||
78 | + uint32_t *mtedesc, uintptr_t ra) | ||
79 | +{ | ||
80 | + void *mem; | ||
81 | + uint64_t cleanaddr; | ||
82 | + | ||
83 | + setsize = MIN(setsize, page_limit(toaddr)); | ||
84 | + | ||
85 | + cleanaddr = useronly_clean_ptr(toaddr); | ||
50 | + /* | 86 | + /* |
51 | + * CSU | 87 | + * Trapless lookup: returns NULL for invalid page, I/O, |
88 | + * watchpoints, clean pages, etc. | ||
52 | + */ | 89 | + */ |
53 | + create_unimplemented_device("csu", FSL_IMX6UL_CSU_ADDR, | 90 | + mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx); |
54 | + FSL_IMX6UL_CSU_SIZE); | 91 | + |
92 | +#ifndef CONFIG_USER_ONLY | ||
93 | + if (unlikely(!mem)) { | ||
94 | + /* | ||
95 | + * Slow-path: just do one write. This will handle the | ||
96 | + * watchpoint, invalid page, etc handling correctly. | ||
97 | + * The architecture requires that we do 16 bytes at a time, | ||
98 | + * and we know both ptr and size are 16 byte aligned. | ||
99 | + * For clean code pages, the next iteration will see | ||
100 | + * the page dirty and will use the fast path. | ||
101 | + */ | ||
102 | + uint64_t repldata = data * 0x0101010101010101ULL; | ||
103 | + MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx); | ||
104 | + cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra); | ||
105 | + mte_mops_set_tags(env, toaddr, 16, *mtedesc); | ||
106 | + return 16; | ||
107 | + } | ||
108 | +#endif | ||
109 | + /* Easy case: just memset the host memory */ | ||
110 | + memset(mem, data, setsize); | ||
111 | + mte_mops_set_tags(env, toaddr, setsize, *mtedesc); | ||
112 | + return setsize; | ||
113 | +} | ||
114 | + | ||
115 | typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr, | ||
116 | uint64_t setsize, uint32_t data, | ||
117 | int memidx, uint32_t *mtedesc, uintptr_t ra); | ||
118 | @@ -XXX,XX +XXX,XX @@ static bool mte_checks_needed(uint64_t ptr, uint32_t desc) | ||
119 | return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr)); | ||
120 | } | ||
121 | |||
122 | +/* Take an exception if the SETG addr/size are not granule aligned */ | ||
123 | +static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
124 | + uint32_t memidx, uintptr_t ra) | ||
125 | +{ | ||
126 | + if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) || | ||
127 | + !QEMU_IS_ALIGNED(size, TAG_GRANULE)) { | ||
128 | + arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE, | ||
129 | + memidx, ra); | ||
130 | + | ||
131 | + } | ||
132 | +} | ||
133 | + | ||
134 | /* | ||
135 | * For the Memory Set operation, our implementation chooses | ||
136 | * always to use "option A", where we update Xd to the final | ||
137 | @@ -XXX,XX +XXX,XX @@ static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
138 | |||
139 | if (setsize > INT64_MAX) { | ||
140 | setsize = INT64_MAX; | ||
141 | + if (is_setg) { | ||
142 | + setsize &= ~0xf; | ||
143 | + } | ||
144 | } | ||
145 | |||
146 | - if (!mte_checks_needed(toaddr, mtedesc)) { | ||
147 | + if (unlikely(is_setg)) { | ||
148 | + check_setg_alignment(env, toaddr, setsize, memidx, ra); | ||
149 | + } else if (!mte_checks_needed(toaddr, mtedesc)) { | ||
150 | mtedesc = 0; | ||
151 | } | ||
152 | |||
153 | @@ -XXX,XX +XXX,XX @@ void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
154 | do_setp(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
155 | } | ||
156 | |||
157 | +void HELPER(setgp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
158 | +{ | ||
159 | + do_setp(env, syndrome, mtedesc, set_step_tags, true, GETPC()); | ||
160 | +} | ||
161 | + | ||
162 | static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
163 | StepFn *stepfn, bool is_setg, uintptr_t ra) | ||
164 | { | ||
165 | @@ -XXX,XX +XXX,XX @@ static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
166 | * have an IMPDEF check for alignment here. | ||
167 | */ | ||
168 | |||
169 | - if (!mte_checks_needed(toaddr, mtedesc)) { | ||
170 | + if (unlikely(is_setg)) { | ||
171 | + check_setg_alignment(env, toaddr, setsize, memidx, ra); | ||
172 | + } else if (!mte_checks_needed(toaddr, mtedesc)) { | ||
173 | mtedesc = 0; | ||
174 | } | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
177 | do_setm(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
178 | } | ||
179 | |||
180 | +void HELPER(setgm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
181 | +{ | ||
182 | + do_setm(env, syndrome, mtedesc, set_step_tags, true, GETPC()); | ||
183 | +} | ||
184 | + | ||
185 | static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
186 | StepFn *stepfn, bool is_setg, uintptr_t ra) | ||
187 | { | ||
188 | @@ -XXX,XX +XXX,XX @@ static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, | ||
189 | mops_mismatch_exception_target_el(env), ra); | ||
190 | } | ||
191 | |||
192 | - if (!mte_checks_needed(toaddr, mtedesc)) { | ||
193 | + if (unlikely(is_setg)) { | ||
194 | + check_setg_alignment(env, toaddr, setsize, memidx, ra); | ||
195 | + } else if (!mte_checks_needed(toaddr, mtedesc)) { | ||
196 | mtedesc = 0; | ||
197 | } | ||
198 | |||
199 | @@ -XXX,XX +XXX,XX @@ void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
200 | { | ||
201 | do_sete(env, syndrome, mtedesc, set_step, false, GETPC()); | ||
202 | } | ||
203 | + | ||
204 | +void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
205 | +{ | ||
206 | + do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC()); | ||
207 | +} | ||
208 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | ||
209 | index XXXXXXX..XXXXXXX 100644 | ||
210 | --- a/target/arm/tcg/mte_helper.c | ||
211 | +++ b/target/arm/tcg/mte_helper.c | ||
212 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
213 | return n * TAG_GRANULE - (ptr - tag_first); | ||
214 | } | ||
215 | } | ||
216 | + | ||
217 | +void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
218 | + uint32_t desc) | ||
219 | +{ | ||
220 | + int mmu_idx, tag_count; | ||
221 | + uint64_t ptr_tag; | ||
222 | + void *mem; | ||
223 | + | ||
224 | + if (!desc) { | ||
225 | + /* Tags not actually enabled */ | ||
226 | + return; | ||
227 | + } | ||
228 | + | ||
229 | + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
230 | + /* True probe: this will never fault */ | ||
231 | + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size, | ||
232 | + MMU_DATA_STORE, true, 0); | ||
233 | + if (!mem) { | ||
234 | + return; | ||
235 | + } | ||
55 | + | 236 | + |
56 | + /* | 237 | + /* |
57 | + * TZASC | 238 | + * We know that ptr and size are both TAG_GRANULE aligned; store |
239 | + * the tag from the pointer value into the tag memory. | ||
58 | + */ | 240 | + */ |
59 | + create_unimplemented_device("tzasc", FSL_IMX6UL_TZASC_ADDR, | 241 | + ptr_tag = allocation_tag_from_addr(ptr); |
60 | + FSL_IMX6UL_TZASC_SIZE); | 242 | + tag_count = size / TAG_GRANULE; |
243 | + if (ptr & TAG_GRANULE) { | ||
244 | + /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */ | ||
245 | + store_tag1_parallel(TAG_GRANULE, mem, ptr_tag); | ||
246 | + mem++; | ||
247 | + tag_count--; | ||
248 | + } | ||
249 | + memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2); | ||
250 | + if (tag_count & 1) { | ||
251 | + /* Final trailing unaligned nibble */ | ||
252 | + mem += tag_count / 2; | ||
253 | + store_tag1_parallel(0, mem, ptr_tag); | ||
254 | + } | ||
255 | +} | ||
256 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
257 | index XXXXXXX..XXXXXXX 100644 | ||
258 | --- a/target/arm/tcg/translate-a64.c | ||
259 | +++ b/target/arm/tcg/translate-a64.c | ||
260 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true) | ||
261 | |||
262 | typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32); | ||
263 | |||
264 | -static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn) | ||
265 | +static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, | ||
266 | + bool is_setg, SetFn fn) | ||
267 | { | ||
268 | int memidx; | ||
269 | uint32_t syndrome, desc = 0; | ||
270 | |||
271 | + if (is_setg && !dc_isar_feature(aa64_mte, s)) { | ||
272 | + return false; | ||
273 | + } | ||
61 | + | 274 | + |
62 | /* | 275 | /* |
63 | * ROM memory | 276 | * UNPREDICTABLE cases: we choose to UNDEF, which allows |
277 | * us to pull this check before the CheckMOPSEnabled() test | ||
278 | @@ -XXX,XX +XXX,XX @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn) | ||
279 | * We pass option_a == true, matching our implementation; | ||
280 | * we pass wrong_option == false: helper function may set that bit. | ||
64 | */ | 281 | */ |
282 | - syndrome = syn_mop(true, false, (a->nontemp << 1) | a->unpriv, | ||
283 | + syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv, | ||
284 | is_epilogue, false, true, a->rd, a->rs, a->rn); | ||
285 | |||
286 | - if (s->mte_active[a->unpriv]) { | ||
287 | + if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) { | ||
288 | /* We may need to do MTE tag checking, so assemble the descriptor */ | ||
289 | desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); | ||
290 | desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); | ||
291 | @@ -XXX,XX +XXX,XX @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn) | ||
292 | return true; | ||
293 | } | ||
294 | |||
295 | -TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, gen_helper_setp) | ||
296 | -TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, gen_helper_setm) | ||
297 | -TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, gen_helper_sete) | ||
298 | +TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp) | ||
299 | +TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm) | ||
300 | +TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete) | ||
301 | +TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp) | ||
302 | +TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm) | ||
303 | +TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge) | ||
304 | |||
305 | typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); | ||
306 | |||
65 | -- | 307 | -- |
66 | 2.34.1 | 308 | 2.34.1 |
67 | |||
1 | Where architecturally one ARM_FEATURE_X flag implies another | 1 | The FEAT_MOPS memory copy operations need an extra helper routine |
---|---|---|---|
2 | ARM_FEATURE_Y, we allow the CPU init function to only set X, and then | 2 | for checking for MTE tag checking failures beyond the ones we |
3 | set Y for it. Currently we do this in two places -- we set a few | 3 | already added for memory set operations: |
4 | flags in arm_cpu_post_init() because we need them to decide which | 4 | * mte_mops_probe_rev() does the same job as mte_mops_probe(), but |
5 | properties to create on the CPU object, and then we do the rest in | 5 | it checks tags starting at the provided address and working |
6 | arm_cpu_realizefn(). However, this is fragile, because it's easy to | 6 | backwards, rather than forwards |
7 | add a new property and not notice that this means that an X-implies-Y | ||
8 | check now has to move from realize to post-init. | ||
9 | |||
10 | As a specific example, the pmsav7-dregion property is conditional | ||
11 | on ARM_FEATURE_PMSA && ARM_FEATURE_V7, which means it won't appear | ||
12 | on the Cortex-M33 and -M55, because they set ARM_FEATURE_V8 and | ||
13 | rely on V8-implies-V7, which doesn't happen until the realizefn. | ||
14 | |||
15 | Move all of these X-implies-Y checks into a new function, which | ||
16 | we call at the top of arm_cpu_post_init(), so the feature bits | ||
17 | are available at that point. | ||
18 | |||
19 | This does now give us the reverse issue, that if there's a feature | ||
20 | bit which is enabled or disabled by the setting of a property then | ||
21 | then X-implies-Y features that are dependent on that property need to | ||
22 | be in realize, not in this new function. But the only one of those | ||
23 | is the "EL3 implies VBAR" which is already in the right place, so | ||
24 | putting things this way round seems better to me. | ||
25 | 7 | ||
26 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
27 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
28 | Message-id: 20230724174335.2150499-2-peter.maydell@linaro.org | 10 | Message-id: 20230912140434.1333369-11-peter.maydell@linaro.org |
29 | --- | 11 | --- |
30 | target/arm/cpu.c | 179 +++++++++++++++++++++++++---------------------- | 12 | target/arm/internals.h | 17 +++++++ |
31 | 1 file changed, 97 insertions(+), 82 deletions(-) | 13 | target/arm/tcg/mte_helper.c | 99 +++++++++++++++++++++++++++++++++++++ |
14 | 2 files changed, 116 insertions(+) | ||
32 | 15 | ||
33 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | 16 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
34 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/target/arm/cpu.c | 18 | --- a/target/arm/internals.h |
36 | +++ b/target/arm/cpu.c | 19 | +++ b/target/arm/internals.h |
37 | @@ -XXX,XX +XXX,XX @@ unsigned int gt_cntfrq_period_ns(ARMCPU *cpu) | 20 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); |
38 | NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1; | 21 | uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, |
22 | uint32_t desc); | ||
23 | |||
24 | +/** | ||
25 | + * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS | ||
26 | + * operation going in the reverse direction | ||
27 | + * @env: CPU env | ||
28 | + * @ptr: *end* address of memory region (dirty pointer) | ||
29 | + * @size: length of region (guaranteed not to cross a page boundary) | ||
30 | + * @desc: MTEDESC descriptor word (0 means no MTE checks) | ||
31 | + * Returns: the size of the region that can be copied without hitting | ||
32 | + * an MTE tag failure | ||
33 | + * | ||
34 | + * Note that we assume that the caller has already checked the TBI | ||
35 | + * and TCMA bits with mte_checks_needed() and an MTE check is definitely | ||
36 | + * required. | ||
37 | + */ | ||
38 | +uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
39 | + uint32_t desc); | ||
40 | + | ||
41 | /** | ||
42 | * mte_check_fail: Record an MTE tag check failure | ||
43 | * @env: CPU env | ||
44 | diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/target/arm/tcg/mte_helper.c | ||
47 | +++ b/target/arm/tcg/mte_helper.c | ||
48 | @@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count) | ||
49 | return n; | ||
39 | } | 50 | } |
40 | 51 | ||
41 | +static void arm_cpu_propagate_feature_implications(ARMCPU *cpu) | 52 | +/** |
53 | + * checkNrev: | ||
54 | + * @tag: tag memory to test | ||
55 | + * @odd: true to begin testing at tags at odd nibble | ||
56 | + * @cmp: the tag to compare against | ||
57 | + * @count: number of tags to test | ||
58 | + * | ||
59 | + * Return the number of successful tests. | ||
60 | + * Thus a return value < @count indicates a failure. | ||
61 | + * | ||
62 | + * This is like checkN, but it runs backwards, checking the | ||
63 | + * tags starting with @tag and then the tags preceding it. | ||
64 | + * This is needed by the backwards-memory-copying operations. | ||
65 | + */ | ||
66 | +static int checkNrev(uint8_t *mem, int odd, int cmp, int count) | ||
42 | +{ | 67 | +{ |
43 | + CPUARMState *env = &cpu->env; | 68 | + int n = 0, diff; |
44 | + bool no_aa32 = false; | ||
45 | + | 69 | + |
46 | + /* | 70 | + /* Replicate the test tag and compare. */ |
47 | + * Some features automatically imply others: set the feature | 71 | + cmp *= 0x11; |
48 | + * bits explicitly for these cases. | 72 | + diff = *mem-- ^ cmp; |
49 | + */ | ||
50 | + | 73 | + |
51 | + if (arm_feature(env, ARM_FEATURE_M)) { | 74 | + if (!odd) { |
52 | + set_feature(env, ARM_FEATURE_PMSA); | 75 | + goto start_even; |
53 | + } | 76 | + } |
54 | + | 77 | + |
55 | + if (arm_feature(env, ARM_FEATURE_V8)) { | 78 | + while (1) { |
56 | + if (arm_feature(env, ARM_FEATURE_M)) { | 79 | + /* Test odd tag. */ |
57 | + set_feature(env, ARM_FEATURE_V7); | 80 | + if (unlikely((diff) & 0xf0)) { |
58 | + } else { | 81 | + break; |
59 | + set_feature(env, ARM_FEATURE_V7VE); | ||
60 | + } | 82 | + } |
83 | + if (++n == count) { | ||
84 | + break; | ||
85 | + } | ||
86 | + | ||
87 | + start_even: | ||
88 | + /* Test even tag. */ | ||
89 | + if (unlikely((diff) & 0x0f)) { | ||
90 | + break; | ||
91 | + } | ||
92 | + if (++n == count) { | ||
93 | + break; | ||
94 | + } | ||
95 | + | ||
96 | + diff = *mem-- ^ cmp; | ||
97 | + } | ||
98 | + return n; | ||
99 | +} | ||
100 | + | ||
101 | /** | ||
102 | * mte_probe_int() - helper for mte_probe and mte_check | ||
103 | * @env: CPU environment | ||
104 | @@ -XXX,XX +XXX,XX @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
105 | } | ||
106 | } | ||
107 | |||
108 | +uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, | ||
109 | + uint32_t desc) | ||
110 | +{ | ||
111 | + int mmu_idx, tag_count; | ||
112 | + uint64_t ptr_tag, tag_first, tag_last; | ||
113 | + void *mem; | ||
114 | + bool w = FIELD_EX32(desc, MTEDESC, WRITE); | ||
115 | + uint32_t n; | ||
116 | + | ||
117 | + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); | ||
118 | + /* True probe; this will never fault */ | ||
119 | + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, | ||
120 | + w ? MMU_DATA_STORE : MMU_DATA_LOAD, | ||
121 | + size, MMU_DATA_LOAD, true, 0); | ||
122 | + if (!mem) { | ||
123 | + return size; | ||
61 | + } | 124 | + } |
62 | + | 125 | + |
63 | + /* | 126 | + /* |
64 | + * There exist AArch64 cpus without AArch32 support. When KVM | 127 | + * TODO: checkNrev() is not designed for checks of the size we expect |
65 | + * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN. | 128 | + * for FEAT_MOPS operations, so we should implement this differently. |
66 | + * Similarly, we cannot check ID_AA64PFR0 without AArch64 support. | 129 | + * Maybe we should do something like |
67 | + * As a general principle, we also do not make ID register | 130 | + * if (region start and size are aligned nicely) { |
68 | + * consistency checks anywhere unless using TCG, because only | 131 | + * do direct loads of 64 tag bits at a time; |
69 | + * for TCG would a consistency-check failure be a QEMU bug. | 132 | + * } else { |
133 | + * call checkN() | ||
134 | + * } | ||
70 | + */ | 135 | + */ |
71 | + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { | 136 | + /* Round the bounds to the tag granule, and compute the number of tags. */ |
72 | + no_aa32 = !cpu_isar_feature(aa64_aa32, cpu); | 137 | + ptr_tag = allocation_tag_from_addr(ptr); |
138 | + tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE); | ||
139 | + tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); | ||
140 | + tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; | ||
141 | + n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count); | ||
142 | + if (likely(n == tag_count)) { | ||
143 | + return size; | ||
73 | + } | 144 | + } |
74 | + | 145 | + |
75 | + if (arm_feature(env, ARM_FEATURE_V7VE)) { | 146 | + /* |
76 | + /* | 147 | + * Failure; for the first granule, it's at @ptr. Otherwise |
77 | + * v7 Virtualization Extensions. In real hardware this implies | 148 | + * it's at the last byte of the nth granule. Calculate how |
78 | + * EL2 and also the presence of the Security Extensions. | 149 | + * many bytes we can access without hitting that failure. |
79 | + * For QEMU, for backwards-compatibility we implement some | 150 | + */ |
80 | + * CPUs or CPU configs which have no actual EL2 or EL3 but do | 151 | + if (n == 0) { |
81 | + * include the various other features that V7VE implies. | 152 | + return 0; |
82 | + * Presence of EL2 itself is ARM_FEATURE_EL2, and of the | 153 | + } else { |
83 | + * Security Extensions is ARM_FEATURE_EL3. | 154 | + return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last); |
84 | + */ | ||
85 | + assert(!tcg_enabled() || no_aa32 || | ||
86 | + cpu_isar_feature(aa32_arm_div, cpu)); | ||
87 | + set_feature(env, ARM_FEATURE_LPAE); | ||
88 | + set_feature(env, ARM_FEATURE_V7); | ||
89 | + } | ||
90 | + if (arm_feature(env, ARM_FEATURE_V7)) { | ||
91 | + set_feature(env, ARM_FEATURE_VAPA); | ||
92 | + set_feature(env, ARM_FEATURE_THUMB2); | ||
93 | + set_feature(env, ARM_FEATURE_MPIDR); | ||
94 | + if (!arm_feature(env, ARM_FEATURE_M)) { | ||
95 | + set_feature(env, ARM_FEATURE_V6K); | ||
96 | + } else { | ||
97 | + set_feature(env, ARM_FEATURE_V6); | ||
98 | + } | ||
99 | + | ||
100 | + /* | ||
101 | + * Always define VBAR for V7 CPUs even if it doesn't exist in | ||
102 | + * non-EL3 configs. This is needed by some legacy boards. | ||
103 | + */ | ||
104 | + set_feature(env, ARM_FEATURE_VBAR); | ||
105 | + } | ||
106 | + if (arm_feature(env, ARM_FEATURE_V6K)) { | ||
107 | + set_feature(env, ARM_FEATURE_V6); | ||
108 | + set_feature(env, ARM_FEATURE_MVFR); | ||
109 | + } | ||
110 | + if (arm_feature(env, ARM_FEATURE_V6)) { | ||
111 | + set_feature(env, ARM_FEATURE_V5); | ||
112 | + if (!arm_feature(env, ARM_FEATURE_M)) { | ||
113 | + assert(!tcg_enabled() || no_aa32 || | ||
114 | + cpu_isar_feature(aa32_jazelle, cpu)); | ||
115 | + set_feature(env, ARM_FEATURE_AUXCR); | ||
116 | + } | ||
117 | + } | ||
118 | + if (arm_feature(env, ARM_FEATURE_V5)) { | ||
119 | + set_feature(env, ARM_FEATURE_V4T); | ||
120 | + } | ||
121 | + if (arm_feature(env, ARM_FEATURE_LPAE)) { | ||
122 | + set_feature(env, ARM_FEATURE_V7MP); | ||
123 | + } | ||
124 | + if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { | ||
125 | + set_feature(env, ARM_FEATURE_CBAR); | ||
126 | + } | ||
127 | + if (arm_feature(env, ARM_FEATURE_THUMB2) && | ||
128 | + !arm_feature(env, ARM_FEATURE_M)) { | ||
129 | + set_feature(env, ARM_FEATURE_THUMB_DSP); | ||
130 | + } | 155 | + } |
131 | +} | 156 | +} |
132 | + | 157 | + |
133 | void arm_cpu_post_init(Object *obj) | 158 | void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size, |
159 | uint32_t desc) | ||
134 | { | 160 | { |
135 | ARMCPU *cpu = ARM_CPU(obj); | ||
136 | |||
137 | - /* M profile implies PMSA. We have to do this here rather than | ||
138 | - * in realize with the other feature-implication checks because | ||
139 | - * we look at the PMSA bit to see if we should add some properties. | ||
140 | + /* | ||
141 | + * Some features imply others. Figure this out now, because we | ||
142 | + * are going to look at the feature bits in deciding which | ||
143 | + * properties to add. | ||
144 | */ | ||
145 | - if (arm_feature(&cpu->env, ARM_FEATURE_M)) { | ||
146 | - set_feature(&cpu->env, ARM_FEATURE_PMSA); | ||
147 | - } | ||
148 | + arm_cpu_propagate_feature_implications(cpu); | ||
149 | |||
150 | if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) || | ||
151 | arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) { | ||
152 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
153 | CPUARMState *env = &cpu->env; | ||
154 | int pagebits; | ||
155 | Error *local_err = NULL; | ||
156 | - bool no_aa32 = false; | ||
157 | |||
158 | /* Use pc-relative instructions in system-mode */ | ||
159 | #ifndef CONFIG_USER_ONLY | ||
160 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
161 | cpu->isar.id_isar3 = u; | ||
162 | } | ||
163 | |||
164 | - /* Some features automatically imply others: */ | ||
165 | - if (arm_feature(env, ARM_FEATURE_V8)) { | ||
166 | - if (arm_feature(env, ARM_FEATURE_M)) { | ||
167 | - set_feature(env, ARM_FEATURE_V7); | ||
168 | - } else { | ||
169 | - set_feature(env, ARM_FEATURE_V7VE); | ||
170 | - } | ||
171 | - } | ||
172 | - | ||
173 | - /* | ||
174 | - * There exist AArch64 cpus without AArch32 support. When KVM | ||
175 | - * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN. | ||
176 | - * Similarly, we cannot check ID_AA64PFR0 without AArch64 support. | ||
177 | - * As a general principle, we also do not make ID register | ||
178 | - * consistency checks anywhere unless using TCG, because only | ||
179 | - * for TCG would a consistency-check failure be a QEMU bug. | ||
180 | - */ | ||
181 | - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { | ||
182 | - no_aa32 = !cpu_isar_feature(aa64_aa32, cpu); | ||
183 | - } | ||
184 | - | ||
185 | - if (arm_feature(env, ARM_FEATURE_V7VE)) { | ||
186 | - /* v7 Virtualization Extensions. In real hardware this implies | ||
187 | - * EL2 and also the presence of the Security Extensions. | ||
188 | - * For QEMU, for backwards-compatibility we implement some | ||
189 | - * CPUs or CPU configs which have no actual EL2 or EL3 but do | ||
190 | - * include the various other features that V7VE implies. | ||
191 | - * Presence of EL2 itself is ARM_FEATURE_EL2, and of the | ||
192 | - * Security Extensions is ARM_FEATURE_EL3. | ||
193 | - */ | ||
194 | - assert(!tcg_enabled() || no_aa32 || | ||
195 | - cpu_isar_feature(aa32_arm_div, cpu)); | ||
196 | - set_feature(env, ARM_FEATURE_LPAE); | ||
197 | - set_feature(env, ARM_FEATURE_V7); | ||
198 | - } | ||
199 | - if (arm_feature(env, ARM_FEATURE_V7)) { | ||
200 | - set_feature(env, ARM_FEATURE_VAPA); | ||
201 | - set_feature(env, ARM_FEATURE_THUMB2); | ||
202 | - set_feature(env, ARM_FEATURE_MPIDR); | ||
203 | - if (!arm_feature(env, ARM_FEATURE_M)) { | ||
204 | - set_feature(env, ARM_FEATURE_V6K); | ||
205 | - } else { | ||
206 | - set_feature(env, ARM_FEATURE_V6); | ||
207 | - } | ||
208 | - | ||
209 | - /* Always define VBAR for V7 CPUs even if it doesn't exist in | ||
210 | - * non-EL3 configs. This is needed by some legacy boards. | ||
211 | - */ | ||
212 | - set_feature(env, ARM_FEATURE_VBAR); | ||
213 | - } | ||
214 | - if (arm_feature(env, ARM_FEATURE_V6K)) { | ||
215 | - set_feature(env, ARM_FEATURE_V6); | ||
216 | - set_feature(env, ARM_FEATURE_MVFR); | ||
217 | - } | ||
218 | - if (arm_feature(env, ARM_FEATURE_V6)) { | ||
219 | - set_feature(env, ARM_FEATURE_V5); | ||
220 | - if (!arm_feature(env, ARM_FEATURE_M)) { | ||
221 | - assert(!tcg_enabled() || no_aa32 || | ||
222 | - cpu_isar_feature(aa32_jazelle, cpu)); | ||
223 | - set_feature(env, ARM_FEATURE_AUXCR); | ||
224 | - } | ||
225 | - } | ||
226 | - if (arm_feature(env, ARM_FEATURE_V5)) { | ||
227 | - set_feature(env, ARM_FEATURE_V4T); | ||
228 | - } | ||
229 | - if (arm_feature(env, ARM_FEATURE_LPAE)) { | ||
230 | - set_feature(env, ARM_FEATURE_V7MP); | ||
231 | - } | ||
232 | - if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { | ||
233 | - set_feature(env, ARM_FEATURE_CBAR); | ||
234 | - } | ||
235 | - if (arm_feature(env, ARM_FEATURE_THUMB2) && | ||
236 | - !arm_feature(env, ARM_FEATURE_M)) { | ||
237 | - set_feature(env, ARM_FEATURE_THUMB_DSP); | ||
238 | - } | ||
239 | |||
240 | /* | ||
241 | * We rely on no XScale CPU having VFP so we can use the same bits in the | ||
242 | -- | 161 | -- |
243 | 2.34.1 | 162 | 2.34.1 | diff view generated by jsdifflib |
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | The FEAT_MOPS CPY* instructions implement memory copies. These |
---|---|---|---|
2 | come in both "always forwards" (memcpy-style) and "overlap OK" | ||
3 | (memmove-style) flavours. | ||
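[Aside, not part of the patch: how a guest typically issues these. A FEAT_MOPS copy is always the prologue/main/epilogue triple; the sketch below is the memmove-style variant and is hypothetical guest-side code, assuming a toolchain whose -march includes +mops. The CPYF* forms have the same shape but always copy forwards.]

    /* Illustration only, not QEMU code. dst, src and len are all updated
     * by the hardware (or by QEMU's emulation) as the copy progresses. */
    static inline void mops_memmove(void *dst, const void *src, unsigned long len)
    {
        asm volatile(
            "cpyp [%0]!, [%1]!, %2!\n\t"
            "cpym [%0]!, [%1]!, %2!\n\t"
            "cpye [%0]!, [%1]!, %2!"
            : "+r"(dst), "+r"(src), "+r"(len)
            :
            : "memory", "cc");
    }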
2 | 4 | ||
3 | Like FEAT_TRF (Self-hosted Trace Extension), suppress tracing | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
4 | external to the cpu, which is out of scope for QEMU. | 6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20230912140434.1333369-12-peter.maydell@linaro.org | ||
8 | --- | ||
9 | target/arm/tcg/helper-a64.h | 7 + | ||
10 | target/arm/tcg/a64.decode | 14 + | ||
11 | target/arm/tcg/helper-a64.c | 454 +++++++++++++++++++++++++++++++++ | ||
12 | target/arm/tcg/translate-a64.c | 60 +++++ | ||
13 | 4 files changed, 535 insertions(+) | ||
5 | 14 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 15 | diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Message-id: 20230811214031.171020-10-richard.henderson@linaro.org | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | target/arm/cpu.c | 3 +++ | ||
12 | 1 file changed, 3 insertions(+) | ||
13 | |||
14 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/cpu.c | 17 | --- a/target/arm/tcg/helper-a64.h |
17 | +++ b/target/arm/cpu.c | 18 | +++ b/target/arm/tcg/helper-a64.h |
18 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | 19 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(sete, void, env, i32, i32) |
19 | /* FEAT_SPE (Statistical Profiling Extension) */ | 20 | DEF_HELPER_3(setgp, void, env, i32, i32) |
20 | cpu->isar.id_aa64dfr0 = | 21 | DEF_HELPER_3(setgm, void, env, i32, i32) |
21 | FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0); | 22 | DEF_HELPER_3(setge, void, env, i32, i32) |
22 | + /* FEAT_TRBE (Trace Buffer Extension) */ | 23 | + |
23 | + cpu->isar.id_aa64dfr0 = | 24 | +DEF_HELPER_4(cpyp, void, env, i32, i32, i32) |
24 | + FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0); | 25 | +DEF_HELPER_4(cpym, void, env, i32, i32, i32) |
25 | /* FEAT_TRF (Self-hosted Trace Extension) */ | 26 | +DEF_HELPER_4(cpye, void, env, i32, i32, i32) |
26 | cpu->isar.id_aa64dfr0 = | 27 | +DEF_HELPER_4(cpyfp, void, env, i32, i32, i32) |
27 | FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0); | 28 | +DEF_HELPER_4(cpyfm, void, env, i32, i32, i32) |
29 | +DEF_HELPER_4(cpyfe, void, env, i32, i32, i32) | ||
30 | diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/target/arm/tcg/a64.decode | ||
33 | +++ b/target/arm/tcg/a64.decode | ||
34 | @@ -XXX,XX +XXX,XX @@ SETE 00 011001110 ..... 10 . . 01 ..... ..... @set | ||
35 | SETGP 00 011101110 ..... 00 . . 01 ..... ..... @set | ||
36 | SETGM 00 011101110 ..... 01 . . 01 ..... ..... @set | ||
37 | SETGE 00 011101110 ..... 10 . . 01 ..... ..... @set | ||
38 | + | ||
39 | +# Memmove/Memcopy: the CPY insns allow overlapping src/dest and | ||
40 | +# copy in the correct direction; the CPYF insns always copy forwards. | ||
41 | +# | ||
42 | +# options has the nontemporal and unpriv bits for src and dest | ||
43 | +&cpy rs rn rd options | ||
44 | +@cpy .. ... . ..... rs:5 options:4 .. rn:5 rd:5 &cpy | ||
45 | + | ||
46 | +CPYFP 00 011 0 01000 ..... .... 01 ..... ..... @cpy | ||
47 | +CPYFM 00 011 0 01010 ..... .... 01 ..... ..... @cpy | ||
48 | +CPYFE 00 011 0 01100 ..... .... 01 ..... ..... @cpy | ||
49 | +CPYP 00 011 1 01000 ..... .... 01 ..... ..... @cpy | ||
50 | +CPYM 00 011 1 01010 ..... .... 01 ..... ..... @cpy | ||
51 | +CPYE 00 011 1 01100 ..... .... 01 ..... ..... @cpy | ||
52 | diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/target/arm/tcg/helper-a64.c | ||
55 | +++ b/target/arm/tcg/helper-a64.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static uint64_t page_limit(uint64_t addr) | ||
57 | return TARGET_PAGE_ALIGN(addr + 1) - addr; | ||
58 | } | ||
59 | |||
60 | +/* | ||
61 | + * Return the number of bytes we can copy starting from addr and working | ||
62 | + * backwards without crossing a page boundary. | ||
63 | + */ | ||
64 | +static uint64_t page_limit_rev(uint64_t addr) | ||
65 | +{ | ||
66 | + return (addr & ~TARGET_PAGE_MASK) + 1; | ||
67 | +} | ||
68 | + | ||
69 | /* | ||
70 | * Perform part of a memory set on an area of guest memory starting at | ||
71 | * toaddr (a dirty address) and extending for setsize bytes. | ||
72 | @@ -XXX,XX +XXX,XX @@ void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) | ||
73 | { | ||
74 | do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC()); | ||
75 | } | ||
76 | + | ||
77 | +/* | ||
78 | + * Perform part of a memory copy from the guest memory at fromaddr | ||
79 | + * and extending for copysize bytes, to the guest memory at | ||
80 | + * toaddr. Both addreses are dirty. | ||
81 | + * | ||
82 | + * Returns the number of bytes actually set, which might be less than | ||
83 | + * copysize; the caller should loop until the whole copy has been done. | ||
84 | + * The caller should ensure that the guest registers are correct | ||
85 | + * for the possibility that the first byte of the copy encounters | ||
86 | + * an exception or watchpoint. We guarantee not to take any faults | ||
87 | + * for bytes other than the first. | ||
88 | + */ | ||
89 | +static uint64_t copy_step(CPUARMState *env, uint64_t toaddr, uint64_t fromaddr, | ||
90 | + uint64_t copysize, int wmemidx, int rmemidx, | ||
91 | + uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra) | ||
92 | +{ | ||
93 | + void *rmem; | ||
94 | + void *wmem; | ||
95 | + | ||
96 | + /* Don't cross a page boundary on either source or destination */ | ||
97 | + copysize = MIN(copysize, page_limit(toaddr)); | ||
98 | + copysize = MIN(copysize, page_limit(fromaddr)); | ||
99 | + /* | ||
100 | + * Handle MTE tag checks: either handle the tag mismatch for byte 0, | ||
101 | + * or else copy up to but not including the byte with the mismatch. | ||
102 | + */ | ||
103 | + if (*rdesc) { | ||
104 | + uint64_t mtesize = mte_mops_probe(env, fromaddr, copysize, *rdesc); | ||
105 | + if (mtesize == 0) { | ||
106 | + mte_check_fail(env, *rdesc, fromaddr, ra); | ||
107 | + *rdesc = 0; | ||
108 | + } else { | ||
109 | + copysize = MIN(copysize, mtesize); | ||
110 | + } | ||
111 | + } | ||
112 | + if (*wdesc) { | ||
113 | + uint64_t mtesize = mte_mops_probe(env, toaddr, copysize, *wdesc); | ||
114 | + if (mtesize == 0) { | ||
115 | + mte_check_fail(env, *wdesc, toaddr, ra); | ||
116 | + *wdesc = 0; | ||
117 | + } else { | ||
118 | + copysize = MIN(copysize, mtesize); | ||
119 | + } | ||
120 | + } | ||
121 | + | ||
122 | + toaddr = useronly_clean_ptr(toaddr); | ||
123 | + fromaddr = useronly_clean_ptr(fromaddr); | ||
124 | + /* Trapless lookup of whether we can get a host memory pointer */ | ||
125 | + wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx); | ||
126 | + rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx); | ||
127 | + | ||
128 | +#ifndef CONFIG_USER_ONLY | ||
129 | + /* | ||
130 | + * If we don't have host memory for both source and dest then just | ||
131 | + * do a single byte copy. This will handle watchpoints, invalid pages, | ||
132 | + * etc correctly. For clean code pages, the next iteration will see | ||
133 | + * the page dirty and will use the fast path. | ||
134 | + */ | ||
135 | + if (unlikely(!rmem || !wmem)) { | ||
136 | + uint8_t byte; | ||
137 | + if (rmem) { | ||
138 | + byte = *(uint8_t *)rmem; | ||
139 | + } else { | ||
140 | + byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra); | ||
141 | + } | ||
142 | + if (wmem) { | ||
143 | + *(uint8_t *)wmem = byte; | ||
144 | + } else { | ||
145 | + cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra); | ||
146 | + } | ||
147 | + return 1; | ||
148 | + } | ||
149 | +#endif | ||
150 | + /* Easy case: just memmove the host memory */ | ||
151 | + memmove(wmem, rmem, copysize); | ||
152 | + return copysize; | ||
153 | +} | ||
154 | + | ||
155 | +/* | ||
156 | + * Do part of a backwards memory copy. Here toaddr and fromaddr point | ||
157 | + * to the *last* byte to be copied. | ||
158 | + */ | ||
159 | +static uint64_t copy_step_rev(CPUARMState *env, uint64_t toaddr, | ||
160 | + uint64_t fromaddr, | ||
161 | + uint64_t copysize, int wmemidx, int rmemidx, | ||
162 | + uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra) | ||
163 | +{ | ||
164 | + void *rmem; | ||
165 | + void *wmem; | ||
166 | + | ||
167 | + /* Don't cross a page boundary on either source or destination */ | ||
168 | + copysize = MIN(copysize, page_limit_rev(toaddr)); | ||
169 | + copysize = MIN(copysize, page_limit_rev(fromaddr)); | ||
170 | + | ||
171 | + /* | ||
172 | + * Handle MTE tag checks: either handle the tag mismatch for byte 0, | ||
173 | + * or else copy up to but not including the byte with the mismatch. | ||
174 | + */ | ||
175 | + if (*rdesc) { | ||
176 | + uint64_t mtesize = mte_mops_probe_rev(env, fromaddr, copysize, *rdesc); | ||
177 | + if (mtesize == 0) { | ||
178 | + mte_check_fail(env, *rdesc, fromaddr, ra); | ||
179 | + *rdesc = 0; | ||
180 | + } else { | ||
181 | + copysize = MIN(copysize, mtesize); | ||
182 | + } | ||
183 | + } | ||
184 | + if (*wdesc) { | ||
185 | + uint64_t mtesize = mte_mops_probe_rev(env, toaddr, copysize, *wdesc); | ||
186 | + if (mtesize == 0) { | ||
187 | + mte_check_fail(env, *wdesc, toaddr, ra); | ||
188 | + *wdesc = 0; | ||
189 | + } else { | ||
190 | + copysize = MIN(copysize, mtesize); | ||
191 | + } | ||
192 | + } | ||
193 | + | ||
194 | + toaddr = useronly_clean_ptr(toaddr); | ||
195 | + fromaddr = useronly_clean_ptr(fromaddr); | ||
196 | + /* Trapless lookup of whether we can get a host memory pointer */ | ||
197 | + wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx); | ||
198 | + rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx); | ||
199 | + | ||
200 | +#ifndef CONFIG_USER_ONLY | ||
201 | + /* | ||
202 | + * If we don't have host memory for both source and dest then just | ||
203 | + * do a single byte copy. This will handle watchpoints, invalid pages, | ||
204 | + * etc correctly. For clean code pages, the next iteration will see | ||
205 | + * the page dirty and will use the fast path. | ||
206 | + */ | ||
207 | + if (unlikely(!rmem || !wmem)) { | ||
208 | + uint8_t byte; | ||
209 | + if (rmem) { | ||
210 | + byte = *(uint8_t *)rmem; | ||
211 | + } else { | ||
212 | + byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra); | ||
213 | + } | ||
214 | + if (wmem) { | ||
215 | + *(uint8_t *)wmem = byte; | ||
216 | + } else { | ||
217 | + cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra); | ||
218 | + } | ||
219 | + return 1; | ||
220 | + } | ||
221 | +#endif | ||
222 | + /* | ||
223 | + * Easy case: just memmove the host memory. Note that wmem and | ||
224 | + * rmem here point to the *last* byte to copy. | ||
225 | + */ | ||
226 | + memmove(wmem - (copysize - 1), rmem - (copysize - 1), copysize); | ||
227 | + return copysize; | ||
228 | +} | ||
229 | + | ||
230 | +/* | ||
231 | + * for the Memory Copy operation, our implementation chooses always | ||
232 | + * to use "option A", where we update Xd and Xs to the final addresses | ||
233 | + * in the CPYP insn, and then in CPYM and CPYE only need to update Xn. | ||
234 | + * | ||
235 | + * @env: CPU | ||
236 | + * @syndrome: syndrome value for mismatch exceptions | ||
237 | + * (also contains the register numbers we need to use) | ||
238 | + * @wdesc: MTE descriptor for the writes (destination) | ||
239 | + * @rdesc: MTE descriptor for the reads (source) | ||
240 | + * @move: true if this is CPY (memmove), false for CPYF (memcpy forwards) | ||
241 | + */ | ||
242 | +static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
243 | + uint32_t rdesc, uint32_t move, uintptr_t ra) | ||
244 | +{ | ||
245 | + int rd = mops_destreg(syndrome); | ||
246 | + int rs = mops_srcreg(syndrome); | ||
247 | + int rn = mops_sizereg(syndrome); | ||
248 | + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); | ||
249 | + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); | ||
250 | + bool forwards = true; | ||
251 | + uint64_t toaddr = env->xregs[rd]; | ||
252 | + uint64_t fromaddr = env->xregs[rs]; | ||
253 | + uint64_t copysize = env->xregs[rn]; | ||
254 | + uint64_t stagecopysize, step; | ||
255 | + | ||
256 | + check_mops_enabled(env, ra); | ||
257 | + | ||
258 | + | ||
259 | + if (move) { | ||
260 | + /* | ||
261 | + * Copy backwards if necessary. The direction for a non-overlapping | ||
262 | + * copy is IMPDEF; we choose forwards. | ||
263 | + */ | ||
264 | + if (copysize > 0x007FFFFFFFFFFFFFULL) { | ||
265 | + copysize = 0x007FFFFFFFFFFFFFULL; | ||
266 | + } | ||
267 | + uint64_t fs = extract64(fromaddr, 0, 56); | ||
268 | + uint64_t ts = extract64(toaddr, 0, 56); | ||
269 | + uint64_t fe = extract64(fromaddr + copysize, 0, 56); | ||
270 | + | ||
271 | + if (fs < ts && fe > ts) { | ||
272 | + forwards = false; | ||
273 | + } | ||
274 | + } else { | ||
275 | + if (copysize > INT64_MAX) { | ||
276 | + copysize = INT64_MAX; | ||
277 | + } | ||
278 | + } | ||
279 | + | ||
280 | + if (!mte_checks_needed(fromaddr, rdesc)) { | ||
281 | + rdesc = 0; | ||
282 | + } | ||
283 | + if (!mte_checks_needed(toaddr, wdesc)) { | ||
284 | + wdesc = 0; | ||
285 | + } | ||
286 | + | ||
287 | + if (forwards) { | ||
288 | + stagecopysize = MIN(copysize, page_limit(toaddr)); | ||
289 | + stagecopysize = MIN(stagecopysize, page_limit(fromaddr)); | ||
290 | + while (stagecopysize) { | ||
291 | + env->xregs[rd] = toaddr; | ||
292 | + env->xregs[rs] = fromaddr; | ||
293 | + env->xregs[rn] = copysize; | ||
294 | + step = copy_step(env, toaddr, fromaddr, stagecopysize, | ||
295 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
296 | + toaddr += step; | ||
297 | + fromaddr += step; | ||
298 | + copysize -= step; | ||
299 | + stagecopysize -= step; | ||
300 | + } | ||
301 | + /* Insn completed, so update registers to the Option A format */ | ||
302 | + env->xregs[rd] = toaddr + copysize; | ||
303 | + env->xregs[rs] = fromaddr + copysize; | ||
304 | + env->xregs[rn] = -copysize; | ||
305 | + } else { | ||
306 | + /* | ||
307 | + * In a reverse copy the to and from addrs in Xs and Xd are the start | ||
308 | + * of the range, but it's more convenient for us to work with pointers | ||
309 | + * to the last byte being copied. | ||
310 | + */ | ||
311 | + toaddr += copysize - 1; | ||
312 | + fromaddr += copysize - 1; | ||
313 | + stagecopysize = MIN(copysize, page_limit_rev(toaddr)); | ||
314 | + stagecopysize = MIN(stagecopysize, page_limit_rev(fromaddr)); | ||
315 | + while (stagecopysize) { | ||
316 | + env->xregs[rn] = copysize; | ||
317 | + step = copy_step_rev(env, toaddr, fromaddr, stagecopysize, | ||
318 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
319 | + copysize -= step; | ||
320 | + stagecopysize -= step; | ||
321 | + toaddr -= step; | ||
322 | + fromaddr -= step; | ||
323 | + } | ||
324 | + /* | ||
325 | + * Insn completed, so update registers to the Option A format. | ||
326 | + * For a reverse copy this is no different to the CPYP input format. | ||
327 | + */ | ||
328 | + env->xregs[rn] = copysize; | ||
329 | + } | ||
330 | + | ||
331 | + /* Set NZCV = 0000 to indicate we are an Option A implementation */ | ||
332 | + env->NF = 0; | ||
333 | + env->ZF = 1; /* our env->ZF encoding is inverted */ | ||
334 | + env->CF = 0; | ||
335 | + env->VF = 0; | ||
336 | + return; | ||
337 | +} | ||
338 | + | ||
339 | +void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
340 | + uint32_t rdesc) | ||
341 | +{ | ||
342 | + do_cpyp(env, syndrome, wdesc, rdesc, true, GETPC()); | ||
343 | +} | ||
344 | + | ||
345 | +void HELPER(cpyfp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
346 | + uint32_t rdesc) | ||
347 | +{ | ||
348 | + do_cpyp(env, syndrome, wdesc, rdesc, false, GETPC()); | ||
349 | +} | ||
350 | + | ||
351 | +static void do_cpym(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
352 | + uint32_t rdesc, uint32_t move, uintptr_t ra) | ||
353 | +{ | ||
354 | + /* Main: we choose to copy until less than a page remaining */ | ||
355 | + CPUState *cs = env_cpu(env); | ||
356 | + int rd = mops_destreg(syndrome); | ||
357 | + int rs = mops_srcreg(syndrome); | ||
358 | + int rn = mops_sizereg(syndrome); | ||
359 | + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); | ||
360 | + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); | ||
361 | + bool forwards = true; | ||
362 | + uint64_t toaddr, fromaddr, copysize, step; | ||
363 | + | ||
364 | + check_mops_enabled(env, ra); | ||
365 | + | ||
366 | + /* We choose to NOP out "no data to copy" before consistency checks */ | ||
367 | + if (env->xregs[rn] == 0) { | ||
368 | + return; | ||
369 | + } | ||
370 | + | ||
371 | + check_mops_wrong_option(env, syndrome, ra); | ||
372 | + | ||
373 | + if (move) { | ||
374 | + forwards = (int64_t)env->xregs[rn] < 0; | ||
375 | + } | ||
376 | + | ||
377 | + if (forwards) { | ||
378 | + toaddr = env->xregs[rd] + env->xregs[rn]; | ||
379 | + fromaddr = env->xregs[rs] + env->xregs[rn]; | ||
380 | + copysize = -env->xregs[rn]; | ||
381 | + } else { | ||
382 | + copysize = env->xregs[rn]; | ||
383 | + /* This toaddr and fromaddr point to the *last* byte to copy */ | ||
384 | + toaddr = env->xregs[rd] + copysize - 1; | ||
385 | + fromaddr = env->xregs[rs] + copysize - 1; | ||
386 | + } | ||
387 | + | ||
388 | + if (!mte_checks_needed(fromaddr, rdesc)) { | ||
389 | + rdesc = 0; | ||
390 | + } | ||
391 | + if (!mte_checks_needed(toaddr, wdesc)) { | ||
392 | + wdesc = 0; | ||
393 | + } | ||
394 | + | ||
395 | + /* Our implementation has no particular parameter requirements for CPYM */ | ||
396 | + | ||
397 | + /* Do the actual memmove */ | ||
398 | + if (forwards) { | ||
399 | + while (copysize >= TARGET_PAGE_SIZE) { | ||
400 | + step = copy_step(env, toaddr, fromaddr, copysize, | ||
401 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
402 | + toaddr += step; | ||
403 | + fromaddr += step; | ||
404 | + copysize -= step; | ||
405 | + env->xregs[rn] = -copysize; | ||
406 | + if (copysize >= TARGET_PAGE_SIZE && | ||
407 | + unlikely(cpu_loop_exit_requested(cs))) { | ||
408 | + cpu_loop_exit_restore(cs, ra); | ||
409 | + } | ||
410 | + } | ||
411 | + } else { | ||
412 | + while (copysize >= TARGET_PAGE_SIZE) { | ||
413 | + step = copy_step_rev(env, toaddr, fromaddr, copysize, | ||
414 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
415 | + toaddr -= step; | ||
416 | + fromaddr -= step; | ||
417 | + copysize -= step; | ||
418 | + env->xregs[rn] = copysize; | ||
419 | + if (copysize >= TARGET_PAGE_SIZE && | ||
420 | + unlikely(cpu_loop_exit_requested(cs))) { | ||
421 | + cpu_loop_exit_restore(cs, ra); | ||
422 | + } | ||
423 | + } | ||
424 | + } | ||
425 | +} | ||
426 | + | ||
427 | +void HELPER(cpym)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
428 | + uint32_t rdesc) | ||
429 | +{ | ||
430 | + do_cpym(env, syndrome, wdesc, rdesc, true, GETPC()); | ||
431 | +} | ||
432 | + | ||
433 | +void HELPER(cpyfm)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
434 | + uint32_t rdesc) | ||
435 | +{ | ||
436 | + do_cpym(env, syndrome, wdesc, rdesc, false, GETPC()); | ||
437 | +} | ||
438 | + | ||
439 | +static void do_cpye(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
440 | + uint32_t rdesc, uint32_t move, uintptr_t ra) | ||
441 | +{ | ||
442 | + /* Epilogue: do the last partial page */ | ||
443 | + int rd = mops_destreg(syndrome); | ||
444 | + int rs = mops_srcreg(syndrome); | ||
445 | + int rn = mops_sizereg(syndrome); | ||
446 | + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); | ||
447 | + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); | ||
448 | + bool forwards = true; | ||
449 | + uint64_t toaddr, fromaddr, copysize, step; | ||
450 | + | ||
451 | + check_mops_enabled(env, ra); | ||
452 | + | ||
453 | + /* We choose to NOP out "no data to copy" before consistency checks */ | ||
454 | + if (env->xregs[rn] == 0) { | ||
455 | + return; | ||
456 | + } | ||
457 | + | ||
458 | + check_mops_wrong_option(env, syndrome, ra); | ||
459 | + | ||
460 | + if (move) { | ||
461 | + forwards = (int64_t)env->xregs[rn] < 0; | ||
462 | + } | ||
463 | + | ||
464 | + if (forwards) { | ||
465 | + toaddr = env->xregs[rd] + env->xregs[rn]; | ||
466 | + fromaddr = env->xregs[rs] + env->xregs[rn]; | ||
467 | + copysize = -env->xregs[rn]; | ||
468 | + } else { | ||
469 | + copysize = env->xregs[rn]; | ||
470 | + /* This toaddr and fromaddr point to the *last* byte to copy */ | ||
471 | + toaddr = env->xregs[rd] + copysize - 1; | ||
472 | + fromaddr = env->xregs[rs] + copysize - 1; | ||
473 | + } | ||
474 | + | ||
475 | + if (!mte_checks_needed(fromaddr, rdesc)) { | ||
476 | + rdesc = 0; | ||
477 | + } | ||
478 | + if (!mte_checks_needed(toaddr, wdesc)) { | ||
479 | + wdesc = 0; | ||
480 | + } | ||
481 | + | ||
482 | + /* Check the size; we don't want to have to do a check-for-interrupts */ | ||
483 | + if (copysize >= TARGET_PAGE_SIZE) { | ||
484 | + raise_exception_ra(env, EXCP_UDEF, syndrome, | ||
485 | + mops_mismatch_exception_target_el(env), ra); | ||
486 | + } | ||
487 | + | ||
488 | + /* Do the actual memmove */ | ||
489 | + if (forwards) { | ||
490 | + while (copysize > 0) { | ||
491 | + step = copy_step(env, toaddr, fromaddr, copysize, | ||
492 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
493 | + toaddr += step; | ||
494 | + fromaddr += step; | ||
495 | + copysize -= step; | ||
496 | + env->xregs[rn] = -copysize; | ||
497 | + } | ||
498 | + } else { | ||
499 | + while (copysize > 0) { | ||
500 | + step = copy_step_rev(env, toaddr, fromaddr, copysize, | ||
501 | + wmemidx, rmemidx, &wdesc, &rdesc, ra); | ||
502 | + toaddr -= step; | ||
503 | + fromaddr -= step; | ||
504 | + copysize -= step; | ||
505 | + env->xregs[rn] = copysize; | ||
506 | + } | ||
507 | + } | ||
508 | +} | ||
509 | + | ||
510 | +void HELPER(cpye)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
511 | + uint32_t rdesc) | ||
512 | +{ | ||
513 | + do_cpye(env, syndrome, wdesc, rdesc, true, GETPC()); | ||
514 | +} | ||
515 | + | ||
516 | +void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, | ||
517 | + uint32_t rdesc) | ||
518 | +{ | ||
519 | + do_cpye(env, syndrome, wdesc, rdesc, false, GETPC()); | ||
520 | +} | ||
521 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
522 | index XXXXXXX..XXXXXXX 100644 | ||
523 | --- a/target/arm/tcg/translate-a64.c | ||
524 | +++ b/target/arm/tcg/translate-a64.c | ||
525 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp) | ||
526 | TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm) | ||
527 | TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge) | ||
528 | |||
529 | +typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32); | ||
530 | + | ||
531 | +static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn) | ||
532 | +{ | ||
533 | + int rmemidx, wmemidx; | ||
534 | + uint32_t syndrome, rdesc = 0, wdesc = 0; | ||
535 | + bool wunpriv = extract32(a->options, 0, 1); | ||
536 | + bool runpriv = extract32(a->options, 1, 1); | ||
537 | + | ||
538 | + /* | ||
539 | + * UNPREDICTABLE cases: we choose to UNDEF, which allows | ||
540 | + * us to pull this check before the CheckMOPSEnabled() test | ||
541 | + * (which we do in the helper function) | ||
542 | + */ | ||
543 | + if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || | ||
544 | + a->rd == 31 || a->rs == 31 || a->rn == 31) { | ||
545 | + return false; | ||
546 | + } | ||
547 | + | ||
548 | + rmemidx = get_a64_user_mem_index(s, runpriv); | ||
549 | + wmemidx = get_a64_user_mem_index(s, wunpriv); | ||
550 | + | ||
551 | + /* | ||
552 | + * We pass option_a == true, matching our implementation; | ||
553 | + * we pass wrong_option == false: helper function may set that bit. | ||
554 | + */ | ||
555 | + syndrome = syn_mop(false, false, a->options, is_epilogue, | ||
556 | + false, true, a->rd, a->rs, a->rn); | ||
557 | + | ||
558 | + /* If we need to do MTE tag checking, assemble the descriptors */ | ||
559 | + if (s->mte_active[runpriv]) { | ||
560 | + rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid); | ||
561 | + rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma); | ||
562 | + } | ||
563 | + if (s->mte_active[wunpriv]) { | ||
564 | + wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid); | ||
565 | + wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma); | ||
566 | + wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true); | ||
567 | + } | ||
568 | + /* The helper function needs these parts of the descriptor regardless */ | ||
569 | + rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx); | ||
570 | + wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx); | ||
571 | + | ||
572 | + /* | ||
573 | + * The helper needs the register numbers, but since they're in | ||
574 | + * the syndrome anyway, we let it extract them from there rather | ||
575 | + * than passing in an extra three integer arguments. | ||
576 | + */ | ||
577 | + fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc), | ||
578 | + tcg_constant_i32(rdesc)); | ||
579 | + return true; | ||
580 | +} | ||
581 | + | ||
582 | +TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp) | ||
583 | +TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym) | ||
584 | +TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye) | ||
585 | +TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp) | ||
586 | +TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm) | ||
587 | +TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe) | ||
588 | + | ||
589 | typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); | ||
590 | |||
591 | static bool gen_rri(DisasContext *s, arg_rri_sf *a, | ||
28 | -- | 592 | -- |
29 | 2.34.1 | 593 | 2.34.1
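For readers following the new helpers: CPYP/CPYM/CPYE deliberately mirror the prologue/main/epilogue split of the architected instructions, and the helpers keep Xd/Xs/Xn updated so each step can be interrupted and restarted. As a rough sketch only (not part of this series; assumes a toolchain that understands the MOPS extension), a guest-side copy built on these instructions might look like:

    /* Sketch: a FEAT_MOPS based copy as a guest might issue it. */
    static inline void mops_copy(void *dst, const void *src, unsigned long n)
    {
        void *d = dst;
        const void *s = src;
        asm volatile(
            ".arch_extension mops\n\t"
            "cpyp [%0]!, [%1]!, %2!\n\t"   /* prologue: pick option, first chunk */
            "cpym [%0]!, [%1]!, %2!\n\t"   /* main: bulk of the copy */
            "cpye [%0]!, [%1]!, %2!"       /* epilogue: final partial block */
            : "+r"(d), "+r"(s), "+r"(n)
            :
            : "cc", "memory");
    }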
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | Enable FEAT_MOPS on the AArch64 'max' CPU, and add it to |
---|---|---|---|
2 | the list of features we implement. | ||
2 | 3 | ||
3 | This feature allows the operating system to set TCR_ELx.HWU* | ||
4 | to allow the implementation to use the PBHA bits from the | ||
5 | block and page descriptors for IMPLEMENTATION DEFINED | ||
6 | purposes. Since QEMU has no need to use these bits, we may | ||
7 | simply ignore them. | ||
8 | |||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Message-id: 20230811214031.171020-11-richard.henderson@linaro.org | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 20230912140434.1333369-13-peter.maydell@linaro.org | ||
13 | --- | 7 | --- |
14 | docs/system/arm/emulation.rst | 1 + | 8 | docs/system/arm/emulation.rst | 1 + |
15 | target/arm/tcg/cpu32.c | 2 +- | 9 | linux-user/elfload.c | 1 + |
16 | target/arm/tcg/cpu64.c | 2 +- | 10 | target/arm/tcg/cpu64.c | 1 + |
17 | 3 files changed, 3 insertions(+), 2 deletions(-) | 11 | 3 files changed, 3 insertions(+) |
18 | 12 | ||
19 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst | 13 | diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst |
20 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/docs/system/arm/emulation.rst | 15 | --- a/docs/system/arm/emulation.rst |
22 | +++ b/docs/system/arm/emulation.rst | 16 | +++ b/docs/system/arm/emulation.rst |
23 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: | 17 | @@ -XXX,XX +XXX,XX @@ the following architecture extensions: |
24 | - FEAT_HAFDBS (Hardware management of the access flag and dirty bit state) | 18 | - FEAT_LSE (Large System Extensions) |
25 | - FEAT_HCX (Support for the HCRX_EL2 register) | 19 | - FEAT_LSE2 (Large System Extensions v2) |
26 | - FEAT_HPDS (Hierarchical permission disables) | 20 | - FEAT_LVA (Large Virtual Address space) |
27 | +- FEAT_HPDS2 (Translation table page-based hardware attributes) | 21 | +- FEAT_MOPS (Standardization of memory operations) |
28 | - FEAT_I8MM (AArch64 Int8 matrix multiplication instructions) | 22 | - FEAT_MTE (Memory Tagging Extension) |
29 | - FEAT_IDST (ID space trap handling) | 23 | - FEAT_MTE2 (Memory Tagging Extension) |
30 | - FEAT_IESB (Implicit error synchronization event) | 24 | - FEAT_MTE3 (MTE Asymmetric Fault Handling) |
31 | diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c | 25 | diff --git a/linux-user/elfload.c b/linux-user/elfload.c |
32 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/target/arm/tcg/cpu32.c | 27 | --- a/linux-user/elfload.c |
34 | +++ b/target/arm/tcg/cpu32.c | 28 | +++ b/linux-user/elfload.c |
35 | @@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu) | 29 | @@ -XXX,XX +XXX,XX @@ uint32_t get_elf_hwcap2(void) |
36 | cpu->isar.id_mmfr3 = t; | 30 | GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); |
37 | 31 | GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); | |
38 | t = cpu->isar.id_mmfr4; | 32 | GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); |
39 | - t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */ | 33 | + GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS); |
40 | + t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */ | 34 | |
41 | t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ | 35 | return hwcaps; |
42 | t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */ | 36 | } |
43 | t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */ | ||
44 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | 37 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c |
45 | index XXXXXXX..XXXXXXX 100644 | 38 | index XXXXXXX..XXXXXXX 100644 |
46 | --- a/target/arm/tcg/cpu64.c | 39 | --- a/target/arm/tcg/cpu64.c |
47 | +++ b/target/arm/tcg/cpu64.c | 40 | +++ b/target/arm/tcg/cpu64.c |
48 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) | 41 | @@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj) |
49 | t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */ | 42 | cpu->isar.id_aa64isar1 = t; |
50 | t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */ | 43 | |
51 | t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */ | 44 | t = cpu->isar.id_aa64isar2; |
52 | - t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */ | 45 | + t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */ |
53 | + t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 2); /* FEAT_HPDS2 */ | 46 | t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ |
54 | t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */ | 47 | cpu->isar.id_aa64isar2 = t; |
55 | t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3); /* FEAT_PAN3 */ | 48 | |
56 | t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ | ||
57 | -- | 49 | -- |
58 | 2.34.1 | 50 | 2.34.1
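With the ID register and hwcap changes above in place, a linux-user guest can probe for the feature in the usual way. A minimal sketch (assumes kernel headers new enough to define HWCAP2_MOPS; not part of the patch):

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);
    #ifdef HWCAP2_MOPS
        printf("FEAT_MOPS: %s\n", (hwcap2 & HWCAP2_MOPS) ? "present" : "absent");
    #else
        (void)hwcap2;
        puts("these headers do not define HWCAP2_MOPS");
    #endif
        return 0;
    }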
1 | The functions qemu_get_timedate() and qemu_timedate_diff() take | 1 | Avoid a dynamic stack allocation in qjack_client_init(), by using |
---|---|---|---|
2 | and return a time offset as an integer. Coverity points out that | 2 | a g_autofree heap allocation instead. |
3 | means that when an RTC device implementation holds an offset | ||
4 | as a time_t, as the m48t59 does, the time_t will get truncated. | ||
5 | (CID 1507157, 1517772). | ||
6 | 3 | ||
7 | The functions work with time_t internally, so make them use that type | 4 | (We stick with allocate + snprintf() because the JACK API requires |
8 | in their APIs. | 5 | the name to be no more than its maximum size, so g_strdup_printf() |
6 | would require an extra truncation step.) | ||
9 | 7 | ||
10 | Note that this won't help any Y2038 issues where either the device | 8 | The codebase has very few VLAs, and if we can get rid of them all we |
11 | model itself is keeping the offset in a 32-bit integer, or where the | 9 | can make the compiler error on new additions. This is a defensive |
12 | hardware under emulation has Y2038 or other rollover problems. If we | 10 | measure against security bugs where an on-stack dynamic allocation |
13 | missed any cases of the former then hopefully Coverity will warn us | 11 | isn't correctly size-checked (e.g. CVE-2021-3527). |
14 | about them since after this patch we'd be truncating a time_t in | ||
15 | assignments from qemu_timedate_diff().) | ||
16 | 12 | ||
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
18 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 14 | Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> |
15 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> | ||
16 | Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com> | ||
17 | Message-id: 20230818155846.1651287-2-peter.maydell@linaro.org | ||
19 | --- | 18 | --- |
20 | include/sysemu/rtc.h | 4 ++-- | 19 | audio/jackaudio.c | 5 +++-- |
21 | softmmu/rtc.c | 4 ++-- | 20 | 1 file changed, 3 insertions(+), 2 deletions(-) |
22 | 2 files changed, 4 insertions(+), 4 deletions(-) | ||
23 | 21 | ||
24 | diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h | 22 | diff --git a/audio/jackaudio.c b/audio/jackaudio.c |
25 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/sysemu/rtc.h | 24 | --- a/audio/jackaudio.c |
27 | +++ b/include/sysemu/rtc.h | 25 | +++ b/audio/jackaudio.c |
28 | @@ -XXX,XX +XXX,XX @@ | 26 | @@ -XXX,XX +XXX,XX @@ static void qjack_client_connect_ports(QJackClient *c) |
29 | * The behaviour of the clock whose value this function returns will | 27 | static int qjack_client_init(QJackClient *c) |
30 | * depend on the -rtc command line option passed by the user. | ||
31 | */ | ||
32 | -void qemu_get_timedate(struct tm *tm, int offset); | ||
33 | +void qemu_get_timedate(struct tm *tm, time_t offset); | ||
34 | |||
35 | /** | ||
36 | * qemu_timedate_diff: Return difference between a struct tm and the RTC | ||
37 | @@ -XXX,XX +XXX,XX @@ void qemu_get_timedate(struct tm *tm, int offset); | ||
38 | * a timestamp one hour further ahead than the current RTC time | ||
39 | * then this function will return 3600. | ||
40 | */ | ||
41 | -int qemu_timedate_diff(struct tm *tm); | ||
42 | +time_t qemu_timedate_diff(struct tm *tm); | ||
43 | |||
44 | #endif | ||
45 | diff --git a/softmmu/rtc.c b/softmmu/rtc.c | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/softmmu/rtc.c | ||
48 | +++ b/softmmu/rtc.c | ||
49 | @@ -XXX,XX +XXX,XX @@ static time_t qemu_ref_timedate(QEMUClockType clock) | ||
50 | return value; | ||
51 | } | ||
52 | |||
53 | -void qemu_get_timedate(struct tm *tm, int offset) | ||
54 | +void qemu_get_timedate(struct tm *tm, time_t offset) | ||
55 | { | 28 | { |
56 | time_t ti = qemu_ref_timedate(rtc_clock); | 29 | jack_status_t status; |
57 | 30 | - char client_name[jack_client_name_size()]; | |
58 | @@ -XXX,XX +XXX,XX @@ void qemu_get_timedate(struct tm *tm, int offset) | 31 | + int client_name_len = jack_client_name_size(); /* includes NUL */ |
59 | } | 32 | + g_autofree char *client_name = g_new(char, client_name_len); |
60 | } | 33 | jack_options_t options = JackNullOption; |
61 | 34 | ||
62 | -int qemu_timedate_diff(struct tm *tm) | 35 | if (c->state == QJACK_STATE_RUNNING) { |
63 | +time_t qemu_timedate_diff(struct tm *tm) | 36 | @@ -XXX,XX +XXX,XX @@ static int qjack_client_init(QJackClient *c) |
64 | { | 37 | |
65 | time_t seconds; | 38 | c->connect_ports = true; |
39 | |||
40 | - snprintf(client_name, sizeof(client_name), "%s-%s", | ||
41 | + snprintf(client_name, client_name_len, "%s-%s", | ||
42 | c->out ? "out" : "in", | ||
43 | c->opt->client_name ? c->opt->client_name : audio_application_name()); | ||
66 | 44 | ||
67 | -- | 45 | -- |
68 | 2.34.1 | 46 | 2.34.1 |
69 | 47 | ||
70 | 48 |
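The point of widening those prototypes is that an RTC model can now hold its offset as a time_t end to end. A minimal sketch of the intended usage (hypothetical helper names, not taken from the patch):

    static time_t rtc_offset;    /* previously an int in some device models */

    static void rtc_set_time(struct tm *guest_tm)
    {
        rtc_offset = qemu_timedate_diff(guest_tm);   /* no longer truncated */
    }

    static void rtc_get_time(struct tm *out_tm)
    {
        qemu_get_timedate(out_tm, rtc_offset);
    }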
1 | M-profile CPUs generally allow configuration of the number of MPU | 1 | Avoid a dynamic stack allocation in qjack_process(). Since this |
---|---|---|---|
2 | regions that they have. We don't currently model this, so our | 2 | function is a JACK process callback, we are not permitted to malloc() |
3 | implementations of some of the board models provide CPUs with the | 3 | here, so we allocate a working buffer in qjack_client_init() instead. |
4 | wrong number of regions. RTOSes like Zephyr that hardcode the | ||
5 | expected number of regions may therefore not run on the model if they | ||
6 | are set up to run on real hardware. | ||
7 | 4 | ||
8 | Add properties mpu-ns-regions and mpu-s-regions to the ARMV7M object, | 5 | The codebase has very few VLAs, and if we can get rid of them all we |
9 | matching the ability of hardware to configure the number of Secure | 6 | can make the compiler error on new additions. This is a defensive |
10 | and NonSecure regions separately. Our actual CPU implementation | 7 | measure against security bugs where an on-stack dynamic allocation |
11 | doesn't currently support that, and it happens that none of the MPS | 8 | isn't correctly size-checked (e.g. CVE-2021-3527). |
12 | boards we model set the number of regions differently for Secure vs | ||
13 | NonSecure, so we provide an interface to the boards and SoCs that | ||
14 | won't need to change if we ever do add that functionality in future, | ||
15 | but make it an error to configure the two properties to different | ||
16 | values. | ||
17 | |||
18 | (The property name on the CPU is the somewhat misnamed-for-M-profile | ||
19 | "pmsav7-dregion", so we don't follow that naming convention for | ||
20 | the properties here. The TRM doesn't say what the CPU configuration | ||
21 | variable names are, so we pick something, and follow the lowercase | ||
22 | convention we already have for properties here.) | ||
23 | 9 | ||
24 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
25 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 11 | Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> |
26 | Message-id: 20230724174335.2150499-3-peter.maydell@linaro.org | 12 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> |
13 | Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com> | ||
14 | Message-id: 20230818155846.1651287-3-peter.maydell@linaro.org | ||
27 | --- | 15 | --- |
28 | include/hw/arm/armv7m.h | 8 ++++++++ | 16 | audio/jackaudio.c | 16 +++++++++++----- |
29 | hw/arm/armv7m.c | 21 +++++++++++++++++++++ | 17 | 1 file changed, 11 insertions(+), 5 deletions(-) |
30 | 2 files changed, 29 insertions(+) | ||
31 | 18 | ||
32 | diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h | 19 | diff --git a/audio/jackaudio.c b/audio/jackaudio.c |
33 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/include/hw/arm/armv7m.h | 21 | --- a/audio/jackaudio.c |
35 | +++ b/include/hw/arm/armv7m.h | 22 | +++ b/audio/jackaudio.c |
36 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M) | 23 | @@ -XXX,XX +XXX,XX @@ typedef struct QJackClient { |
37 | * + Property "vfp": enable VFP (forwarded to CPU object) | 24 | int buffersize; |
38 | * + Property "dsp": enable DSP (forwarded to CPU object) | 25 | jack_port_t **port; |
39 | * + Property "enable-bitband": expose bitbanded IO | 26 | QJackBuffer fifo; |
40 | + * + Property "mpu-ns-regions": number of Non-Secure MPU regions (forwarded | 27 | + |
41 | + * to CPU object pmsav7-dregion property; default is whatever the default | 28 | + /* Used as workspace by qjack_process() */ |
42 | + * for the CPU is) | 29 | + float **process_buffers; |
43 | + * + Property "mpu-s-regions": number of Secure MPU regions (default is | 30 | } |
44 | + * whatever the default for the CPU is; must currently be set to the same | 31 | QJackClient; |
45 | + * value as mpu-ns-regions if the CPU implements the Security Extension) | 32 | |
46 | * + Clock input "refclk" is the external reference clock for the systick timers | 33 | @@ -XXX,XX +XXX,XX @@ static int qjack_process(jack_nframes_t nframes, void *arg) |
47 | * + Clock input "cpuclk" is the main CPU clock | 34 | } |
48 | */ | 35 | |
49 | @@ -XXX,XX +XXX,XX @@ struct ARMv7MState { | 36 | /* get the buffers for the ports */ |
50 | Object *idau; | 37 | - float *buffers[c->nchannels]; |
51 | uint32_t init_svtor; | 38 | for (int i = 0; i < c->nchannels; ++i) { |
52 | uint32_t init_nsvtor; | 39 | - buffers[i] = jack_port_get_buffer(c->port[i], nframes); |
53 | + uint32_t mpu_ns_regions; | 40 | + c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes); |
54 | + uint32_t mpu_s_regions; | 41 | } |
55 | bool enable_bitband; | 42 | |
56 | bool start_powered_off; | 43 | if (c->out) { |
57 | bool vfp; | 44 | if (likely(c->enabled)) { |
58 | diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c | 45 | - qjack_buffer_read_l(&c->fifo, buffers, nframes); |
59 | index XXXXXXX..XXXXXXX 100644 | 46 | + qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes); |
60 | --- a/hw/arm/armv7m.c | 47 | } else { |
61 | +++ b/hw/arm/armv7m.c | 48 | for (int i = 0; i < c->nchannels; ++i) { |
62 | @@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp) | 49 | - memset(buffers[i], 0, nframes * sizeof(float)); |
50 | + memset(c->process_buffers[i], 0, nframes * sizeof(float)); | ||
51 | } | ||
52 | } | ||
53 | } else { | ||
54 | if (likely(c->enabled)) { | ||
55 | - qjack_buffer_write_l(&c->fifo, buffers, nframes); | ||
56 | + qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes); | ||
63 | } | 57 | } |
64 | } | 58 | } |
65 | 59 | ||
66 | + /* | 60 | @@ -XXX,XX +XXX,XX @@ static int qjack_client_init(QJackClient *c) |
67 | + * Real M-profile hardware can be configured with a different number of | 61 | jack_get_client_name(c->client)); |
68 | + * MPU regions for Secure vs NonSecure. QEMU's CPU implementation doesn't | 62 | } |
69 | + * support that yet, so catch attempts to select that. | 63 | |
70 | + */ | 64 | + /* Allocate working buffer for process callback */ |
71 | + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && | 65 | + c->process_buffers = g_new(float *, c->nchannels); |
72 | + s->mpu_ns_regions != s->mpu_s_regions) { | ||
73 | + error_setg(errp, | ||
74 | + "mpu-ns-regions and mpu-s-regions properties must have the same value"); | ||
75 | + return; | ||
76 | + } | ||
77 | + if (s->mpu_ns_regions != UINT_MAX && | ||
78 | + object_property_find(OBJECT(s->cpu), "pmsav7-dregion")) { | ||
79 | + if (!object_property_set_uint(OBJECT(s->cpu), "pmsav7-dregion", | ||
80 | + s->mpu_ns_regions, errp)) { | ||
81 | + return; | ||
82 | + } | ||
83 | + } | ||
84 | + | 66 | + |
85 | /* | 67 | jack_set_process_callback(c->client, qjack_process , c); |
86 | * Tell the CPU where the NVIC is; it will fail realize if it doesn't | 68 | jack_set_port_registration_callback(c->client, qjack_port_registration, c); |
87 | * have one. Similarly, tell the NVIC where its CPU is. | 69 | jack_set_xrun_callback(c->client, qjack_xrun, c); |
88 | @@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = { | 70 | @@ -XXX,XX +XXX,XX @@ static void qjack_client_fini_locked(QJackClient *c) |
89 | false), | 71 | |
90 | DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true), | 72 | qjack_buffer_free(&c->fifo); |
91 | DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true), | 73 | g_free(c->port); |
92 | + DEFINE_PROP_UINT32("mpu-ns-regions", ARMv7MState, mpu_ns_regions, UINT_MAX), | 74 | + g_free(c->process_buffers); |
93 | + DEFINE_PROP_UINT32("mpu-s-regions", ARMv7MState, mpu_s_regions, UINT_MAX), | 75 | |
94 | DEFINE_PROP_END_OF_LIST(), | 76 | c->state = QJACK_STATE_DISCONNECTED; |
95 | }; | 77 | /* fallthrough */ |
96 | |||
97 | -- | 78 | -- |
98 | 2.34.1 | 79 | 2.34.1 |
99 | 80 | ||
100 | 81 |
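Board code can then configure the new properties like any other armv7m property before realize. A hedged sketch of what an MPS-style board might do (the region count here is illustrative, not taken from this series):

    DeviceState *armv7m = qdev_new(TYPE_ARMV7M);
    /* ... set cpu-type, memory, clocks and the other required properties ... */
    qdev_prop_set_uint32(armv7m, "mpu-ns-regions", 16);
    qdev_prop_set_uint32(armv7m, "mpu-s-regions", 16);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(armv7m), &error_fatal);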
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | There is only one additional EL1 register modeled, which | 3 | Armv8.1+ CPUs have the Virtual Host Extension (VHE), which added a non-secure |
4 | also needs to use access_actlr_w. | 4 | EL2 virtual timer. |
5 | 5 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | This change adds it to fulfil Arm BSA (Base System Architecture) |
7 | requirements. | ||
8 | |||
9 | Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> | ||
10 | Message-id: 20230913140610.214893-2-marcin.juszkiewicz@linaro.org | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Message-id: 20230811214031.171020-8-richard.henderson@linaro.org | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 13 | --- |
11 | target/arm/tcg/cpu64.c | 3 ++- | 14 | hw/arm/sbsa-ref.c | 2 ++ |
12 | 1 file changed, 2 insertions(+), 1 deletion(-) | 15 | 1 file changed, 2 insertions(+) |
13 | 16 | ||
14 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | 17 | diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/arm/tcg/cpu64.c | 19 | --- a/hw/arm/sbsa-ref.c |
17 | +++ b/target/arm/tcg/cpu64.c | 20 | +++ b/hw/arm/sbsa-ref.c |
18 | @@ -XXX,XX +XXX,XX @@ static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu) | 21 | @@ -XXX,XX +XXX,XX @@ |
19 | static const ARMCPRegInfo neoverse_v1_cp_reginfo[] = { | 22 | #define ARCH_TIMER_S_EL1_IRQ 13 |
20 | { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64, | 23 | #define ARCH_TIMER_NS_EL1_IRQ 14 |
21 | .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5, | 24 | #define ARCH_TIMER_NS_EL2_IRQ 10 |
22 | - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | 25 | +#define ARCH_TIMER_NS_EL2_VIRT_IRQ 12 |
23 | + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, | 26 | |
24 | + .accessfn = access_actlr_w }, | 27 | enum { |
25 | { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64, | 28 | SBSA_FLASH, |
26 | .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0, | 29 | @@ -XXX,XX +XXX,XX @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem) |
27 | .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | 30 | [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, |
31 | [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, | ||
32 | [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, | ||
33 | + [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ, | ||
34 | }; | ||
35 | |||
36 | for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { | ||
28 | -- | 37 | -- |
29 | 2.34.1 | 38 | 2.34.1
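The newly wired IRQ is the PPI for the CNTHV (non-secure EL2 virtual) timer that VHE introduced. As an illustration only (register names per Armv8.1; requires an assembler that accepts them, and is not part of the patch), EL2 software would arm it roughly like this:

    static inline void arm_el2_virtual_timer(unsigned long ticks)
    {
        unsigned long now;
        asm volatile("mrs %0, cntvct_el0" : "=r"(now));
        asm volatile("msr cnthv_cval_el2, %0" : : "r"(now + ticks));
        asm volatile("msr cnthv_ctl_el2, %0" : : "r"(1ul));   /* ENABLE bit */
        asm volatile("isb");
    }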
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | From: Viktor Prutyanov <viktor@daynix.com> |
---|---|---|---|
2 | 2 | ||
3 | * Add Addr and size definition for most i.MX6UL devices in i.MX6UL header file. | 3 | PE export name check introduced in d399d6b179 isn't reliable enough, |
4 | * Use those newly defined named constants whenever possible. | 4 | because a page with the export directory may not be present for some |
5 | * Standardize the way we init a family of unimplemented devices | 5 | reason. On the other hand, elf2dmp retrieves the PDB name in any case. |
6 | - SAI | 6 | It can also be used to check that a PE image is the kernel image. So, |
7 | - PWM | 7 | check the PDB name when searching for the Windows kernel image. |
8 | - CAN | ||
9 | * Add/rework few comments | ||
10 | 8 | ||
11 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | 9 | Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2165917 |
12 | Message-id: d579043fbd4e4b490370783fda43fc02c8e9be75.1692964892.git.jcd@tribudubois.net | 10 | |
13 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Viktor Prutyanov <viktor@daynix.com> |
12 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
13 | Message-id: 20230915170153.10959-2-viktor@daynix.com | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 15 | --- |
16 | include/hw/arm/fsl-imx6ul.h | 156 +++++++++++++++++++++++++++++++----- | 16 | contrib/elf2dmp/main.c | 93 +++++++++++++++--------------------------- |
17 | hw/arm/fsl-imx6ul.c | 147 ++++++++++++++++++++++----------- | 17 | 1 file changed, 33 insertions(+), 60 deletions(-) |
18 | 2 files changed, 232 insertions(+), 71 deletions(-) | ||
19 | 18 | ||
20 | diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h | 19 | diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c |
21 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/hw/arm/fsl-imx6ul.h | 21 | --- a/contrib/elf2dmp/main.c |
23 | +++ b/include/hw/arm/fsl-imx6ul.h | 22 | +++ b/contrib/elf2dmp/main.c |
24 | @@ -XXX,XX +XXX,XX @@ | 23 | @@ -XXX,XX +XXX,XX @@ static int write_dump(struct pa_space *ps, |
25 | #include "exec/memory.h" | 24 | return fclose(dmp_file); |
26 | #include "cpu.h" | 25 | } |
27 | #include "qom/object.h" | 26 | |
28 | +#include "qemu/units.h" | 27 | -static bool pe_check_export_name(uint64_t base, void *start_addr, |
29 | 28 | - struct va_space *vs) | |
30 | #define TYPE_FSL_IMX6UL "fsl-imx6ul" | 29 | -{ |
31 | OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6ULState, FSL_IMX6UL) | 30 | - IMAGE_EXPORT_DIRECTORY export_dir; |
32 | @@ -XXX,XX +XXX,XX @@ enum FslIMX6ULConfiguration { | 31 | - const char *pe_name; |
33 | FSL_IMX6UL_NUM_ADCS = 2, | 32 | - |
34 | FSL_IMX6UL_NUM_USB_PHYS = 2, | 33 | - if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_EXPORT_DIRECTORY, |
35 | FSL_IMX6UL_NUM_USBS = 2, | 34 | - &export_dir, sizeof(export_dir), vs)) { |
36 | + FSL_IMX6UL_NUM_SAIS = 3, | 35 | - return false; |
37 | + FSL_IMX6UL_NUM_CANS = 2, | 36 | - } |
38 | + FSL_IMX6UL_NUM_PWMS = 4, | 37 | - |
39 | }; | 38 | - pe_name = va_space_resolve(vs, base + export_dir.Name); |
40 | 39 | - if (!pe_name) { | |
41 | struct FslIMX6ULState { | 40 | - return false; |
42 | @@ -XXX,XX +XXX,XX @@ struct FslIMX6ULState { | 41 | - } |
43 | 42 | - | |
44 | enum FslIMX6ULMemoryMap { | 43 | - return !strcmp(pe_name, PE_NAME); |
45 | FSL_IMX6UL_MMDC_ADDR = 0x80000000, | 44 | -} |
46 | - FSL_IMX6UL_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL, | 45 | - |
47 | + FSL_IMX6UL_MMDC_SIZE = (2 * GiB), | 46 | -static int pe_get_pdb_symstore_hash(uint64_t base, void *start_addr, |
48 | 47 | - char *hash, struct va_space *vs) | |
49 | FSL_IMX6UL_QSPI1_MEM_ADDR = 0x60000000, | 48 | +static bool pe_check_pdb_name(uint64_t base, void *start_addr, |
50 | - FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000, | 49 | + struct va_space *vs, OMFSignatureRSDS *rsds) |
51 | - FSL_IMX6UL_EIM_CS_ADDR = 0x50000000, | 50 | { |
52 | - FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000, | 51 | const char sign_rsds[4] = "RSDS"; |
53 | - FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000, | 52 | IMAGE_DEBUG_DIRECTORY debug_dir; |
54 | + FSL_IMX6UL_QSPI1_MEM_SIZE = (256 * MiB), | 53 | - OMFSignatureRSDS rsds; |
55 | 54 | - char *pdb_name; | |
56 | - /* AIPS-2 */ | 55 | - size_t pdb_name_sz; |
57 | + FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000, | 56 | - size_t i; |
58 | + FSL_IMX6UL_EIM_ALIAS_SIZE = (128 * MiB), | 57 | + char pdb_name[sizeof(PDB_NAME)]; |
59 | + | 58 | |
60 | + FSL_IMX6UL_EIM_CS_ADDR = 0x50000000, | 59 | if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY, |
61 | + FSL_IMX6UL_EIM_CS_SIZE = (128 * MiB), | 60 | &debug_dir, sizeof(debug_dir), vs)) { |
62 | + | 61 | eprintf("Failed to get Debug Directory\n"); |
63 | + FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000, | 62 | - return 1; |
64 | + FSL_IMX6UL_AES_ENCRYPT_SIZE = (1 * MiB), | 63 | + return false; |
65 | + | ||
66 | + FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000, | ||
67 | + FSL_IMX6UL_QSPI1_RX_SIZE = (32 * MiB), | ||
68 | + | ||
69 | + /* AIPS-2 Begin */ | ||
70 | FSL_IMX6UL_UART6_ADDR = 0x021FC000, | ||
71 | + | ||
72 | FSL_IMX6UL_I2C4_ADDR = 0x021F8000, | ||
73 | + | ||
74 | FSL_IMX6UL_UART5_ADDR = 0x021F4000, | ||
75 | FSL_IMX6UL_UART4_ADDR = 0x021F0000, | ||
76 | FSL_IMX6UL_UART3_ADDR = 0x021EC000, | ||
77 | FSL_IMX6UL_UART2_ADDR = 0x021E8000, | ||
78 | + | ||
79 | FSL_IMX6UL_WDOG3_ADDR = 0x021E4000, | ||
80 | + | ||
81 | FSL_IMX6UL_QSPI_ADDR = 0x021E0000, | ||
82 | + FSL_IMX6UL_QSPI_SIZE = 0x500, | ||
83 | + | ||
84 | FSL_IMX6UL_SYS_CNT_CTRL_ADDR = 0x021DC000, | ||
85 | + FSL_IMX6UL_SYS_CNT_CTRL_SIZE = (16 * KiB), | ||
86 | + | ||
87 | FSL_IMX6UL_SYS_CNT_CMP_ADDR = 0x021D8000, | ||
88 | + FSL_IMX6UL_SYS_CNT_CMP_SIZE = (16 * KiB), | ||
89 | + | ||
90 | FSL_IMX6UL_SYS_CNT_RD_ADDR = 0x021D4000, | ||
91 | + FSL_IMX6UL_SYS_CNT_RD_SIZE = (16 * KiB), | ||
92 | + | ||
93 | FSL_IMX6UL_TZASC_ADDR = 0x021D0000, | ||
94 | + FSL_IMX6UL_TZASC_SIZE = (16 * KiB), | ||
95 | + | ||
96 | FSL_IMX6UL_PXP_ADDR = 0x021CC000, | ||
97 | + FSL_IMX6UL_PXP_SIZE = (16 * KiB), | ||
98 | + | ||
99 | FSL_IMX6UL_LCDIF_ADDR = 0x021C8000, | ||
100 | + FSL_IMX6UL_LCDIF_SIZE = 0x100, | ||
101 | + | ||
102 | FSL_IMX6UL_CSI_ADDR = 0x021C4000, | ||
103 | + FSL_IMX6UL_CSI_SIZE = 0x100, | ||
104 | + | ||
105 | FSL_IMX6UL_CSU_ADDR = 0x021C0000, | ||
106 | + FSL_IMX6UL_CSU_SIZE = (16 * KiB), | ||
107 | + | ||
108 | FSL_IMX6UL_OCOTP_CTRL_ADDR = 0x021BC000, | ||
109 | + FSL_IMX6UL_OCOTP_CTRL_SIZE = (4 * KiB), | ||
110 | + | ||
111 | FSL_IMX6UL_EIM_ADDR = 0x021B8000, | ||
112 | + FSL_IMX6UL_EIM_SIZE = 0x100, | ||
113 | + | ||
114 | FSL_IMX6UL_SIM2_ADDR = 0x021B4000, | ||
115 | + | ||
116 | FSL_IMX6UL_MMDC_CFG_ADDR = 0x021B0000, | ||
117 | + FSL_IMX6UL_MMDC_CFG_SIZE = (4 * KiB), | ||
118 | + | ||
119 | FSL_IMX6UL_ROMCP_ADDR = 0x021AC000, | ||
120 | + FSL_IMX6UL_ROMCP_SIZE = 0x300, | ||
121 | + | ||
122 | FSL_IMX6UL_I2C3_ADDR = 0x021A8000, | ||
123 | FSL_IMX6UL_I2C2_ADDR = 0x021A4000, | ||
124 | FSL_IMX6UL_I2C1_ADDR = 0x021A0000, | ||
125 | + | ||
126 | FSL_IMX6UL_ADC2_ADDR = 0x0219C000, | ||
127 | FSL_IMX6UL_ADC1_ADDR = 0x02198000, | ||
128 | + FSL_IMX6UL_ADCn_SIZE = 0x100, | ||
129 | + | ||
130 | FSL_IMX6UL_USDHC2_ADDR = 0x02194000, | ||
131 | FSL_IMX6UL_USDHC1_ADDR = 0x02190000, | ||
132 | - FSL_IMX6UL_SIM1_ADDR = 0x0218C000, | ||
133 | - FSL_IMX6UL_ENET1_ADDR = 0x02188000, | ||
134 | - FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800, | ||
135 | - FSL_IMX6UL_USBO2_USB_ADDR = 0x02184000, | ||
136 | - FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000, | ||
137 | - FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000, | ||
138 | - FSL_IMX6UL_CAAM_ADDR = 0x02140000, | ||
139 | - FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000, | ||
140 | |||
141 | - /* AIPS-1 */ | ||
142 | + FSL_IMX6UL_SIM1_ADDR = 0x0218C000, | ||
143 | + FSL_IMX6UL_SIMn_SIZE = (16 * KiB), | ||
144 | + | ||
145 | + FSL_IMX6UL_ENET1_ADDR = 0x02188000, | ||
146 | + | ||
147 | + FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800, | ||
148 | + FSL_IMX6UL_USBO2_USB1_ADDR = 0x02184000, | ||
149 | + FSL_IMX6UL_USBO2_USB2_ADDR = 0x02184200, | ||
150 | + | ||
151 | + FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000, | ||
152 | + FSL_IMX6UL_USBO2_PL301_SIZE = (16 * KiB), | ||
153 | + | ||
154 | + FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000, | ||
155 | + FSL_IMX6UL_AIPS2_CFG_SIZE = 0x100, | ||
156 | + | ||
157 | + FSL_IMX6UL_CAAM_ADDR = 0x02140000, | ||
158 | + FSL_IMX6UL_CAAM_SIZE = (16 * KiB), | ||
159 | + | ||
160 | + FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000, | ||
161 | + FSL_IMX6UL_A7MPCORE_DAP_SIZE = (4 * KiB), | ||
162 | + /* AIPS-2 End */ | ||
163 | + | ||
164 | + /* AIPS-1 Begin */ | ||
165 | FSL_IMX6UL_PWM8_ADDR = 0x020FC000, | ||
166 | FSL_IMX6UL_PWM7_ADDR = 0x020F8000, | ||
167 | FSL_IMX6UL_PWM6_ADDR = 0x020F4000, | ||
168 | FSL_IMX6UL_PWM5_ADDR = 0x020F0000, | ||
169 | + | ||
170 | FSL_IMX6UL_SDMA_ADDR = 0x020EC000, | ||
171 | + FSL_IMX6UL_SDMA_SIZE = 0x300, | ||
172 | + | ||
173 | FSL_IMX6UL_GPT2_ADDR = 0x020E8000, | ||
174 | + | ||
175 | FSL_IMX6UL_IOMUXC_GPR_ADDR = 0x020E4000, | ||
176 | + FSL_IMX6UL_IOMUXC_GPR_SIZE = 0x40, | ||
177 | + | ||
178 | FSL_IMX6UL_IOMUXC_ADDR = 0x020E0000, | ||
179 | + FSL_IMX6UL_IOMUXC_SIZE = 0x700, | ||
180 | + | ||
181 | FSL_IMX6UL_GPC_ADDR = 0x020DC000, | ||
182 | + | ||
183 | FSL_IMX6UL_SRC_ADDR = 0x020D8000, | ||
184 | + | ||
185 | FSL_IMX6UL_EPIT2_ADDR = 0x020D4000, | ||
186 | FSL_IMX6UL_EPIT1_ADDR = 0x020D0000, | ||
187 | + | ||
188 | FSL_IMX6UL_SNVS_HP_ADDR = 0x020CC000, | ||
189 | + | ||
190 | FSL_IMX6UL_USBPHY2_ADDR = 0x020CA000, | ||
191 | - FSL_IMX6UL_USBPHY2_SIZE = (4 * 1024), | ||
192 | FSL_IMX6UL_USBPHY1_ADDR = 0x020C9000, | ||
193 | - FSL_IMX6UL_USBPHY1_SIZE = (4 * 1024), | ||
194 | + | ||
195 | FSL_IMX6UL_ANALOG_ADDR = 0x020C8000, | ||
196 | + FSL_IMX6UL_ANALOG_SIZE = 0x300, | ||
197 | + | ||
198 | FSL_IMX6UL_CCM_ADDR = 0x020C4000, | ||
199 | + | ||
200 | FSL_IMX6UL_WDOG2_ADDR = 0x020C0000, | ||
201 | FSL_IMX6UL_WDOG1_ADDR = 0x020BC000, | ||
202 | + | ||
203 | FSL_IMX6UL_KPP_ADDR = 0x020B8000, | ||
204 | + FSL_IMX6UL_KPP_SIZE = 0x10, | ||
205 | + | ||
206 | FSL_IMX6UL_ENET2_ADDR = 0x020B4000, | ||
207 | + | ||
208 | FSL_IMX6UL_SNVS_LP_ADDR = 0x020B0000, | ||
209 | + FSL_IMX6UL_SNVS_LP_SIZE = (16 * KiB), | ||
210 | + | ||
211 | FSL_IMX6UL_GPIO5_ADDR = 0x020AC000, | ||
212 | FSL_IMX6UL_GPIO4_ADDR = 0x020A8000, | ||
213 | FSL_IMX6UL_GPIO3_ADDR = 0x020A4000, | ||
214 | FSL_IMX6UL_GPIO2_ADDR = 0x020A0000, | ||
215 | FSL_IMX6UL_GPIO1_ADDR = 0x0209C000, | ||
216 | + | ||
217 | FSL_IMX6UL_GPT1_ADDR = 0x02098000, | ||
218 | + | ||
219 | FSL_IMX6UL_CAN2_ADDR = 0x02094000, | ||
220 | FSL_IMX6UL_CAN1_ADDR = 0x02090000, | ||
221 | + FSL_IMX6UL_CANn_SIZE = (4 * KiB), | ||
222 | + | ||
223 | FSL_IMX6UL_PWM4_ADDR = 0x0208C000, | ||
224 | FSL_IMX6UL_PWM3_ADDR = 0x02088000, | ||
225 | FSL_IMX6UL_PWM2_ADDR = 0x02084000, | ||
226 | FSL_IMX6UL_PWM1_ADDR = 0x02080000, | ||
227 | + FSL_IMX6UL_PWMn_SIZE = 0x20, | ||
228 | + | ||
229 | FSL_IMX6UL_AIPS1_CFG_ADDR = 0x0207C000, | ||
230 | + FSL_IMX6UL_AIPS1_CFG_SIZE = (16 * KiB), | ||
231 | + | ||
232 | FSL_IMX6UL_BEE_ADDR = 0x02044000, | ||
233 | + FSL_IMX6UL_BEE_SIZE = (16 * KiB), | ||
234 | + | ||
235 | FSL_IMX6UL_TOUCH_CTRL_ADDR = 0x02040000, | ||
236 | + FSL_IMX6UL_TOUCH_CTRL_SIZE = 0x100, | ||
237 | + | ||
238 | FSL_IMX6UL_SPBA_ADDR = 0x0203C000, | ||
239 | + FSL_IMX6UL_SPBA_SIZE = 0x100, | ||
240 | + | ||
241 | FSL_IMX6UL_ASRC_ADDR = 0x02034000, | ||
242 | + FSL_IMX6UL_ASRC_SIZE = 0x100, | ||
243 | + | ||
244 | FSL_IMX6UL_SAI3_ADDR = 0x02030000, | ||
245 | FSL_IMX6UL_SAI2_ADDR = 0x0202C000, | ||
246 | FSL_IMX6UL_SAI1_ADDR = 0x02028000, | ||
247 | + FSL_IMX6UL_SAIn_SIZE = 0x200, | ||
248 | + | ||
249 | FSL_IMX6UL_UART8_ADDR = 0x02024000, | ||
250 | FSL_IMX6UL_UART1_ADDR = 0x02020000, | ||
251 | FSL_IMX6UL_UART7_ADDR = 0x02018000, | ||
252 | + | ||
253 | FSL_IMX6UL_ECSPI4_ADDR = 0x02014000, | ||
254 | FSL_IMX6UL_ECSPI3_ADDR = 0x02010000, | ||
255 | FSL_IMX6UL_ECSPI2_ADDR = 0x0200C000, | ||
256 | FSL_IMX6UL_ECSPI1_ADDR = 0x02008000, | ||
257 | + | ||
258 | FSL_IMX6UL_SPDIF_ADDR = 0x02004000, | ||
259 | + FSL_IMX6UL_SPDIF_SIZE = 0x100, | ||
260 | + /* AIPS-1 End */ | ||
261 | + | ||
262 | + FSL_IMX6UL_BCH_ADDR = 0x01808000, | ||
263 | + FSL_IMX6UL_BCH_SIZE = 0x200, | ||
264 | + | ||
265 | + FSL_IMX6UL_GPMI_ADDR = 0x01806000, | ||
266 | + FSL_IMX6UL_GPMI_SIZE = 0x200, | ||
267 | |||
268 | FSL_IMX6UL_APBH_DMA_ADDR = 0x01804000, | ||
269 | - FSL_IMX6UL_APBH_DMA_SIZE = (32 * 1024), | ||
270 | + FSL_IMX6UL_APBH_DMA_SIZE = (4 * KiB), | ||
271 | |||
272 | FSL_IMX6UL_A7MPCORE_ADDR = 0x00A00000, | ||
273 | |||
274 | FSL_IMX6UL_OCRAM_ALIAS_ADDR = 0x00920000, | ||
275 | - FSL_IMX6UL_OCRAM_ALIAS_SIZE = 0x00060000, | ||
276 | + FSL_IMX6UL_OCRAM_ALIAS_SIZE = (384 * KiB), | ||
277 | + | ||
278 | FSL_IMX6UL_OCRAM_MEM_ADDR = 0x00900000, | ||
279 | - FSL_IMX6UL_OCRAM_MEM_SIZE = 0x00020000, | ||
280 | + FSL_IMX6UL_OCRAM_MEM_SIZE = (128 * KiB), | ||
281 | + | ||
282 | FSL_IMX6UL_CAAM_MEM_ADDR = 0x00100000, | ||
283 | - FSL_IMX6UL_CAAM_MEM_SIZE = 0x00008000, | ||
284 | + FSL_IMX6UL_CAAM_MEM_SIZE = (32 * KiB), | ||
285 | + | ||
286 | FSL_IMX6UL_ROM_ADDR = 0x00000000, | ||
287 | - FSL_IMX6UL_ROM_SIZE = 0x00018000, | ||
288 | + FSL_IMX6UL_ROM_SIZE = (96 * KiB), | ||
289 | }; | ||
290 | |||
291 | enum FslIMX6ULIRQs { | ||
292 | diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c | ||
293 | index XXXXXXX..XXXXXXX 100644 | ||
294 | --- a/hw/arm/fsl-imx6ul.c | ||
295 | +++ b/hw/arm/fsl-imx6ul.c | ||
296 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | ||
297 | object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); | ||
298 | |||
299 | /* | ||
300 | - * GPIOs 1 to 5 | ||
301 | + * GPIOs | ||
302 | */ | ||
303 | for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { | ||
304 | snprintf(name, NAME_SIZE, "gpio%d", i); | ||
305 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | ||
306 | } | 64 | } |
307 | 65 | ||
308 | /* | 66 | if (debug_dir.Type != IMAGE_DEBUG_TYPE_CODEVIEW) { |
309 | - * GPT 1, 2 | 67 | - return 1; |
310 | + * GPTs | 68 | + eprintf("Debug Directory type is not CodeView\n"); |
311 | */ | 69 | + return false; |
312 | for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { | ||
313 | snprintf(name, NAME_SIZE, "gpt%d", i); | ||
314 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | ||
315 | } | 70 | } |
316 | 71 | ||
317 | /* | 72 | if (va_space_rw(vs, |
318 | - * EPIT 1, 2 | 73 | base + debug_dir.AddressOfRawData, |
319 | + * EPITs | 74 | - &rsds, sizeof(rsds), 0)) { |
320 | */ | 75 | - return 1; |
321 | for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { | 76 | + rsds, sizeof(*rsds), 0)) { |
322 | snprintf(name, NAME_SIZE, "epit%d", i + 1); | 77 | + eprintf("Failed to resolve OMFSignatureRSDS\n"); |
323 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | 78 | + return false; |
324 | } | 79 | } |
325 | 80 | ||
326 | /* | 81 | - printf("CodeView signature is \'%.4s\'\n", rsds.Signature); |
327 | - * eCSPI | 82 | - |
328 | + * eCSPIs | 83 | - if (memcmp(&rsds.Signature, sign_rsds, sizeof(sign_rsds))) { |
329 | */ | 84 | - return 1; |
330 | for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { | 85 | + if (memcmp(&rsds->Signature, sign_rsds, sizeof(sign_rsds))) { |
331 | snprintf(name, NAME_SIZE, "spi%d", i + 1); | 86 | + eprintf("CodeView signature is \'%.4s\', \'%s\' expected\n", |
332 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | 87 | + rsds->Signature, sign_rsds); |
88 | + return false; | ||
333 | } | 89 | } |
334 | 90 | ||
335 | /* | 91 | - pdb_name_sz = debug_dir.SizeOfData - sizeof(rsds); |
336 | - * I2C | 92 | - pdb_name = malloc(pdb_name_sz); |
337 | + * I2Cs | 93 | - if (!pdb_name) { |
338 | */ | 94 | - return 1; |
339 | for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { | 95 | + if (debug_dir.SizeOfData - sizeof(*rsds) != sizeof(PDB_NAME)) { |
340 | snprintf(name, NAME_SIZE, "i2c%d", i + 1); | 96 | + eprintf("PDB name size doesn't match\n"); |
341 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | 97 | + return false; |
342 | } | 98 | } |
343 | 99 | ||
344 | /* | 100 | if (va_space_rw(vs, base + debug_dir.AddressOfRawData + |
345 | - * UART | 101 | - offsetof(OMFSignatureRSDS, name), pdb_name, pdb_name_sz, 0)) { |
346 | + * UARTs | 102 | - free(pdb_name); |
347 | */ | 103 | - return 1; |
348 | for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { | 104 | + offsetof(OMFSignatureRSDS, name), pdb_name, sizeof(PDB_NAME), |
349 | snprintf(name, NAME_SIZE, "uart%d", i); | 105 | + 0)) { |
350 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | 106 | + eprintf("Failed to resolve PDB name\n"); |
107 | + return false; | ||
351 | } | 108 | } |
352 | 109 | ||
353 | /* | 110 | printf("PDB name is \'%s\', \'%s\' expected\n", pdb_name, PDB_NAME); |
354 | - * Ethernet | 111 | |
355 | + * Ethernets | 112 | - if (strcmp(pdb_name, PDB_NAME)) { |
356 | */ | 113 | - eprintf("Unexpected PDB name, it seems the kernel isn't found\n"); |
357 | for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { | 114 | - free(pdb_name); |
358 | snprintf(name, NAME_SIZE, "eth%d", i); | 115 | - return 1; |
359 | object_initialize_child(obj, name, &s->eth[i], TYPE_IMX_ENET); | 116 | - } |
117 | + return !strcmp(pdb_name, PDB_NAME); | ||
118 | +} | ||
119 | |||
120 | - free(pdb_name); | ||
121 | - | ||
122 | - sprintf(hash, "%.08x%.04x%.04x%.02x%.02x", rsds.guid.a, rsds.guid.b, | ||
123 | - rsds.guid.c, rsds.guid.d[0], rsds.guid.d[1]); | ||
124 | +static void pe_get_pdb_symstore_hash(OMFSignatureRSDS *rsds, char *hash) | ||
125 | +{ | ||
126 | + sprintf(hash, "%.08x%.04x%.04x%.02x%.02x", rsds->guid.a, rsds->guid.b, | ||
127 | + rsds->guid.c, rsds->guid.d[0], rsds->guid.d[1]); | ||
128 | hash += 20; | ||
129 | - for (i = 0; i < 6; i++, hash += 2) { | ||
130 | - sprintf(hash, "%.02x", rsds.guid.e[i]); | ||
131 | + for (unsigned int i = 0; i < 6; i++, hash += 2) { | ||
132 | + sprintf(hash, "%.02x", rsds->guid.e[i]); | ||
360 | } | 133 | } |
361 | 134 | ||
362 | - /* USB */ | 135 | - sprintf(hash, "%.01x", rsds.age); |
363 | + /* | ||
364 | + * USB PHYs | ||
365 | + */ | ||
366 | for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) { | ||
367 | snprintf(name, NAME_SIZE, "usbphy%d", i); | ||
368 | object_initialize_child(obj, name, &s->usbphy[i], TYPE_IMX_USBPHY); | ||
369 | } | ||
370 | + | ||
371 | + /* | ||
372 | + * USBs | ||
373 | + */ | ||
374 | for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) { | ||
375 | snprintf(name, NAME_SIZE, "usb%d", i); | ||
376 | object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA); | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | - * SDHCI | ||
381 | + * SDHCIs | ||
382 | */ | ||
383 | for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { | ||
384 | snprintf(name, NAME_SIZE, "usdhc%d", i); | ||
385 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | - * Watchdog | ||
390 | + * Watchdogs | ||
391 | */ | ||
392 | for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { | ||
393 | snprintf(name, NAME_SIZE, "wdt%d", i); | ||
394 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
395 | * A7MPCORE DAP | ||
396 | */ | ||
397 | create_unimplemented_device("a7mpcore-dap", FSL_IMX6UL_A7MPCORE_DAP_ADDR, | ||
398 | - 0x100000); | ||
399 | + FSL_IMX6UL_A7MPCORE_DAP_SIZE); | ||
400 | |||
401 | /* | ||
402 | - * GPT 1, 2 | ||
403 | + * GPTs | ||
404 | */ | ||
405 | for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { | ||
406 | static const hwaddr FSL_IMX6UL_GPTn_ADDR[FSL_IMX6UL_NUM_GPTS] = { | ||
407 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
408 | } | ||
409 | |||
410 | /* | ||
411 | - * EPIT 1, 2 | ||
412 | + * EPITs | ||
413 | */ | ||
414 | for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { | ||
415 | static const hwaddr FSL_IMX6UL_EPITn_ADDR[FSL_IMX6UL_NUM_EPITS] = { | ||
416 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | - * GPIO | ||
421 | + * GPIOs | ||
422 | */ | ||
423 | for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { | ||
424 | static const hwaddr FSL_IMX6UL_GPIOn_ADDR[FSL_IMX6UL_NUM_GPIOS] = { | ||
425 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | - * IOMUXC and IOMUXC_GPR | ||
430 | + * IOMUXC | ||
431 | */ | ||
432 | - for (i = 0; i < 1; i++) { | ||
433 | - static const hwaddr FSL_IMX6UL_IOMUXCn_ADDR[FSL_IMX6UL_NUM_IOMUXCS] = { | ||
434 | - FSL_IMX6UL_IOMUXC_ADDR, | ||
435 | - FSL_IMX6UL_IOMUXC_GPR_ADDR, | ||
436 | - }; | ||
437 | - | 136 | - |
438 | - snprintf(name, NAME_SIZE, "iomuxc%d", i); | 137 | - return 0; |
439 | - create_unimplemented_device(name, FSL_IMX6UL_IOMUXCn_ADDR[i], 0x4000); | 138 | + sprintf(hash, "%.01x", rsds->age); |
139 | } | ||
140 | |||
141 | int main(int argc, char *argv[]) | ||
142 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
143 | KDDEBUGGER_DATA64 *kdbg; | ||
144 | uint64_t KdVersionBlock; | ||
145 | bool kernel_found = false; | ||
146 | + OMFSignatureRSDS rsds; | ||
147 | |||
148 | if (argc != 3) { | ||
149 | eprintf("usage:\n\t%s elf_file dmp_file\n", argv[0]); | ||
150 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
151 | } | ||
152 | |||
153 | if (*(uint16_t *)nt_start_addr == 0x5a4d) { /* MZ */ | ||
154 | - if (pe_check_export_name(KernBase, nt_start_addr, &vs)) { | ||
155 | + printf("Checking candidate KernBase = 0x%016"PRIx64"\n", KernBase); | ||
156 | + if (pe_check_pdb_name(KernBase, nt_start_addr, &vs, &rsds)) { | ||
157 | kernel_found = true; | ||
158 | break; | ||
159 | } | ||
160 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
161 | printf("KernBase = 0x%016"PRIx64", signature is \'%.2s\'\n", KernBase, | ||
162 | (char *)nt_start_addr); | ||
163 | |||
164 | - if (pe_get_pdb_symstore_hash(KernBase, nt_start_addr, pdb_hash, &vs)) { | ||
165 | - eprintf("Failed to get PDB symbol store hash\n"); | ||
166 | - err = 1; | ||
167 | - goto out_ps; | ||
440 | - } | 168 | - } |
441 | + create_unimplemented_device("iomuxc", FSL_IMX6UL_IOMUXC_ADDR, | 169 | + pe_get_pdb_symstore_hash(&rsds, pdb_hash); |
442 | + FSL_IMX6UL_IOMUXC_SIZE); | 170 | |
443 | + create_unimplemented_device("iomuxc_gpr", FSL_IMX6UL_IOMUXC_GPR_ADDR, | 171 | sprintf(pdb_url, "%s%s/%s/%s", SYM_URL_BASE, PDB_NAME, pdb_hash, PDB_NAME); |
444 | + FSL_IMX6UL_IOMUXC_GPR_SIZE); | 172 | printf("PDB URL is %s\n", pdb_url); |
445 | |||
446 | /* | ||
447 | * CCM | ||
448 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
449 | sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort); | ||
450 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX6UL_GPC_ADDR); | ||
451 | |||
452 | - /* Initialize all ECSPI */ | ||
453 | + /* | ||
454 | + * ECSPIs | ||
455 | + */ | ||
456 | for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { | ||
457 | static const hwaddr FSL_IMX6UL_SPIn_ADDR[FSL_IMX6UL_NUM_ECSPIS] = { | ||
458 | FSL_IMX6UL_ECSPI1_ADDR, | ||
459 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | - * I2C | ||
464 | + * I2Cs | ||
465 | */ | ||
466 | for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { | ||
467 | static const hwaddr FSL_IMX6UL_I2Cn_ADDR[FSL_IMX6UL_NUM_I2CS] = { | ||
468 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | - * UART | ||
473 | + * UARTs | ||
474 | */ | ||
475 | for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { | ||
476 | static const hwaddr FSL_IMX6UL_UARTn_ADDR[FSL_IMX6UL_NUM_UARTS] = { | ||
477 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | - * Ethernet | ||
482 | + * Ethernets | ||
483 | * | ||
484 | * We must use two loops since phy_connected affects the other interface | ||
485 | * and we have to set all properties before calling sysbus_realize(). | ||
486 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
487 | FSL_IMX6UL_ENETn_TIMER_IRQ[i])); | ||
488 | } | ||
489 | |||
490 | - /* USB */ | ||
491 | + /* | ||
492 | + * USB PHYs | ||
493 | + */ | ||
494 | for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) { | ||
495 | + static const hwaddr | ||
496 | + FSL_IMX6UL_USB_PHYn_ADDR[FSL_IMX6UL_NUM_USB_PHYS] = { | ||
497 | + FSL_IMX6UL_USBPHY1_ADDR, | ||
498 | + FSL_IMX6UL_USBPHY2_ADDR, | ||
499 | + }; | ||
500 | + | ||
501 | sysbus_realize(SYS_BUS_DEVICE(&s->usbphy[i]), &error_abort); | ||
502 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0, | ||
503 | - FSL_IMX6UL_USBPHY1_ADDR + i * 0x1000); | ||
504 | + FSL_IMX6UL_USB_PHYn_ADDR[i]); | ||
505 | } | ||
506 | |||
507 | + /* | ||
508 | + * USBs | ||
509 | + */ | ||
510 | for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) { | ||
511 | + static const hwaddr FSL_IMX6UL_USB02_USBn_ADDR[FSL_IMX6UL_NUM_USBS] = { | ||
512 | + FSL_IMX6UL_USBO2_USB1_ADDR, | ||
513 | + FSL_IMX6UL_USBO2_USB2_ADDR, | ||
514 | + }; | ||
515 | + | ||
516 | static const int FSL_IMX6UL_USBn_IRQ[] = { | ||
517 | FSL_IMX6UL_USB1_IRQ, | ||
518 | FSL_IMX6UL_USB2_IRQ, | ||
519 | }; | ||
520 | + | ||
521 | sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort); | ||
522 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, | ||
523 | - FSL_IMX6UL_USBO2_USB_ADDR + i * 0x200); | ||
524 | + FSL_IMX6UL_USB02_USBn_ADDR[i]); | ||
525 | sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, | ||
526 | qdev_get_gpio_in(DEVICE(&s->a7mpcore), | ||
527 | FSL_IMX6UL_USBn_IRQ[i])); | ||
528 | } | ||
529 | |||
530 | /* | ||
531 | - * USDHC | ||
532 | + * USDHCs | ||
533 | */ | ||
534 | for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { | ||
535 | static const hwaddr FSL_IMX6UL_USDHCn_ADDR[FSL_IMX6UL_NUM_USDHCS] = { | ||
536 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
537 | sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6UL_SNVS_HP_ADDR); | ||
538 | |||
539 | /* | ||
540 | - * Watchdog | ||
541 | + * Watchdogs | ||
542 | */ | ||
543 | for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { | ||
544 | static const hwaddr FSL_IMX6UL_WDOGn_ADDR[FSL_IMX6UL_NUM_WDTS] = { | ||
545 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
546 | FSL_IMX6UL_WDOG2_ADDR, | ||
547 | FSL_IMX6UL_WDOG3_ADDR, | ||
548 | }; | ||
549 | + | ||
550 | static const int FSL_IMX6UL_WDOGn_IRQ[FSL_IMX6UL_NUM_WDTS] = { | ||
551 | FSL_IMX6UL_WDOG1_IRQ, | ||
552 | FSL_IMX6UL_WDOG2_IRQ, | ||
553 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
554 | /* | ||
555 | * SDMA | ||
556 | */ | ||
557 | - create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000); | ||
558 | + create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, | ||
559 | + FSL_IMX6UL_SDMA_SIZE); | ||
560 | |||
561 | /* | ||
562 | - * SAI (Audio SSI (Synchronous Serial Interface)) | ||
563 | + * SAIs (Audio SSI (Synchronous Serial Interface)) | ||
564 | */ | ||
565 | - create_unimplemented_device("sai1", FSL_IMX6UL_SAI1_ADDR, 0x4000); | ||
566 | - create_unimplemented_device("sai2", FSL_IMX6UL_SAI2_ADDR, 0x4000); | ||
567 | - create_unimplemented_device("sai3", FSL_IMX6UL_SAI3_ADDR, 0x4000); | ||
568 | + for (i = 0; i < FSL_IMX6UL_NUM_SAIS; i++) { | ||
569 | + static const hwaddr FSL_IMX6UL_SAIn_ADDR[FSL_IMX6UL_NUM_SAIS] = { | ||
570 | + FSL_IMX6UL_SAI1_ADDR, | ||
571 | + FSL_IMX6UL_SAI2_ADDR, | ||
572 | + FSL_IMX6UL_SAI3_ADDR, | ||
573 | + }; | ||
574 | + | ||
575 | + snprintf(name, NAME_SIZE, "sai%d", i); | ||
576 | + create_unimplemented_device(name, FSL_IMX6UL_SAIn_ADDR[i], | ||
577 | + FSL_IMX6UL_SAIn_SIZE); | ||
578 | + } | ||
579 | |||
580 | /* | ||
581 | - * PWM | ||
582 | + * PWMs | ||
583 | */ | ||
584 | - create_unimplemented_device("pwm1", FSL_IMX6UL_PWM1_ADDR, 0x4000); | ||
585 | - create_unimplemented_device("pwm2", FSL_IMX6UL_PWM2_ADDR, 0x4000); | ||
586 | - create_unimplemented_device("pwm3", FSL_IMX6UL_PWM3_ADDR, 0x4000); | ||
587 | - create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000); | ||
588 | + for (i = 0; i < FSL_IMX6UL_NUM_PWMS; i++) { | ||
589 | + static const hwaddr FSL_IMX6UL_PWMn_ADDR[FSL_IMX6UL_NUM_PWMS] = { | ||
590 | + FSL_IMX6UL_PWM1_ADDR, | ||
591 | + FSL_IMX6UL_PWM2_ADDR, | ||
592 | + FSL_IMX6UL_PWM3_ADDR, | ||
593 | + FSL_IMX6UL_PWM4_ADDR, | ||
594 | + }; | ||
595 | + | ||
596 | + snprintf(name, NAME_SIZE, "pwm%d", i); | ||
597 | + create_unimplemented_device(name, FSL_IMX6UL_PWMn_ADDR[i], | ||
598 | + FSL_IMX6UL_PWMn_SIZE); | ||
599 | + } | ||
600 | |||
601 | /* | ||
602 | * Audio ASRC (asynchronous sample rate converter) | ||
603 | */ | ||
604 | - create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, 0x4000); | ||
605 | + create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, | ||
606 | + FSL_IMX6UL_ASRC_SIZE); | ||
607 | |||
608 | /* | ||
609 | - * CAN | ||
610 | + * CANs | ||
611 | */ | ||
612 | - create_unimplemented_device("can1", FSL_IMX6UL_CAN1_ADDR, 0x4000); | ||
613 | - create_unimplemented_device("can2", FSL_IMX6UL_CAN2_ADDR, 0x4000); | ||
614 | + for (i = 0; i < FSL_IMX6UL_NUM_CANS; i++) { | ||
615 | + static const hwaddr FSL_IMX6UL_CANn_ADDR[FSL_IMX6UL_NUM_CANS] = { | ||
616 | + FSL_IMX6UL_CAN1_ADDR, | ||
617 | + FSL_IMX6UL_CAN2_ADDR, | ||
618 | + }; | ||
619 | + | ||
620 | + snprintf(name, NAME_SIZE, "can%d", i); | ||
621 | + create_unimplemented_device(name, FSL_IMX6UL_CANn_ADDR[i], | ||
622 | + FSL_IMX6UL_CANn_SIZE); | ||
623 | + } | ||
624 | |||
625 | /* | ||
626 | * APHB_DMA | ||
627 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | ||
628 | }; | ||
629 | |||
630 | snprintf(name, NAME_SIZE, "adc%d", i); | ||
631 | - create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i], 0x4000); | ||
632 | + create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i], | ||
633 | + FSL_IMX6UL_ADCn_SIZE); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * LCD | ||
638 | */ | ||
639 | - create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, 0x4000); | ||
640 | + create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, | ||
641 | + FSL_IMX6UL_LCDIF_SIZE); | ||
642 | |||
643 | /* | ||
644 | * ROM memory | ||
645 | -- | 173 | -- |
646 | 2.34.1 | 174 | 2.34.1 |
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | From: Viktor Prutyanov <viktor@daynix.com> |
---|---|---|---|
2 | 2 | ||
3 | The SRC device is normally used to start the secondary CPU. | 3 | Physical memory ranges may not be aligned to page size in QEMU ELF, but |
4 | DMP can only contain page-aligned runs. So, align them. | ||
4 | 5 | ||
5 | When running Linux directly, QEMU emulates a PSCI interface that U-Boot | 6 | Signed-off-by: Viktor Prutyanov <viktor@daynix.com> |
6 | installs at boot time, and therefore the fact that the SRC device is | 7 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
7 | unimplemented is hidden, as QEMU responds directly to PSCI requests without | 8 | Message-id: 20230915170153.10959-3-viktor@daynix.com |
8 | using the SRC device. | ||
9 | |||
10 | But if you try to run a more bare-metal application (maybe U-Boot itself), | ||
11 | then it is not possible to start the secondary CPU as the SRC is an | ||
12 | unimplemented device. | ||
13 | |||
14 | This patch adds the ability to start the secondary CPU through the SRC | ||
15 | device so that you can use this feature in bare metal applications. | ||
16 | |||
17 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | ||
18 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
19 | Message-id: ce9a0162defd2acee5dc7f8a674743de0cded569.1692964892.git.jcd@tribudubois.net | ||
20 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
21 | --- | 10 | --- |
22 | include/hw/arm/fsl-imx7.h | 3 +- | 11 | contrib/elf2dmp/addrspace.h | 1 + |
23 | include/hw/misc/imx7_src.h | 66 +++++++++ | 12 | contrib/elf2dmp/addrspace.c | 31 +++++++++++++++++++++++++++++-- |
24 | hw/arm/fsl-imx7.c | 8 +- | 13 | contrib/elf2dmp/main.c | 5 +++-- |
25 | hw/misc/imx7_src.c | 276 +++++++++++++++++++++++++++++++++++++ | 14 | 3 files changed, 33 insertions(+), 4 deletions(-) |
26 | hw/misc/meson.build | 1 + | ||
27 | hw/misc/trace-events | 4 + | ||
28 | 6 files changed, 356 insertions(+), 2 deletions(-) | ||
29 | create mode 100644 include/hw/misc/imx7_src.h | ||
30 | create mode 100644 hw/misc/imx7_src.c | ||
31 | 15 | ||
32 | diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h | 16 | diff --git a/contrib/elf2dmp/addrspace.h b/contrib/elf2dmp/addrspace.h |
33 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/include/hw/arm/fsl-imx7.h | 18 | --- a/contrib/elf2dmp/addrspace.h |
35 | +++ b/include/hw/arm/fsl-imx7.h | 19 | +++ b/contrib/elf2dmp/addrspace.h |
36 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ |
37 | #include "hw/misc/imx7_ccm.h" | 21 | |
38 | #include "hw/misc/imx7_snvs.h" | 22 | #define ELF2DMP_PAGE_BITS 12 |
39 | #include "hw/misc/imx7_gpr.h" | 23 | #define ELF2DMP_PAGE_SIZE (1ULL << ELF2DMP_PAGE_BITS) |
40 | +#include "hw/misc/imx7_src.h" | 24 | +#define ELF2DMP_PAGE_MASK (ELF2DMP_PAGE_SIZE - 1) |
41 | #include "hw/watchdog/wdt_imx2.h" | 25 | #define ELF2DMP_PFN_MASK (~(ELF2DMP_PAGE_SIZE - 1)) |
42 | #include "hw/gpio/imx_gpio.h" | 26 | |
43 | #include "hw/char/imx_serial.h" | 27 | #define INVALID_PA UINT64_MAX |
44 | @@ -XXX,XX +XXX,XX @@ struct FslIMX7State { | 28 | diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c |
45 | IMX7CCMState ccm; | 29 | index XXXXXXX..XXXXXXX 100644 |
46 | IMX7AnalogState analog; | 30 | --- a/contrib/elf2dmp/addrspace.c |
47 | IMX7SNVSState snvs; | 31 | +++ b/contrib/elf2dmp/addrspace.c |
48 | + IMX7SRCState src; | 32 | @@ -XXX,XX +XXX,XX @@ static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa) |
49 | IMXGPCv2State gpcv2; | 33 | |
50 | IMXSPIState spi[FSL_IMX7_NUM_ECSPIS]; | 34 | for (i = 0; i < ps->block_nr; i++) { |
51 | IMXI2CState i2c[FSL_IMX7_NUM_I2CS]; | 35 | if (ps->block[i].paddr <= pa && |
52 | @@ -XXX,XX +XXX,XX @@ enum FslIMX7MemoryMap { | 36 | - pa <= ps->block[i].paddr + ps->block[i].size) { |
53 | FSL_IMX7_GPC_ADDR = 0x303A0000, | 37 | + pa < ps->block[i].paddr + ps->block[i].size) { |
54 | 38 | return ps->block + i; | |
55 | FSL_IMX7_SRC_ADDR = 0x30390000, | 39 | } |
56 | - FSL_IMX7_SRC_SIZE = (4 * KiB), | 40 | } |
57 | 41 | @@ -XXX,XX +XXX,XX @@ static uint8_t *pa_space_resolve(struct pa_space *ps, uint64_t pa) | |
58 | FSL_IMX7_CCM_ADDR = 0x30380000, | 42 | return block->addr + (pa - block->paddr); |
59 | 43 | } | |
60 | diff --git a/include/hw/misc/imx7_src.h b/include/hw/misc/imx7_src.h | 44 | |
61 | new file mode 100644 | 45 | +static void pa_block_align(struct pa_block *b) |
62 | index XXXXXXX..XXXXXXX | 46 | +{ |
63 | --- /dev/null | 47 | + uint64_t low_align = ((b->paddr - 1) | ELF2DMP_PAGE_MASK) + 1 - b->paddr; |
64 | +++ b/include/hw/misc/imx7_src.h | 48 | + uint64_t high_align = (b->paddr + b->size) & ELF2DMP_PAGE_MASK; |
65 | @@ -XXX,XX +XXX,XX @@ | ||
66 | +/* | ||
67 | + * IMX7 System Reset Controller | ||
68 | + * | ||
69 | + * Copyright (C) 2023 Jean-Christophe Dubois <jcd@tribudubois.net> | ||
70 | + * | ||
71 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
72 | + * See the COPYING file in the top-level directory. | ||
73 | + */ | ||
74 | + | 49 | + |
75 | +#ifndef IMX7_SRC_H | 50 | + if (low_align == 0 && high_align == 0) { |
76 | +#define IMX7_SRC_H | ||
77 | + | ||
78 | +#include "hw/sysbus.h" | ||
79 | +#include "qemu/bitops.h" | ||
80 | +#include "qom/object.h" | ||
81 | + | ||
82 | +#define SRC_SCR 0 | ||
83 | +#define SRC_A7RCR0 1 | ||
84 | +#define SRC_A7RCR1 2 | ||
85 | +#define SRC_M4RCR 3 | ||
86 | +#define SRC_ERCR 5 | ||
87 | +#define SRC_HSICPHY_RCR 7 | ||
88 | +#define SRC_USBOPHY1_RCR 8 | ||
89 | +#define SRC_USBOPHY2_RCR 9 | ||
90 | +#define SRC_MPIPHY_RCR 10 | ||
91 | +#define SRC_PCIEPHY_RCR 11 | ||
92 | +#define SRC_SBMR1 22 | ||
93 | +#define SRC_SRSR 23 | ||
94 | +#define SRC_SISR 26 | ||
95 | +#define SRC_SIMR 27 | ||
96 | +#define SRC_SBMR2 28 | ||
97 | +#define SRC_GPR1 29 | ||
98 | +#define SRC_GPR2 30 | ||
99 | +#define SRC_GPR3 31 | ||
100 | +#define SRC_GPR4 32 | ||
101 | +#define SRC_GPR5 33 | ||
102 | +#define SRC_GPR6 34 | ||
103 | +#define SRC_GPR7 35 | ||
104 | +#define SRC_GPR8 36 | ||
105 | +#define SRC_GPR9 37 | ||
106 | +#define SRC_GPR10 38 | ||
107 | +#define SRC_MAX 39 | ||
108 | + | ||
109 | +/* SRC_A7SCR1 */ | ||
110 | +#define R_CORE1_ENABLE_SHIFT 1 | ||
111 | +#define R_CORE1_ENABLE_LENGTH 1 | ||
112 | +/* SRC_A7SCR0 */ | ||
113 | +#define R_CORE1_RST_SHIFT 5 | ||
114 | +#define R_CORE1_RST_LENGTH 1 | ||
115 | +#define R_CORE0_RST_SHIFT 4 | ||
116 | +#define R_CORE0_RST_LENGTH 1 | ||
117 | + | ||
118 | +#define TYPE_IMX7_SRC "imx7.src" | ||
119 | +OBJECT_DECLARE_SIMPLE_TYPE(IMX7SRCState, IMX7_SRC) | ||
120 | + | ||
121 | +struct IMX7SRCState { | ||
122 | + /* <private> */ | ||
123 | + SysBusDevice parent_obj; | ||
124 | + | ||
125 | + /* <public> */ | ||
126 | + MemoryRegion iomem; | ||
127 | + | ||
128 | + uint32_t regs[SRC_MAX]; | ||
129 | +}; | ||
130 | + | ||
131 | +#endif /* IMX7_SRC_H */ | ||
132 | diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c | ||
133 | index XXXXXXX..XXXXXXX 100644 | ||
134 | --- a/hw/arm/fsl-imx7.c | ||
135 | +++ b/hw/arm/fsl-imx7.c | ||
136 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj) | ||
137 | */ | ||
138 | object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2); | ||
139 | |||
140 | + /* | ||
141 | + * SRC | ||
142 | + */ | ||
143 | + object_initialize_child(obj, "src", &s->src, TYPE_IMX7_SRC); | ||
144 | + | ||
145 | /* | ||
146 | * ECSPIs | ||
147 | */ | ||
148 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) | ||
149 | /* | ||
150 | * SRC | ||
151 | */ | ||
152 | - create_unimplemented_device("src", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE); | ||
153 | + sysbus_realize(SYS_BUS_DEVICE(&s->src), &error_abort); | ||
154 | + sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX7_SRC_ADDR); | ||
155 | |||
156 | /* | ||
157 | * Watchdogs | ||
158 | diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c | ||
159 | new file mode 100644 | ||
160 | index XXXXXXX..XXXXXXX | ||
161 | --- /dev/null | ||
162 | +++ b/hw/misc/imx7_src.c | ||
163 | @@ -XXX,XX +XXX,XX @@ | ||
164 | +/* | ||
165 | + * IMX7 System Reset Controller | ||
166 | + * | ||
167 | + * Copyright (c) 2023 Jean-Christophe Dubois <jcd@tribudubois.net> | ||
168 | + * | ||
169 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
170 | + * See the COPYING file in the top-level directory. | ||
171 | + * | ||
172 | + */ | ||
173 | + | ||
174 | +#include "qemu/osdep.h" | ||
175 | +#include "hw/misc/imx7_src.h" | ||
176 | +#include "migration/vmstate.h" | ||
177 | +#include "qemu/bitops.h" | ||
178 | +#include "qemu/log.h" | ||
179 | +#include "qemu/main-loop.h" | ||
180 | +#include "qemu/module.h" | ||
181 | +#include "target/arm/arm-powerctl.h" | ||
182 | +#include "hw/core/cpu.h" | ||
183 | +#include "hw/registerfields.h" | ||
184 | + | ||
185 | +#include "trace.h" | ||
186 | + | ||
187 | +static const char *imx7_src_reg_name(uint32_t reg) | ||
188 | +{ | ||
189 | + static char unknown[20]; | ||
190 | + | ||
191 | + switch (reg) { | ||
192 | + case SRC_SCR: | ||
193 | + return "SRC_SCR"; | ||
194 | + case SRC_A7RCR0: | ||
195 | + return "SRC_A7RCR0"; | ||
196 | + case SRC_A7RCR1: | ||
197 | + return "SRC_A7RCR1"; | ||
198 | + case SRC_M4RCR: | ||
199 | + return "SRC_M4RCR"; | ||
200 | + case SRC_ERCR: | ||
201 | + return "SRC_ERCR"; | ||
202 | + case SRC_HSICPHY_RCR: | ||
203 | + return "SRC_HSICPHY_RCR"; | ||
204 | + case SRC_USBOPHY1_RCR: | ||
205 | + return "SRC_USBOPHY1_RCR"; | ||
206 | + case SRC_USBOPHY2_RCR: | ||
207 | + return "SRC_USBOPHY2_RCR"; | ||
208 | + case SRC_PCIEPHY_RCR: | ||
209 | + return "SRC_PCIEPHY_RCR"; | ||
210 | + case SRC_SBMR1: | ||
211 | + return "SRC_SBMR1"; | ||
212 | + case SRC_SRSR: | ||
213 | + return "SRC_SRSR"; | ||
214 | + case SRC_SISR: | ||
215 | + return "SRC_SISR"; | ||
216 | + case SRC_SIMR: | ||
217 | + return "SRC_SIMR"; | ||
218 | + case SRC_SBMR2: | ||
219 | + return "SRC_SBMR2"; | ||
220 | + case SRC_GPR1: | ||
221 | + return "SRC_GPR1"; | ||
222 | + case SRC_GPR2: | ||
223 | + return "SRC_GPR2"; | ||
224 | + case SRC_GPR3: | ||
225 | + return "SRC_GPR3"; | ||
226 | + case SRC_GPR4: | ||
227 | + return "SRC_GPR4"; | ||
228 | + case SRC_GPR5: | ||
229 | + return "SRC_GPR5"; | ||
230 | + case SRC_GPR6: | ||
231 | + return "SRC_GPR6"; | ||
232 | + case SRC_GPR7: | ||
233 | + return "SRC_GPR7"; | ||
234 | + case SRC_GPR8: | ||
235 | + return "SRC_GPR8"; | ||
236 | + case SRC_GPR9: | ||
237 | + return "SRC_GPR9"; | ||
238 | + case SRC_GPR10: | ||
239 | + return "SRC_GPR10"; | ||
240 | + default: | ||
241 | + sprintf(unknown, "%u ?", reg); | ||
242 | + return unknown; | ||
243 | + } | ||
244 | +} | ||
245 | + | ||
246 | +static const VMStateDescription vmstate_imx7_src = { | ||
247 | + .name = TYPE_IMX7_SRC, | ||
248 | + .version_id = 1, | ||
249 | + .minimum_version_id = 1, | ||
250 | + .fields = (VMStateField[]) { | ||
251 | + VMSTATE_UINT32_ARRAY(regs, IMX7SRCState, SRC_MAX), | ||
252 | + VMSTATE_END_OF_LIST() | ||
253 | + }, | ||
254 | +}; | ||
255 | + | ||
256 | +static void imx7_src_reset(DeviceState *dev) | ||
257 | +{ | ||
258 | + IMX7SRCState *s = IMX7_SRC(dev); | ||
259 | + | ||
260 | + memset(s->regs, 0, sizeof(s->regs)); | ||
261 | + | ||
262 | + /* Set reset values */ | ||
263 | + s->regs[SRC_SCR] = 0xA0; | ||
264 | + s->regs[SRC_SRSR] = 0x1; | ||
265 | + s->regs[SRC_SIMR] = 0x1F; | ||
266 | +} | ||
267 | + | ||
268 | +static uint64_t imx7_src_read(void *opaque, hwaddr offset, unsigned size) | ||
269 | +{ | ||
270 | + uint32_t value = 0; | ||
271 | + IMX7SRCState *s = (IMX7SRCState *)opaque; | ||
272 | + uint32_t index = offset >> 2; | ||
273 | + | ||
274 | + if (index < SRC_MAX) { | ||
275 | + value = s->regs[index]; | ||
276 | + } else { | ||
277 | + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" | ||
278 | + HWADDR_PRIx "\n", TYPE_IMX7_SRC, __func__, offset); | ||
279 | + } | ||
280 | + | ||
281 | + trace_imx7_src_read(imx7_src_reg_name(index), value); | ||
282 | + | ||
283 | + return value; | ||
284 | +} | ||
285 | + | ||
286 | + | ||
287 | +/* | ||
288 | + * The reset is asynchronous so we need to defer clearing the reset | ||
289 | + * bit until the work is completed. | ||
290 | + */ | ||
291 | + | ||
292 | +struct SRCSCRResetInfo { | ||
293 | + IMX7SRCState *s; | ||
294 | + uint32_t reset_bit; | ||
295 | +}; | ||
296 | + | ||
297 | +static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) | ||
298 | +{ | ||
299 | + struct SRCSCRResetInfo *ri = data.host_ptr; | ||
300 | + IMX7SRCState *s = ri->s; | ||
301 | + | ||
302 | + assert(qemu_mutex_iothread_locked()); | ||
303 | + | ||
304 | + s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0); | ||
305 | + | ||
306 | + trace_imx7_src_write(imx7_src_reg_name(SRC_A7RCR0), s->regs[SRC_A7RCR0]); | ||
307 | + | ||
308 | + g_free(ri); | ||
309 | +} | ||
310 | + | ||
311 | +static void imx7_defer_clear_reset_bit(uint32_t cpuid, | ||
312 | + IMX7SRCState *s, | ||
313 | + uint32_t reset_shift) | ||
314 | +{ | ||
315 | + struct SRCSCRResetInfo *ri; | ||
316 | + CPUState *cpu = arm_get_cpu_by_id(cpuid); | ||
317 | + | ||
318 | + if (!cpu) { | ||
319 | + return; | 51 | + return; |
320 | + } | 52 | + } |
321 | + | 53 | + |
322 | + ri = g_new(struct SRCSCRResetInfo, 1); | 54 | + if (low_align + high_align < b->size) { |
323 | + ri->s = s; | 55 | + printf("Block 0x%"PRIx64"+:0x%"PRIx64" will be aligned to " |
324 | + ri->reset_bit = reset_shift; | 56 | + "0x%"PRIx64"+:0x%"PRIx64"\n", b->paddr, b->size, |
57 | + b->paddr + low_align, b->size - low_align - high_align); | ||
58 | + b->size -= low_align + high_align; | ||
59 | + } else { | ||
60 | + printf("Block 0x%"PRIx64"+:0x%"PRIx64" is too small to align\n", | ||
61 | + b->paddr, b->size); | ||
62 | + b->size = 0; | ||
63 | + } | ||
325 | + | 64 | + |
326 | + async_run_on_cpu(cpu, imx7_clear_reset_bit, RUN_ON_CPU_HOST_PTR(ri)); | 65 | + b->addr += low_align; |
66 | + b->paddr += low_align; | ||
327 | +} | 67 | +} |
328 | + | 68 | + |
69 | int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) | ||
70 | { | ||
71 | Elf64_Half phdr_nr = elf_getphdrnum(qemu_elf->map); | ||
72 | @@ -XXX,XX +XXX,XX @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) | ||
73 | .paddr = phdr[i].p_paddr, | ||
74 | .size = phdr[i].p_filesz, | ||
75 | }; | ||
76 | - block_i++; | ||
77 | + pa_block_align(&ps->block[block_i]); | ||
78 | + block_i = ps->block[block_i].size ? (block_i + 1) : block_i; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | + ps->block_nr = block_i; | ||
329 | + | 83 | + |
330 | +static void imx7_src_write(void *opaque, hwaddr offset, uint64_t value, | 84 | return 0; |
331 | + unsigned size) | 85 | } |
332 | +{ | 86 | |
333 | + IMX7SRCState *s = (IMX7SRCState *)opaque; | 87 | diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c |
334 | + uint32_t index = offset >> 2; | ||
335 | + long unsigned int change_mask; | ||
336 | + uint32_t current_value = value; | ||
337 | + | ||
338 | + if (index >= SRC_MAX) { | ||
339 | + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" | ||
340 | + HWADDR_PRIx "\n", TYPE_IMX7_SRC, __func__, offset); | ||
341 | + return; | ||
342 | + } | ||
343 | + | ||
344 | + trace_imx7_src_write(imx7_src_reg_name(SRC_A7RCR0), s->regs[SRC_A7RCR0]); | ||
345 | + | ||
346 | + change_mask = s->regs[index] ^ (uint32_t)current_value; | ||
347 | + | ||
348 | + switch (index) { | ||
349 | + case SRC_A7RCR0: | ||
350 | + if (FIELD_EX32(change_mask, CORE0, RST)) { | ||
351 | + arm_reset_cpu(0); | ||
352 | + imx7_defer_clear_reset_bit(0, s, R_CORE0_RST_SHIFT); | ||
353 | + } | ||
354 | + if (FIELD_EX32(change_mask, CORE1, RST)) { | ||
355 | + arm_reset_cpu(1); | ||
356 | + imx7_defer_clear_reset_bit(1, s, R_CORE1_RST_SHIFT); | ||
357 | + } | ||
358 | + s->regs[index] = current_value; | ||
359 | + break; | ||
360 | + case SRC_A7RCR1: | ||
361 | + /* | ||
362 | + * On real hardware when the system reset controller starts a | ||
363 | + * secondary CPU it runs through some boot ROM code which reads | ||
364 | + * the SRC_GPRX registers controlling the start address and branches | ||
365 | + * to it. | ||
366 | + * Here we are taking a short cut and branching directly to the | ||
367 | + * requested address (we don't want to run the boot ROM code inside | ||
368 | + * QEMU) | ||
369 | + */ | ||
370 | + if (FIELD_EX32(change_mask, CORE1, ENABLE)) { | ||
371 | + if (FIELD_EX32(current_value, CORE1, ENABLE)) { | ||
372 | + /* CORE 1 is brought up */ | ||
373 | + arm_set_cpu_on(1, s->regs[SRC_GPR3], s->regs[SRC_GPR4], | ||
374 | + 3, false); | ||
375 | + } else { | ||
376 | + /* CORE 1 is shut down */ | ||
377 | + arm_set_cpu_off(1); | ||
378 | + } | ||
379 | + /* We clear the reset bits as the processor changed state */ | ||
380 | + imx7_defer_clear_reset_bit(1, s, R_CORE1_RST_SHIFT); | ||
381 | + clear_bit(R_CORE1_RST_SHIFT, &change_mask); | ||
382 | + } | ||
383 | + s->regs[index] = current_value; | ||
384 | + break; | ||
385 | + default: | ||
386 | + s->regs[index] = current_value; | ||
387 | + break; | ||
388 | + } | ||
389 | +} | ||
390 | + | ||
391 | +static const struct MemoryRegionOps imx7_src_ops = { | ||
392 | + .read = imx7_src_read, | ||
393 | + .write = imx7_src_write, | ||
394 | + .endianness = DEVICE_NATIVE_ENDIAN, | ||
395 | + .valid = { | ||
396 | + /* | ||
397 | + * Our device would not work correctly if the guest was doing | ||
398 | + * unaligned access. This might not be a limitation on the real | ||
399 | + * device but in practice there is no reason for a guest to access | ||
400 | + * this device unaligned. | ||
401 | + */ | ||
402 | + .min_access_size = 4, | ||
403 | + .max_access_size = 4, | ||
404 | + .unaligned = false, | ||
405 | + }, | ||
406 | +}; | ||
407 | + | ||
408 | +static void imx7_src_realize(DeviceState *dev, Error **errp) | ||
409 | +{ | ||
410 | + IMX7SRCState *s = IMX7_SRC(dev); | ||
411 | + | ||
412 | + memory_region_init_io(&s->iomem, OBJECT(dev), &imx7_src_ops, s, | ||
413 | + TYPE_IMX7_SRC, 0x1000); | ||
414 | + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); | ||
415 | +} | ||
416 | + | ||
417 | +static void imx7_src_class_init(ObjectClass *klass, void *data) | ||
418 | +{ | ||
419 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
420 | + | ||
421 | + dc->realize = imx7_src_realize; | ||
422 | + dc->reset = imx7_src_reset; | ||
423 | + dc->vmsd = &vmstate_imx7_src; | ||
424 | + dc->desc = "i.MX6 System Reset Controller"; | ||
425 | +} | ||
426 | + | ||
427 | +static const TypeInfo imx7_src_info = { | ||
428 | + .name = TYPE_IMX7_SRC, | ||
429 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
430 | + .instance_size = sizeof(IMX7SRCState), | ||
431 | + .class_init = imx7_src_class_init, | ||
432 | +}; | ||
433 | + | ||
434 | +static void imx7_src_register_types(void) | ||
435 | +{ | ||
436 | + type_register_static(&imx7_src_info); | ||
437 | +} | ||
438 | + | ||
439 | +type_init(imx7_src_register_types) | ||
440 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
441 | index XXXXXXX..XXXXXXX 100644 | 88 | index XXXXXXX..XXXXXXX 100644 |
442 | --- a/hw/misc/meson.build | 89 | --- a/contrib/elf2dmp/main.c |
443 | +++ b/hw/misc/meson.build | 90 | +++ b/contrib/elf2dmp/main.c |
444 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_IMX', if_true: files( | 91 | @@ -XXX,XX +XXX,XX @@ static int write_dump(struct pa_space *ps, |
445 | 'imx6_src.c', | 92 | for (i = 0; i < ps->block_nr; i++) { |
446 | 'imx6ul_ccm.c', | 93 | struct pa_block *b = &ps->block[i]; |
447 | 'imx7_ccm.c', | 94 | |
448 | + 'imx7_src.c', | 95 | - printf("Writing block #%zu/%zu to file...\n", i, ps->block_nr); |
449 | 'imx7_gpr.c', | 96 | + printf("Writing block #%zu/%zu of %"PRIu64" bytes to file...\n", i, |
450 | 'imx7_snvs.c', | 97 | + ps->block_nr, b->size); |
451 | 'imx_ccm.c', | 98 | if (fwrite(b->addr, b->size, 1, dmp_file) != 1) { |
452 | diff --git a/hw/misc/trace-events b/hw/misc/trace-events | 99 | - eprintf("Failed to write dump header\n"); |
453 | index XXXXXXX..XXXXXXX 100644 | 100 | + eprintf("Failed to write block\n"); |
454 | --- a/hw/misc/trace-events | 101 | fclose(dmp_file); |
455 | +++ b/hw/misc/trace-events | 102 | return 1; |
456 | @@ -XXX,XX +XXX,XX @@ ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d" | 103 | } |
457 | ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 | ||
458 | ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 | ||
459 | |||
460 | +# imx7_src.c | ||
461 | +imx7_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 | ||
462 | +imx7_src_write(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 | ||
463 | + | ||
464 | # iotkit-sysinfo.c | ||
465 | iotkit_sysinfo_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" | ||
466 | iotkit_sysinfo_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" | ||
467 | -- | 104 | -- |
468 | 2.34.1 | 105 | 2.34.1 |
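
For reference, the page-alignment arithmetic that pa_block_align() applies in the elf2dmp patch above can be checked in isolation. The following is a standalone sketch, not part of the series: it assumes the 4 KiB ELF2DMP_PAGE_SIZE from addrspace.h, and the block address and size are hypothetical values chosen for the example.

    /* Standalone illustration of the pa_block_align() arithmetic.
     * Page size matches ELF2DMP_PAGE_SIZE (4 KiB); block values are made up. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 0x1000ULL
    #define PAGE_MASK (PAGE_SIZE - 1)

    int main(void)
    {
        uint64_t paddr = 0x100800;   /* hypothetical unaligned start */
        uint64_t size  = 0x3000;     /* hypothetical length */

        /* bytes dropped at the front to reach the next page boundary */
        uint64_t low_align  = ((paddr - 1) | PAGE_MASK) + 1 - paddr;  /* 0x800 */
        /* bytes dropped at the end so the run ends on a page boundary */
        uint64_t high_align = (paddr + size) & PAGE_MASK;             /* 0x800 */

        printf("aligned run: 0x%" PRIx64 "+:0x%" PRIx64 "\n",
               paddr + low_align, size - low_align - high_align);
        /* prints: aligned run: 0x101000+:0x2000 */
        return 0;
    }

The edge case the patch also handles is a block smaller than low_align + high_align, which is simply dropped (its size is set to 0) rather than emitted as an empty run.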
1 | From: Jean-Christophe Dubois <jcd@tribudubois.net> | 1 | From: Viktor Prutyanov <viktor@daynix.com> |
---|---|---|---|
2 | 2 | ||
3 | i.MX7 IOMUX GPR device is not equivalent to i.MX6UL IOMUXC GPR device. | 3 | DMP supports 42 physical memory runs at most. So, merge adjacent |
4 | In particular, register 22 is not present on i.MX6UL and this is actually | 4 | physical memory ranges from QEMU ELF when possible to minimize the total |
5 | the only register that is really emulated in the i.MX7 IOMUX GPR device. | 5 | number of runs. |
6 | 6 | ||
7 | Note: The i.MX6UL code is actually also implementing the IOMUX GPR device | 7 | Signed-off-by: Viktor Prutyanov <viktor@daynix.com> |
8 | as an unimplemented device at the same bus address and the two instantiations | 8 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
9 | were actually colliding. So we go back to the unimplemented device for now. | 9 | Message-id: 20230915170153.10959-4-viktor@daynix.com |
10 | 10 | [PMM: fixed format string for printing size_t values] | |
11 | Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net> | ||
12 | Message-id: 48681bf51ee97646479bb261bee19abebbc8074e.1692964892.git.jcd@tribudubois.net | ||
13 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 12 | --- |
16 | include/hw/arm/fsl-imx6ul.h | 2 -- | 13 | contrib/elf2dmp/main.c | 56 ++++++++++++++++++++++++++++++++++++------ |
17 | hw/arm/fsl-imx6ul.c | 11 ----------- | 14 | 1 file changed, 48 insertions(+), 8 deletions(-) |
18 | 2 files changed, 13 deletions(-) | ||
19 | 15 | ||
20 | diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h | 16 | diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c |
21 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/hw/arm/fsl-imx6ul.h | 18 | --- a/contrib/elf2dmp/main.c |
23 | +++ b/include/hw/arm/fsl-imx6ul.h | 19 | +++ b/contrib/elf2dmp/main.c |
24 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ |
25 | #include "hw/misc/imx6ul_ccm.h" | 21 | #define PE_NAME "ntoskrnl.exe" |
26 | #include "hw/misc/imx6_src.h" | 22 | |
27 | #include "hw/misc/imx7_snvs.h" | 23 | #define INITIAL_MXCSR 0x1f80 |
28 | -#include "hw/misc/imx7_gpr.h" | 24 | +#define MAX_NUMBER_OF_RUNS 42 |
29 | #include "hw/intc/imx_gpcv2.h" | 25 | |
30 | #include "hw/watchdog/wdt_imx2.h" | 26 | typedef struct idt_desc { |
31 | #include "hw/gpio/imx_gpio.h" | 27 | uint16_t offset1; /* offset bits 0..15 */ |
32 | @@ -XXX,XX +XXX,XX @@ struct FslIMX6ULState { | 28 | @@ -XXX,XX +XXX,XX @@ static int fix_dtb(struct va_space *vs, QEMU_Elf *qe) |
33 | IMX6SRCState src; | 29 | return 1; |
34 | IMX7SNVSState snvs; | 30 | } |
35 | IMXGPCv2State gpcv2; | 31 | |
36 | - IMX7GPRState gpr; | 32 | +static void try_merge_runs(struct pa_space *ps, |
37 | IMXSPIState spi[FSL_IMX6UL_NUM_ECSPIS]; | 33 | + WinDumpPhyMemDesc64 *PhysicalMemoryBlock) |
38 | IMXI2CState i2c[FSL_IMX6UL_NUM_I2CS]; | 34 | +{ |
39 | IMXSerialState uart[FSL_IMX6UL_NUM_UARTS]; | 35 | + unsigned int merge_cnt = 0, run_idx = 0; |
40 | diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c | 36 | + |
41 | index XXXXXXX..XXXXXXX 100644 | 37 | + PhysicalMemoryBlock->NumberOfRuns = 0; |
42 | --- a/hw/arm/fsl-imx6ul.c | 38 | + |
43 | +++ b/hw/arm/fsl-imx6ul.c | 39 | + for (size_t idx = 0; idx < ps->block_nr; idx++) { |
44 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj) | 40 | + struct pa_block *blk = ps->block + idx; |
45 | */ | 41 | + struct pa_block *next = blk + 1; |
46 | object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); | 42 | + |
47 | 43 | + PhysicalMemoryBlock->NumberOfPages += blk->size / ELF2DMP_PAGE_SIZE; | |
48 | - /* | 44 | + |
49 | - * GPR | 45 | + if (idx + 1 != ps->block_nr && blk->paddr + blk->size == next->paddr) { |
50 | - */ | 46 | + printf("Block #%zu 0x%"PRIx64"+:0x%"PRIx64" and %u previous will be" |
51 | - object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR); | 47 | + " merged\n", idx, blk->paddr, blk->size, merge_cnt); |
52 | - | 48 | + merge_cnt++; |
53 | /* | 49 | + } else { |
54 | * GPIOs 1 to 5 | 50 | + struct pa_block *first_merged = blk - merge_cnt; |
55 | */ | 51 | + |
56 | @@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) | 52 | + printf("Block #%zu 0x%"PRIx64"+:0x%"PRIx64" and %u previous will be" |
57 | FSL_IMX6UL_WDOGn_IRQ[i])); | 53 | + " merged to 0x%"PRIx64"+:0x%"PRIx64" (run #%u)\n", |
54 | + idx, blk->paddr, blk->size, merge_cnt, first_merged->paddr, | ||
55 | + blk->paddr + blk->size - first_merged->paddr, run_idx); | ||
56 | + PhysicalMemoryBlock->Run[run_idx] = (WinDumpPhyMemRun64) { | ||
57 | + .BasePage = first_merged->paddr / ELF2DMP_PAGE_SIZE, | ||
58 | + .PageCount = (blk->paddr + blk->size - first_merged->paddr) / | ||
59 | + ELF2DMP_PAGE_SIZE, | ||
60 | + }; | ||
61 | + PhysicalMemoryBlock->NumberOfRuns++; | ||
62 | + run_idx++; | ||
63 | + merge_cnt = 0; | ||
64 | + } | ||
65 | + } | ||
66 | +} | ||
67 | + | ||
68 | static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, | ||
69 | struct va_space *vs, uint64_t KdDebuggerDataBlock, | ||
70 | KDDEBUGGER_DATA64 *kdbg, uint64_t KdVersionBlock, int nr_cpus) | ||
71 | @@ -XXX,XX +XXX,XX @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, | ||
72 | KUSD_OFFSET_PRODUCT_TYPE); | ||
73 | DBGKD_GET_VERSION64 kvb; | ||
74 | WinDumpHeader64 h; | ||
75 | - size_t i; | ||
76 | |||
77 | QEMU_BUILD_BUG_ON(KUSD_OFFSET_SUITE_MASK >= ELF2DMP_PAGE_SIZE); | ||
78 | QEMU_BUILD_BUG_ON(KUSD_OFFSET_PRODUCT_TYPE >= ELF2DMP_PAGE_SIZE); | ||
79 | @@ -XXX,XX +XXX,XX @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, | ||
80 | .RequiredDumpSpace = sizeof(h), | ||
81 | }; | ||
82 | |||
83 | - for (i = 0; i < ps->block_nr; i++) { | ||
84 | - h.PhysicalMemoryBlock.NumberOfPages += | ||
85 | - ps->block[i].size / ELF2DMP_PAGE_SIZE; | ||
86 | - h.PhysicalMemoryBlock.Run[i] = (WinDumpPhyMemRun64) { | ||
87 | - .BasePage = ps->block[i].paddr / ELF2DMP_PAGE_SIZE, | ||
88 | - .PageCount = ps->block[i].size / ELF2DMP_PAGE_SIZE, | ||
89 | - }; | ||
90 | + if (h.PhysicalMemoryBlock.NumberOfRuns <= MAX_NUMBER_OF_RUNS) { | ||
91 | + for (size_t idx = 0; idx < ps->block_nr; idx++) { | ||
92 | + h.PhysicalMemoryBlock.NumberOfPages += | ||
93 | + ps->block[idx].size / ELF2DMP_PAGE_SIZE; | ||
94 | + h.PhysicalMemoryBlock.Run[idx] = (WinDumpPhyMemRun64) { | ||
95 | + .BasePage = ps->block[idx].paddr / ELF2DMP_PAGE_SIZE, | ||
96 | + .PageCount = ps->block[idx].size / ELF2DMP_PAGE_SIZE, | ||
97 | + }; | ||
98 | + } | ||
99 | + } else { | ||
100 | + try_merge_runs(ps, &h.PhysicalMemoryBlock); | ||
58 | } | 101 | } |
59 | 102 | ||
60 | - /* | 103 | h.RequiredDumpSpace += |
61 | - * GPR | ||
62 | - */ | ||
63 | - sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort); | ||
64 | - sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX6UL_IOMUXC_GPR_ADDR); | ||
65 | - | ||
66 | /* | ||
67 | * SDMA | ||
68 | */ | ||
69 | -- | 104 | -- |
70 | 2.34.1 | 105 | 2.34.1 |
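
To make the run-merging logic of try_merge_runs() above easier to follow, here is a standalone sketch (not part of the series) that coalesces adjacent blocks into runs in the same spirit; the three block values are invented for the example.

    /* Standalone illustration of merging adjacent blocks into runs,
     * in the spirit of try_merge_runs().  Block values are hypothetical. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct blk { uint64_t paddr, size; };

    int main(void)
    {
        struct blk b[] = {
            { 0x1000,   0x2000 },   /* ends exactly where the next one starts */
            { 0x3000,   0x5000 },
            { 0x100000, 0x1000 },   /* gap before this block: starts a new run */
        };
        size_t nr = sizeof(b) / sizeof(b[0]);
        uint64_t run_base = b[0].paddr, run_size = b[0].size;
        size_t i;

        for (i = 1; i <= nr; i++) {
            if (i < nr && run_base + run_size == b[i].paddr) {
                run_size += b[i].size;              /* extend the current run */
            } else {
                printf("run: 0x%" PRIx64 "+:0x%" PRIx64 "\n", run_base, run_size);
                if (i < nr) {                       /* start the next run */
                    run_base = b[i].paddr;
                    run_size = b[i].size;
                }
            }
        }
        /* prints:
         *   run: 0x1000+:0x7000
         *   run: 0x100000+:0x1000
         */
        return 0;
    }

With this example input, three blocks collapse into two runs, which is the effect that keeps the dump header within the 42-run limit mentioned in the commit message.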
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Viktor Prutyanov <viktor@daynix.com> |
---|---|---|---|
2 | 2 | ||
3 | Do not hard-code the constants for Neoverse V1. | 3 | Glib's g_mapped_file_new maps the file with PROT_READ|PROT_WRITE and |
4 | MAP_PRIVATE. This can prematurely allocate physical memory up to the dump | ||
5 | file size on Linux hosts and may fail. On Linux, mapping the file with | ||
6 | MAP_NORESERVE instead limits the allocation to the available memory. | ||
4 | 7 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Viktor Prutyanov <viktor@daynix.com> |
6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
7 | Message-id: 20230811214031.171020-6-richard.henderson@linaro.org | 10 | Message-id: 20230915170153.10959-5-viktor@daynix.com |
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 12 | --- |
10 | target/arm/tcg/cpu64.c | 48 ++++++++++++++++++++++++++++-------------- | 13 | contrib/elf2dmp/qemu_elf.h | 2 ++ |
11 | 1 file changed, 32 insertions(+), 16 deletions(-) | 14 | contrib/elf2dmp/qemu_elf.c | 68 +++++++++++++++++++++++++++++++------- |
15 | 2 files changed, 58 insertions(+), 12 deletions(-) | ||
12 | 16 | ||
13 | diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c | 17 | diff --git a/contrib/elf2dmp/qemu_elf.h b/contrib/elf2dmp/qemu_elf.h |
14 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/tcg/cpu64.c | 19 | --- a/contrib/elf2dmp/qemu_elf.h |
16 | +++ b/target/arm/tcg/cpu64.c | 20 | +++ b/contrib/elf2dmp/qemu_elf.h |
17 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ typedef struct QEMUCPUState { |
18 | #include "qemu/module.h" | 22 | int is_system(QEMUCPUState *s); |
19 | #include "qapi/visitor.h" | 23 | |
20 | #include "hw/qdev-properties.h" | 24 | typedef struct QEMU_Elf { |
21 | +#include "qemu/units.h" | 25 | +#ifndef CONFIG_LINUX |
22 | #include "internals.h" | 26 | GMappedFile *gmf; |
23 | #include "cpregs.h" | 27 | +#endif |
24 | 28 | size_t size; | |
25 | +static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize, | 29 | void *map; |
26 | + unsigned cachesize) | 30 | QEMUCPUState **state; |
27 | +{ | 31 | diff --git a/contrib/elf2dmp/qemu_elf.c b/contrib/elf2dmp/qemu_elf.c |
28 | + unsigned lg_linesize = ctz32(linesize); | 32 | index XXXXXXX..XXXXXXX 100644 |
29 | + unsigned sets; | 33 | --- a/contrib/elf2dmp/qemu_elf.c |
34 | +++ b/contrib/elf2dmp/qemu_elf.c | ||
35 | @@ -XXX,XX +XXX,XX @@ static bool check_ehdr(QEMU_Elf *qe) | ||
36 | return true; | ||
37 | } | ||
38 | |||
39 | -int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) | ||
40 | +static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename) | ||
41 | { | ||
42 | +#ifdef CONFIG_LINUX | ||
43 | + struct stat st; | ||
44 | + int fd; | ||
30 | + | 45 | + |
31 | + /* | 46 | + printf("Using Linux mmap\n"); |
32 | + * The 64-bit CCSIDR_EL1 format is: | ||
33 | + * [55:32] number of sets - 1 | ||
34 | + * [23:3] associativity - 1 | ||
35 | + * [2:0] log2(linesize) - 4 | ||
36 | + * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc | ||
37 | + */ | ||
38 | + assert(assoc != 0); | ||
39 | + assert(is_power_of_2(linesize)); | ||
40 | + assert(lg_linesize >= 4 && lg_linesize <= 7 + 4); | ||
41 | + | 47 | + |
42 | + /* sets * associativity * linesize == cachesize. */ | 48 | + fd = open(filename, O_RDONLY, 0); |
43 | + sets = cachesize / (assoc * linesize); | 49 | + if (fd == -1) { |
44 | + assert(cachesize % (assoc * linesize) == 0); | 50 | + eprintf("Failed to open ELF dump file \'%s\'\n", filename); |
51 | + return 1; | ||
52 | + } | ||
45 | + | 53 | + |
46 | + return ((uint64_t)(sets - 1) << 32) | 54 | + if (fstat(fd, &st)) { |
47 | + | ((assoc - 1) << 3) | 55 | + eprintf("Failed to get size of ELF dump file\n"); |
48 | + | (lg_linesize - 4); | 56 | + close(fd); |
57 | + return 1; | ||
58 | + } | ||
59 | + qe->size = st.st_size; | ||
60 | + | ||
61 | + qe->map = mmap(NULL, qe->size, PROT_READ | PROT_WRITE, | ||
62 | + MAP_PRIVATE | MAP_NORESERVE, fd, 0); | ||
63 | + if (qe->map == MAP_FAILED) { | ||
64 | + eprintf("Failed to map ELF file\n"); | ||
65 | + close(fd); | ||
66 | + return 1; | ||
67 | + } | ||
68 | + | ||
69 | + close(fd); | ||
70 | +#else | ||
71 | GError *gerr = NULL; | ||
72 | - int err = 0; | ||
73 | + | ||
74 | + printf("Using GLib mmap\n"); | ||
75 | |||
76 | qe->gmf = g_mapped_file_new(filename, TRUE, &gerr); | ||
77 | if (gerr) { | ||
78 | @@ -XXX,XX +XXX,XX @@ int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) | ||
79 | |||
80 | qe->map = g_mapped_file_get_contents(qe->gmf); | ||
81 | qe->size = g_mapped_file_get_length(qe->gmf); | ||
82 | +#endif | ||
83 | + | ||
84 | + return 0; | ||
49 | +} | 85 | +} |
50 | + | 86 | + |
51 | static void aarch64_a35_initfn(Object *obj) | 87 | +static void QEMU_Elf_unmap(QEMU_Elf *qe) |
88 | +{ | ||
89 | +#ifdef CONFIG_LINUX | ||
90 | + munmap(qe->map, qe->size); | ||
91 | +#else | ||
92 | + g_mapped_file_unref(qe->gmf); | ||
93 | +#endif | ||
94 | +} | ||
95 | + | ||
96 | +int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) | ||
97 | +{ | ||
98 | + if (QEMU_Elf_map(qe, filename)) { | ||
99 | + return 1; | ||
100 | + } | ||
101 | |||
102 | if (!check_ehdr(qe)) { | ||
103 | eprintf("Input file has the wrong format\n"); | ||
104 | - err = 1; | ||
105 | - goto out_unmap; | ||
106 | + QEMU_Elf_unmap(qe); | ||
107 | + return 1; | ||
108 | } | ||
109 | |||
110 | if (init_states(qe)) { | ||
111 | eprintf("Failed to extract QEMU CPU states\n"); | ||
112 | - err = 1; | ||
113 | - goto out_unmap; | ||
114 | + QEMU_Elf_unmap(qe); | ||
115 | + return 1; | ||
116 | } | ||
117 | |||
118 | return 0; | ||
119 | - | ||
120 | -out_unmap: | ||
121 | - g_mapped_file_unref(qe->gmf); | ||
122 | - | ||
123 | - return err; | ||
124 | } | ||
125 | |||
126 | void QEMU_Elf_exit(QEMU_Elf *qe) | ||
52 | { | 127 | { |
53 | ARMCPU *cpu = ARM_CPU(obj); | 128 | exit_states(qe); |
54 | @@ -XXX,XX +XXX,XX @@ static void aarch64_neoverse_v1_initfn(Object *obj) | 129 | - g_mapped_file_unref(qe->gmf); |
55 | * The Neoverse-V1 r1p2 TRM lists 32-bit format CCSIDR_EL1 values, | 130 | + QEMU_Elf_unmap(qe); |
56 | * but also says it implements CCIDX, which means they should be | 131 | } |
57 | * 64-bit format. So we here use values which are based on the textual | ||
58 | - * information in chapter 2 of the TRM (and on the fact that | ||
59 | - * sets * associativity * linesize == cachesize). | ||
60 | - * | ||
61 | - * The 64-bit CCSIDR_EL1 format is: | ||
62 | - * [55:32] number of sets - 1 | ||
63 | - * [23:3] associativity - 1 | ||
64 | - * [2:0] log2(linesize) - 4 | ||
65 | - * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc | ||
66 | - * | ||
67 | - * L1: 4-way set associative 64-byte line size, total size 64K, | ||
68 | - * so sets is 256. | ||
69 | + * information in chapter 2 of the TRM: | ||
70 | * | ||
71 | + * L1: 4-way set associative 64-byte line size, total size 64K. | ||
72 | * L2: 8-way set associative, 64 byte line size, either 512K or 1MB. | ||
73 | - * We pick 1MB, so this has 2048 sets. | ||
74 | - * | ||
75 | * L3: No L3 (this matches the CLIDR_EL1 value). | ||
76 | */ | ||
77 | - cpu->ccsidr[0] = 0x000000ff0000001aull; /* 64KB L1 dcache */ | ||
78 | - cpu->ccsidr[1] = 0x000000ff0000001aull; /* 64KB L1 icache */ | ||
79 | - cpu->ccsidr[2] = 0x000007ff0000003aull; /* 1MB L2 cache */ | ||
80 | + cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */ | ||
81 | + cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */ | ||
82 | + cpu->ccsidr[2] = make_ccsidr64(8, 64, 1 * MiB); /* L2 cache */ | ||
83 | |||
84 | /* From 3.2.115 SCTLR_EL3 */ | ||
85 | cpu->reset_sctlr = 0x30c50838; | ||
86 | -- | 132 | -- |
87 | 2.34.1 | 133 | 2.34.1 |
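
As a quick cross-check of the CCSIDR_EL1 encoding that make_ccsidr64() implements in the Neoverse V1 patch above, the standalone sketch below re-derives the two hard-coded constants the patch removes. It assumes only the field layout quoted in the old comment (sets-1 in [55:32], associativity-1 in [23:3], log2(linesize)-4 in [2:0]) and is not part of the series.

    /* Standalone re-derivation of the two CCSIDR_EL1 constants replaced above. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t ccsidr64(unsigned assoc, unsigned linesize, unsigned cachesize)
    {
        unsigned lg_linesize = __builtin_ctz(linesize);  /* 64-byte line -> 6 */
        unsigned sets = cachesize / (assoc * linesize);

        return ((uint64_t)(sets - 1) << 32) | ((assoc - 1) << 3) | (lg_linesize - 4);
    }

    int main(void)
    {
        /* L1: 4-way, 64-byte lines, 64 KiB -> 256 sets */
        assert(ccsidr64(4, 64, 64 * 1024) == 0x000000ff0000001aull);
        /* L2: 8-way, 64-byte lines, 1 MiB -> 2048 sets */
        assert(ccsidr64(8, 64, 1024 * 1024) == 0x000007ff0000003aull);
        return 0;
    }

__builtin_ctz() stands in here for the ctz32() helper that the QEMU function uses.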
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Viktor Prutyanov <viktor@daynix.com> |
---|---|---|---|
2 | 2 | ||
3 | When the cpu supports MTE, but the system does not, reduce cpu | 3 | The PDB for the Windows 11 kernel has a slightly different structure compared to |
4 | support to user instructions at EL0 instead of completely | 4 | previous versions. Since elf2dmp doesn't use the other fields, copy only |
5 | disabling MTE. If we encounter a cpu implementation which does | 5 | the 'segments' field from PDB_STREAM_INDEXES. |
6 | something else, we can revisit this setting. | ||
7 | 6 | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Viktor Prutyanov <viktor@daynix.com> |
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
10 | Message-id: 20230811214031.171020-5-richard.henderson@linaro.org | 9 | Message-id: 20230915170153.10959-6-viktor@daynix.com |
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | --- | 11 | --- |
13 | target/arm/cpu.c | 7 ++++--- | 12 | contrib/elf2dmp/pdb.h | 2 +- |
14 | 1 file changed, 4 insertions(+), 3 deletions(-) | 13 | contrib/elf2dmp/pdb.c | 15 ++++----------- |
14 | 2 files changed, 5 insertions(+), 12 deletions(-) | ||
15 | 15 | ||
16 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | 16 | diff --git a/contrib/elf2dmp/pdb.h b/contrib/elf2dmp/pdb.h |
17 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/cpu.c | 18 | --- a/contrib/elf2dmp/pdb.h |
19 | +++ b/target/arm/cpu.c | 19 | +++ b/contrib/elf2dmp/pdb.h |
20 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | 20 | @@ -XXX,XX +XXX,XX @@ struct pdb_reader { |
21 | 21 | } ds; | |
22 | #ifndef CONFIG_USER_ONLY | 22 | uint32_t file_used[1024]; |
23 | /* | 23 | PDB_SYMBOLS *symbols; |
24 | - * Disable the MTE feature bits if we do not have tag-memory | 24 | - PDB_STREAM_INDEXES sidx; |
25 | - * provided by the machine. | 25 | + uint16_t segments; |
26 | + * If we do not have tag-memory provided by the machine, | 26 | uint8_t *modimage; |
27 | + * reduce MTE support to instructions enabled at EL0. | 27 | char *segs; |
28 | + * This matches Cortex-A710 BROADCASTMTE input being LOW. | 28 | size_t segs_size; |
29 | */ | 29 | diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c |
30 | if (cpu->tag_memory == NULL) { | 30 | index XXXXXXX..XXXXXXX 100644 |
31 | cpu->isar.id_aa64pfr1 = | 31 | --- a/contrib/elf2dmp/pdb.c |
32 | - FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0); | 32 | +++ b/contrib/elf2dmp/pdb.c |
33 | + FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1); | 33 | @@ -XXX,XX +XXX,XX @@ static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number) |
34 | } | 34 | static int pdb_init_segments(struct pdb_reader *r) |
35 | #endif | 35 | { |
36 | } | 36 | char *segs; |
37 | - unsigned stream_idx = r->sidx.segments; | ||
38 | + unsigned stream_idx = r->segments; | ||
39 | |||
40 | segs = pdb_ds_read_file(r, stream_idx); | ||
41 | if (!segs) { | ||
42 | @@ -XXX,XX +XXX,XX @@ static int pdb_init_symbols(struct pdb_reader *r) | ||
43 | { | ||
44 | int err = 0; | ||
45 | PDB_SYMBOLS *symbols; | ||
46 | - PDB_STREAM_INDEXES *sidx = &r->sidx; | ||
47 | - | ||
48 | - memset(sidx, -1, sizeof(*sidx)); | ||
49 | |||
50 | symbols = pdb_ds_read_file(r, 3); | ||
51 | if (!symbols) { | ||
52 | @@ -XXX,XX +XXX,XX @@ static int pdb_init_symbols(struct pdb_reader *r) | ||
53 | |||
54 | r->symbols = symbols; | ||
55 | |||
56 | - if (symbols->stream_index_size != sizeof(PDB_STREAM_INDEXES)) { | ||
57 | - err = 1; | ||
58 | - goto out_symbols; | ||
59 | - } | ||
60 | - | ||
61 | - memcpy(sidx, (const char *)symbols + sizeof(PDB_SYMBOLS) + | ||
62 | + r->segments = *(uint16_t *)((const char *)symbols + sizeof(PDB_SYMBOLS) + | ||
63 | symbols->module_size + symbols->offset_size + | ||
64 | symbols->hash_size + symbols->srcmodule_size + | ||
65 | - symbols->pdbimport_size + symbols->unknown2_size, sizeof(*sidx)); | ||
66 | + symbols->pdbimport_size + symbols->unknown2_size + | ||
67 | + offsetof(PDB_STREAM_INDEXES, segments)); | ||
68 | |||
69 | /* Read global symbol table */ | ||
70 | r->modimage = pdb_ds_read_file(r, symbols->gsym_file); | ||
37 | -- | 71 | -- |
38 | 2.34.1 | 72 | 2.34.1 |
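
The PDB change above comes down to reading one field at a known offset instead of copying the whole on-disk structure, whose size differs in the Windows 11 kernel PDB. Below is a minimal standalone sketch of that idea, not part of the series; the struct layout is hypothetical and merely stands in for PDB_STREAM_INDEXES.

    /* Standalone sketch: read a single field at its offset rather than
     * copying the whole on-disk struct.  The layout below is hypothetical. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct stream_indexes {      /* stand-in for the on-disk layout */
        uint16_t fpo;
        uint16_t exception;
        uint16_t fixup;
        uint16_t segments;       /* the only field elf2dmp still needs */
    };

    int main(void)
    {
        unsigned char buf[64];   /* pretend this came from the symbols stream */
        const struct stream_indexes src = { 0xffff, 0xffff, 0xffff, 9 };
        uint16_t segments;

        memcpy(buf, &src, sizeof(src));

        /* works no matter how many fields follow 'segments' on disk */
        memcpy(&segments, buf + offsetof(struct stream_indexes, segments),
               sizeof(segments));
        printf("segments stream index: %u\n", segments);   /* prints 9 */
        return 0;
    }

Reading just the needed field is also why the patch can drop the size check against sizeof(PDB_STREAM_INDEXES), which would fail for the differently sized Windows 11 layout.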