First arm pullreq for 6.1 cycle. The big stuff here is RTH's alignment series.

thanks
-- PMM

The following changes since commit ccdf06c1db192152ac70a1dd974c624f566cb7d4:

  Open 6.1 development tree (2021-04-30 11:15:40 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210430

for you to fetch changes up to a6091108aa44e9017af4ca13c43f55a629e3744c:

  hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows (2021-04-30 11:16:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows
 * hw: add compat machines for 6.1
 * Fault misaligned accesses where the architecture requires it
 * Fix some corner cases of MTE faults (notably with misaligned accesses)
 * Make Thumb store insns UNDEF for Rn==1111
 * hw/arm/smmuv3: Support 16K translation granule

----------------------------------------------------------------
Cornelia Huck (1):
      hw: add compat machines for 6.1

Kunkun Jiang (1):
      hw/arm/smmuv3: Support 16K translation granule

Peter Maydell (2):
      target/arm: Make Thumb store insns UNDEF for Rn==1111
      hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows

Richard Henderson (39):
      target/arm: Fix mte_checkN
      target/arm: Split out mte_probe_int
      target/arm: Fix unaligned checks for mte_check1, mte_probe1
      test/tcg/aarch64: Add mte-5
      target/arm: Replace MTEDESC ESIZE+TSIZE with SIZEM1
      target/arm: Merge mte_check1, mte_checkN
      target/arm: Rename mte_probe1 to mte_probe
      target/arm: Simplify sve mte checking
      target/arm: Remove log2_esize parameter to gen_mte_checkN
      target/arm: Fix decode of align in VLDST_single
      target/arm: Rename TBFLAG_A32, SCTLR_B
      target/arm: Rename TBFLAG_ANY, PSTATE_SS
      target/arm: Add wrapper macros for accessing tbflags
      target/arm: Introduce CPUARMTBFlags
      target/arm: Move mode specific TB flags to tb->cs_base
      target/arm: Move TBFLAG_AM32 bits to the top
      target/arm: Move TBFLAG_ANY bits to the bottom
      target/arm: Add ALIGN_MEM to TBFLAG_ANY
      target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness
      target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64
      target/arm: Fix SCTLR_B test for TCGv_i64 load/store
      target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness
      target/arm: Enforce word alignment for LDRD/STRD
      target/arm: Enforce alignment for LDA/LDAH/STL/STLH
      target/arm: Enforce alignment for LDM/STM
      target/arm: Enforce alignment for RFE
      target/arm: Enforce alignment for SRS
      target/arm: Enforce alignment for VLDM/VSTM
      target/arm: Enforce alignment for VLDR/VSTR
      target/arm: Enforce alignment for VLDn (all lanes)
      target/arm: Enforce alignment for VLDn/VSTn (multiple)
      target/arm: Enforce alignment for VLDn/VSTn (single)
      target/arm: Use finalize_memop for aa64 gpr load/store
      target/arm: Use finalize_memop for aa64 fpr load/store
      target/arm: Enforce alignment for aa64 load-acq/store-rel
      target/arm: Use MemOp for size + endian in aa64 vector ld/st
      target/arm: Enforce alignment for aa64 vector LDn/STn (multiple)
      target/arm: Enforce alignment for aa64 vector LDn/STn (single)
      target/arm: Enforce alignment for sve LD1R

 include/hw/boards.h | 3 +
 include/hw/i386/pc.h | 3 +
 include/hw/pci-host/gpex.h | 4 +
 target/arm/cpu.h | 105 ++++++++++-----
 target/arm/helper-a64.h | 3 +-
 target/arm/internals.h | 11 +-
 target/arm/translate-a64.h | 2 +-
 target/arm/translate.h | 38 ++++++
 target/arm/neon-ls.decode | 4 +-
 hw/arm/smmuv3.c | 6 +-
 hw/arm/virt.c | 7 +-
 hw/core/machine.c | 5 +
 hw/i386/pc.c | 3 +
 hw/i386/pc_piix.c | 14 +-
 hw/i386/pc_q35.c | 13 +-
 hw/pci-host/gpex.c | 56 +++++++-
 hw/ppc/spapr.c | 17 ++-
 hw/s390x/s390-virtio-ccw.c | 14 +-
 target/arm/helper-a64.c | 2 +-
 target/arm/helper.c | 162 ++++++++++++----------
 target/arm/mte_helper.c | 185 ++++++++++---------------
 target/arm/sve_helper.c | 100 +++++---------
 target/arm/translate-a64.c | 236 ++++++++++++++++----------------
 target/arm/translate-sve.c | 11 +-
 target/arm/translate.c | 274 ++++++++++++++++++++++----------------
 tests/tcg/aarch64/mte-5.c | 44 ++++++
 target/arm/translate-neon.c.inc | 117 ++++++++++++----
 target/arm/translate-vfp.c.inc | 20 +--
 tests/tcg/aarch64/Makefile.target | 2 +-
 29 files changed, 878 insertions(+), 583 deletions(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

A last small set of bug fixes before rc1.

thanks
-- PMM

The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637:

  Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717

for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4:

  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

----------------------------------------------------------------
Peter Maydell (5):
      linux-user: Remove pointless NULL check in clock_adjtime handling
      target/arm/ptw.c: Add comments to S1Translate struct fields
      target/arm: Fix S1_ptw_translate() debug path
      target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
      accel/tcg: Zero-pad PC in TCG CPU exec trace lines

Tong Ho (1):
      hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

Yuquan Wang (1):
      hw/arm/sbsa-ref: set 'slots' property of xhci

 accel/tcg/cpu-exec.c | 4 +--
 accel/tcg/translate-all.c | 2 +-
 hw/arm/sbsa-ref.c | 1 +
 hw/nvram/xlnx-efuse.c | 11 ++++--
 linux-user/syscall.c | 12 +++----
 target/arm/ptw.c | 90 +++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 98 insertions(+), 22 deletions(-)
From: Kunkun Jiang <jiangkunkun@huawei.com>

The driver can query some bits in SMMUv3 IDR5 to learn which
translation granules are supported. Arm recommends that SMMUv3
implementations support at least 4K and 64K granules. But in
the vSMMUv3 there seems to be no reason not to support the 16K
translation granule as well. In addition, if 16K is not supported,
vSVA will fail to be enabled in the future for 16K guest
kernels. So it's better to support it.

Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);

-    /* 4K and 64K granule support */
+    /* 4K, 16K and 64K granule support */
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
+    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

@@ -XXX,XX +XXX,XX @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)

         tg = CD_TG(cd, i);
         tt->granule_sz = tg2granule(tg, i);
-        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
+        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
+             tt->granule_sz != 16) || CD_ENDI(cd)) {
             goto bad_cd;
         }

--
2.20.1
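As a side note on the granule_sz values tested above: they are simply log2 of the page size, which is why accepting the 16K granule means accepting the value 14. A standalone sketch (the helper name is made up for illustration; the constants are architectural):

#include <assert.h>

/* Illustration only: translation-granule sizes map to the log2
 * values tested in decode_cd() above. */
static unsigned granule_kbytes_to_shift(unsigned kbytes)
{
    switch (kbytes) {
    case 4:  return 12;   /*  4K == 1 << 12 */
    case 16: return 14;   /* 16K == 1 << 14, newly accepted by this patch */
    case 64: return 16;   /* 64K == 1 << 16 */
    default: return 0;    /* not a supported SMMUv3 granule */
    }
}

int main(void)
{
    assert(1u << granule_kbytes_to_shift(16) == 16 * 1024);
    return 0;
}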
From: Yuquan Wang <wangyuquan1236@phytium.com.cn>

This extends the slots of xhci to 64, since the default xhci_sysbus
just supports one slot.

Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn>
Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
     DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);

     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
--
2.34.1

Currently the gpex PCI controller implements no special behaviour for
guest accesses to areas of the PIO and MMIO where it has not mapped
any PCI devices, which means that for Arm you end up with a CPU
exception due to a data abort.

Most host OSes expect "like an x86 PC" behaviour, where bad accesses
like this return -1 for reads and ignore writes. In the interests of
not being surprising, make host CPU accesses to these windows behave
as -1/discard where there's no mapped PCI device.

The old behaviour generally didn't cause any problems, because
almost always the guest OS will map the PCI devices and then only
access where it has mapped them. One corner case where you will see
this kind of access is if Linux attempts to probe legacy ISA
devices via a PIO window access. So far the only case where we've
seen this has been via the syzkaller fuzzer.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20210325163315.27724-1-peter.maydell@linaro.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1918917
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/pci-host/gpex.h | 4 +++
 hw/core/machine.c | 4 ++-
 hw/pci-host/gpex.c | 56 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -XXX,XX +XXX,XX @@ struct GPEXHost {

     MemoryRegion io_ioport;
     MemoryRegion io_mmio;
+    MemoryRegion io_ioport_window;
+    MemoryRegion io_mmio_window;

     qemu_irq irq[GPEX_NUM_IRQS];
     int irq_num[GPEX_NUM_IRQS];
+
+    bool allow_unmapped_accesses;
 };

 struct GPEXConfig {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"

-GlobalProperty hw_compat_6_0[] = {};
+GlobalProperty hw_compat_6_0[] = {
+    { "gpex-pcihost", "allow-unmapped-accesses", "false" },
+};
 const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);

 GlobalProperty hw_compat_5_2[] = {
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
     int i;

     pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
+    sysbus_init_mmio(sbd, &pex->mmio);
+
+    /*
+     * Note that the MemoryRegions io_mmio and io_ioport that we pass
+     * to pci_register_root_bus() are not the same as the
+     * MemoryRegions io_mmio_window and io_ioport_window that we
+     * expose as SysBus MRs. The difference is in the behaviour of
+     * accesses to addresses where no PCI device has been mapped.
+     *
+     * io_mmio and io_ioport are the underlying PCI view of the PCI
+     * address space, and when a PCI device does a bus master access
+     * to a bad address this is reported back to it as a transaction
+     * failure.
+     *
+     * io_mmio_window and io_ioport_window implement "unmapped
+     * addresses read as -1 and ignore writes"; this is traditional
+     * x86 PC behaviour, which is not mandated by the PCI spec proper
+     * but expected by much PCI-using guest software, including Linux.
+     *
+     * In the interests of not being unnecessarily surprising, we
+     * implement it in the gpex PCI host controller, by providing the
+     * _window MRs, which are containers with io ops that implement
+     * the 'background' behaviour and which hold the real PCI MRs as
+     * subregions.
+     */
     memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX);
     memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024);

-    sysbus_init_mmio(sbd, &pex->mmio);
-    sysbus_init_mmio(sbd, &s->io_mmio);
-    sysbus_init_mmio(sbd, &s->io_ioport);
+    if (s->allow_unmapped_accesses) {
+        memory_region_init_io(&s->io_mmio_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_mmio_window", UINT64_MAX);
+        memory_region_init_io(&s->io_ioport_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_ioport_window", 64 * 1024);
+
+        memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio);
+        memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport);
+        sysbus_init_mmio(sbd, &s->io_mmio_window);
+        sysbus_init_mmio(sbd, &s->io_ioport_window);
+    } else {
+        sysbus_init_mmio(sbd, &s->io_mmio);
+        sysbus_init_mmio(sbd, &s->io_ioport);
+    }
+
     for (i = 0; i < GPEX_NUM_IRQS; i++) {
         sysbus_init_irq(sbd, &s->irq[i]);
         s->irq_num[i] = -1;
@@ -XXX,XX +XXX,XX @@ static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
     return "0000:00";
 }

+static Property gpex_host_properties[] = {
+    /*
+     * Permit CPU accesses to unmapped areas of the PIO and MMIO windows
+     * (discarding writes and returning -1 for reads) rather than aborting.
+     */
+    DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
+                     allow_unmapped_accesses, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void gpex_host_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data)
     dc->realize = gpex_host_realize;
     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
     dc->fw_name = "pci";
+    device_class_set_props(dc, gpex_host_properties);
 }

 static void gpex_host_initfn(Object *obj)
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>
1
In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
2
the address of the local variable htx. This means it can never be
3
NULL, but later in the code we check it for NULL anyway. Coverity
4
complains about this (CID 1507683) because the NULL check comes after
5
a call to clock_adjtime() that assumes it is non-NULL.
2
6
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Since phtx is always &htx, and is used only in three places, it's not
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
really necessary. Remove it, bringing the code structure in to line
5
Message-id: 20210419202257.161730-31-richard.henderson@linaro.org
9
with that for TARGET_NR_clock_adjtime64, which already uses a simple
10
'&htx' when it wants a pointer to 'htx'.
11
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
7
---
16
---
8
target/arm/translate-a64.c | 9 +++++----
17
linux-user/syscall.c | 12 +++++-------
9
1 file changed, 5 insertions(+), 4 deletions(-)
18
1 file changed, 5 insertions(+), 7 deletions(-)
10
19
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
20
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
12
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
22
--- a/linux-user/syscall.c
14
+++ b/target/arm/translate-a64.c
23
+++ b/linux-user/syscall.c
15
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
24
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
16
int index = is_q << 3 | S << 2 | size;
25
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
17
int xs, total;
26
case TARGET_NR_clock_adjtime:
18
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
27
{
19
+ MemOp mop;
28
- struct timex htx, *phtx = &htx;
20
29
+ struct timex htx;
21
if (extract32(insn, 31, 1)) {
30
22
unallocated_encoding(s);
31
- if (target_to_host_timex(phtx, arg2) != 0) {
23
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
32
+ if (target_to_host_timex(&htx, arg2) != 0) {
24
33
return -TARGET_EFAULT;
25
clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
34
}
26
total);
35
- ret = get_errno(clock_adjtime(arg1, phtx));
27
+ mop = finalize_memop(s, scale);
36
- if (!is_error(ret) && phtx) {
28
37
- if (host_to_target_timex(arg2, phtx) != 0) {
29
tcg_ebytes = tcg_const_i64(1 << scale);
38
- return -TARGET_EFAULT;
30
for (xs = 0; xs < selem; xs++) {
39
- }
31
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
40
+ ret = get_errno(clock_adjtime(arg1, &htx));
32
/* Load and replicate to all elements */
41
+ if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
33
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
42
+ return -TARGET_EFAULT;
34
35
- tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
36
- get_mem_index(s), s->be_data + scale);
37
+ tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
38
tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
39
(is_q + 1) * 8, vec_full_reg_size(s),
40
tcg_tmp);
41
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
42
} else {
43
/* Load/store one element per register */
44
if (is_load) {
45
- do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
46
+ do_vec_ld(s, rt, index, clean_addr, mop);
47
} else {
48
- do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
49
+ do_vec_st(s, rt, index, clean_addr, mop);
50
}
43
}
51
}
44
}
52
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
45
return ret;
53
--
46
--
54
2.20.1
47
2.34.1
55
48
56
49
diff view generated by jsdifflib
Add comments to the in_* fields in the S1Translate struct
that explain what they're doing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
---
 target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #endif

 typedef struct S1Translate {
+    /*
+     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for splitting tb->flags across multiple
fields, introduce a structure to hold the value(s).
So far this only migrates the one uint32_t and fixes
all of the places that require adjustment to match.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 26 ++++++++++++---------
 target/arm/translate.h | 11 +++++++++
 target/arm/helper.c | 48 +++++++++++++++++++++-----------------
 target/arm/translate-a64.c | 2 +-
 target/arm/translate.c | 7 +++---
 5 files changed, 57 insertions(+), 37 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
 } ARMPACKey;
 #endif

+/* See the commentary above the TBFLAG field definitions. */
+typedef struct CPUARMTBFlags {
+    uint32_t flags;
+} CPUARMTBFlags;
+
 typedef struct CPUARMState {
     /* Regs for current mode. */
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
     uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

     /* Cached TBFLAGS state. See below for which bits are included. */
-    uint32_t hflags;
+    CPUARMTBFlags hflags;

     /* Frequently accessed CPSR bits are stored separately for efficiency.
        This contains all the other bits. Use cpsr_{read,write} to access
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
  * Helpers for using the above.
  */
 #define DP_TBFLAG_ANY(DST, WHICH, VAL) \
-    (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
+    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
 #define DP_TBFLAG_A64(DST, WHICH, VAL) \
-    (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
+    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
 #define DP_TBFLAG_A32(DST, WHICH, VAL) \
-    (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
+    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
 #define DP_TBFLAG_M32(DST, WHICH, VAL) \
-    (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
+    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
 #define DP_TBFLAG_AM32(DST, WHICH, VAL) \
-    (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
+    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))

-#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
-#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
-#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
-#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
-#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)

 /**
  * cpu_mmu_index:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);

+/**
+ * arm_tbflags_from_tb:
+ * @tb: the TranslationBlock
+ *
+ * Extract the flag values from @tb.
+ */
+static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
+{
+    return (CPUARMTBFlags){ tb->flags };
+}
+
 /*
  * Enum for argument to fpstatus_ptr().
  */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
 }
 #endif

-static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
-                                      ARMMMUIdx mmu_idx, uint32_t flags)
+static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
+                                           ARMMMUIdx mmu_idx,
+                                           CPUARMTBFlags flags)
 {
     DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
     DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
     return flags;
 }

-static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
-                                         ARMMMUIdx mmu_idx, uint32_t flags)
+static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
+                                              ARMMMUIdx mmu_idx,
+                                              CPUARMTBFlags flags)
 {
     bool sctlr_b = arm_sctlr_b(env);

@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
 }

-static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
-                                   ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
+                                        ARMMMUIdx mmu_idx)
 {
-    uint32_t flags = 0;
+    CPUARMTBFlags flags = {};

     if (arm_v7m_is_handler_mode(env)) {
         DP_TBFLAG_M32(flags, HANDLER, 1);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }

-static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
+static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
 {
-    int flags = 0;
+    CPUARMTBFlags flags = {};

     DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
     return flags;
 }

-static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
-                                   ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
+                                        ARMMMUIdx mmu_idx)
 {
-    uint32_t flags = rebuild_hflags_aprofile(env);
+    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);

     if (arm_el_is_aa64(env, 1)) {
         DP_TBFLAG_A32(flags, VFPEN, 1);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }

-static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
-                                   ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+                                        ARMMMUIdx mmu_idx)
 {
-    uint32_t flags = rebuild_hflags_aprofile(env);
+    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
     ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
     uint64_t sctlr;
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
 }

-static uint32_t rebuild_hflags_internal(CPUARMState *env)
+static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
 {
     int el = arm_current_el(env);
     int fp_el = fp_exception_el(env, el);
@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
     int el = arm_current_el(env);
     int fp_el = fp_exception_el(env, el);
     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
 }

@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
 static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
 {
 #ifdef CONFIG_DEBUG_TCG
-    uint32_t env_flags_current = env->hflags;
-    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);
+    CPUARMTBFlags c = env->hflags;
+    CPUARMTBFlags r = rebuild_hflags_internal(env);

-    if (unlikely(env_flags_current != env_flags_rebuilt)) {
+    if (unlikely(c.flags != r.flags)) {
         fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
-                env_flags_current, env_flags_rebuilt);
+                c.flags, r.flags);
         abort();
     }
 #endif
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
-    uint32_t flags = env->hflags;
+    CPUARMTBFlags flags;

     *cs_base = 0;
     assert_hflags_rebuild_correctly(env);
+    flags = env->hflags;

     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
         *pc = env->pc;
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
     }

-    *pflags = flags;
+    *pflags = flags.flags;
 }

 #ifdef TARGET_AARCH64
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
     ARMCPU *arm_cpu = env_archcpu(env);
-    uint32_t tb_flags = dc->base.tb->flags;
+    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
     int bound, core_mmu_idx;

     dc->isar = &arm_cpu->isar;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cs->env_ptr;
     ARMCPU *cpu = env_archcpu(env);
-    uint32_t tb_flags = dc->base.tb->flags;
+    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
     uint32_t condexec, core_mmu_idx;

     dc->isar = &cpu->isar;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
     DisasContext dc = { };
     const TranslatorOps *ops = &arm_translator_ops;
+    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);

-    if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
+    if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
         ops = &thumb_translator_ops;
     }
 #ifdef TARGET_AARCH64
-    if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
+    if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
         ops = &aarch64_translator_ops;
     }
 #endif
--
2.20.1
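The value of wrapping the flags word in a one-member struct, rather than keeping a bare uint32_t, is that the compiler now rejects any access that bypasses the DP_TBFLAG/EX_TBFLAG accessor macros, which is exactly what the later flag-splitting patches rely on. A standalone sketch of the pattern (names here are illustrative, not QEMU's):

#include <stdint.h>

/* A one-member struct has the same size and layout as the bare
 * integer, but there is no implicit conversion to or from uint32_t,
 * so every producer and consumer must unwrap it explicitly. */
typedef struct Flags { uint32_t flags; } Flags;

static inline Flags make_flags(uint32_t raw)
{
    return (Flags){ raw };
}

int main(void)
{
    Flags f = make_flags(0x80000000u);
    uint32_t raw = f.flags;   /* explicit unwrap, as in "*pflags = flags.flags" */
    /* uint32_t bad = f;         would be rejected: no implicit conversion */
    return raw >> 31;         /* 1 */
}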
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

We cannot tell a priori whether or not a given scalar access is aligned,
therefore we must at least check. Use mte_probe_int, which is already
set up for checking multiple granules.

Buglink: https://bugs.launchpad.net/bugs/1921948
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 109 +++++++++++++---------------------------
 1 file changed, 35 insertions(+), 74 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
     }
 }

-/*
- * Perform an MTE checked access for a single logical or atomic access.
- */
-static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
-                           uintptr_t ra, int bit55)
-{
-    int mem_tag, mmu_idx, ptr_tag, size;
-    MMUAccessType type;
-    uint8_t *mem;
-
-    ptr_tag = allocation_tag_from_addr(ptr);
-
-    if (tcma_check(desc, bit55, ptr_tag)) {
-        return true;
-    }
-
-    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
-    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    size = FIELD_EX32(desc, MTEDESC, ESIZE);
-
-    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
-                             MMU_DATA_LOAD, 1, ra);
-    if (!mem) {
-        return true;
-    }
-
-    mem_tag = load_tag1(ptr, mem);
-    return ptr_tag == mem_tag;
-}
-
-/*
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
- * Returns false if the access is Checked and the check failed. This
- * is only intended to probe the tag -- the validity of the page must
- * be checked beforehand.
- */
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
-    int bit55 = extract64(ptr, 55, 1);
-
-    /* If TBI is disabled, the access is unchecked. */
-    if (unlikely(!tbi_check(desc, bit55))) {
-        return true;
-    }
-
-    return mte_probe1_int(env, desc, ptr, 0, bit55);
-}
-
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
-{
-    int bit55 = extract64(ptr, 55, 1);
-
-    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
-    if (unlikely(!tbi_check(desc, bit55))) {
-        return ptr;
-    }
-
-    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
-        mte_check_fail(env, desc, ptr, ra);
-    }
-
-    return useronly_clean_ptr(ptr);
-}
-
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
-    return mte_check1(env, desc, ptr, GETPC());
-}
-
-/*
- * Perform an MTE checked access for multiple logical accesses.
- */
-
 /**
  * checkN:
  * @tag: tag memory to test
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
     return mte_checkN(env, desc, ptr, GETPC());
 }

+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
+    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+    if (unlikely(ret == 0)) {
+        mte_check_fail(env, desc, fault, ra);
+    } else if (ret < 0) {
+        return ptr;
+    }
+    return useronly_clean_ptr(ptr);
+}
+
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return mte_check1(env, desc, ptr, GETPC());
+}
+
+/*
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * Returns false if the access is Checked and the check failed. This
+ * is only intended to probe the tag -- the validity of the page must
+ * be checked beforehand.
+ */
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
+    int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
+
+    return ret != 0;
+}
+
 /*
  * Perform an MTE checked access for DC_ZVA.
  */
--
2.20.1

In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
so that the debug-access "call get_phys_addr_*" codepath is used both
when S1 is doing ptw reads from stage 2 and when it is doing ptw
reads from physical memory. However, we didn't update the
calculation of s2ptw->in_space and s2ptw->in_secure to account for
the "ptw reads from physical memory" case. This meant that debug
accesses when in Secure state broke.

Create a new function S2_security_space() which returns the
correct security space to use for the ptw load, and use it to
determine the correct .in_secure and .in_space fields for the
stage 2 lookup for the ptw load.

Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }

+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
--
2.34.1
The Arm ARM specifies that for Thumb encodings of the various plain
store insns, if the Rn field is 1111 then we must UNDEF. This is
different from the Arm encodings, where this case is either
UNPREDICTABLE or has well-defined behaviour. The exclusive stores,
store-release and STRD do not have this UNDEF case for any encoding.

Enforce the UNDEF for this case in the Thumb plain store insns.

Fixes: https://bugs.launchpad.net/qemu/+bug/1922887
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210408162402.5822-1-peter.maydell@linaro.org
---
 target/arm/translate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
--
2.20.1

In get_phys_addr_twostage() the code that applies the effects of
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
Now that we also have f.attrs.space for FEAT_RME, we need to keep the
two in sync.

These bits only have an effect for Secure space translations, not
for Root, so use the input in_space field to determine whether to
apply them rather than the input is_secure. This doesn't actually
make a difference because Root translations are never two-stage,
but it's a little clearer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
---
 target/arm/ptw.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
     bool is_secure = ptw->in_secure;
+    ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
     ARMSecuritySpace ipa_space;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
      * Check if IPA translates to secure or non-secure PA space.
      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
      */
-    result->f.attrs.secure =
-        (is_secure
-         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-         && (ipa_secure
-             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+    if (in_space == ARMSS_Secure) {
+        result->f.attrs.secure =
+            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+            && (ipa_secure
+                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+    }

     return false;
 }
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

Therefore, the first failure is always either the first byte of the
access, or the first byte of the granule.

In addition, some of the arithmetic is off for last-first -> count.
This does not become directly visible until a later patch that passes
single bytes into this function, so ptr == ptr_last.

Buglink: https://bugs.launchpad.net/bugs/1921948
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: tweaked a comment]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     int mmu_idx, ptr_tag, bit55;
-    uint64_t ptr_last, ptr_end, prev_page, next_page;
-    uint64_t tag_first, tag_end;
-    uint64_t tag_byte_first, tag_byte_end;
-    uint32_t esize, total, tag_count, tag_size, n, c;
+    uint64_t ptr_last, prev_page, next_page;
+    uint64_t tag_first, tag_last;
+    uint64_t tag_byte_first, tag_byte_last;
+    uint32_t total, tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
     total = FIELD_EX32(desc, MTEDESC, TSIZE);

-    /* Find the addr of the end of the access, and of the last element. */
-    ptr_end = ptr + total;
-    ptr_last = ptr_end - esize;
+    /* Find the addr of the end of the access */
+    ptr_last = ptr + total - 1;

     /* Round the bounds to the tag granule, and compute the number of tags. */
     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
-    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
-    tag_count = (tag_end - tag_first) / TAG_GRANULE;
+    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
+    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

     /* Round the bounds to twice the tag granule, and compute the bytes. */
     tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
-    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
+    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

     /* Locate the page boundaries. */
     prev_page = ptr & TARGET_PAGE_MASK;
     next_page = prev_page + TARGET_PAGE_SIZE;

-    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
+    if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
         /* Memory access stays on one page. */
-        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                   MMU_DATA_LOAD, tag_size, ra);

-        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
-                                  ptr_end - next_page,
+                                  ptr_last - next_page + 1,
                                   MMU_DATA_LOAD, tag_size, ra);

         /*
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     }

     /*
-     * If we failed, we know which granule. Compute the element that
-     * is first in that granule, and signal failure on that element.
+     * If we failed, we know which granule. For the first granule, the
+     * failure address is @ptr, the first byte accessed. Otherwise the
+     * failure address is the first byte of the nth granule.
      */
     if (unlikely(n < tag_count)) {
-        uint64_t fail_ofs;
-
-        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
-        fail_ofs = ROUND_UP(fail_ofs, esize);
-        mte_check_fail(env, desc, ptr + fail_ofs, ra);
+        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
+        mte_check_fail(env, desc, fault, ra);
     }

 done:
--
2.20.1
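The new last-byte arithmetic is easy to check by hand. A standalone sketch using the same formulas (TAG_GRANULE is 16 as in the real code; the ALIGN_DOWN macro here is a local stand-in for QEMU_ALIGN_DOWN):

#include <assert.h>
#include <stdint.h>

#define TAG_GRANULE 16
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
    /* Worked example: an 8-byte access at 0x1001c spans bytes
     * 0x1001c..0x10023, i.e. the two granules at 0x10010 and 0x10020. */
    uint64_t ptr = 0x1001c, total = 8;
    uint64_t ptr_last = ptr + total - 1;                    /* 0x10023 */
    uint64_t tag_first = ALIGN_DOWN(ptr, TAG_GRANULE);      /* 0x10010 */
    uint64_t tag_last = ALIGN_DOWN(ptr_last, TAG_GRANULE);  /* 0x10020 */
    uint32_t tag_count = (tag_last - tag_first) / TAG_GRANULE + 1;

    assert(tag_count == 2);
    /* With the old round-up formulation, a single-byte access at a
     * granule-aligned ptr would have computed tag_count == 0, i.e.
     * checked no tags at all -- part of what this patch fixes. */
    return 0;
}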
From: Richard Henderson <richard.henderson@linaro.org>

Split out a helper function from mte_checkN to perform
all of the checking and address manipulation. So far,
just use this in mte_checkN itself.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 52 +++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 12 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
     return n;
 }

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
+/**
+ * mte_probe_int() - helper for mte_probe and mte_check
+ * @env: CPU environment
+ * @desc: MTEDESC descriptor
+ * @ptr: virtual address of the base of the access
+ * @fault: return virtual address of the first check failure
+ *
+ * Internal routine for both mte_probe and mte_check.
+ * Return zero on failure, filling in *fault.
+ * Return negative on trivial success for tbi disabled.
+ * Return positive on success with tbi enabled.
+ */
+static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+                         uintptr_t ra, uint32_t total, uint64_t *fault)
 {
     int mmu_idx, ptr_tag, bit55;
     uint64_t ptr_last, prev_page, next_page;
     uint64_t tag_first, tag_last;
     uint64_t tag_byte_first, tag_byte_last;
-    uint32_t total, tag_count, tag_size, n, c;
+    uint32_t tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

     bit55 = extract64(ptr, 55, 1);
+    *fault = ptr;

     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
     if (unlikely(!tbi_check(desc, bit55))) {
-        return ptr;
+        return -1;
     }

     ptr_tag = allocation_tag_from_addr(ptr);

     if (tcma_check(desc, bit55, ptr_tag)) {
-        goto done;
+        return 1;
     }

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    total = FIELD_EX32(desc, MTEDESC, TSIZE);

     /* Find the addr of the end of the access */
     ptr_last = ptr + total - 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
-            goto done;
+            return 1;
         }
         /* Perform all of the comparisons. */
         n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         }
         if (n == c) {
             if (!mem2) {
-                goto done;
+                return 1;
             }
             n += checkN(mem2, 0, ptr_tag, tag_count - c);
         }
     }

+    if (likely(n == tag_count)) {
+        return 1;
+    }
+
     /*
      * If we failed, we know which granule. For the first granule, the
      * failure address is @ptr, the first byte accessed. Otherwise the
      * failure address is the first byte of the nth granule.
      */
-    if (unlikely(n < tag_count)) {
-        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
-        mte_check_fail(env, desc, fault, ra);
+    if (n > 0) {
+        *fault = tag_first + n * TAG_GRANULE;
     }
+    return 0;
+}

- done:
+uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
+    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+    if (unlikely(ret == 0)) {
+        mte_check_fail(env, desc, fault, ra);
+    } else if (ret < 0) {
+        return ptr;
+    }
     return useronly_clean_ptr(ptr);
 }

--
2.20.1
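A note on the tri-state return convention documented above: fault-raising callers like mte_checkN map a zero result to a tag fault, while a no-fault caller can simply collapse the result to a boolean, since both the negative (TBI disabled) and positive (check passed or unchecked) cases mean "safe". A sketch of such a no-fault probe, in the style of the surrounding code (later patches in the series add essentially this for SVE):

/* Hypothetical no-fault caller, for illustration: collapse
 * mte_probe_int()'s tri-state result to pass/fail. */
static bool probe_no_fault(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t ignored;
    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);

    /* ra == 0: we must never longjmp out of a no-fault probe */
    return mte_probe_int(env, desc, ptr, 0, total, &ignored) != 0;
}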
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/bugs/1921948
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/mte-5.c | 44 +++++++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target | 2 +-
 2 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

diff --git a/tests/tcg/aarch64/mte-5.c b/tests/tcg/aarch64/mte-5.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/mte-5.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Memory tagging, faulting unaligned access.
+ *
+ * Copyright (c) 2021 Linaro Ltd
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "mte.h"
+
+void pass(int sig, siginfo_t *info, void *uc)
+{
+    assert(info->si_code == SEGV_MTESERR);
+    exit(0);
+}
+
+int main(int ac, char **av)
+{
+    struct sigaction sa;
+    void *p0, *p1, *p2;
+    long excl = 1;
+
+    enable_mte(PR_MTE_TCF_SYNC);
+    p0 = alloc_mte_mem(sizeof(*p0));
+
+    /* Create two differently tagged pointers. */
+    asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
+    asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
+    assert(excl != 1);
+    asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
+    assert(p1 != p2);
+
+    memset(&sa, 0, sizeof(sa));
+    sa.sa_sigaction = pass;
+    sa.sa_flags = SA_SIGINFO;
+    sigaction(SIGSEGV, &sa, NULL);
+
+    /* Store two different tags in sequential granules. */
+    asm("stg %0, [%0]" : : "r"(p1));
+    asm("stg %0, [%0]" : : "r"(p2 + 16));
+
+    /* Perform an unaligned load crossing the granules. */
+    asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12));
+    abort();
+}
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += bti-2

 # MTE Tests
 ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
-AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-6
+AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6
 mte-%: CFLAGS += -march=armv8.5-a+memtag
 endif

--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

After recent changes, mte_checkN does not use ESIZE,
and mte_check1 never used TSIZE. We can combine the
two into a single field: SIZEM1.

Choose to pass size - 1 because size == 0 is never used,
our immediate need in mte_probe_int is for the address
of the last byte (ptr + size - 1), and since almost all
operations are powers of 2, this makes the immediate
constant one bit smaller.
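
As a standalone aside (not part of the patch, helper name invented),
a tiny C sketch of the bit-width argument: for a power-of-two size,
encoding size - 1 always needs one bit fewer.

    #include <stdio.h>

    /* Count how many bits are needed to represent v. */
    static int bits_needed(unsigned v)
    {
        int n = 0;
        while (v) {
            n++;
            v >>= 1;
        }
        return n;
    }

    int main(void)
    {
        /* 16 is 0b10000 (5 bits) but 15 is 0b1111 (4 bits). */
        for (unsigned size = 1; size <= 32; size <<= 1) {
            printf("size %2u needs %d bits, size-1 needs %d bits\n",
                   size, bits_needed(size), bits_needed(size - 1));
        }
        return 0;
    }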

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/internals.h | 4 ++--
target/arm/mte_helper.c | 18 ++++++++----------
target/arm/translate-a64.c | 5 ++---
target/arm/translate-sve.c | 5 ++---
4 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
+#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
-FIELD(MTEDESC, ESIZE, 9, 5)
-FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */
+FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
* Return positive on success with tbi enabled.
*/
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
- uintptr_t ra, uint32_t total, uint64_t *fault)
+ uintptr_t ra, uint64_t *fault)
{
int mmu_idx, ptr_tag, bit55;
uint64_t ptr_last, prev_page, next_page;
uint64_t tag_first, tag_last;
uint64_t tag_byte_first, tag_byte_last;
- uint32_t tag_count, tag_size, n, c;
+ uint32_t sizem1, tag_count, tag_size, n, c;
uint8_t *mem1, *mem2;
MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,

mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

/* Find the addr of the end of the access */
- ptr_last = ptr + total - 1;
+ ptr_last = ptr + sizem1;

/* Round the bounds to the tag granule, and compute the number of tags. */
tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
/* Memory access stays on one page. */
tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
- mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
MMU_DATA_LOAD, tag_size, ra);
if (!mem1) {
return 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
uint64_t ptr, uintptr_t ra)
{
uint64_t fault;
- uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);

if (unlikely(ret == 0)) {
mte_check_fail(env, desc, fault, ra);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_check1(CPUARMState *env, uint32_t desc,
uint64_t ptr, uintptr_t ra)
{
uint64_t fault;
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);

if (unlikely(ret == 0)) {
mte_check_fail(env, desc, fault, ra);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
uint64_t fault;
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
- int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
+ int ret = mte_probe_int(env, desc, ptr, 0, &fault);

return ret != 0;
}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
tcg_desc = tcg_const_i32(desc);

ret = new_tmp_a64(s);
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
tcg_desc = tcg_const_i32(desc);

ret = new_tmp_a64(s);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
desc <<= SVE_MTEDESC_SHIFT;
} else {
addr = clean_data_tbi(s, addr);
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
desc <<= SVE_MTEDESC_SHIFT;
}
desc = simd_desc(vsz, vsz, desc | scale);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

The mte_check1 and mte_checkN functions are now identical.
Drop mte_check1 and rename mte_checkN to mte_check.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-a64.h | 3 +--
target/arm/internals.h | 5 +----
target/arm/mte_helper.c | 26 +++-----------------------
target/arm/sve_helper.c | 14 +++++++-------
target/arm/translate-a64.c | 4 ++--
5 files changed, 14 insertions(+), 38 deletions(-)

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)

-DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
-DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64)
DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
- uint64_t ptr, uintptr_t ra);
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
- uint64_t ptr, uintptr_t ra);
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
return 0;
}

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
- uint64_t ptr, uintptr_t ra)
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
uint64_t fault;
int ret = mte_probe_int(env, desc, ptr, ra, &fault);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
return useronly_clean_ptr(ptr);
}

-uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
- return mte_checkN(env, desc, ptr, GETPC());
-}
-
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
- uint64_t ptr, uintptr_t ra)
-{
- uint64_t fault;
- int ret = mte_probe_int(env, desc, ptr, ra, &fault);
-
- if (unlikely(ret == 0)) {
- mte_check_fail(env, desc, fault, ra);
- } else if (ret < 0) {
- return ptr;
- }
- return useronly_clean_ptr(ptr);
-}
-
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
- return mte_check1(env, desc, ptr, GETPC());
+ return mte_check(env, desc, ptr, GETPC());
}

/*
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
uintptr_t ra)
{
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
- mtedesc, ra, mte_check1);
+ mtedesc, ra, mte_check);
}

static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
uintptr_t ra)
{
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
- mtedesc, ra, mte_checkN);
+ mtedesc, ra, mte_check);
}


@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
if (fault == FAULT_FIRST) {
/* Trapping mte check for the first-fault element. */
if (mtedesc) {
- mte_check1(env, mtedesc, addr + mem_off, retaddr);
+ mte_check(env, mtedesc, addr + mem_off, retaddr);
}

/*
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
info.attrs, BP_MEM_READ, retaddr);
}
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
- mte_check1(env, mtedesc, addr, retaddr);
+ mte_check(env, mtedesc, addr, retaddr);
}
host_fn(&scratch, reg_off, info.host);
} else {
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
BP_MEM_READ, retaddr);
}
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
- mte_check1(env, mtedesc, addr, retaddr);
+ mte_check(env, mtedesc, addr, retaddr);
}
tlb_fn(env, &scratch, reg_off, addr, retaddr);
}
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
*/
addr = base + (off_fn(vm, reg_off) << scale);
if (mtedesc) {
- mte_check1(env, mtedesc, addr, retaddr);
+ mte_check(env, mtedesc, addr, retaddr);
}
tlb_fn(env, vd, reg_off, addr, retaddr);

@@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
}

if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
- mte_check1(env, mtedesc, addr, retaddr);
+ mte_check(env, mtedesc, addr, retaddr);
}
}
i += 1;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
tcg_desc = tcg_const_i32(desc);

ret = new_tmp_a64(s);
- gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
tcg_temp_free_i32(tcg_desc);

return ret;
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
tcg_desc = tcg_const_i32(desc);

ret = new_tmp_a64(s);
- gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
tcg_temp_free_i32(tcg_desc);

return ret;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

For consistency with the mte_check1 + mte_checkN merge
to mte_check, rename the probe function as well.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/internals.h | 2 +-
target/arm/mte_helper.c | 6 +++---
target/arm/sve_helper.c | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
* exception for inaccessible pages, and resolves the virtual address
* into the softmmu tlb.
*
- * When RA == 0, this is for mte_probe1. The page is expected to be
+ * When RA == 0, this is for mte_probe. The page is expected to be
* valid. Indicate to probe_access_flags no-fault, then assert that
* we received a valid page.
*/
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
}

/*
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * No-fault version of mte_check, to be used by SVE for MemSingleNF.
* Returns false if the access is Checked and the check failed. This
* is only intended to probe the tag -- the validity of the page must
* be checked beforehand.
*/
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
uint64_t fault;
int ret = mte_probe_int(env, desc, ptr, 0, &fault);
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
/* Watchpoint hit, see below. */
goto do_fault;
}
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
goto do_fault;
}
/*
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
& BP_MEM_READ)) {
goto do_fault;
}
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
goto do_fault;
}
host_fn(vd, reg_off, host + mem_off);
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
}
if (mtedesc &&
arm_tlb_mte_tagged(&info.attrs) &&
- !mte_probe1(env, mtedesc, addr)) {
+ !mte_probe(env, mtedesc, addr)) {
goto fault;
}

--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Now that mte_check1 and mte_checkN have been merged, we can
merge sve_cont_ldst_mte_check1 and sve_cont_ldst_mte_checkN.

This means that we can eliminate the function pointer into
sve_ldN_r and sve_stN_r, calling sve_cont_ldst_mte_check directly.
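
As a standalone aside (names invented, not the QEMU code), a minimal
sketch of this pattern -- a callback parameter collapsing to a direct
call once only one implementation remains:

    #include <stdio.h>

    typedef int check_fn(int);

    /* Before: caller injects the check as a function pointer, and a
     * NULL pointer means "no check". */
    static int op_indirect(int x, check_fn *check)
    {
        return check ? check(x) : x;
    }

    /* After: only one check implementation exists, so call it
     * directly; the compiler can now inline it, and the NULL case
     * becomes a plain boolean condition at the call site. */
    static int check_impl(int x) { return x + 1; }

    static int op_direct(int x, int checked)
    {
        return checked ? check_impl(x) : x;
    }

    int main(void)
    {
        printf("%d %d\n", op_indirect(41, check_impl), op_direct(41, 1));
        return 0;
    }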

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve_helper.c | 84 +++++++++++++----------------------------
1 file changed, 26 insertions(+), 58 deletions(-)

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
#endif
}

-typedef uint64_t mte_check_fn(CPUARMState *, uint32_t, uint64_t, uintptr_t);
-
-static inline QEMU_ALWAYS_INLINE
-void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
- uint64_t *vg, target_ulong addr, int esize,
- int msize, uint32_t mtedesc, uintptr_t ra,
- mte_check_fn *check)
+static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+ uint64_t *vg, target_ulong addr, int esize,
+ int msize, uint32_t mtedesc, uintptr_t ra)
{
intptr_t mem_off, reg_off, reg_last;

@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
uint64_t pg = vg[reg_off >> 6];
do {
if ((pg >> (reg_off & 63)) & 1) {
- check(env, mtedesc, addr, ra);
+ mte_check(env, mtedesc, addr, ra);
}
reg_off += esize;
mem_off += msize;
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
uint64_t pg = vg[reg_off >> 6];
do {
if ((pg >> (reg_off & 63)) & 1) {
- check(env, mtedesc, addr, ra);
+ mte_check(env, mtedesc, addr, ra);
}
reg_off += esize;
mem_off += msize;
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
}
}

-typedef void sve_cont_ldst_mte_check_fn(SVEContLdSt *info, CPUARMState *env,
- uint64_t *vg, target_ulong addr,
- int esize, int msize, uint32_t mtedesc,
- uintptr_t ra);
-
-static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
- uint64_t *vg, target_ulong addr,
- int esize, int msize, uint32_t mtedesc,
- uintptr_t ra)
-{
- sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
- mtedesc, ra, mte_check);
-}
-
-static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
- uint64_t *vg, target_ulong addr,
- int esize, int msize, uint32_t mtedesc,
- uintptr_t ra)
-{
- sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
- mtedesc, ra, mte_check);
-}
-
-
/*
* Common helper for all contiguous 1,2,3,4-register predicated stores.
*/
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint32_t desc, const uintptr_t retaddr,
const int esz, const int msz, const int N, uint32_t mtedesc,
sve_ldst1_host_fn *host_fn,
- sve_ldst1_tlb_fn *tlb_fn,
- sve_cont_ldst_mte_check_fn *mte_check_fn)
+ sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = simd_data(desc);
const intptr_t reg_max = simd_oprsz(desc);
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
* Handle mte checks for all active elements.
* Since TBI must be set for MTE, !mtedesc => !mte_active.
*/
- if (mte_check_fn && mtedesc) {
- mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
- mtedesc, retaddr);
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+ mtedesc, retaddr);
}

flags = info.page[0].flags | info.page[1].flags;
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
mtedesc = 0;
}

- sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
- N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
+ sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
}

#define DO_LD1_1(NAME, ESZ) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \
- sve_##NAME##_host, sve_##NAME##_tlb, NULL); \
+ sve_##NAME##_host, sve_##NAME##_tlb); \
} \
void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
- sve_##NAME##_le_host, sve_##NAME##_le_tlb, NULL); \
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
} \
void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
- sve_##NAME##_be_host, sve_##NAME##_be_tlb, NULL); \
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
} \
void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
- target_ulong addr, uint32_t desc) \
+ target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
} \
void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
- target_ulong addr, uint32_t desc) \
+ target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \
- sve_ld1bb_host, sve_ld1bb_tlb, NULL); \
+ sve_ld1bb_host, sve_ld1bb_tlb); \
} \
void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
- sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb, NULL); \
+ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
} \
void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
- sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb, NULL); \
+ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
} \
void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
uint32_t desc, const uintptr_t retaddr,
const int esz, const int msz, const int N, uint32_t mtedesc,
sve_ldst1_host_fn *host_fn,
- sve_ldst1_tlb_fn *tlb_fn,
- sve_cont_ldst_mte_check_fn *mte_check_fn)
+ sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = simd_data(desc);
const intptr_t reg_max = simd_oprsz(desc);
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
* Handle mte checks for all active elements.
* Since TBI must be set for MTE, !mtedesc => !mte_active.
*/
- if (mte_check_fn && mtedesc) {
- mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
- mtedesc, retaddr);
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+ mtedesc, retaddr);
}

flags = info.page[0].flags | info.page[1].flags;
@@ -XXX,XX +XXX,XX @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
mtedesc = 0;
}

- sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
- N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
+ sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
}

#define DO_STN_1(N, NAME, ESZ) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \
- sve_st1##NAME##_host, sve_st1##NAME##_tlb, NULL); \
+ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
} \
void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
- sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb, NULL); \
+ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
} \
void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
- sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb, NULL); \
+ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
} \
void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

The log2_esize parameter is not used except trivially.
Drop the parameter and the deferral to gen_mte_check1.

This also fixes a bug: the parameters as documented in the
header file were the reverse of those in the implementation,
which meant that translate-sve.c was passing the parameters
in the wrong order.
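
As a standalone aside (names invented, not the QEMU prototypes), this
kind of mismatch compiles silently because C prototypes match on types
only, so two same-typed parameters documented in one order and
implemented in the other produce no diagnostic:

    #include <stdio.h>

    /* As documented in the header: */
    long combine(int count, int log2_esize);

    /* As implemented (parameters reversed -- still matches the
     * prototype, because both are int): */
    long combine(int log2_esize, int count)
    {
        return (long)count << log2_esize;
    }

    int main(void)
    {
        /* A caller written against the documentation intends
         * 4 << 3 == 32, but silently gets 3 << 4 == 48. */
        printf("%ld\n", combine(4, 3));
        return 0;
    }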

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.h | 2 +-
target/arm/translate-a64.c | 15 +++++++--------
target/arm/translate-sve.c | 4 ++--
3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
bool tag_checked, int log2_size);
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
- bool tag_checked, int count, int log2_esize);
+ bool tag_checked, int size);

/* We should have at some point before trying to access an FP register
* done the necessary access check, so assert that
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
* For MTE, check multiple logical sequential accesses.
*/
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
- bool tag_checked, int log2_esize, int total_size)
+ bool tag_checked, int size)
{
- if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
+ if (tag_checked && s->mte_active[0]) {
TCGv_i32 tcg_desc;
TCGv_i64 ret;
int desc = 0;
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
tcg_desc = tcg_const_i32(desc);

ret = new_tmp_a64(s);
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,

return ret;
}
- return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
+ return clean_data_tbi(s, addr);
}

typedef struct DisasCompare64 {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
}

clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
- (wback || rn != 31) && !set_tag,
- size, 2 << size);
+ (wback || rn != 31) && !set_tag, 2 << size);

if (is_vector) {
if (is_load) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
* promote consecutive little-endian elements below.
*/
clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
- size, total);
+ total);

/*
* Consecutive little-endian elements from a single register
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
tcg_rn = cpu_reg_sp(s, rn);

clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
- scale, total);
+ total);

tcg_ebytes = tcg_const_i64(1 << scale);
for (xs = 0; xs < selem; xs++) {
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)

dirty_addr = tcg_temp_new_i64();
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
tcg_temp_free_i64(dirty_addr);

/*
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)

dirty_addr = tcg_temp_new_i64();
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
tcg_temp_free_i64(dirty_addr);

/* Note that unpredicated load/store of vector/predicate registers
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

The encoding of size = 2 and size = 3 had the incorrect decode
for align, overlapping the stride field. This error was hidden
by what should have been unnecessary masking in translate.
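
As a standalone aside with a made-up 8-bit layout (not the real Neon
encoding), this shows how an over-wide field decode overlaps its
neighbour and how downstream masking hides the bug:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up layout: bits [2:0] = align, bit [3] = stride select. */
    static unsigned align_overlapping(uint8_t insn) { return insn & 0xf; }
    static unsigned align_correct(uint8_t insn)     { return insn & 0x7; }
    static unsigned stride_bit(uint8_t insn)        { return (insn >> 3) & 1; }

    int main(void)
    {
        uint8_t insn = 0x0c;    /* stride bit set, align == 4 */

        /* The over-wide decode picks up the stride bit as well... */
        printf("overlapping align = %u\n", align_overlapping(insn));
        /* ...but masking it back down afterwards gives the right value,
         * which is how "unnecessary masking in translate" hid the error. */
        printf("masked back down  = %u\n", align_overlapping(insn) & 0x7);
        printf("correct decode    = %u, stride bit = %u\n",
               align_correct(insn), stride_bit(insn));
        return 0;
    }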

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/neon-ls.decode | 4 ++--
target/arm/translate-neon.c.inc | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@ VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \

VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
vd=%vd_dp size=0 stride=1
-VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 . align:1 rm:4 \
vd=%vd_dp size=1 stride=%imm1_5_p1
-VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 . align:2 rm:4 \
vd=%vd_dp size=2 stride=%imm1_6_p1
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
switch (nregs) {
case 1:
if (((a->align & (1 << a->size)) != 0) ||
- (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
+ (a->size == 2 && (a->align == 1 || a->align == 2))) {
return false;
}
break;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
}
break;
case 4:
- if ((a->size == 2) && ((a->align & 3) == 3)) {
+ if (a->size == 2 && a->align == 3) {
return false;
}
break;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

We're about to rearrange the macro expansion surrounding tbflags,
and this field name will be expanded using the bit definition of
the same name, resulting in a token pasting error.

So SCTLR_B -> SCTLR__B in the 3 uses, and document it.
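
As a standalone preprocessor aside (names invented): once an argument
passes through an extra macro layer before the ## paste, it is
macro-expanded first, so a like-named bit definition breaks the paste.

    #include <stdio.h>

    #define SCTLR_B (1 << 25)          /* pre-existing bit definition */

    #define PASTE(a, b)  a ## b        /* arguments are NOT pre-expanded */
    #define XPASTE(a, b) PASTE(a, b)   /* extra layer: arguments ARE pre-expanded */

    enum { R_TBFLAG_A32_SCTLR_B = 15 };

    int main(void)
    {
        /* OK: pastes the literal tokens -> R_TBFLAG_A32_SCTLR_B */
        printf("%d\n", PASTE(R_TBFLAG_A32_, SCTLR_B));

        /* Error if uncommented: SCTLR_B expands to (1 << 25) first, and
         * pasting "R_TBFLAG_A32_" with "(" is not a valid token. */
        /* printf("%d\n", XPASTE(R_TBFLAG_A32_, SCTLR_B)); */
        return 0;
    }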

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 2 +-
target/arm/helper.c | 2 +-
target/arm/translate.c | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
*/
FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
+FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */
FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
/*
* Indicates whether cp register reads and writes by guest code should access
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
bool sctlr_b = arm_sctlr_b(env);

if (sctlr_b) {
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
+ flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
}
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
dc->debug_target_el =
FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
+ dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

We're about to rearrange the macro expansion surrounding tbflags,
and this field name will be expanded using the bit definition of
the same name, resulting in a token pasting error.

So PSTATE_SS -> PSTATE__SS in the uses, and document it.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 2 +-
target/arm/helper.c | 4 ++--
target/arm/translate-a64.c | 2 +-
target/arm/translate.c | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
*/
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
-FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */
+FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */
FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
/* Target EL if we take a floating-point-disabled exception */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
* 0 x Inactive (the TB flag for SS is always 0)
* 1 0 Active-pending
* 1 1 Active-not-pending
- * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
*/
if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
(env->pstate & PSTATE_SS)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
+ flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
}

*pflags = flags;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
* end the TB
*/
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
dc->is_ldex = false;
dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
* end the TB
*/
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
dc->is_ldex = false;

dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

We're about to split tbflags into two parts. These macros
will ensure that the correct part is used with the correct
set of bits.
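
As a standalone aside (a hypothetical two-part layout, not QEMU's real
one): per-part accessor macros make each use site name the part a
field belongs to, so a field cannot silently be deposited into or
extracted from the wrong word once the parts are split.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: "ANY" fields in the low half of the word,
     * "A64" fields in the high half. */
    #define DP_PART_ANY(dst, shift, val) ((dst) |= (uint32_t)(val) << (shift))
    #define EX_PART_ANY(in, shift)       (((in) >> (shift)) & 1)
    #define DP_PART_A64(dst, shift, val) ((dst) |= (uint32_t)(val) << (16 + (shift)))
    #define EX_PART_A64(in, shift)       (((in) >> (16 + (shift))) & 1)

    int main(void)
    {
        uint32_t flags = 0;
        DP_PART_ANY(flags, 0, 1);   /* e.g. an SS_ACTIVE-style bit */
        DP_PART_A64(flags, 2, 1);   /* e.g. a PAUTH_ACTIVE-style bit */
        printf("%08x any=%u a64=%u\n", (unsigned)flags,
               EX_PART_ANY(flags, 0), EX_PART_A64(flags, 2));
        return 0;
    }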

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 22 +++++++++-
target/arm/helper-a64.c | 2 +-
target/arm/helper.c | 85 +++++++++++++++++---------------------
target/arm/translate-a64.c | 36 ++++++++--------
target/arm/translate.c | 48 ++++++++++-----------
5 files changed, 101 insertions(+), 92 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TCMA, 16, 2)
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)

+/*
+ * Helpers for using the above.
+ */
+#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
+ (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
+#define DP_TBFLAG_A64(DST, WHICH, VAL) \
+ (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
+#define DP_TBFLAG_A32(DST, WHICH, VAL) \
+ (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
+#define DP_TBFLAG_M32(DST, WHICH, VAL) \
+ (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
+#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
+ (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
+
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
+
/**
* cpu_mmu_index:
* @env: The cpu environment
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
*/
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
- return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX);
+ return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool bswap_code(bool sctlr_b)
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
* the hflags rebuild, since we can pull the composite TBII field
* from there.
*/
- tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII);
+ tbii = EX_TBFLAG_A64(env->hflags, TBII);
if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
/* TBI is enabled. */
int core_mmu_idx = cpu_mmu_index(env, false);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
ARMMMUIdx mmu_idx, uint32_t flags)
{
- flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
- flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
- arm_to_core_mmu_idx(mmu_idx));
+ DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
+ DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

if (arm_singlestep_active(env)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
+ DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
}
return flags;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
bool sctlr_b = arm_sctlr_b(env);

if (sctlr_b) {
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
+ DP_TBFLAG_A32(flags, SCTLR__B, 1);
}
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
}
- flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
+ DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
uint32_t flags = 0;

if (arm_v7m_is_handler_mode(env)) {
- flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
+ DP_TBFLAG_M32(flags, HANDLER, 1);
}

/*
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
if (arm_feature(env, ARM_FEATURE_V8) &&
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
- flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
+ DP_TBFLAG_M32(flags, STACKCHECK, 1);
}

return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
int flags = 0;

- flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
- arm_debug_target_el(env));
+ DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
return flags;
}

@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
uint32_t flags = rebuild_hflags_aprofile(env);

if (arm_el_is_aa64(env, 1)) {
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
+ DP_TBFLAG_A32(flags, VFPEN, 1);
}

if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
- flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
+ DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
}

return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
uint64_t sctlr;
int tbii, tbid;

- flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
+ DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

/* Get control bits for tagged addresses. */
tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

- flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
- flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
+ DP_TBFLAG_A64(flags, TBII, tbii);
+ DP_TBFLAG_A64(flags, TBID, tbid);

if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
int sve_el = sve_exception_el(env, el);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
} else {
zcr_len = sve_zcr_len_for_el(env, el);
}
- flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
- flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
+ DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
+ DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
}

sctlr = regime_sctlr(env, stage1);

if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
}

if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
* The decision of which action to take is left to a helper.
*/
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
+ DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
}
}

if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
/* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
+ DP_TBFLAG_A64(flags, BT, 1);
}
}

@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
case ARMMMUIdx_SE10_1:
case ARMMMUIdx_SE10_1_PAN:
/* TODO: ARMv8.3-NV */
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
break;
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
*/
if (env->cp15.hcr_el2 & HCR_TGE) {
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
}
break;
default:
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
* 4) If no Allocation Tag Access, then all accesses are Unchecked.
*/
if (allocation_tag_access_enabled(env, el, sctlr)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
+ DP_TBFLAG_A64(flags, ATA, 1);
if (tbid
&& !(env->pstate & PSTATE_TCO)
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
+ DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
}
}
/* And again for unprivileged accesses, if required. */
- if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
+ if (EX_TBFLAG_A64(flags, UNPRIV)
&& tbid
&& !(env->pstate & PSTATE_TCO)
&& (sctlr & SCTLR_TCF0)
&& allocation_tag_access_enabled(env, 0, sctlr)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
+ DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
}
/* Cache TCMA as well as TBI. */
- flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
- aa64_va_parameter_tcma(tcr, mmu_idx));
+ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
}

return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
*cs_base = 0;
assert_hflags_rebuild_correctly(env);

- if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
*pc = env->pc;
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
}
} else {
*pc = env->regs[15];
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
!= env->v7m.secure) {
- flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
}

if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
* active FP context; we must create a new FP context before
* executing any FP insn.
*/
- flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
}

bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
+ DP_TBFLAG_M32(flags, LSPACT, 1);
}
} else {
/*
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
*/
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- flags = FIELD_DP32(flags, TBFLAG_A32,
- XSCALE_CPAR, env->cp15.c15_cpar);
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
} else {
- flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
- env->vfp.vec_len);
- flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
- env->vfp.vec_stride);
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
}
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
+ DP_TBFLAG_A32(flags, VFPEN, 1);
}
}

- flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
- flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
}

/*
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
* 1 1 Active-not-pending
* SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
*/
- if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
- (env->pstate & PSTATE_SS)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
}

*pflags = flags;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
!arm_el_is_aa64(env, 3);
dc->thumb = 0;
dc->sctlr_b = 0;
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
- dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
- dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
- dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA);
+ dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
+ dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
+ dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
- dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
- dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
- dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
- dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
- dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
- dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
- dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA);
- dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE);
- dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE);
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+ dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
+ dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
+ dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
+ dc->bt = EX_TBFLAG_A64(tb_flags, BT);
+ dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
+ dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
+ dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
+ dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
+ dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
* emit code to generate a software step exception
* end the TB
*/
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
dc->is_ldex = false;
- dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);

/* Bound the number of insns to execute to those left on the page. */
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
*/
dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
!arm_el_is_aa64(env, 3);
- dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
- condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
+ dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
+ condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
dc->condexec_mask = (condexec & 0xf) << 1;
dc->condexec_cond = condexec >> 4;

- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);

if (arm_feature(env, ARM_FEATURE_M)) {
dc->vfp_enabled = 1;
dc->be_data = MO_TE;
- dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
+ dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
regime_is_secure(env, dc->mmu_idx);
- dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
- dc->v8m_fpccr_s_wrong =
- FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
+ dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
+ dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
dc->v7m_new_fp_ctxt_needed =
- FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
- dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
+ EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
+ dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
} else {
- dc->be_data =
- FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
- dc->debug_target_el =
- FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
- dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
- dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
- dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
+ dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
+ dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
+ dc->ns = EX_TBFLAG_A32(tb_flags, NS);
+ dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+ dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
} else {
- dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
- dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
+ dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
+ dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
}
}
dc->cp_regs = cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
* emit code to generate a software step exception
* end the TB
*/
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
dc->is_ldex = false;

dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;

- if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
476
+ if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
477
ops = &thumb_translator_ops;
478
}
479
#ifdef TARGET_AARCH64
480
- if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
481
+ if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
482
ops = &aarch64_translator_ops;
483
}
484
#endif
485
--
486
2.20.1
487
488
diff view generated by jsdifflib

From: Richard Henderson <richard.henderson@linaro.org>

Now that we have all of the proper macros defined, expanding
the CPUARMTBFlags structure and populating the two TB fields
is relatively simple.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       | 49 ++++++++++++++++++++++++------------------
 target/arm/translate.h |  2 +-
 target/arm/helper.c    | 10 +++++----
 3 files changed, 35 insertions(+), 26 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
 /* See the commentary above the TBFLAG field definitions.  */
 typedef struct CPUARMTBFlags {
     uint32_t flags;
+    target_ulong flags2;
 } CPUARMTBFlags;
 
 typedef struct CPUARMState {
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
 #include "exec/cpu-all.h"
 
 /*
- * Bit usage in the TB flags field: bit 31 indicates whether we are
- * in 32 or 64 bit mode.  The meaning of the other bits depends on that.
- * We put flags which are shared between 32 and 64 bit mode at the top
- * of the word, and flags which apply to only one mode at the bottom.
+ * We have more than 32-bits worth of state per TB, so we split the data
+ * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
+ * We collect these two parts in CPUARMTBFlags where they are named
+ * flags and flags2 respectively.
  *
- *  31          20    18    14          9              0
- * +--------------+-----+-----+----------+--------------+
- * |              |     |   TBFLAG_A32   |              |
- * |              |     +-----+----------+  TBFLAG_AM32 |
- * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
- * |              +-----------+----------+--------------|
- * |              |            TBFLAG_A64               |
- * +--------------+-------------------------------------+
- *  31          20                                      0
+ * The flags that are shared between all execution modes, TBFLAG_ANY,
+ * are stored in flags.  The flags that are specific to a given mode
+ * are stores in flags2.  Since cs_base is sized on the configured
+ * address size, flags2 always has 64-bits for A64, and a minimum of
+ * 32-bits for A32 and M32.
+ *
+ * The bits for 32-bit A-profile and M-profile partially overlap:
+ *
+ *  18             9              0
+ * +----------------+--------------+
+ * |   TBFLAG_A32   |              |
+ * +-----+----------+  TBFLAG_AM32 |
+ * |     |TBFLAG_M32|              |
+ * +-----+----------+--------------+
+ *  14          9                  0
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
 #define DP_TBFLAG_ANY(DST, WHICH, VAL) \
     (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
 #define DP_TBFLAG_A64(DST, WHICH, VAL) \
-    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
+    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL))
 #define DP_TBFLAG_A32(DST, WHICH, VAL) \
-    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
+    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
 #define DP_TBFLAG_M32(DST, WHICH, VAL) \
-    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
+    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
 #define DP_TBFLAG_AM32(DST, WHICH, VAL) \
-    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))
+    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))
 
 #define EX_TBFLAG_ANY(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
-#define EX_TBFLAG_A64(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
-#define EX_TBFLAG_A32(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
-#define EX_TBFLAG_M32(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
-#define EX_TBFLAG_AM32(IN, WHICH)  FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH)  FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
 
 /**
  * cpu_mmu_index:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
  */
 static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
 {
-    return (CPUARMTBFlags){ tb->flags };
+    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
 }
 
 /*
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
     CPUARMTBFlags c = env->hflags;
     CPUARMTBFlags r = rebuild_hflags_internal(env);
 
-    if (unlikely(c.flags != r.flags)) {
-        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
-                c.flags, r.flags);
+    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
+        fprintf(stderr, "TCG hflags mismatch "
+                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
+                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+                c.flags, c.flags2, r.flags, r.flags2);
         abort();
     }
 #endif
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
 {
     CPUARMTBFlags flags;
 
-    *cs_base = 0;
     assert_hflags_rebuild_correctly(env);
     flags = env->hflags;
 
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     }
 
     *pflags = flags.flags;
+    *cs_base = flags.flags2;
 }
 
 #ifdef TARGET_AARCH64
--
2.20.1
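
The two-word packing above is easiest to see in isolation. Below is a
minimal standalone sketch, not QEMU code: the struct mirrors
CPUARMTBFlags, but the hand-rolled shift/mask stands in for
FIELD_DP32/FIELD_EX32, and the bit position chosen is purely
illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for QEMU's CPUARMTBFlags. */
typedef struct {
    uint32_t flags;   /* TBFLAG_ANY bits, carried in tb->flags */
    uint64_t flags2;  /* mode-specific bits, carried in tb->cs_base */
} TBFlagsPair;

/* A hypothetical 1-bit field in the second word. */
#define EXAMPLE_SHIFT 19

static void dp_example(TBFlagsPair *dst, unsigned val)
{
    dst->flags2 = (dst->flags2 & ~(1ull << EXAMPLE_SHIFT))
                | ((uint64_t)(val & 1) << EXAMPLE_SHIFT);
}

static unsigned ex_example(TBFlagsPair in)
{
    return (in.flags2 >> EXAMPLE_SHIFT) & 1;
}

int main(void)
{
    TBFlagsPair f = { 0, 0 };
    dp_example(&f, 1);
    /* The pair travels to the TB as (tb->flags, tb->cs_base) and is
     * reassembled by arm_tbflags_from_tb() before translation. */
    printf("example field = %u\n", ex_example(f));
    return 0;
}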

From: Richard Henderson <richard.henderson@linaro.org>

Now that these bits have been moved out of tb->flags,
where TBFLAG_ANY was filling from the top, move AM32
to fill from the top, and A32 and M32 to fill from the
bottom.  This means fewer changes when adding new bits.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  *
  * The bits for 32-bit A-profile and M-profile partially overlap:
  *
- *  18             9              0
- * +----------------+--------------+
- * |   TBFLAG_A32   |              |
- * +-----+----------+  TBFLAG_AM32 |
- * |     |TBFLAG_M32|              |
- * +-----+----------+--------------+
- *  14          9                  0
+ *  31         23          11 10             0
+ * +-------------+----------+----------------+
+ * |             |          |   TBFLAG_A32   |
+ * | TBFLAG_AM32 |          +-----+----------+
+ * |             |                |TBFLAG_M32|
+ * +-------------+----------------+----------+
+ *  31         23                5 4         0
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
 /*
  * Bit usage when in AArch32 state, both A- and M-profile.
  */
-FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
-FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
+FIELD(TBFLAG_AM32, CONDEXEC, 24, 8)     /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 23, 1)        /* Not cached. */
 
 /*
  * Bit usage when in AArch32 state, for A-profile only.
  */
-FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
+FIELD(TBFLAG_A32, VECLEN, 0, 3)         /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 3, 2)      /* Not cached. */
 /*
  * We store the bottom two bits of the CPAR as TB flags and handle
  * checks on the other bits at runtime.  This shares the same bits as
  * VECSTRIDE, which is OK as no XScale CPU has VFP.
  * Not cached, because VECLEN+VECSTRIDE are not cached.
  */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
-FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, SCTLR__B, 15, 1)      /* Cannot overlap with SCTLR_B */
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
+FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR__B, 8, 1)       /* Cannot overlap with SCTLR_B */
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
 /*
  * Indicates whether cp register reads and writes by guest code should access
  * the secure or nonsecure bank of banked registers; note that this is not
  * the same thing as the current security state of the processor!
  */
-FIELD(TBFLAG_A32, NS, 17, 1)
+FIELD(TBFLAG_A32, NS, 10, 1)
 
 /*
  * Bit usage when in AArch32 state, for M-profile only.
  */
 /* Handler (ie not Thread) mode */
-FIELD(TBFLAG_M32, HANDLER, 9, 1)
+FIELD(TBFLAG_M32, HANDLER, 0, 1)
 /* Whether we should generate stack-limit checks */
-FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
 /* Set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_M32, LSPACT, 11, 1)        /* Not cached. */
+FIELD(TBFLAG_M32, LSPACT, 2, 1)         /* Not cached. */
 /* Set if we must create a new FP context */
-FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)  /* Not cached. */
 /* Set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)  /* Not cached. */
 
 /*
  * Bit usage when in AArch64 state
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Now that other bits have been moved out of tb->flags,
there's no point in filling from the top.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
-FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
-FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
-FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1)    /* Not cached. */
-FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
-FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
+FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
+FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
+FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
 /* Target EL if we take a floating-point-disabled exception */
-FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
+FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
 /* For A-profile only, target EL for debug exceptions.  */
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
 
 /*
  * Bit usage when in AArch32 state, both A- and M-profile.
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Use this to signal when memory access alignment is required.
This value comes from the CCR register for M-profile, and
from the SCTLR register for A-profile.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           |  2 ++
 target/arm/translate.h     |  2 ++
 target/arm/helper.c        | 19 +++++++++++++++++--
 target/arm/translate-a64.c |  1 +
 target/arm/translate.c     |  7 +++----
 5 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
 FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
 /* For A-profile only, target EL for debug exceptions.  */
 FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
+/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
+FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
 
 /*
  * Bit usage when in AArch32 state, both A- and M-profile.
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool bt;
     /* True if any CP15 access is trapped by HSTR_EL2 */
     bool hstr_active;
+    /* True if memory operations require alignment */
+    bool align_mem;
     /*
      * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
      *  < 0, set by the current instruction.
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx)
 {
     CPUARMTBFlags flags = {};
+    uint32_t ccr = env->v7m.ccr[env->v7m.secure];
+
+    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1.  */
+    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
+        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+    }
 
     if (arm_v7m_is_handler_mode(env)) {
         DP_TBFLAG_M32(flags, HANDLER, 1);
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
      */
     if (arm_feature(env, ARM_FEATURE_V8) &&
         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
-          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
         DP_TBFLAG_M32(flags, STACKCHECK, 1);
     }
 
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx)
 {
     CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
+    int el = arm_current_el(env);
+
+    if (arm_sctlr(env, el) & SCTLR_A) {
+        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+    }
 
     if (arm_el_is_aa64(env, 1)) {
         DP_TBFLAG_A32(flags, VFPEN, 1);
     }
 
-    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
+    if (el < 2 && env->cp15.hstr_el2 &&
         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
         DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
     }
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
 
     sctlr = regime_sctlr(env, stage1);
 
+    if (sctlr & SCTLR_A) {
+        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+    }
+
     if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
         DP_TBFLAG_ANY(flags, BE_DATA, 1);
     }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->user = (dc->current_el == 0);
 #endif
     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
     dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
     dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
     dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 {
     TCGv addr;
 
-    if (arm_dc_feature(s, ARM_FEATURE_M) &&
-        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
+    if (s->align_mem) {
         opc |= MO_ALIGN;
     }
 
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 {
     TCGv addr;
 
-    if (arm_dc_feature(s, ARM_FEATURE_M) &&
-        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
+    if (s->align_mem) {
         opc |= MO_ALIGN;
     }
 
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->user = (dc->current_el == 0);
 #endif
     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
 
     if (arm_feature(env, ARM_FEATURE_M)) {
         dc->vfp_enabled = 1;
--
2.20.1
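
As a rough sketch of what the cached bit buys the translator: once
ALIGN_MEM is latched into DisasContext, every ordinary load or store can
fold the alignment requirement in with one test, exactly as the
gen_aa32_ld_i32/gen_aa32_st_i32 hunks above do. This is standalone C
with an illustrative MemOp stand-in, not the QEMU definitions:

#include <stdbool.h>
#include <stdio.h>

enum { MO_UL = 2, MO_ALIGN = 1 << 4 };  /* illustrative values only */

/* Mirror of the "if (s->align_mem) opc |= MO_ALIGN;" pattern above. */
static int apply_align(bool align_mem, int opc)
{
    return align_mem ? (opc | MO_ALIGN) : opc;
}

int main(void)
{
    printf("alignment not required: opc=%#x\n", apply_align(false, MO_UL));
    printf("alignment required:     opc=%#x\n", apply_align(true, MO_UL));
    return 0;
}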

From: Richard Henderson <richard.henderson@linaro.org>

Create a finalize_memop function that computes alignment and
endianness and returns the final MemOp for the operation.

Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
handling of endianness or alignment.  Adjust gen_aa32_{ld,st}_i32
so that s->be_data is not added by the callers.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h          |  24 ++++++++
 target/arm/translate.c          | 100 +++++++++++++++++---------------
 target/arm/translate-neon.c.inc |   9 +--
 3 files changed, 79 insertions(+), 54 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
     return statusptr;
 }
 
+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (op & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC.  Otherwise, this an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here.  Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+    if (s->align_mem && !(opc & MO_AMASK)) {
+        opc |= MO_ALIGN;
+    }
+    return opc | s->be_data;
+}
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 #define IS_USER_ONLY 0
 #endif
 
-/* Abstractions of "generate code to do a guest load/store for
+/*
+ * Abstractions of "generate code to do a guest load/store for
  * AArch32", where a vaddr is always 32 bits (and is zero
  * extended if we're a 64 bit core) and data is also
  * 32 bits unless specifically doing a 64 bit access.
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
  * that the address argument is TCGv_i32 rather than TCGv.
  */
 
-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
 {
     TCGv addr = tcg_temp_new();
     tcg_gen_extu_i32_tl(addr, a32);
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
     return addr;
 }
 
+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_ld_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_st_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
-
-    if (s->align_mem) {
-        opc |= MO_ALIGN;
-    }
-
-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_ld_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
+    gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }
 
 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
+    gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}
 
-    if (s->align_mem) {
-        opc |= MO_ALIGN;
+#define DO_GEN_LD(SUFF, OPC)                                            \
+    static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+                                         TCGv_i32 a32, int index)       \
+    {                                                                   \
+        gen_aa32_ld_i32(s, val, a32, index, OPC);                       \
     }
 
-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_st_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
-}
-
-#define DO_GEN_LD(SUFF, OPC)                                            \
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
-                                     TCGv_i32 a32, int index)           \
-{                                                                       \
-    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
-}
-
-#define DO_GEN_ST(SUFF, OPC)                                            \
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
-                                     TCGv_i32 a32, int index)           \
-{                                                                       \
-    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
-}
+#define DO_GEN_ST(SUFF, OPC)                                            \
+    static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+                                         TCGv_i32 a32, int index)       \
+    {                                                                   \
+        gen_aa32_st_i32(s, val, a32, index, OPC);                       \
+    }
 
 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
 {
@@ -XXX,XX +XXX,XX @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
 
     /*
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt + 1, tmp);
 
     /* LDRD w/ base writeback is undefined if the registers overlap.  */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);
 
     op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
 
     /*
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);
 
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, rt2, tmp);
 
     /* LDRD w/ base writeback is undefined if the registers overlap.  */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);
 
     op_addr_ri_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
     addr = load_reg(s, a->rn);
     tmp = load_reg(s, a->rt);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
 
     tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
 
     addr = load_reg(s, a->rn);
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
     tcg_temp_free_i32(addr);
 
@@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
     addr = load_reg(s, a->rn);
     tcg_gen_add_i32(addr, addr, tmp);
 
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                    half ? MO_UW | s->be_data : MO_UB);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
     tcg_temp_free_i32(addr);
 
     tcg_gen_add_i32(tmp, tmp, tmp);
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
     addr = tcg_temp_new_i32();
     load_reg_var(s, addr, a->rn);
     for (reg = 0; reg < nregs; reg++) {
-        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                        s->be_data | size);
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
         if ((vd & 1) && vec_size == 16) {
             /*
              * We cannot write 16 bytes at once because the
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
      */
     for (reg = 0; reg < nregs; reg++) {
         if (a->l) {
-            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
             neon_store_element(vd, a->reg_idx, a->size, tmp);
         } else { /* Store */
             neon_load_element(tmp, vd, a->reg_idx, a->size);
-            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
         }
         vd += a->stride;
         tcg_gen_addi_i32(addr, addr, 1 << a->size);
--
2.20.1
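
A compilable approximation of the finalize_memop() logic above, using
local stand-in values for the MemOp constants (the real QEMU encodings
differ, and here align_mem and be_data are passed as plain arguments
rather than read from a DisasContext):

#include <stdio.h>

enum {
    MO_UL    = 2,        /* 32-bit access (illustrative) */
    MO_ALIGN = 1 << 4,   /* enforce natural alignment */
    MO_AMASK = 7 << 4,   /* all alignment-request bits */
    MO_BE    = 1 << 7,   /* big-endian data */
};

static int finalize_memop(int align_mem, int be_data, int opc)
{
    /* Only add MO_ALIGN if the caller didn't already pick an
     * alignment, e.g. for an access that is always aligned-checked. */
    if (align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | be_data;
}

int main(void)
{
    /* Ordinary load with alignment checking configured: MO_ALIGN added. */
    printf("%#x\n", finalize_memop(1, 0, MO_UL));
    /* Caller already requested an alignment: left untouched, endianness
     * still folded in. */
    printf("%#x\n", finalize_memop(1, MO_BE, MO_UL | MO_ALIGN));
    return 0;
}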

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-neon.c.inc | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 {
     /* Neon load/store multiple structures */
     int nregs, interleave, spacing, reg, n;
-    MemOp endian = s->be_data;
+    MemOp mop, align, endian;
     int mmu_idx = get_mem_index(s);
     int size = a->size;
     TCGv_i64 tmp64;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
     }
 
     /* For our purposes, bytes are always little-endian.  */
+    endian = s->be_data;
     if (size == 0) {
         endian = MO_LE;
     }
+
+    /* Enforce alignment requested by the instruction */
+    if (a->align) {
+        align = pow2_align(a->align + 2); /* 4 ** a->align */
+    } else {
+        align = s->align_mem ? MO_ALIGN : 0;
+    }
+
     /*
      * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     if (interleave == 1 && endian == MO_LE) {
+        /* Retain any natural alignment.  */
+        if (align == MO_ALIGN) {
+            align = pow2_align(size);
+        }
         size = 3;
     }
+
     tmp64 = tcg_temp_new_i64();
     addr = tcg_temp_new_i32();
     tmp = tcg_const_i32(1 << size);
     load_reg_var(s, addr, a->rn);
+
+    mop = endian | size | align;
     for (reg = 0; reg < nregs; reg++) {
         for (n = 0; n < 8 >> size; n++) {
             int xs;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                 int tt = a->vd + reg + spacing * xs;
 
                 if (a->l) {
-                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
-                                             endian | size);
+                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
                     neon_store_element64(tt, n, size, tmp64);
                 } else {
                     neon_load_element64(tmp64, tt, n, size);
-                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
-                                             endian | size);
+                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
                 }
                 tcg_gen_add_i32(addr, addr, tmp);
+
+                /* Subsequent memory operations inherit alignment */
+                mop &= ~MO_AMASK;
             }
         }
     }
--
2.20.1

In commit f0a08b0913befbd we changed the type of the PC from
target_ulong to vaddr.  In doing so we inadvertently dropped the
zero-padding on the PC in trace lines (the second item inside the []
in these lines).  They used to look like this on AArch64, for
instance:

Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]

and now they look like this:
Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]

and if the PC happens to be somewhere low like 0x5000
then the field is shown as /5000/.

This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
with no width specifier.

Restore the zero-padding by adding an 016 width specifier to
this tracing and a couple of others that were similarly recently
changed to use VADDR_PRIx without a width specifier.

Unfortunately we can't restore the "32-bit guests are padded to
8 hex digits and 64-bit guests to 16 hex digits" behaviour so
easily.

Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
---
 accel/tcg/cpu-exec.c      | 4 ++--
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
 
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, last_tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("Stopped execution of TB chain before %p [%"
+            qemu_log("Stopped execution of TB chain before %p [%016"
                      VADDR_PRIx "] %s\n",
                      last_tb->tc.ptr, pc, lookup_symbol(pc));
         }
     }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
         }
     }
--
2.34.1
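
The formatting difference described in the trace-log patch above can be
reproduced with plain printf. This standalone snippet is not QEMU code;
it just shows the effect of the missing width specifier:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t pc = 0x5000;
    printf("[%" PRIx64 "]\n", pc);     /* prints [5000] */
    printf("[%016" PRIx64 "]\n", pc);  /* prints [0000000000005000] */
    return 0;
}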

From: Richard Henderson <richard.henderson@linaro.org>

This is the only caller.  Adjust some commentary to talk
about SCTLR_B instead of the vanishing function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
         gen_aa32_st_i32(s, val, a32, index, OPC); \
     }
 
-static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
-{
-    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
-    if (!IS_USER_ONLY && s->sctlr_b) {
-        tcg_gen_rotri_i64(val, val, 32);
-    }
-}
-
 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
     TCGv addr = gen_aa32_addr(s, a32, opc);
     tcg_gen_qemu_ld_i64(val, addr, index, opc);
-    gen_aa32_frob64(s, val);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
+    if (!IS_USER_ONLY && s->sctlr_b) {
+        tcg_gen_rotri_i64(val, val, 32);
+    }
+
     tcg_temp_free(addr);
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         TCGv_i32 tmp2 = tcg_temp_new_i32();
         TCGv_i64 t64 = tcg_temp_new_i64();
 
-        /* For AArch32, architecturally the 32-bit word at the lowest
+        /*
+         * For AArch32, architecturally the 32-bit word at the lowest
          * address is always Rt and the one at addr+4 is Rt2, even if
          * the CPU is big-endian.  That means we don't want to do a
-         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
-         * for an architecturally 64-bit access, but instead do a
-         * 64-bit access using MO_BE if appropriate and then split
-         * the two halves.
-         * This only makes a difference for BE32 user-mode, where
-         * frob64() must not flip the two halves of the 64-bit data
-         * but this code must treat BE32 user-mode like BE32 system.
+         * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+         * architecturally 64-bit access, but instead do a 64-bit access
+         * using MO_BE if appropriate and then split the two halves.
          */
         TCGv taddr = gen_aa32_addr(s, addr, opc);
 
@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
         TCGv_i64 n64 = tcg_temp_new_i64();
 
         t2 = load_reg(s, rt2);
-        /* For AArch32, architecturally the 32-bit word at the lowest
+
+        /*
+         * For AArch32, architecturally the 32-bit word at the lowest
          * address is always Rt and the one at addr+4 is Rt2, even if
          * the CPU is big-endian.  Since we're going to treat this as a
          * single 64-bit BE store, we need to put the two halves in the
          * opposite order for BE to LE, so that they end up in the right
-         * places.
-         * We don't want gen_aa32_frob64() because that does the wrong
-         * thing for BE32 usermode.
+         * places.  We don't want gen_aa32_st_i64, because that checks
+         * SCTLR_B as if for an architectural 64-bit access.
          */
         if (s->be_data == MO_BE) {
             tcg_gen_concat_i32_i64(n64, t2, t1);
--
2.20.1

From: Tong Ho <tong.ho@amd.com>

Add a check in the bit-set operation to write the backstore
only if the affected bit was previously 0.

With this in place, there will be no need for callers to
do the checking in order to avoid unnecessary writes.

Signed-off-by: Tong Ho <tong.ho@amd.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
 
 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));
 
@@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }
 
-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
+
     return true;
 }
 
--
2.34.1
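
The eFuse change is an instance of a general pattern: skip the expensive
backing-store write when the read-modify-write would be a no-op. A
standalone sketch of that pattern follows; sync_to_storage() is a
made-up stand-in for efuse_bdrv_sync(), not a real QEMU function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void sync_to_storage(unsigned int bit)
{
    printf("syncing word containing bit %u\n", bit);
}

static bool set_bit_once(uint32_t *fuse32, unsigned int bit)
{
    uint32_t *row = &fuse32[bit / 32];
    uint32_t set = 1u << (bit % 32);

    if (!(set & *row)) {    /* only touch storage on a real change */
        *row |= set;
        sync_to_storage(bit);
    }
    return true;
}

int main(void)
{
    uint32_t fuse[2] = { 0, 0 };
    set_bit_once(fuse, 40);    /* first call syncs */
    set_bit_once(fuse, 40);    /* second call is silent: already set */
    return 0;
}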

From: Richard Henderson <richard.henderson@linaro.org>

Just because we are operating on a TCGv_i64 temporary does not
mean that we're performing a 64-bit operation.  Restrict
the frobbing to actual 64-bit operations.

This bug is not currently visible because all current
users of these two functions always pass MO_64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     tcg_gen_qemu_ld_i64(val, addr, index, opc);
 
     /* Not needed for user-mode BE32, where we use MO_BE instead.  */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         tcg_gen_rotri_i64(val, val, 32);
     }
 
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     TCGv addr = gen_aa32_addr(s, a32, opc);
 
     /* Not needed for user-mode BE32, where we use MO_BE instead.  */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         TCGv_i64 tmp = tcg_temp_new_i64();
         tcg_gen_rotri_i64(tmp, val, 32);
         tcg_gen_qemu_st_i64(tmp, addr, index, opc);
--
2.20.1
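
To see why the (opc & MO_SIZE) == MO_64 guard matters, consider what a
32-bit rotate does to an i64 temporary that holds only a 32-bit value.
This is a standalone illustration of the arithmetic, not QEMU code:

#include <inttypes.h>
#include <stdio.h>

/* Rotate a 64-bit value right by r bits (0 < r < 64). */
static uint64_t rotri64(uint64_t v, unsigned r)
{
    return (v >> r) | (v << (64 - r));
}

int main(void)
{
    uint64_t wide = 0x1122334455667788ull;  /* genuine 64-bit load */
    uint64_t narrow = 0x55667788ull;        /* 32-bit load in an i64 temp */

    /* Halves swapped: the intended BE32 word swap. */
    printf("%016" PRIx64 "\n", rotri64(wide, 32));
    /* The 32-bit payload ends up in the high half: value corrupted. */
    printf("%016" PRIx64 "\n", rotri64(narrow, 32));
    return 0;
}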

From: Richard Henderson <richard.henderson@linaro.org>

Adjust the interface to match what has been done to the
TCGv_i32 load/store functions.

This is less obvious, because at present the only user of
these functions, trans_VLDST_multiple, also wants to manipulate
the endianness to speed up loading multiple bytes.  Thus we
retain an "internal" interface which is identical to the
current gen_aa32_{ld,st}_i64 interface.

The "new" interface will gain users as we remove the legacy
interfaces, gen_aa32_ld64 and gen_aa32_st64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 78 +++++++++++++++++++--------------
 target/arm/translate-neon.c.inc |  6 ++-
 2 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
     tcg_temp_free(addr);
 }
 
+static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+
+    tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+        tcg_gen_rotri_i64(val, val, 32);
+    }
+    tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+        TCGv_i64 tmp = tcg_temp_new_i64();
+        tcg_gen_rotri_i64(tmp, val, 32);
+        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+        tcg_temp_free_i64(tmp);
+    } else {
+        tcg_gen_qemu_st_i64(val, addr, index, opc);
+    }
+    tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
     gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }
 
+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+                            int index, MemOp opc)
+{
+    gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+                            int index, MemOp opc)
+{
+    gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
 #define DO_GEN_LD(SUFF, OPC)                                            \
     static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                          TCGv_i32 a32, int index)       \
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
         gen_aa32_st_i32(s, val, a32, index, OPC); \
     }
 
-static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, MemOp opc)
-{
-    TCGv addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_ld_i64(val, addr, index, opc);
-
-    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
-    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
-        tcg_gen_rotri_i64(val, val, 32);
-    }
-
-    tcg_temp_free(addr);
-}
-
 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                  TCGv_i32 a32, int index)
 {
-    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
-}
-
-static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, MemOp opc)
-{
-    TCGv addr = gen_aa32_addr(s, a32, opc);
-
-    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
-    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
-        TCGv_i64 tmp = tcg_temp_new_i64();
-        tcg_gen_rotri_i64(tmp, val, 32);
-        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
-        tcg_temp_free_i64(tmp);
-    } else {
-        tcg_gen_qemu_st_i64(val, addr, index, opc);
-    }
-    tcg_temp_free(addr);
+    gen_aa32_ld_i64(s, val, a32, index, MO_Q);
 }
 
 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                  TCGv_i32 a32, int index)
 {
-    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+    gen_aa32_st_i64(s, val, a32, index, MO_Q);
 }
 
 DO_GEN_LD(8u, MO_UB)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                 int tt = a->vd + reg + spacing * xs;
 
                 if (a->l) {
-                    gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
+                                             endian | size);
                     neon_store_element64(tt, n, size, tmp64);
                 } else {
                     neon_load_element64(tmp64, tt, n, size);
-                    gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
+                                             endian | size);
                 }
                 tcg_gen_add_i32(addr, addr, tmp);
             }
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/qemu/+bug/1905356
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt + 1, tmp);
 
     /* LDRD w/ base writeback is undefined if the registers overlap.  */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
 
     op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, rt2, tmp);
 
     /* LDRD w/ base writeback is undefined if the registers overlap.  */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);
 
     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
 
     tcg_gen_addi_i32(addr, addr, 4);
 
     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
 
     op_addr_ri_post(s, a, addr, -4);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
     addr = load_reg(s, a->rn);
     tmp = load_reg(s, a->rt);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
 
     tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
 
     addr = load_reg(s, a->rn);
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
     tcg_temp_free_i32(addr);
 
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
         } else {
             tmp = load_reg(s, i);
         }
-        gen_aa32_st32(s, tmp, addr, mem_idx);
+        gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         tcg_temp_free_i32(tmp);
 
         /* No need to add after the last transfer.  */
@@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
         }
 
         tmp = tcg_temp_new_i32();
-        gen_aa32_ld32u(s, tmp, addr, mem_idx);
+        gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         if (user) {
             tmp2 = tcg_const_i32(i);
             gen_helper_set_user_reg(cpu_env, tmp2, tmp);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_RFE(DisasContext *s, arg_RFE *a)
 
     /* Load PC into tmp and CPSR into tmp2.  */
     t1 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    tcg_gen_addi_i32(addr, addr, 4);
     t2 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
 
     if (a->w) {
         /* Base writeback.  */
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-vfp.c.inc | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
for (i = 0; i < n; i++) {
if (a->l) {
/* load */
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
vfp_store_reg32(tmp, a->vd + i);
} else {
/* store */
vfp_load_reg32(tmp, a->vd + i);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
}
tcg_gen_addi_i32(addr, addr, offset);
}
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
for (i = 0; i < n; i++) {
if (a->l) {
/* load */
- gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd + i);
} else {
/* store */
vfp_load_reg64(tmp, a->vd + i);
- gen_aa32_st64(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
}
tcg_gen_addi_i32(addr, addr, offset);
}
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-vfp.c.inc | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i32();
if (a->l) {
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
vfp_store_reg32(tmp, a->vd);
} else {
vfp_load_reg32(tmp, a->vd);
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i32();
if (a->l) {
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
vfp_store_reg32(tmp, a->vd);
} else {
vfp_load_reg32(tmp, a->vd);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i64();
if (a->l) {
- gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd);
} else {
vfp_load_reg64(tmp, a->vd);
- gen_aa32_st64(s, tmp, addr, get_mem_index(s));
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
}
tcg_temp_free_i64(tmp);
tcg_temp_free_i32(addr);
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 1 +
target/arm/translate.c | 15 +++++++++++++
target/arm/translate-neon.c.inc | 37 +++++++++++++++++++++++--------
3 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ void arm_test_cc(DisasCompare *cmp, int cc);
void arm_free_cc(DisasCompare *cmp);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
+MemOp pow2_align(unsigned i);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
#define IS_USER_ONLY 0
#endif

+MemOp pow2_align(unsigned i)
+{
+ static const MemOp mop_align[] = {
+ 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
+ /*
+ * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
+ * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
+ * see get_alignment_bits(). Enforce only 128-bit alignment for now.
+ */
+ MO_ALIGN_16
+ };
+ g_assert(i < ARRAY_SIZE(mop_align));
+ return mop_align[i];
+}
+
/*
* Abstractions of "generate code to do a guest load/store for
* AArch32", where a vaddr is always 32 bits (and is zero
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
int size = a->size;
int nregs = a->n + 1;
TCGv_i32 addr, tmp;
+ MemOp mop, align;

if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
return false;
}

+ align = 0;
if (size == 3) {
if (nregs != 4 || a->a == 0) {
return false;
}
/* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
- size = 2;
- }
- if (nregs == 1 && a->a == 1 && size == 0) {
- return false;
- }
- if (nregs == 3 && a->a == 1) {
- return false;
+ size = MO_32;
+ align = MO_ALIGN_16;
+ } else if (a->a) {
+ switch (nregs) {
+ case 1:
+ if (size == 0) {
+ return false;
+ }
+ align = MO_ALIGN;
+ break;
+ case 2:
+ align = pow2_align(size + 1);
+ break;
+ case 3:
+ return false;
+ case 4:
+ align = pow2_align(size + 2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}

if (!vfp_access_check(s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
*/
stride = a->t ? 2 : 1;
vec_size = nregs == 1 ? stride * 8 : 8;
-
+ mop = size | align;
tmp = tcg_temp_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
for (reg = 0; reg < nregs; reg++) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
if ((vd & 1) && vec_size == 16) {
/*
* We cannot write 16 bytes at once because the
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
}
tcg_gen_addi_i32(addr, addr, 1 << size);
vd += stride;
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
--
2.20.1

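A quick aside before the next patch, on the pow2_align() helper introduced above: it maps an alignment exponent ("align to 2^i bytes") onto the matching MO_ALIGN_* flag. A standalone sketch of how that encoding works follows; the MO_* values mirror QEMU's include/exec/memop.h of this era, but the program itself is illustrative only, not QEMU code.

#include <assert.h>
#include <stdio.h>

/* Illustrative copies of the memop.h alignment encoding: the alignment
 * exponent lives in a 3-bit field starting at MO_ASHIFT. */
enum {
    MO_ASHIFT   = 4,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_AMASK    = 7 << MO_ASHIFT,
};

typedef int MemOp;

static MemOp pow2_align(unsigned i)
{
    /* Index 5 (32-byte alignment) is clamped to 16 bytes, matching the
     * FIXME in the patch: MO_ALIGN_32 cannot be represented in the TLB
     * flags with the minimum target page size. */
    static const MemOp mop_align[] = {
        0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_16
    };
    assert(i < sizeof(mop_align) / sizeof(mop_align[0]));
    return mop_align[i];
}

int main(void)
{
    /* VLD2 of 16-bit elements wants 2^(size+1) = 4-byte alignment. */
    printf("align flag = 0x%x\n", (unsigned)pow2_align(1 + 1));
    return 0;
}
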
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-neon.c.inc | 48 ++++++++++++++++++++++++++++-----
1 file changed, 42 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
int nregs = a->n + 1;
int vd = a->vd;
TCGv_i32 addr, tmp;
+ MemOp mop;

if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
return true;
}

+ /* Pick up SCTLR settings */
+ mop = finalize_memop(s, a->size);
+
+ if (a->align) {
+ MemOp align_op;
+
+ switch (nregs) {
+ case 1:
+ /* For VLD1, use natural alignment. */
+ align_op = MO_ALIGN;
+ break;
+ case 2:
+ /* For VLD2, use double alignment. */
+ align_op = pow2_align(a->size + 1);
+ break;
+ case 4:
+ if (a->size == MO_32) {
+ /*
+ * For VLD4.32, align = 1 is double alignment, align = 2 is
+ * quad alignment; align = 3 is rejected above.
+ */
+ align_op = pow2_align(a->size + a->align);
+ } else {
+ /* For VLD4.8 and VLD4.16, we want quad alignment. */
+ align_op = pow2_align(a->size + 2);
+ }
+ break;
+ default:
+ /* For VLD3, the alignment field is zero and rejected above. */
+ g_assert_not_reached();
+ }
+
+ mop = (mop & ~MO_AMASK) | align_op;
+ }
+
tmp = tcg_temp_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
- /*
- * TODO: if we implemented alignment exceptions, we should check
- * addr against the alignment encoded in a->align here.
- */
+
for (reg = 0; reg < nregs; reg++) {
if (a->l) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
+ gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
neon_store_element(vd, a->reg_idx, a->size, tmp);
} else { /* Store */
neon_load_element(tmp, vd, a->reg_idx, a->size);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
+ gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
}
vd += a->stride;
tcg_gen_addi_i32(addr, addr, 1 << a->size);
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
}
tcg_temp_free_i32(addr);
tcg_temp_free_i32(tmp);
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

In the case of gpr load, merge the size and is_signed arguments;
otherwise, simply convert size to memop.
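
To make the merged encoding concrete, here is a small standalone sketch (the MO_* values mirror QEMU's include/exec/memop.h of this era; the program itself is illustrative, not part of the patch): the access size and the sign-extension flag now travel in a single MemOp, which the callers below build as size + is_signed * MO_SIGN.

#include <stdio.h>

/* Illustrative copies of the memop.h size/sign encoding. */
enum {
    MO_8    = 0,
    MO_16   = 1,
    MO_32   = 2,
    MO_64   = 3,
    MO_SIZE = 3,    /* mask for the size field */
    MO_SIGN = 4,    /* sign-extend the loaded value */
};

int main(void)
{
    int size = MO_32;
    int is_signed = 1;

    /* The single merged argument replacing (size, is_signed). */
    int memop = size + is_signed * MO_SIGN;

    printf("size = %d, signed = %d\n",
           memop & MO_SIZE, (memop & MO_SIGN) != 0);
    return 0;
}
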
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 78 ++++++++++++++++----------------------
1 file changed, 33 insertions(+), 45 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
* Store from GPR register to memory.
*/
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size, int memidx,
+ TCGv_i64 tcg_addr, MemOp memop, int memidx,
bool iss_valid,
unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- g_assert(size <= 3);
- tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
+ memop = finalize_memop(s, memop);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

if (iss_valid) {
uint32_t syn;

syn = syn_data_abort_with_iss(0,
- size,
+ (memop & MO_SIZE),
false,
iss_srt,
iss_sf,
@@ -XXX,XX +XXX,XX @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size,
+ TCGv_i64 tcg_addr, MemOp memop,
bool iss_valid,
unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
+ do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
* Load from memory to GPR register
*/
-static void do_gpr_ld_memidx(DisasContext *s,
- TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed,
- bool extend, int memidx,
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend, int memidx,
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- MemOp memop = s->be_data + size;
-
- g_assert(size <= 3);
-
- if (is_signed) {
- memop += MO_SIGN;
- }
-
+ memop = finalize_memop(s, memop);
tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

- if (extend && is_signed) {
- g_assert(size < 3);
+ if (extend && (memop & MO_SIGN)) {
+ g_assert((memop & MO_SIZE) <= MO_32);
tcg_gen_ext32u_i64(dest, dest);
}

@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
uint32_t syn;

syn = syn_data_abort_with_iss(0,
- size,
- is_signed,
+ (memop & MO_SIZE),
+ (memop & MO_SIGN) != 0,
iss_srt,
iss_sf,
iss_ar,
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
}
}

-static void do_gpr_ld(DisasContext *s,
- TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed, bool extend,
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend,
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
- get_mem_index(s),
+ do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
iss_valid, iss_srt, iss_sf, iss_ar);
}

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
false, rn != 31, size);
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
/* Only unsigned 32bit loads target 32bit registers. */
bool iss_sf = opc != 0;

- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ false, true, rt, iss_sf, false);
}
tcg_temp_free_i64(clean_addr);
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
/* Do not modify tcg_rt before recognizing any exception
* from the second load.
*/
- do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
- false, 0, false, false);
+ do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);
tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
- do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
- false, 0, false, false);
+ do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);

tcg_gen_mov_i64(tcg_rt, tmp);
tcg_temp_free_i64(tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
iss_valid, rt, iss_sf, false);
} else {
- do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
- is_signed, is_extended, memidx,
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, memidx,
iss_valid, rt, iss_sf, false);
}
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
do_gpr_st(s, tcg_rt, clean_addr, size,
true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, clean_addr, size,
- is_signed, is_extended,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
}
}
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
do_gpr_st(s, tcg_rt, clean_addr, size,
true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
- true, rt, iss_sf, false);
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
}
}
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
* full load-acquire (we only need "load-acquire processor consistent"),
* but we choose to implement them as full LDAQ.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
is_wback || rn != 31, size);

tcg_rt = cpu_reg(s, rt);
- do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
/* extend */ false, /* iss_valid */ !is_wback,
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
* Load-AcquirePC semantics; we implement as the slightly more
* restrictive Load-Acquire.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
- true, rt, iss_sf, true);
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+ extend, true, rt, iss_sf, true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
}
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

For 128-bit load/store, use 16-byte alignment. This
requires that we perform the two operations in the
correct order so that we generate the alignment fault
before modifying memory.
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210419202257.161730-27-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
target/arm/translate-a64.c | 42 +++++++++++++++++++++++---------------
14
1 file changed, 26 insertions(+), 16 deletions(-)
15
16
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.c
19
+++ b/target/arm/translate-a64.c
20
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
21
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
22
{
23
/* This writes the bottom N bits of a 128 bit wide vector to memory */
24
- TCGv_i64 tmp = tcg_temp_new_i64();
25
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
26
+ TCGv_i64 tmplo = tcg_temp_new_i64();
27
+ MemOp mop;
28
+
29
+ tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
30
+
31
if (size < 4) {
32
- tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
33
- s->be_data + size);
34
+ mop = finalize_memop(s, size);
35
+ tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
36
} else {
37
bool be = s->be_data == MO_BE;
38
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
39
+ TCGv_i64 tmphi = tcg_temp_new_i64();
40
41
+ tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
42
+
43
+ mop = s->be_data | MO_Q;
44
+ tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
45
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
46
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
47
- tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
48
- s->be_data | MO_Q);
49
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
50
- tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
51
- s->be_data | MO_Q);
52
+ tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
53
+ get_mem_index(s), mop);
54
+
55
tcg_temp_free_i64(tcg_hiaddr);
56
+ tcg_temp_free_i64(tmphi);
57
}
58
59
- tcg_temp_free_i64(tmp);
60
+ tcg_temp_free_i64(tmplo);
61
}
62
63
/*
64
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
65
/* This always zero-extends and writes to a full 128 bit wide vector */
66
TCGv_i64 tmplo = tcg_temp_new_i64();
67
TCGv_i64 tmphi = NULL;
68
+ MemOp mop;
69
70
if (size < 4) {
71
- MemOp memop = s->be_data + size;
72
- tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
73
+ mop = finalize_memop(s, size);
74
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
75
} else {
76
bool be = s->be_data == MO_BE;
77
TCGv_i64 tcg_hiaddr;
78
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
79
tmphi = tcg_temp_new_i64();
80
tcg_hiaddr = tcg_temp_new_i64();
81
82
+ mop = s->be_data | MO_Q;
83
+ tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
84
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
85
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
86
- tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
87
- s->be_data | MO_Q);
88
- tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
89
- s->be_data | MO_Q);
90
+ tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
91
+ get_mem_index(s), mop);
92
tcg_temp_free_i64(tcg_hiaddr);
93
}
94
95
--
96
2.20.1
97
98
diff view generated by jsdifflib
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
true, rn != 31, size);
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
false, rn != 31, size);
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
- disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+ rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
int size = extract32(insn, 30, 2);
TCGv_i64 clean_addr, dirty_addr;
bool is_store = false;
- bool is_signed = false;
bool extend = false;
bool iss_sf;
+ MemOp mop;

if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
unallocated_encoding(s);
return;
}

+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ mop = size | MO_ALIGN;
+
switch (opc) {
case 0: /* STLURB */
is_store = true;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- is_signed = true;
+ mop |= MO_SIGN;
break;
case 3: /* LDAPURS* 32-bit variant */
if (size > 1) {
unallocated_encoding(s);
return;
}
- is_signed = true;
+ mop |= MO_SIGN;
extend = true; /* zero-extend 32->64 after signed load */
break;
default:
g_assert_not_reached();
}

- iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

if (rn == 31) {
gen_check_sp_alignment(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
if (is_store) {
/* Store-Release semantics */
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
} else {
/*
* Load-AcquirePC semantics; we implement as the slightly more
* restrictive Load-Acquire.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
extend, true, rt, iss_sf, true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
- TCGv_i64 tcg_addr, int size, MemOp endian)
+ TCGv_i64 tcg_addr, MemOp mop)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();

- read_vec_element(s, tcg_tmp, srcidx, element, size);
- tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
+ read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);

tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
- TCGv_i64 tcg_addr, int size, MemOp endian)
+ TCGv_i64 tcg_addr, MemOp mop)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();

- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
- write_vec_element(s, tcg_tmp, destidx, element, size);
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
+ write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);

tcg_temp_free_i64(tcg_tmp);
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
for (xs = 0; xs < selem; xs++) {
int tt = (rt + r + xs) % 32;
if (is_store) {
- do_vec_st(s, tt, e, clean_addr, size, endian);
+ do_vec_st(s, tt, e, clean_addr, size | endian);
} else {
- do_vec_ld(s, tt, e, clean_addr, size, endian);
+ do_vec_ld(s, tt, e, clean_addr, size | endian);
}
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
} else {
/* Load/store one element per register */
if (is_load) {
- do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
+ do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
} else {
- do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
+ do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
}
}
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-30-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
bool is_postidx = extract32(insn, 23, 1);
bool is_q = extract32(insn, 30, 1);
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
- MemOp endian = s->be_data;
+ MemOp endian, align, mop;

int total; /* total bytes */
int elements; /* elements per vector */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
}

/* For our purposes, bytes are always little-endian. */
+ endian = s->be_data;
if (size == 0) {
endian = MO_LE;
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
* Consecutive little-endian elements from a single register
* can be promoted to a larger little-endian operation.
*/
+ align = MO_ALIGN;
if (selem == 1 && endian == MO_LE) {
+ align = pow2_align(size);
size = 3;
}
- elements = (is_q ? 16 : 8) >> size;
+ if (!s->align_mem) {
+ align = 0;
+ }
+ mop = endian | size | align;

+ elements = (is_q ? 16 : 8) >> size;
tcg_ebytes = tcg_const_i64(1 << size);
for (r = 0; r < rpt; r++) {
int e;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
for (xs = 0; xs < selem; xs++) {
int tt = (rt + r + xs) % 32;
if (is_store) {
- do_vec_st(s, tt, e, clean_addr, size | endian);
+ do_vec_st(s, tt, e, clean_addr, mop);
} else {
- do_vec_ld(s, tt, e, clean_addr, size | endian);
+ do_vec_ld(s, tt, e, clean_addr, mop);
}
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
}
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-sve.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
clean_addr = gen_mte_check1(s, temp, false, true, msz);

tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
- s->be_data | dtype_mop[a->dtype]);
+ finalize_memop(s, dtype_mop[a->dtype]));

/* Broadcast to *all* elements. */
tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
--
2.20.1

diff view generated by jsdifflib
Deleted patch
1
From: Cornelia Huck <cohuck@redhat.com>
2
1
3
Add 6.1 machine types for arm/i440fx/q35/s390x/spapr.
4
5
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
6
Acked-by: Greg Kurz <groug@kaod.org>
7
Message-id: 20210331111900.118274-1-cohuck@redhat.com
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
include/hw/boards.h | 3 +++
12
include/hw/i386/pc.h | 3 +++
13
hw/arm/virt.c | 7 ++++++-
14
hw/core/machine.c | 3 +++
15
hw/i386/pc.c | 3 +++
16
hw/i386/pc_piix.c | 14 +++++++++++++-
17
hw/i386/pc_q35.c | 13 ++++++++++++-
18
hw/ppc/spapr.c | 17 ++++++++++++++---
19
hw/s390x/s390-virtio-ccw.c | 14 +++++++++++++-
20
9 files changed, 70 insertions(+), 7 deletions(-)
21
22
diff --git a/include/hw/boards.h b/include/hw/boards.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/hw/boards.h
25
+++ b/include/hw/boards.h
26
@@ -XXX,XX +XXX,XX @@ struct MachineState {
27
} \
28
type_init(machine_initfn##_register_types)
29
30
+extern GlobalProperty hw_compat_6_0[];
31
+extern const size_t hw_compat_6_0_len;
32
+
33
extern GlobalProperty hw_compat_5_2[];
34
extern const size_t hw_compat_5_2_len;
35
36
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/include/hw/i386/pc.h
39
+++ b/include/hw/i386/pc.h
40
@@ -XXX,XX +XXX,XX @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
41
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
42
const CPUArchIdList *apic_ids, GArray *entry);
43
44
+extern GlobalProperty pc_compat_6_0[];
45
+extern const size_t pc_compat_6_0_len;
46
+
47
extern GlobalProperty pc_compat_5_2[];
48
extern const size_t pc_compat_5_2_len;
49
50
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/hw/arm/virt.c
53
+++ b/hw/arm/virt.c
54
@@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void)
55
}
56
type_init(machvirt_machine_init);
57
58
+static void virt_machine_6_1_options(MachineClass *mc)
59
+{
60
+}
61
+DEFINE_VIRT_MACHINE_AS_LATEST(6, 1)
62
+
63
static void virt_machine_6_0_options(MachineClass *mc)
64
{
65
}
66
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 0)
67
+DEFINE_VIRT_MACHINE(6, 0)
68
69
static void virt_machine_5_2_options(MachineClass *mc)
70
{
71
diff --git a/hw/core/machine.c b/hw/core/machine.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/hw/core/machine.c
74
+++ b/hw/core/machine.c
75
@@ -XXX,XX +XXX,XX @@
76
#include "hw/virtio/virtio.h"
77
#include "hw/virtio/virtio-pci.h"
78
79
+GlobalProperty hw_compat_6_0[] = {};
80
+const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
81
+
82
GlobalProperty hw_compat_5_2[] = {
83
{ "ICH9-LPC", "smm-compat", "on"},
84
{ "PIIX4_PM", "smm-compat", "on"},
85
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/hw/i386/pc.c
88
+++ b/hw/i386/pc.c
89
@@ -XXX,XX +XXX,XX @@
90
#include "trace.h"
91
#include CONFIG_DEVICES
92
93
+GlobalProperty pc_compat_6_0[] = {};
94
+const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0);
95
+
96
GlobalProperty pc_compat_5_2[] = {
97
{ "ICH9-LPC", "x-smi-cpu-hotunplug", "off" },
98
};
99
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/hw/i386/pc_piix.c
102
+++ b/hw/i386/pc_piix.c
103
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_machine_options(MachineClass *m)
104
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
105
}
106
107
-static void pc_i440fx_6_0_machine_options(MachineClass *m)
108
+static void pc_i440fx_6_1_machine_options(MachineClass *m)
109
{
110
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
111
pc_i440fx_machine_options(m);
112
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_6_0_machine_options(MachineClass *m)
113
pcmc->default_cpu_version = 1;
114
}
115
116
+DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL,
117
+ pc_i440fx_6_1_machine_options);
118
+
119
+static void pc_i440fx_6_0_machine_options(MachineClass *m)
120
+{
121
+ pc_i440fx_6_1_machine_options(m);
122
+ m->alias = NULL;
123
+ m->is_default = false;
124
+ compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
125
+ compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
126
+}
127
+
128
DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL,
129
pc_i440fx_6_0_machine_options);
130
131
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
132
index XXXXXXX..XXXXXXX 100644
133
--- a/hw/i386/pc_q35.c
134
+++ b/hw/i386/pc_q35.c
135
@@ -XXX,XX +XXX,XX @@ static void pc_q35_machine_options(MachineClass *m)
136
m->max_cpus = 288;
137
}
138
139
-static void pc_q35_6_0_machine_options(MachineClass *m)
140
+static void pc_q35_6_1_machine_options(MachineClass *m)
141
{
142
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
143
pc_q35_machine_options(m);
144
@@ -XXX,XX +XXX,XX @@ static void pc_q35_6_0_machine_options(MachineClass *m)
145
pcmc->default_cpu_version = 1;
146
}
147
148
+DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL,
149
+ pc_q35_6_1_machine_options);
150
+
151
+static void pc_q35_6_0_machine_options(MachineClass *m)
152
+{
153
+ pc_q35_6_1_machine_options(m);
154
+ m->alias = NULL;
155
+ compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
156
+ compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
157
+}
158
+
159
DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL,
160
pc_q35_6_0_machine_options);
161
162
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/hw/ppc/spapr.c
165
+++ b/hw/ppc/spapr.c
166
@@ -XXX,XX +XXX,XX @@ static void spapr_machine_latest_class_options(MachineClass *mc)
167
type_init(spapr_machine_register_##suffix)
168
169
/*
170
- * pseries-6.0
171
+ * pseries-6.1
172
*/
173
-static void spapr_machine_6_0_class_options(MachineClass *mc)
174
+static void spapr_machine_6_1_class_options(MachineClass *mc)
175
{
176
/* Defaults for the latest behaviour inherited from the base class */
177
}
178
179
-DEFINE_SPAPR_MACHINE(6_0, "6.0", true);
180
+DEFINE_SPAPR_MACHINE(6_1, "6.1", true);
181
+
182
+/*
183
+ * pseries-6.0
184
+ */
185
+static void spapr_machine_6_0_class_options(MachineClass *mc)
186
+{
187
+ spapr_machine_6_1_class_options(mc);
188
+ compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
189
+}
190
+
191
+DEFINE_SPAPR_MACHINE(6_0, "6.0", false);
192
193
/*
194
* pseries-5.2
195
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/hw/s390x/s390-virtio-ccw.c
198
+++ b/hw/s390x/s390-virtio-ccw.c
199
@@ -XXX,XX +XXX,XX @@ bool css_migration_enabled(void)
200
} \
201
type_init(ccw_machine_register_##suffix)
202
203
+static void ccw_machine_6_1_instance_options(MachineState *machine)
204
+{
205
+}
206
+
207
+static void ccw_machine_6_1_class_options(MachineClass *mc)
208
+{
209
+}
210
+DEFINE_CCW_MACHINE(6_1, "6.1", true);
211
+
212
static void ccw_machine_6_0_instance_options(MachineState *machine)
213
{
214
+ ccw_machine_6_1_instance_options(machine);
215
}
216
217
static void ccw_machine_6_0_class_options(MachineClass *mc)
218
{
219
+ ccw_machine_6_1_class_options(mc);
220
+ compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
221
}
222
-DEFINE_CCW_MACHINE(6_0, "6.0", true);
223
+DEFINE_CCW_MACHINE(6_0, "6.0", false);
224
225
static void ccw_machine_5_2_instance_options(MachineState *machine)
226
{
227
--
228
2.20.1
229
230
diff view generated by jsdifflib