1
target-arm queue. This has the "plumb txattrs through various
1
First arm pullreq of 4.2...
2
bits of exec.c" patches, and a collection of bug fixes from
3
various people.
4
2
5
thanks
3
thanks
6
-- PMM
4
-- PMM
7
5
6
The following changes since commit 27608c7c66bd923eb5e5faab80e795408cbe2b51:
8
7
9
8
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20190814a' into staging (2019-08-16 12:00:18 +0100)
10
The following changes since commit a3ac12fba028df90f7b3dbec924995c126c41022:
11
12
Merge remote-tracking branch 'remotes/ehabkost/tags/numa-next-pull-request' into staging (2018-05-31 11:12:36 +0100)
13
9
14
are available in the Git repository at:
10
are available in the Git repository at:
15
11
16
git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20180531
12
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190816
17
13
18
for you to fetch changes up to 49d1dca0520ea71bc21867fab6647f474fcf857b:
14
for you to fetch changes up to 664b7e3b97d6376f3329986c465b3782458b0f8b:
19
15
20
KVM: GIC: Fix memory leak due to calling kvm_init_irq_routing twice (2018-05-31 14:52:53 +0100)
16
target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word (2019-08-16 14:02:53 +0100)
21
17
22
----------------------------------------------------------------
18
----------------------------------------------------------------
23
target-arm queue:
19
target-arm queue:
24
* target/arm: Honour FPCR.FZ in FRECPX
20
* target/arm: generate a custom MIDR for -cpu max
25
* MAINTAINERS: Add entries for newer MPS2 boards and devices
21
* hw/misc/zynq_slcr: refactor to use standard register definition
26
* hw/intc/arm_gicv3: Fix APxR<n> register dispatching
22
* Set ENET_BD_BDU in I.MX FEC controller
27
* arm_gicv3_kvm: fix bug in writing zero bits back to the in-kernel
23
* target/arm: Fix routing of singlestep exceptions
28
GIC state
24
* refactor a32/t32 decoder handling of PC
29
* tcg: Fix helper function vs host abi for float16
25
* minor optimisations/cleanups of some a32/t32 codegen
30
* arm: fix qemu crash on startup with -bios option
26
* target/arm/cpu64: Ensure kvm really supports aarch64=off
31
* arm: fix malloc type mismatch
27
* target/arm/cpu: Ensure we can use the pmu with kvm
32
* xlnx-zdma: Correct mem leaks and memset to zero on desc unaligned errors
28
* target/arm: Minor cleanups preparatory to KVM SVE support
33
* Correct CPACR reset value for v7 cores
34
* memory.h: Improve IOMMU related documentation
35
* exec: Plumb transaction attributes through various functions in
36
preparation for allowing IOMMUs to see them
37
* vmstate.h: Provide VMSTATE_BOOL_SUB_ARRAY
38
* ARM: ACPI: Fix use-after-free due to memory realloc
39
* KVM: GIC: Fix memory leak due to calling kvm_init_irq_routing twice
40
29
41
----------------------------------------------------------------
30
----------------------------------------------------------------
42
Francisco Iglesias (1):
31
Aaron Hill (1):
43
xlnx-zdma: Correct mem leaks and memset to zero on desc unaligned errors
32
Set ENET_BD_BDU in I.MX FEC controller
44
33
45
Igor Mammedov (1):
34
Alex Bennée (1):
46
arm: fix qemu crash on startup with -bios option
35
target/arm: generate a custom MIDR for -cpu max
47
36
48
Jan Kiszka (1):
37
Andrew Jones (6):
49
hw/intc/arm_gicv3: Fix APxR<n> register dispatching
38
target/arm/cpu64: Ensure kvm really supports aarch64=off
39
target/arm/cpu: Ensure we can use the pmu with kvm
40
target/arm/helper: zcr: Add build bug next to value range assumption
41
target/arm/cpu: Use div-round-up to determine predicate register array size
42
target/arm/kvm64: Fix error returns
43
target/arm/kvm64: Move the get/put of fpsimd registers out
50
44
51
Paolo Bonzini (1):
45
Damien Hedde (1):
52
arm: fix malloc type mismatch
46
hw/misc/zynq_slcr: use standard register definition
53
47
54
Peter Maydell (17):
48
Peter Maydell (2):
55
target/arm: Honour FPCR.FZ in FRECPX
49
target/arm: Factor out 'generate singlestep exception' function
56
MAINTAINERS: Add entries for newer MPS2 boards and devices
50
target/arm: Fix routing of singlestep exceptions
57
Correct CPACR reset value for v7 cores
58
memory.h: Improve IOMMU related documentation
59
Make tb_invalidate_phys_addr() take a MemTxAttrs argument
60
Make address_space_translate{, _cached}() take a MemTxAttrs argument
61
Make address_space_map() take a MemTxAttrs argument
62
Make address_space_access_valid() take a MemTxAttrs argument
63
Make flatview_extend_translation() take a MemTxAttrs argument
64
Make memory_region_access_valid() take a MemTxAttrs argument
65
Make MemoryRegion valid.accepts callback take a MemTxAttrs argument
66
Make flatview_access_valid() take a MemTxAttrs argument
67
Make flatview_translate() take a MemTxAttrs argument
68
Make address_space_get_iotlb_entry() take a MemTxAttrs argument
69
Make flatview_do_translate() take a MemTxAttrs argument
70
Make address_space_translate_iommu take a MemTxAttrs argument
71
vmstate.h: Provide VMSTATE_BOOL_SUB_ARRAY
72
51
73
Richard Henderson (1):
52
Richard Henderson (18):
74
tcg: Fix helper function vs host abi for float16
53
target/arm: Pass in pc to thumb_insn_is_16bit
54
target/arm: Introduce pc_curr
55
target/arm: Introduce read_pc
56
target/arm: Introduce add_reg_for_lit
57
target/arm: Remove redundant s->pc & ~1
58
target/arm: Replace s->pc with s->base.pc_next
59
target/arm: Replace offset with pc in gen_exception_insn
60
target/arm: Replace offset with pc in gen_exception_internal_insn
61
target/arm: Remove offset argument to gen_exception_bkpt_insn
62
target/arm: Use unallocated_encoding for aarch32
63
target/arm: Remove helper_double_saturate
64
target/arm: Use tcg_gen_extract_i32 for shifter_out_im
65
target/arm: Use tcg_gen_deposit_i32 for PKHBT, PKHTB
66
target/arm: Remove redundant shift tests
67
target/arm: Use ror32 instead of open-coding the operation
68
target/arm: Use tcg_gen_rotri_i32 for gen_swap_half
69
target/arm: Simplify SMMLA, SMMLAR, SMMLS, SMMLSR
70
target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word
75
71
76
Shannon Zhao (3):
72
target/arm/cpu.h | 13 +-
77
arm_gicv3_kvm: increase clroffset accordingly
73
target/arm/helper.h | 1 -
78
ARM: ACPI: Fix use-after-free due to memory realloc
74
target/arm/kvm_arm.h | 28 ++
79
KVM: GIC: Fix memory leak due to calling kvm_init_irq_routing twice
75
target/arm/translate-a64.h | 4 +-
76
target/arm/translate.h | 39 ++-
77
hw/misc/zynq_slcr.c | 450 ++++++++++++++++----------------
78
hw/net/imx_fec.c | 4 +
79
target/arm/cpu.c | 30 ++-
80
target/arm/cpu64.c | 31 ++-
81
target/arm/helper.c | 7 +
82
target/arm/kvm.c | 7 +
83
target/arm/kvm64.c | 161 +++++++-----
84
target/arm/op_helper.c | 15 --
85
target/arm/translate-a64.c | 130 ++++------
86
target/arm/translate-vfp.inc.c | 45 +---
87
target/arm/translate.c | 572 +++++++++++++++++------------------------
88
16 files changed, 771 insertions(+), 766 deletions(-)
80
89
81
include/exec/exec-all.h | 5 +-
82
include/exec/helper-head.h | 2 +-
83
include/exec/memory-internal.h | 3 +-
84
include/exec/memory.h | 128 +++++++++++++++++++++++++++++++++++------
85
include/migration/vmstate.h | 3 +
86
include/sysemu/dma.h | 6 +-
87
accel/tcg/translate-all.c | 4 +-
88
exec.c | 95 ++++++++++++++++++------------
89
hw/arm/boot.c | 18 +++---
90
hw/arm/virt-acpi-build.c | 20 +++++--
91
hw/dma/xlnx-zdma.c | 10 +++-
92
hw/hppa/dino.c | 3 +-
93
hw/intc/arm_gic_kvm.c | 1 -
94
hw/intc/arm_gicv3_cpuif.c | 12 ++--
95
hw/intc/arm_gicv3_kvm.c | 2 +-
96
hw/nvram/fw_cfg.c | 12 ++--
97
hw/s390x/s390-pci-inst.c | 3 +-
98
hw/scsi/esp.c | 3 +-
99
hw/vfio/common.c | 3 +-
100
hw/virtio/vhost.c | 3 +-
101
hw/xen/xen_pt_msi.c | 3 +-
102
memory.c | 12 ++--
103
memory_ldst.inc.c | 18 +++---
104
target/arm/gdbstub.c | 3 +-
105
target/arm/helper-a64.c | 41 +++++++------
106
target/arm/helper.c | 90 ++++++++++++++++-------------
107
target/ppc/mmu-hash64.c | 3 +-
108
target/riscv/helper.c | 2 +-
109
target/s390x/diag.c | 6 +-
110
target/s390x/excp_helper.c | 3 +-
111
target/s390x/mmu_helper.c | 3 +-
112
target/s390x/sigp.c | 3 +-
113
target/xtensa/op_helper.c | 3 +-
114
MAINTAINERS | 9 ++-
115
34 files changed, 353 insertions(+), 182 deletions(-)
116
diff view generated by jsdifflib
1
From: Shannon Zhao <zhaoshenglong@huawei.com>
1
From: Alex Bennée <alex.bennee@linaro.org>
2
2
3
kvm_irqchip_create called by kvm_init will call kvm_init_irq_routing to
3
While most features are now detected by probing the ID_* registers
4
initialize global capability variables. If we call kvm_init_irq_routing in
4
kernels can (and do) use MIDR_EL1 for working out of they have to
5
GIC realize function, previous allocated memory will leak.
5
apply errata. This can trip up warnings in the kernel as it tries to
6
work out if it should apply workarounds to features that don't
7
actually exist in the reported CPU type.
6
8
7
Fix this by deleting the unnecessary call.
9
Avoid this problem by synthesising our own MIDR value.
8
10
9
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
11
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Eric Auger <eric.auger@redhat.com>
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 1527750994-14360-1-git-send-email-zhaoshenglong@huawei.com
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20190726113950.7499-1-alex.bennee@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
16
---
14
hw/intc/arm_gic_kvm.c | 1 -
17
target/arm/cpu.h | 6 ++++++
15
hw/intc/arm_gicv3_kvm.c | 1 -
18
target/arm/cpu64.c | 19 +++++++++++++++++++
16
2 files changed, 2 deletions(-)
19
2 files changed, 25 insertions(+)
17
20
18
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/intc/arm_gic_kvm.c
23
--- a/target/arm/cpu.h
21
+++ b/hw/intc/arm_gic_kvm.c
24
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
25
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
23
26
/*
24
if (kvm_has_gsi_routing()) {
27
* System register ID fields.
25
/* set up irq routing */
28
*/
26
- kvm_init_irq_routing(kvm_state);
29
+FIELD(MIDR_EL1, REVISION, 0, 4)
27
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
30
+FIELD(MIDR_EL1, PARTNUM, 4, 12)
28
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
31
+FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
29
}
32
+FIELD(MIDR_EL1, VARIANT, 20, 4)
30
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
33
+FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
34
+
35
FIELD(ID_ISAR0, SWAP, 0, 4)
36
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
37
FIELD(ID_ISAR0, BITFIELD, 8, 4)
38
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
31
index XXXXXXX..XXXXXXX 100644
39
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/intc/arm_gicv3_kvm.c
40
--- a/target/arm/cpu64.c
33
+++ b/hw/intc/arm_gicv3_kvm.c
41
+++ b/target/arm/cpu64.c
34
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
42
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
35
43
uint32_t u;
36
if (kvm_has_gsi_routing()) {
44
aarch64_a57_initfn(obj);
37
/* set up irq routing */
45
38
- kvm_init_irq_routing(kvm_state);
46
+ /*
39
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
47
+ * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
40
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
48
+ * one and try to apply errata workarounds or use impdef features we
41
}
49
+ * don't provide.
50
+ * An IMPLEMENTER field of 0 means "reserved for software use";
51
+ * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
52
+ * to see which features are present";
53
+ * the VARIANT, PARTNUM and REVISION fields are all implementation
54
+ * defined and we choose to define PARTNUM just in case guest
55
+ * code needs to distinguish this QEMU CPU from other software
56
+ * implementations, though this shouldn't be needed.
57
+ */
58
+ t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
59
+ t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
60
+ t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
61
+ t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
62
+ t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
63
+ cpu->midr = t;
64
+
65
t = cpu->isar.id_aa64isar0;
66
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
67
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
42
--
68
--
43
2.17.1
69
2.20.1
44
70
45
71
diff view generated by jsdifflib
1
The FRECPX instructions should (like most other floating point operations)
1
From: Damien Hedde <damien.hedde@greensocs.com>
2
honour the FPCR.FZ bit which specifies whether input denormals should
3
be flushed to zero (or FZ16 for the half-precision version).
4
We forgot to implement this, which doesn't affect the results (since
5
the calculation doesn't actually care about the mantissa bits) but did
6
mean we were failing to set the FPSR.IDC bit.
7
2
3
Replace the zynq_slcr registers enum and macros using the
4
hw/registerfields.h macros.
5
6
Signed-off-by: Damien Hedde <damien.hedde@greensocs.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20190729145654.14644-30-damien.hedde@greensocs.com
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20180521172712.19930-1-peter.maydell@linaro.org
11
---
11
---
12
target/arm/helper-a64.c | 6 ++++++
12
hw/misc/zynq_slcr.c | 450 ++++++++++++++++++++++----------------------
13
1 file changed, 6 insertions(+)
13
1 file changed, 225 insertions(+), 225 deletions(-)
14
14
15
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
15
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-a64.c
17
--- a/hw/misc/zynq_slcr.c
18
+++ b/target/arm/helper-a64.c
18
+++ b/hw/misc/zynq_slcr.c
19
@@ -XXX,XX +XXX,XX @@ float16 HELPER(frecpx_f16)(float16 a, void *fpstp)
19
@@ -XXX,XX +XXX,XX @@
20
return nan;
20
#include "sysemu/sysemu.h"
21
#include "qemu/log.h"
22
#include "qemu/module.h"
23
+#include "hw/registerfields.h"
24
25
#ifndef ZYNQ_SLCR_ERR_DEBUG
26
#define ZYNQ_SLCR_ERR_DEBUG 0
27
@@ -XXX,XX +XXX,XX @@
28
#define XILINX_LOCK_KEY 0x767b
29
#define XILINX_UNLOCK_KEY 0xdf0d
30
31
-#define R_PSS_RST_CTRL_SOFT_RST 0x1
32
+REG32(SCL, 0x000)
33
+REG32(LOCK, 0x004)
34
+REG32(UNLOCK, 0x008)
35
+REG32(LOCKSTA, 0x00c)
36
37
-enum {
38
- SCL = 0x000 / 4,
39
- LOCK,
40
- UNLOCK,
41
- LOCKSTA,
42
+REG32(ARM_PLL_CTRL, 0x100)
43
+REG32(DDR_PLL_CTRL, 0x104)
44
+REG32(IO_PLL_CTRL, 0x108)
45
+REG32(PLL_STATUS, 0x10c)
46
+REG32(ARM_PLL_CFG, 0x110)
47
+REG32(DDR_PLL_CFG, 0x114)
48
+REG32(IO_PLL_CFG, 0x118)
49
50
- ARM_PLL_CTRL = 0x100 / 4,
51
- DDR_PLL_CTRL,
52
- IO_PLL_CTRL,
53
- PLL_STATUS,
54
- ARM_PLL_CFG,
55
- DDR_PLL_CFG,
56
- IO_PLL_CFG,
57
-
58
- ARM_CLK_CTRL = 0x120 / 4,
59
- DDR_CLK_CTRL,
60
- DCI_CLK_CTRL,
61
- APER_CLK_CTRL,
62
- USB0_CLK_CTRL,
63
- USB1_CLK_CTRL,
64
- GEM0_RCLK_CTRL,
65
- GEM1_RCLK_CTRL,
66
- GEM0_CLK_CTRL,
67
- GEM1_CLK_CTRL,
68
- SMC_CLK_CTRL,
69
- LQSPI_CLK_CTRL,
70
- SDIO_CLK_CTRL,
71
- UART_CLK_CTRL,
72
- SPI_CLK_CTRL,
73
- CAN_CLK_CTRL,
74
- CAN_MIOCLK_CTRL,
75
- DBG_CLK_CTRL,
76
- PCAP_CLK_CTRL,
77
- TOPSW_CLK_CTRL,
78
+REG32(ARM_CLK_CTRL, 0x120)
79
+REG32(DDR_CLK_CTRL, 0x124)
80
+REG32(DCI_CLK_CTRL, 0x128)
81
+REG32(APER_CLK_CTRL, 0x12c)
82
+REG32(USB0_CLK_CTRL, 0x130)
83
+REG32(USB1_CLK_CTRL, 0x134)
84
+REG32(GEM0_RCLK_CTRL, 0x138)
85
+REG32(GEM1_RCLK_CTRL, 0x13c)
86
+REG32(GEM0_CLK_CTRL, 0x140)
87
+REG32(GEM1_CLK_CTRL, 0x144)
88
+REG32(SMC_CLK_CTRL, 0x148)
89
+REG32(LQSPI_CLK_CTRL, 0x14c)
90
+REG32(SDIO_CLK_CTRL, 0x150)
91
+REG32(UART_CLK_CTRL, 0x154)
92
+REG32(SPI_CLK_CTRL, 0x158)
93
+REG32(CAN_CLK_CTRL, 0x15c)
94
+REG32(CAN_MIOCLK_CTRL, 0x160)
95
+REG32(DBG_CLK_CTRL, 0x164)
96
+REG32(PCAP_CLK_CTRL, 0x168)
97
+REG32(TOPSW_CLK_CTRL, 0x16c)
98
99
#define FPGA_CTRL_REGS(n, start) \
100
- FPGA ## n ## _CLK_CTRL = (start) / 4, \
101
- FPGA ## n ## _THR_CTRL, \
102
- FPGA ## n ## _THR_CNT, \
103
- FPGA ## n ## _THR_STA,
104
- FPGA_CTRL_REGS(0, 0x170)
105
- FPGA_CTRL_REGS(1, 0x180)
106
- FPGA_CTRL_REGS(2, 0x190)
107
- FPGA_CTRL_REGS(3, 0x1a0)
108
+ REG32(FPGA ## n ## _CLK_CTRL, (start)) \
109
+ REG32(FPGA ## n ## _THR_CTRL, (start) + 0x4)\
110
+ REG32(FPGA ## n ## _THR_CNT, (start) + 0x8)\
111
+ REG32(FPGA ## n ## _THR_STA, (start) + 0xc)
112
+FPGA_CTRL_REGS(0, 0x170)
113
+FPGA_CTRL_REGS(1, 0x180)
114
+FPGA_CTRL_REGS(2, 0x190)
115
+FPGA_CTRL_REGS(3, 0x1a0)
116
117
- BANDGAP_TRIP = 0x1b8 / 4,
118
- PLL_PREDIVISOR = 0x1c0 / 4,
119
- CLK_621_TRUE,
120
+REG32(BANDGAP_TRIP, 0x1b8)
121
+REG32(PLL_PREDIVISOR, 0x1c0)
122
+REG32(CLK_621_TRUE, 0x1c4)
123
124
- PSS_RST_CTRL = 0x200 / 4,
125
- DDR_RST_CTRL,
126
- TOPSW_RESET_CTRL,
127
- DMAC_RST_CTRL,
128
- USB_RST_CTRL,
129
- GEM_RST_CTRL,
130
- SDIO_RST_CTRL,
131
- SPI_RST_CTRL,
132
- CAN_RST_CTRL,
133
- I2C_RST_CTRL,
134
- UART_RST_CTRL,
135
- GPIO_RST_CTRL,
136
- LQSPI_RST_CTRL,
137
- SMC_RST_CTRL,
138
- OCM_RST_CTRL,
139
- FPGA_RST_CTRL = 0x240 / 4,
140
- A9_CPU_RST_CTRL,
141
+REG32(PSS_RST_CTRL, 0x200)
142
+ FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1)
143
+REG32(DDR_RST_CTRL, 0x204)
144
+REG32(TOPSW_RESET_CTRL, 0x208)
145
+REG32(DMAC_RST_CTRL, 0x20c)
146
+REG32(USB_RST_CTRL, 0x210)
147
+REG32(GEM_RST_CTRL, 0x214)
148
+REG32(SDIO_RST_CTRL, 0x218)
149
+REG32(SPI_RST_CTRL, 0x21c)
150
+REG32(CAN_RST_CTRL, 0x220)
151
+REG32(I2C_RST_CTRL, 0x224)
152
+REG32(UART_RST_CTRL, 0x228)
153
+REG32(GPIO_RST_CTRL, 0x22c)
154
+REG32(LQSPI_RST_CTRL, 0x230)
155
+REG32(SMC_RST_CTRL, 0x234)
156
+REG32(OCM_RST_CTRL, 0x238)
157
+REG32(FPGA_RST_CTRL, 0x240)
158
+REG32(A9_CPU_RST_CTRL, 0x244)
159
160
- RS_AWDT_CTRL = 0x24c / 4,
161
- RST_REASON,
162
+REG32(RS_AWDT_CTRL, 0x24c)
163
+REG32(RST_REASON, 0x250)
164
165
- REBOOT_STATUS = 0x258 / 4,
166
- BOOT_MODE,
167
+REG32(REBOOT_STATUS, 0x258)
168
+REG32(BOOT_MODE, 0x25c)
169
170
- APU_CTRL = 0x300 / 4,
171
- WDT_CLK_SEL,
172
+REG32(APU_CTRL, 0x300)
173
+REG32(WDT_CLK_SEL, 0x304)
174
175
- TZ_DMA_NS = 0x440 / 4,
176
- TZ_DMA_IRQ_NS,
177
- TZ_DMA_PERIPH_NS,
178
+REG32(TZ_DMA_NS, 0x440)
179
+REG32(TZ_DMA_IRQ_NS, 0x444)
180
+REG32(TZ_DMA_PERIPH_NS, 0x448)
181
182
- PSS_IDCODE = 0x530 / 4,
183
+REG32(PSS_IDCODE, 0x530)
184
185
- DDR_URGENT = 0x600 / 4,
186
- DDR_CAL_START = 0x60c / 4,
187
- DDR_REF_START = 0x614 / 4,
188
- DDR_CMD_STA,
189
- DDR_URGENT_SEL,
190
- DDR_DFI_STATUS,
191
+REG32(DDR_URGENT, 0x600)
192
+REG32(DDR_CAL_START, 0x60c)
193
+REG32(DDR_REF_START, 0x614)
194
+REG32(DDR_CMD_STA, 0x618)
195
+REG32(DDR_URGENT_SEL, 0x61c)
196
+REG32(DDR_DFI_STATUS, 0x620)
197
198
- MIO = 0x700 / 4,
199
+REG32(MIO, 0x700)
200
#define MIO_LENGTH 54
201
202
- MIO_LOOPBACK = 0x804 / 4,
203
- MIO_MST_TRI0,
204
- MIO_MST_TRI1,
205
+REG32(MIO_LOOPBACK, 0x804)
206
+REG32(MIO_MST_TRI0, 0x808)
207
+REG32(MIO_MST_TRI1, 0x80c)
208
209
- SD0_WP_CD_SEL = 0x830 / 4,
210
- SD1_WP_CD_SEL,
211
+REG32(SD0_WP_CD_SEL, 0x830)
212
+REG32(SD1_WP_CD_SEL, 0x834)
213
214
- LVL_SHFTR_EN = 0x900 / 4,
215
- OCM_CFG = 0x910 / 4,
216
+REG32(LVL_SHFTR_EN, 0x900)
217
+REG32(OCM_CFG, 0x910)
218
219
- CPU_RAM = 0xa00 / 4,
220
+REG32(CPU_RAM, 0xa00)
221
222
- IOU = 0xa30 / 4,
223
+REG32(IOU, 0xa30)
224
225
- DMAC_RAM = 0xa50 / 4,
226
+REG32(DMAC_RAM, 0xa50)
227
228
- AFI0 = 0xa60 / 4,
229
- AFI1 = AFI0 + 3,
230
- AFI2 = AFI1 + 3,
231
- AFI3 = AFI2 + 3,
232
+REG32(AFI0, 0xa60)
233
+REG32(AFI1, 0xa6c)
234
+REG32(AFI2, 0xa78)
235
+REG32(AFI3, 0xa84)
236
#define AFI_LENGTH 3
237
238
- OCM = 0xa90 / 4,
239
+REG32(OCM, 0xa90)
240
241
- DEVCI_RAM = 0xaa0 / 4,
242
+REG32(DEVCI_RAM, 0xaa0)
243
244
- CSG_RAM = 0xab0 / 4,
245
+REG32(CSG_RAM, 0xab0)
246
247
- GPIOB_CTRL = 0xb00 / 4,
248
- GPIOB_CFG_CMOS18,
249
- GPIOB_CFG_CMOS25,
250
- GPIOB_CFG_CMOS33,
251
- GPIOB_CFG_HSTL = 0xb14 / 4,
252
- GPIOB_DRVR_BIAS_CTRL,
253
+REG32(GPIOB_CTRL, 0xb00)
254
+REG32(GPIOB_CFG_CMOS18, 0xb04)
255
+REG32(GPIOB_CFG_CMOS25, 0xb08)
256
+REG32(GPIOB_CFG_CMOS33, 0xb0c)
257
+REG32(GPIOB_CFG_HSTL, 0xb14)
258
+REG32(GPIOB_DRVR_BIAS_CTRL, 0xb18)
259
260
- DDRIOB = 0xb40 / 4,
261
+REG32(DDRIOB, 0xb40)
262
#define DDRIOB_LENGTH 14
263
-};
264
265
#define ZYNQ_SLCR_MMIO_SIZE 0x1000
266
#define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4)
267
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_reset(DeviceState *d)
268
269
DB_PRINT("RESET\n");
270
271
- s->regs[LOCKSTA] = 1;
272
+ s->regs[R_LOCKSTA] = 1;
273
/* 0x100 - 0x11C */
274
- s->regs[ARM_PLL_CTRL] = 0x0001A008;
275
- s->regs[DDR_PLL_CTRL] = 0x0001A008;
276
- s->regs[IO_PLL_CTRL] = 0x0001A008;
277
- s->regs[PLL_STATUS] = 0x0000003F;
278
- s->regs[ARM_PLL_CFG] = 0x00014000;
279
- s->regs[DDR_PLL_CFG] = 0x00014000;
280
- s->regs[IO_PLL_CFG] = 0x00014000;
281
+ s->regs[R_ARM_PLL_CTRL] = 0x0001A008;
282
+ s->regs[R_DDR_PLL_CTRL] = 0x0001A008;
283
+ s->regs[R_IO_PLL_CTRL] = 0x0001A008;
284
+ s->regs[R_PLL_STATUS] = 0x0000003F;
285
+ s->regs[R_ARM_PLL_CFG] = 0x00014000;
286
+ s->regs[R_DDR_PLL_CFG] = 0x00014000;
287
+ s->regs[R_IO_PLL_CFG] = 0x00014000;
288
289
/* 0x120 - 0x16C */
290
- s->regs[ARM_CLK_CTRL] = 0x1F000400;
291
- s->regs[DDR_CLK_CTRL] = 0x18400003;
292
- s->regs[DCI_CLK_CTRL] = 0x01E03201;
293
- s->regs[APER_CLK_CTRL] = 0x01FFCCCD;
294
- s->regs[USB0_CLK_CTRL] = s->regs[USB1_CLK_CTRL] = 0x00101941;
295
- s->regs[GEM0_RCLK_CTRL] = s->regs[GEM1_RCLK_CTRL] = 0x00000001;
296
- s->regs[GEM0_CLK_CTRL] = s->regs[GEM1_CLK_CTRL] = 0x00003C01;
297
- s->regs[SMC_CLK_CTRL] = 0x00003C01;
298
- s->regs[LQSPI_CLK_CTRL] = 0x00002821;
299
- s->regs[SDIO_CLK_CTRL] = 0x00001E03;
300
- s->regs[UART_CLK_CTRL] = 0x00003F03;
301
- s->regs[SPI_CLK_CTRL] = 0x00003F03;
302
- s->regs[CAN_CLK_CTRL] = 0x00501903;
303
- s->regs[DBG_CLK_CTRL] = 0x00000F03;
304
- s->regs[PCAP_CLK_CTRL] = 0x00000F01;
305
+ s->regs[R_ARM_CLK_CTRL] = 0x1F000400;
306
+ s->regs[R_DDR_CLK_CTRL] = 0x18400003;
307
+ s->regs[R_DCI_CLK_CTRL] = 0x01E03201;
308
+ s->regs[R_APER_CLK_CTRL] = 0x01FFCCCD;
309
+ s->regs[R_USB0_CLK_CTRL] = s->regs[R_USB1_CLK_CTRL] = 0x00101941;
310
+ s->regs[R_GEM0_RCLK_CTRL] = s->regs[R_GEM1_RCLK_CTRL] = 0x00000001;
311
+ s->regs[R_GEM0_CLK_CTRL] = s->regs[R_GEM1_CLK_CTRL] = 0x00003C01;
312
+ s->regs[R_SMC_CLK_CTRL] = 0x00003C01;
313
+ s->regs[R_LQSPI_CLK_CTRL] = 0x00002821;
314
+ s->regs[R_SDIO_CLK_CTRL] = 0x00001E03;
315
+ s->regs[R_UART_CLK_CTRL] = 0x00003F03;
316
+ s->regs[R_SPI_CLK_CTRL] = 0x00003F03;
317
+ s->regs[R_CAN_CLK_CTRL] = 0x00501903;
318
+ s->regs[R_DBG_CLK_CTRL] = 0x00000F03;
319
+ s->regs[R_PCAP_CLK_CTRL] = 0x00000F01;
320
321
/* 0x170 - 0x1AC */
322
- s->regs[FPGA0_CLK_CTRL] = s->regs[FPGA1_CLK_CTRL] = s->regs[FPGA2_CLK_CTRL]
323
- = s->regs[FPGA3_CLK_CTRL] = 0x00101800;
324
- s->regs[FPGA0_THR_STA] = s->regs[FPGA1_THR_STA] = s->regs[FPGA2_THR_STA]
325
- = s->regs[FPGA3_THR_STA] = 0x00010000;
326
+ s->regs[R_FPGA0_CLK_CTRL] = s->regs[R_FPGA1_CLK_CTRL]
327
+ = s->regs[R_FPGA2_CLK_CTRL]
328
+ = s->regs[R_FPGA3_CLK_CTRL] = 0x00101800;
329
+ s->regs[R_FPGA0_THR_STA] = s->regs[R_FPGA1_THR_STA]
330
+ = s->regs[R_FPGA2_THR_STA]
331
+ = s->regs[R_FPGA3_THR_STA] = 0x00010000;
332
333
/* 0x1B0 - 0x1D8 */
334
- s->regs[BANDGAP_TRIP] = 0x0000001F;
335
- s->regs[PLL_PREDIVISOR] = 0x00000001;
336
- s->regs[CLK_621_TRUE] = 0x00000001;
337
+ s->regs[R_BANDGAP_TRIP] = 0x0000001F;
338
+ s->regs[R_PLL_PREDIVISOR] = 0x00000001;
339
+ s->regs[R_CLK_621_TRUE] = 0x00000001;
340
341
/* 0x200 - 0x25C */
342
- s->regs[FPGA_RST_CTRL] = 0x01F33F0F;
343
- s->regs[RST_REASON] = 0x00000040;
344
+ s->regs[R_FPGA_RST_CTRL] = 0x01F33F0F;
345
+ s->regs[R_RST_REASON] = 0x00000040;
346
347
- s->regs[BOOT_MODE] = 0x00000001;
348
+ s->regs[R_BOOT_MODE] = 0x00000001;
349
350
/* 0x700 - 0x7D4 */
351
for (i = 0; i < 54; i++) {
352
- s->regs[MIO + i] = 0x00001601;
353
+ s->regs[R_MIO + i] = 0x00001601;
21
}
354
}
22
355
for (i = 2; i <= 8; i++) {
23
+ a = float16_squash_input_denormal(a, fpst);
356
- s->regs[MIO + i] = 0x00000601;
24
+
357
+ s->regs[R_MIO + i] = 0x00000601;
25
val16 = float16_val(a);
26
sbit = 0x8000 & val16;
27
exp = extract32(val16, 10, 5);
28
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
29
return nan;
30
}
358
}
31
359
32
+ a = float32_squash_input_denormal(a, fpst);
360
- s->regs[MIO_MST_TRI0] = s->regs[MIO_MST_TRI1] = 0xFFFFFFFF;
33
+
361
+ s->regs[R_MIO_MST_TRI0] = s->regs[R_MIO_MST_TRI1] = 0xFFFFFFFF;
34
val32 = float32_val(a);
362
35
sbit = 0x80000000ULL & val32;
363
- s->regs[CPU_RAM + 0] = s->regs[CPU_RAM + 1] = s->regs[CPU_RAM + 3]
36
exp = extract32(val32, 23, 8);
364
- = s->regs[CPU_RAM + 4] = s->regs[CPU_RAM + 7]
37
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
365
- = 0x00010101;
38
return nan;
366
- s->regs[CPU_RAM + 2] = s->regs[CPU_RAM + 5] = 0x01010101;
367
- s->regs[CPU_RAM + 6] = 0x00000001;
368
+ s->regs[R_CPU_RAM + 0] = s->regs[R_CPU_RAM + 1] = s->regs[R_CPU_RAM + 3]
369
+ = s->regs[R_CPU_RAM + 4] = s->regs[R_CPU_RAM + 7]
370
+ = 0x00010101;
371
+ s->regs[R_CPU_RAM + 2] = s->regs[R_CPU_RAM + 5] = 0x01010101;
372
+ s->regs[R_CPU_RAM + 6] = 0x00000001;
373
374
- s->regs[IOU + 0] = s->regs[IOU + 1] = s->regs[IOU + 2] = s->regs[IOU + 3]
375
- = 0x09090909;
376
- s->regs[IOU + 4] = s->regs[IOU + 5] = 0x00090909;
377
- s->regs[IOU + 6] = 0x00000909;
378
+ s->regs[R_IOU + 0] = s->regs[R_IOU + 1] = s->regs[R_IOU + 2]
379
+ = s->regs[R_IOU + 3] = 0x09090909;
380
+ s->regs[R_IOU + 4] = s->regs[R_IOU + 5] = 0x00090909;
381
+ s->regs[R_IOU + 6] = 0x00000909;
382
383
- s->regs[DMAC_RAM] = 0x00000009;
384
+ s->regs[R_DMAC_RAM] = 0x00000009;
385
386
- s->regs[AFI0 + 0] = s->regs[AFI0 + 1] = 0x09090909;
387
- s->regs[AFI1 + 0] = s->regs[AFI1 + 1] = 0x09090909;
388
- s->regs[AFI2 + 0] = s->regs[AFI2 + 1] = 0x09090909;
389
- s->regs[AFI3 + 0] = s->regs[AFI3 + 1] = 0x09090909;
390
- s->regs[AFI0 + 2] = s->regs[AFI1 + 2] = s->regs[AFI2 + 2]
391
- = s->regs[AFI3 + 2] = 0x00000909;
392
+ s->regs[R_AFI0 + 0] = s->regs[R_AFI0 + 1] = 0x09090909;
393
+ s->regs[R_AFI1 + 0] = s->regs[R_AFI1 + 1] = 0x09090909;
394
+ s->regs[R_AFI2 + 0] = s->regs[R_AFI2 + 1] = 0x09090909;
395
+ s->regs[R_AFI3 + 0] = s->regs[R_AFI3 + 1] = 0x09090909;
396
+ s->regs[R_AFI0 + 2] = s->regs[R_AFI1 + 2] = s->regs[R_AFI2 + 2]
397
+ = s->regs[R_AFI3 + 2] = 0x00000909;
398
399
- s->regs[OCM + 0] = 0x01010101;
400
- s->regs[OCM + 1] = s->regs[OCM + 2] = 0x09090909;
401
+ s->regs[R_OCM + 0] = 0x01010101;
402
+ s->regs[R_OCM + 1] = s->regs[R_OCM + 2] = 0x09090909;
403
404
- s->regs[DEVCI_RAM] = 0x00000909;
405
- s->regs[CSG_RAM] = 0x00000001;
406
+ s->regs[R_DEVCI_RAM] = 0x00000909;
407
+ s->regs[R_CSG_RAM] = 0x00000001;
408
409
- s->regs[DDRIOB + 0] = s->regs[DDRIOB + 1] = s->regs[DDRIOB + 2]
410
- = s->regs[DDRIOB + 3] = 0x00000e00;
411
- s->regs[DDRIOB + 4] = s->regs[DDRIOB + 5] = s->regs[DDRIOB + 6]
412
- = 0x00000e00;
413
- s->regs[DDRIOB + 12] = 0x00000021;
414
+ s->regs[R_DDRIOB + 0] = s->regs[R_DDRIOB + 1] = s->regs[R_DDRIOB + 2]
415
+ = s->regs[R_DDRIOB + 3] = 0x00000e00;
416
+ s->regs[R_DDRIOB + 4] = s->regs[R_DDRIOB + 5] = s->regs[R_DDRIOB + 6]
417
+ = 0x00000e00;
418
+ s->regs[R_DDRIOB + 12] = 0x00000021;
419
}
420
421
422
static bool zynq_slcr_check_offset(hwaddr offset, bool rnw)
423
{
424
switch (offset) {
425
- case LOCK:
426
- case UNLOCK:
427
- case DDR_CAL_START:
428
- case DDR_REF_START:
429
+ case R_LOCK:
430
+ case R_UNLOCK:
431
+ case R_DDR_CAL_START:
432
+ case R_DDR_REF_START:
433
return !rnw; /* Write only */
434
- case LOCKSTA:
435
- case FPGA0_THR_STA:
436
- case FPGA1_THR_STA:
437
- case FPGA2_THR_STA:
438
- case FPGA3_THR_STA:
439
- case BOOT_MODE:
440
- case PSS_IDCODE:
441
- case DDR_CMD_STA:
442
- case DDR_DFI_STATUS:
443
- case PLL_STATUS:
444
+ case R_LOCKSTA:
445
+ case R_FPGA0_THR_STA:
446
+ case R_FPGA1_THR_STA:
447
+ case R_FPGA2_THR_STA:
448
+ case R_FPGA3_THR_STA:
449
+ case R_BOOT_MODE:
450
+ case R_PSS_IDCODE:
451
+ case R_DDR_CMD_STA:
452
+ case R_DDR_DFI_STATUS:
453
+ case R_PLL_STATUS:
454
return rnw;/* read only */
455
- case SCL:
456
- case ARM_PLL_CTRL ... IO_PLL_CTRL:
457
- case ARM_PLL_CFG ... IO_PLL_CFG:
458
- case ARM_CLK_CTRL ... TOPSW_CLK_CTRL:
459
- case FPGA0_CLK_CTRL ... FPGA0_THR_CNT:
460
- case FPGA1_CLK_CTRL ... FPGA1_THR_CNT:
461
- case FPGA2_CLK_CTRL ... FPGA2_THR_CNT:
462
- case FPGA3_CLK_CTRL ... FPGA3_THR_CNT:
463
- case BANDGAP_TRIP:
464
- case PLL_PREDIVISOR:
465
- case CLK_621_TRUE:
466
- case PSS_RST_CTRL ... A9_CPU_RST_CTRL:
467
- case RS_AWDT_CTRL:
468
- case RST_REASON:
469
- case REBOOT_STATUS:
470
- case APU_CTRL:
471
- case WDT_CLK_SEL:
472
- case TZ_DMA_NS ... TZ_DMA_PERIPH_NS:
473
- case DDR_URGENT:
474
- case DDR_URGENT_SEL:
475
- case MIO ... MIO + MIO_LENGTH - 1:
476
- case MIO_LOOPBACK ... MIO_MST_TRI1:
477
- case SD0_WP_CD_SEL:
478
- case SD1_WP_CD_SEL:
479
- case LVL_SHFTR_EN:
480
- case OCM_CFG:
481
- case CPU_RAM:
482
- case IOU:
483
- case DMAC_RAM:
484
- case AFI0 ... AFI3 + AFI_LENGTH - 1:
485
- case OCM:
486
- case DEVCI_RAM:
487
- case CSG_RAM:
488
- case GPIOB_CTRL ... GPIOB_CFG_CMOS33:
489
- case GPIOB_CFG_HSTL:
490
- case GPIOB_DRVR_BIAS_CTRL:
491
- case DDRIOB ... DDRIOB + DDRIOB_LENGTH - 1:
492
+ case R_SCL:
493
+ case R_ARM_PLL_CTRL ... R_IO_PLL_CTRL:
494
+ case R_ARM_PLL_CFG ... R_IO_PLL_CFG:
495
+ case R_ARM_CLK_CTRL ... R_TOPSW_CLK_CTRL:
496
+ case R_FPGA0_CLK_CTRL ... R_FPGA0_THR_CNT:
497
+ case R_FPGA1_CLK_CTRL ... R_FPGA1_THR_CNT:
498
+ case R_FPGA2_CLK_CTRL ... R_FPGA2_THR_CNT:
499
+ case R_FPGA3_CLK_CTRL ... R_FPGA3_THR_CNT:
500
+ case R_BANDGAP_TRIP:
501
+ case R_PLL_PREDIVISOR:
502
+ case R_CLK_621_TRUE:
503
+ case R_PSS_RST_CTRL ... R_A9_CPU_RST_CTRL:
504
+ case R_RS_AWDT_CTRL:
505
+ case R_RST_REASON:
506
+ case R_REBOOT_STATUS:
507
+ case R_APU_CTRL:
508
+ case R_WDT_CLK_SEL:
509
+ case R_TZ_DMA_NS ... R_TZ_DMA_PERIPH_NS:
510
+ case R_DDR_URGENT:
511
+ case R_DDR_URGENT_SEL:
512
+ case R_MIO ... R_MIO + MIO_LENGTH - 1:
513
+ case R_MIO_LOOPBACK ... R_MIO_MST_TRI1:
514
+ case R_SD0_WP_CD_SEL:
515
+ case R_SD1_WP_CD_SEL:
516
+ case R_LVL_SHFTR_EN:
517
+ case R_OCM_CFG:
518
+ case R_CPU_RAM:
519
+ case R_IOU:
520
+ case R_DMAC_RAM:
521
+ case R_AFI0 ... R_AFI3 + AFI_LENGTH - 1:
522
+ case R_OCM:
523
+ case R_DEVCI_RAM:
524
+ case R_CSG_RAM:
525
+ case R_GPIOB_CTRL ... R_GPIOB_CFG_CMOS33:
526
+ case R_GPIOB_CFG_HSTL:
527
+ case R_GPIOB_DRVR_BIAS_CTRL:
528
+ case R_DDRIOB ... R_DDRIOB + DDRIOB_LENGTH - 1:
529
return true;
530
default:
531
return false;
532
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
39
}
533
}
40
534
41
+ a = float64_squash_input_denormal(a, fpst);
535
switch (offset) {
42
+
536
- case SCL:
43
val64 = float64_val(a);
537
- s->regs[SCL] = val & 0x1;
44
sbit = 0x8000000000000000ULL & val64;
538
+ case R_SCL:
45
exp = extract64(float64_val(a), 52, 11);
539
+ s->regs[R_SCL] = val & 0x1;
540
return;
541
- case LOCK:
542
+ case R_LOCK:
543
if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
544
DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
545
(unsigned)val & 0xFFFF);
546
- s->regs[LOCKSTA] = 1;
547
+ s->regs[R_LOCKSTA] = 1;
548
} else {
549
DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
550
(int)offset, (unsigned)val & 0xFFFF);
551
}
552
return;
553
- case UNLOCK:
554
+ case R_UNLOCK:
555
if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
556
DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
557
(unsigned)val & 0xFFFF);
558
- s->regs[LOCKSTA] = 0;
559
+ s->regs[R_LOCKSTA] = 0;
560
} else {
561
DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
562
(int)offset, (unsigned)val & 0xFFFF);
563
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
564
return;
565
}
566
567
- if (s->regs[LOCKSTA]) {
568
+ if (s->regs[R_LOCKSTA]) {
569
qemu_log_mask(LOG_GUEST_ERROR,
570
"SCLR registers are locked. Unlock them first\n");
571
return;
572
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
573
s->regs[offset] = val;
574
575
switch (offset) {
576
- case PSS_RST_CTRL:
577
- if (val & R_PSS_RST_CTRL_SOFT_RST) {
578
+ case R_PSS_RST_CTRL:
579
+ if (FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST)) {
580
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
581
}
582
break;
46
--
583
--
47
2.17.1
584
2.20.1
48
585
49
586
diff view generated by jsdifflib
1
Provide a VMSTATE_BOOL_SUB_ARRAY to go with VMSTATE_UINT8_SUB_ARRAY
1
From: Aaron Hill <aa1ronham@gmail.com>
2
and friends.
3
2
3
This commit properly sets the ENET_BD_BDU flag once the emulated FEC controller
4
has finished processing the last descriptor. This is done for both transmit
5
and receive descriptors.
6
7
This allows the QNX 7.0.0 BSP for the Sabrelite board (which can be
8
found at http://blackberry.qnx.com/en/developers/bsp) to properly
9
control the FEC. Without this patch, the BSP ethernet driver will never
10
re-use FEC descriptors, as the unset ENET_BD_BDU flag will cause
11
it to believe that the descriptors are still in use by the NIC.
12
13
Note that Linux does not appear to use this field at all, and is
14
unaffected by this patch.
15
16
Without this patch, QNX will think that the NIC is still processing its
17
transaction descriptors, and won't send any more data over the network.
18
19
For reference:
20
21
On page 1192 of the I.MX 6DQ reference manual revision (Rev. 5, 06/2018),
22
which can be found at https://www.nxp.com/products/processors-and-microcontrollers/arm-based-processors-and-mcus/i.mx-applications-processors/i.mx-6-processors/i.mx-6quad-processors-high-performance-3d-graphics-hd-video-arm-cortex-a9-core:i.MX6Q?&tab=Documentation_Tab&linkline=Application-Note
23
24
the 'BDU' field is described as follows for the 'Enhanced transmit
25
buffer descriptor':
26
27
'Last buffer descriptor update done. Indicates that the last BD data has been updated by
28
uDMA. This field is written by the user (=0) and uDMA (=1).'
29
30
The same description is used for the receive buffer descriptor.
31
32
Signed-off-by: Aaron Hill <aa1ronham@gmail.com>
33
Message-id: 20190805142417.10433-1-aaron.hill@alertinnovation.com
34
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
35
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Message-id: 20180521140402.23318-23-peter.maydell@linaro.org
7
---
36
---
8
include/migration/vmstate.h | 3 +++
37
hw/net/imx_fec.c | 4 ++++
9
1 file changed, 3 insertions(+)
38
1 file changed, 4 insertions(+)
10
39
11
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
40
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
12
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
13
--- a/include/migration/vmstate.h
42
--- a/hw/net/imx_fec.c
14
+++ b/include/migration/vmstate.h
43
+++ b/hw/net/imx_fec.c
15
@@ -XXX,XX +XXX,XX @@ extern const VMStateInfo vmstate_info_qtailq;
44
@@ -XXX,XX +XXX,XX @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
16
#define VMSTATE_BOOL_ARRAY(_f, _s, _n) \
45
if (bd.option & ENET_BD_TX_INT) {
17
VMSTATE_BOOL_ARRAY_V(_f, _s, _n, 0)
46
s->regs[ENET_EIR] |= int_txf;
18
47
}
19
+#define VMSTATE_BOOL_SUB_ARRAY(_f, _s, _start, _num) \
48
+ /* Indicate that we've updated the last buffer descriptor. */
20
+ VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_bool, bool)
49
+ bd.last_buffer = ENET_BD_BDU;
21
+
50
}
22
#define VMSTATE_UINT16_ARRAY_V(_f, _s, _n, _v) \
51
if (bd.option & ENET_BD_TX_INT) {
23
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint16, uint16_t)
52
s->regs[ENET_EIR] |= int_txb;
24
53
@@ -XXX,XX +XXX,XX @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
54
/* Last buffer in frame. */
55
bd.flags |= flags | ENET_BD_L;
56
FEC_PRINTF("rx frame flags %04x\n", bd.flags);
57
+ /* Indicate that we've updated the last buffer descriptor. */
58
+ bd.last_buffer = ENET_BD_BDU;
59
if (bd.option & ENET_BD_RX_INT) {
60
s->regs[ENET_EIR] |= ENET_INT_RXF;
61
}
25
--
62
--
26
2.17.1
63
2.20.1
27
64
28
65
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
Factor out code to 'generate a singlestep exception', which is
2
add MemTxAttrs as an argument to address_space_access_valid().
2
currently repeated in four places.
3
Its callers either have an attrs value to hand, or don't care
3
4
and can use MEMTXATTRS_UNSPECIFIED.
4
To do this we need to also pull the identical copies of the
5
gen-exception() function out of translate-a64.c and translate.c
6
into translate.h.
7
8
(There is a bug in the code: we're taking the exception to the wrong
9
target EL. This will be simpler to fix if there's only one place to
10
do it.)
5
11
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20190805130952.4415-2-peter.maydell@linaro.org
9
Message-id: 20180521140402.23318-6-peter.maydell@linaro.org
10
---
16
---
11
include/exec/memory.h | 4 +++-
17
target/arm/translate.h | 23 +++++++++++++++++++++++
12
include/sysemu/dma.h | 3 ++-
18
target/arm/translate-a64.c | 19 ++-----------------
13
exec.c | 3 ++-
19
target/arm/translate.c | 20 ++------------------
14
target/s390x/diag.c | 6 ++++--
20
3 files changed, 27 insertions(+), 35 deletions(-)
15
target/s390x/excp_helper.c | 3 ++-
16
target/s390x/mmu_helper.c | 3 ++-
17
target/s390x/sigp.c | 3 ++-
18
7 files changed, 17 insertions(+), 8 deletions(-)
19
21
20
diff --git a/include/exec/memory.h b/include/exec/memory.h
22
diff --git a/target/arm/translate.h b/target/arm/translate.h
21
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/memory.h
24
--- a/target/arm/translate.h
23
+++ b/include/exec/memory.h
25
+++ b/target/arm/translate.h
24
@@ -XXX,XX +XXX,XX @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
26
@@ -XXX,XX +XXX,XX @@
25
* @addr: address within that address space
27
#define TARGET_ARM_TRANSLATE_H
26
* @len: length of the area to be checked
28
27
* @is_write: indicates the transfer direction
29
#include "exec/translator.h"
28
+ * @attrs: memory attributes
30
+#include "internals.h"
29
*/
31
30
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
32
31
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
33
/* internal defines */
32
+ bool is_write, MemTxAttrs attrs);
34
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
33
35
}
34
/* address_space_map: map a physical memory region into a host virtual address
36
}
35
*
37
36
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
38
+static inline void gen_exception(int excp, uint32_t syndrome,
39
+ uint32_t target_el)
40
+{
41
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
42
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
43
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
44
+
45
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
46
+ tcg_syn, tcg_el);
47
+
48
+ tcg_temp_free_i32(tcg_el);
49
+ tcg_temp_free_i32(tcg_syn);
50
+ tcg_temp_free_i32(tcg_excp);
51
+}
52
+
53
+/* Generate an architectural singlestep exception */
54
+static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
55
+{
56
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
57
+ default_exception_el(s));
58
+}
59
+
60
/*
61
* Given a VFP floating point constant encoded into an 8 bit immediate in an
62
* instruction, expand it to the actual constant value of the specified
63
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
37
index XXXXXXX..XXXXXXX 100644
64
index XXXXXXX..XXXXXXX 100644
38
--- a/include/sysemu/dma.h
65
--- a/target/arm/translate-a64.c
39
+++ b/include/sysemu/dma.h
66
+++ b/target/arm/translate-a64.c
40
@@ -XXX,XX +XXX,XX @@ static inline bool dma_memory_valid(AddressSpace *as,
67
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
41
DMADirection dir)
68
tcg_temp_free_i32(tcg_excp);
69
}
70
71
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
72
-{
73
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
74
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
75
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
76
-
77
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
78
- tcg_syn, tcg_el);
79
- tcg_temp_free_i32(tcg_el);
80
- tcg_temp_free_i32(tcg_syn);
81
- tcg_temp_free_i32(tcg_excp);
82
-}
83
-
84
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
42
{
85
{
43
return address_space_access_valid(as, addr, len,
86
gen_a64_set_pc_im(s->pc - offset);
44
- dir == DMA_DIRECTION_FROM_DEVICE);
87
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
45
+ dir == DMA_DIRECTION_FROM_DEVICE,
88
* of the exception, and our syndrome information is always correct.
46
+ MEMTXATTRS_UNSPECIFIED);
89
*/
90
gen_ss_advance(s);
91
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
92
- default_exception_el(s));
93
+ gen_swstep_exception(s, 1, s->is_ldex);
94
s->base.is_jmp = DISAS_NORETURN;
47
}
95
}
48
96
49
static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
97
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
50
diff --git a/exec.c b/exec.c
98
* bits should be zero.
99
*/
100
assert(dc->base.num_insns == 1);
101
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
102
- default_exception_el(dc));
103
+ gen_swstep_exception(dc, 0, 0);
104
dc->base.is_jmp = DISAS_NORETURN;
105
} else {
106
disas_a64_insn(env, dc);
107
diff --git a/target/arm/translate.c b/target/arm/translate.c
51
index XXXXXXX..XXXXXXX 100644
108
index XXXXXXX..XXXXXXX 100644
52
--- a/exec.c
109
--- a/target/arm/translate.c
53
+++ b/exec.c
110
+++ b/target/arm/translate.c
54
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
111
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
112
tcg_temp_free_i32(tcg_excp);
55
}
113
}
56
114
57
bool address_space_access_valid(AddressSpace *as, hwaddr addr,
115
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
58
- int len, bool is_write)
116
-{
59
+ int len, bool is_write,
117
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
60
+ MemTxAttrs attrs)
118
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
119
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
120
-
121
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
122
- tcg_syn, tcg_el);
123
-
124
- tcg_temp_free_i32(tcg_el);
125
- tcg_temp_free_i32(tcg_syn);
126
- tcg_temp_free_i32(tcg_excp);
127
-}
128
-
129
static void gen_step_complete_exception(DisasContext *s)
61
{
130
{
62
FlatView *fv;
131
/* We just completed step of an insn. Move from Active-not-pending
63
bool result;
132
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
64
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
133
* of the exception, and our syndrome information is always correct.
65
index XXXXXXX..XXXXXXX 100644
134
*/
66
--- a/target/s390x/diag.c
135
gen_ss_advance(s);
67
+++ b/target/s390x/diag.c
136
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
68
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
137
- default_exception_el(s));
69
return;
138
+ gen_swstep_exception(s, 1, s->is_ldex);
70
}
139
s->base.is_jmp = DISAS_NORETURN;
71
if (!address_space_access_valid(&address_space_memory, addr,
140
}
72
- sizeof(IplParameterBlock), false)) {
141
73
+ sizeof(IplParameterBlock), false,
142
@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
74
+ MEMTXATTRS_UNSPECIFIED)) {
143
* bits should be zero.
75
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
144
*/
76
return;
145
assert(dc->base.num_insns == 1);
77
}
146
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
78
@@ -XXX,XX +XXX,XX @@ out:
147
- default_exception_el(dc));
79
return;
148
+ gen_swstep_exception(dc, 0, 0);
80
}
149
dc->base.is_jmp = DISAS_NORETURN;
81
if (!address_space_access_valid(&address_space_memory, addr,
150
return true;
82
- sizeof(IplParameterBlock), true)) {
83
+ sizeof(IplParameterBlock), true,
84
+ MEMTXATTRS_UNSPECIFIED)) {
85
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
86
return;
87
}
88
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/s390x/excp_helper.c
91
+++ b/target/s390x/excp_helper.c
92
@@ -XXX,XX +XXX,XX @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
93
94
/* check out of RAM access */
95
if (!address_space_access_valid(&address_space_memory, raddr,
96
- TARGET_PAGE_SIZE, rw)) {
97
+ TARGET_PAGE_SIZE, rw,
98
+ MEMTXATTRS_UNSPECIFIED)) {
99
DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
100
(uint64_t)raddr, (uint64_t)ram_size);
101
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
102
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/target/s390x/mmu_helper.c
105
+++ b/target/s390x/mmu_helper.c
106
@@ -XXX,XX +XXX,XX @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
107
return ret;
108
}
109
if (!address_space_access_valid(&address_space_memory, pages[i],
110
- TARGET_PAGE_SIZE, is_write)) {
111
+ TARGET_PAGE_SIZE, is_write,
112
+ MEMTXATTRS_UNSPECIFIED)) {
113
trigger_access_exception(env, PGM_ADDRESSING, ILEN_AUTO, 0);
114
return -EFAULT;
115
}
116
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/s390x/sigp.c
119
+++ b/target/s390x/sigp.c
120
@@ -XXX,XX +XXX,XX @@ static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
121
cpu_synchronize_state(cs);
122
123
if (!address_space_access_valid(&address_space_memory, addr,
124
- sizeof(struct LowCore), false)) {
125
+ sizeof(struct LowCore), false,
126
+ MEMTXATTRS_UNSPECIFIED)) {
127
set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
128
return;
129
}
151
}
130
--
152
--
131
2.17.1
153
2.20.1
132
154
133
155
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
When generating an architectural single-step exception we were
2
add MemTxAttrs as an argument to the MemoryRegion valid.accepts
2
routing it to the "default exception level", which is to say
3
callback. We'll need this for subpage_accepts().
3
the same exception level we execute at except that EL0 exceptions
4
go to EL1. This is incorrect because the debug exception level
5
can be configured by the guest for situations such as single
6
stepping of EL0 and EL1 code by EL2.
4
7
5
We could take the approach we used with the read and write
8
We have to track the target debug exception level in the TB
6
callbacks and add new a new _with_attrs version, but since there
9
flags, because it is dependent on CPU state like HCR_EL2.TGE
7
are so few implementations of the accepts hook we just change
10
and MDCR_EL2.TDE. (That we were previously calling the
8
them all.
11
arm_debug_target_el() function to determine dc->ss_same_el
12
is itself a bug, though one that would only have manifested
13
as incorrect syndrome information.) Since we are out of TB
14
flag bits unless we want to expand into the cs_base field,
15
we share some bits with the M-profile only HANDLER and
16
STACKCHECK bits, since only A-profile has this singlestep.
9
17
18
Fixes: https://bugs.launchpad.net/qemu/+bug/1838913
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
20
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
21
Tested-by: Alex Bennée <alex.bennee@linaro.org>
13
Message-id: 20180521140402.23318-9-peter.maydell@linaro.org
22
Message-id: 20190805130952.4415-3-peter.maydell@linaro.org
14
---
23
---
15
include/exec/memory.h | 3 ++-
24
target/arm/cpu.h | 5 +++++
16
exec.c | 9 ++++++---
25
target/arm/translate.h | 15 +++++++++++----
17
hw/hppa/dino.c | 3 ++-
26
target/arm/helper.c | 6 ++++++
18
hw/nvram/fw_cfg.c | 12 ++++++++----
27
target/arm/translate-a64.c | 2 +-
19
hw/scsi/esp.c | 3 ++-
28
target/arm/translate.c | 4 +++-
20
hw/xen/xen_pt_msi.c | 3 ++-
29
5 files changed, 26 insertions(+), 6 deletions(-)
21
memory.c | 5 +++--
22
7 files changed, 25 insertions(+), 13 deletions(-)
23
30
24
diff --git a/include/exec/memory.h b/include/exec/memory.h
31
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
25
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
26
--- a/include/exec/memory.h
33
--- a/target/arm/cpu.h
27
+++ b/include/exec/memory.h
34
+++ b/target/arm/cpu.h
28
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionOps {
35
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
29
* as a machine check exception).
36
/* Target EL if we take a floating-point-disabled exception */
30
*/
37
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
31
bool (*accepts)(void *opaque, hwaddr addr,
38
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
32
- unsigned size, bool is_write);
39
+/*
33
+ unsigned size, bool is_write,
40
+ * For A-profile only, target EL for debug exceptions.
34
+ MemTxAttrs attrs);
41
+ * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
35
} valid;
42
+ */
36
/* Internal implementation constraints: */
43
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
37
struct {
44
38
diff --git a/exec.c b/exec.c
45
/* Bit usage when in AArch32 state: */
46
FIELD(TBFLAG_A32, THUMB, 0, 1)
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
39
index XXXXXXX..XXXXXXX 100644
48
index XXXXXXX..XXXXXXX 100644
40
--- a/exec.c
49
--- a/target/arm/translate.h
41
+++ b/exec.c
50
+++ b/target/arm/translate.h
42
@@ -XXX,XX +XXX,XX @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
52
uint32_t svc_imm;
53
int aarch64;
54
int current_el;
55
+ /* Debug target exception level for single-step exceptions */
56
+ int debug_target_el;
57
GHashTable *cp_regs;
58
uint64_t features; /* CPU features bits */
59
/* Because unallocated encodings generate different exception syndrome
60
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
61
* ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
62
*/
63
bool is_ldex;
64
- /* True if a single-step exception will be taken to the current EL */
65
- bool ss_same_el;
66
/* True if v8.3-PAuth is active. */
67
bool pauth_active;
68
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
69
@@ -XXX,XX +XXX,XX @@ static inline void gen_exception(int excp, uint32_t syndrome,
70
/* Generate an architectural singlestep exception */
71
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
72
{
73
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
74
- default_exception_el(s));
75
+ bool same_el = (s->debug_target_el == s->current_el);
76
+
77
+ /*
78
+ * If singlestep is targeting a lower EL than the current one,
79
+ * then s->ss_active must be false and we can never get here.
80
+ */
81
+ assert(s->debug_target_el >= s->current_el);
82
+
83
+ gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
43
}
84
}
44
85
45
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
86
/*
46
- unsigned size, bool is_write)
87
diff --git a/target/arm/helper.c b/target/arm/helper.c
47
+ unsigned size, bool is_write,
48
+ MemTxAttrs attrs)
49
{
50
return is_write;
51
}
52
@@ -XXX,XX +XXX,XX @@ static MemTxResult subpage_write(void *opaque, hwaddr addr,
53
}
54
55
static bool subpage_accepts(void *opaque, hwaddr addr,
56
- unsigned len, bool is_write)
57
+ unsigned len, bool is_write,
58
+ MemTxAttrs attrs)
59
{
60
subpage_t *subpage = opaque;
61
#if defined(DEBUG_SUBPAGE)
62
@@ -XXX,XX +XXX,XX @@ static void readonly_mem_write(void *opaque, hwaddr addr,
63
}
64
65
static bool readonly_mem_accepts(void *opaque, hwaddr addr,
66
- unsigned size, bool is_write)
67
+ unsigned size, bool is_write,
68
+ MemTxAttrs attrs)
69
{
70
return is_write;
71
}
72
diff --git a/hw/hppa/dino.c b/hw/hppa/dino.c
73
index XXXXXXX..XXXXXXX 100644
88
index XXXXXXX..XXXXXXX 100644
74
--- a/hw/hppa/dino.c
89
--- a/target/arm/helper.c
75
+++ b/hw/hppa/dino.c
90
+++ b/target/arm/helper.c
76
@@ -XXX,XX +XXX,XX @@ static void gsc_to_pci_forwarding(DinoState *s)
91
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
77
}
78
79
static bool dino_chip_mem_valid(void *opaque, hwaddr addr,
80
- unsigned size, bool is_write)
81
+ unsigned size, bool is_write,
82
+ MemTxAttrs attrs)
83
{
84
switch (addr) {
85
case DINO_IAR0:
86
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/hw/nvram/fw_cfg.c
89
+++ b/hw/nvram/fw_cfg.c
90
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
 }
 
 static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write)
+                                 unsigned size, bool is_write,
+                                 MemTxAttrs attrs)
 {
     return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
                          (size == 8 && addr == 0));
 }
 
 static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
-                                  unsigned size, bool is_write)
+                                  unsigned size, bool is_write,
+                                  MemTxAttrs attrs)
 {
     return addr == 0;
 }
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
 }
 
 static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write)
+                                 unsigned size, bool is_write,
+                                 MemTxAttrs attrs)
 {
     return is_write && size == 2;
 }
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_comb_write(void *opaque, hwaddr addr,
 }
 
 static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
-                              unsigned size, bool is_write)
+                              unsigned size, bool is_write,
+                              MemTxAttrs attrs)
 {
     return (size == 1) || (is_write && size == 2);
 }
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -XXX,XX +XXX,XX @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
 }
 
 static bool esp_mem_accepts(void *opaque, hwaddr addr,
-                            unsigned size, bool is_write)
+                            unsigned size, bool is_write,
+                            MemTxAttrs attrs)
 {
     return (size == 1) || (is_write && size == 4);
 }
diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -XXX,XX +XXX,XX @@ static uint64_t pci_msix_read(void *opaque, hwaddr addr,
 }
 
 static bool pci_msix_accepts(void *opaque, hwaddr addr,
-                             unsigned size, bool is_write)
+                             unsigned size, bool is_write,
+                             MemTxAttrs attrs)
 {
     return !(addr & (size - 1));
 }
diff --git a/memory.c b/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/memory.c
+++ b/memory.c
@@ -XXX,XX +XXX,XX @@ static void unassigned_mem_write(void *opaque, hwaddr addr,
 }
 
 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
-                                   unsigned size, bool is_write)
+                                   unsigned size, bool is_write,
+                                   MemTxAttrs attrs)
 {
     return false;
 }
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
     access_size = MAX(MIN(size, access_size_max), access_size_min);
     for (i = 0; i < size; i += access_size) {
         if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
-                                    is_write)) {
+                                    is_write, attrs)) {
             return false;
         }
     }
--
2.17.1


 }
 }
 
+    if (!arm_feature(env, ARM_FEATURE_M)) {
+        int target_el = arm_debug_target_el(env);
+
+        flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
+    }
+
     *pflags = flags;
     *cs_base = 0;
 }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+    dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
 
     /* Bound the number of insns to execute to those left on the page. */
     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+    if (!arm_feature(env, ARM_FEATURE_M)) {
+        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+    }
 
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
--
2.20.1
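Not part of either series: a self-contained C sketch of the idea behind the
valid.accepts change above, where an accepts-style hook can now consult the
transaction attributes. The MemTxAttrs structure, the callback shape, and the
device rule here are simplified stand-ins assumed only for illustration, not
the real QEMU types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;                            /* stand-in */
typedef struct { unsigned secure : 1; } MemTxAttrs; /* simplified stand-in */

/* Hypothetical device rule: reads are always fine; writes must be
 * 4 bytes wide and carry the secure attribute. */
static bool demo_mem_valid(void *opaque, hwaddr addr,
                           unsigned size, bool is_write, MemTxAttrs attrs)
{
    if (!is_write) {
        return true;
    }
    return size == 4 && attrs.secure;
}

int main(void)
{
    MemTxAttrs ns = { .secure = 0 }, sec = { .secure = 1 };
    printf("non-secure 4-byte write accepted: %d\n",
           demo_mem_valid(NULL, 0, 4, true, ns));
    printf("secure 4-byte write accepted:     %d\n",
           demo_mem_valid(NULL, 0, 4, true, sec));
    return 0;
}

Before this change such a hook could only decide on address, size and
direction; plumbing attrs through is what makes attribute-dependent
decoding possible at all.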
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to tb_invalidate_phys_addr().
Its callers either have an attrs value to hand, or don't care
and can use MEMTXATTRS_UNSPECIFIED.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180521140402.23318-3-peter.maydell@linaro.org
---
 include/exec/exec-all.h   | 5 +++--
 accel/tcg/translate-all.c | 2 +-
 exec.c                    | 2 +-
 target/xtensa/op_helper.c | 3 ++-
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size);
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                  uintptr_t retaddr);
 #else
@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                        uint16_t idxmap)
 {
 }
-static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
+static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr,
+                                           MemTxAttrs attrs)
 {
 }
 #endif
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 }
 
 #if !defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
 {
     ram_addr_t ram_addr;
     MemoryRegion *mr;
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
     if (phys != -1) {
         /* Locks grabbed by tb_invalidate_phys_addr */
         tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
-                                phys | (pc & ~TARGET_PAGE_MASK));
+                                phys | (pc & ~TARGET_PAGE_MASK), attrs);
     }
 }
 #endif
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -XXX,XX +XXX,XX @@ static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
     int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
                                        &paddr, &page_size, &access);
     if (ret == 0) {
-        tb_invalidate_phys_addr(&address_space_memory, paddr);
+        tb_invalidate_phys_addr(&address_space_memory, paddr,
+                                MEMTXATTRS_UNSPECIFIED);
     }
 }
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

This function is used in two different contexts, and it will be
clearer if the function is given the address to which it applies.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
 }
 }
 
-static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
 {
-    /* Return true if this is a 16 bit instruction. We must be precise
-     * about this (matching the decode). We assume that s->pc still
-     * points to the first 16 bits of the insn.
+    /*
+     * Return true if this is a 16 bit instruction. We must be precise
+     * about this (matching the decode).
      */
     if ((insn >> 11) < 0x1d) {
         /* Definitely a 16-bit instruction */
@@ -XXX,XX +XXX,XX @@ static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
         return false;
     }
 
-    if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
         /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
          * is not on the next page; we merge this into a 32-bit
          * insn.
@@ -XXX,XX +XXX,XX @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
      */
     uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
 
-    return !thumb_insn_is_16bit(s, insn);
+    return !thumb_insn_is_16bit(s, s->pc, insn);
 }
 
 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
 
     insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
-    is_16bit = thumb_insn_is_16bit(dc, insn);
+    is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
     dc->pc += 2;
     if (!is_16bit) {
         uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
--
2.20.1
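For reference, a standalone sketch (not QEMU code) of the Thumb length rule
that thumb_insn_is_16bit() implements: the top five bits of the first
halfword decide whether a second halfword follows, since 0b11101, 0b11110
and 0b11111 introduce 32-bit encodings. The example instructions are
ordinary Thumb encodings chosen here for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Anything whose top five bits are below 0x1d is a 16-bit insn. */
static bool thumb_halfword_is_16bit(uint16_t insn)
{
    return (insn >> 11) < 0x1d;
}

int main(void)
{
    printf("0x4770 (BX lr) is 16-bit: %d\n", thumb_halfword_is_16bit(0x4770));
    printf("0xf000 (BL/BLX prefix) is 16-bit: %d\n",
           thumb_halfword_is_16bit(0xf000));
    return 0;
}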
From: Richard Henderson <richard.henderson@linaro.org>

Add a new field to retain the address of the instruction currently
being translated. The 32-bit uses are all within subroutines used
by a32 and t32. This will become less obvious when t16 support is
merged with a32+t32, and having a clear definition will help.

Convert aarch64 as well for consistency. Note that there is one
instance of a pre-assert fprintf that used the wrong value for the
address of the current instruction.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  2 +-
 target/arm/translate.h     |  2 ++
 target/arm/translate-a64.c | 21 +++++++++++----------
 target/arm/translate.c     | 14 ++++++++------
 4 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ void unallocated_encoding(DisasContext *s);
         qemu_log_mask(LOG_UNIMP,                                        \
                       "%s:%d: unsupported instruction encoding 0x%08x " \
                       "at pc=%016" PRIx64 "\n",                         \
-                      __FILE__, __LINE__, insn, s->pc - 4);             \
+                      __FILE__, __LINE__, insn, s->pc_curr);            \
         unallocated_encoding(s);                                        \
     } while (0)
 
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     const ARMISARegisters *isar;
 
     target_ulong pc;
+    /* The address of the current instruction being translated. */
+    target_ulong pc_curr;
     target_ulong page_start;
     uint32_t insn;
     /* Nonzero if this instruction has been conditionally skipped. */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
  */
 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 {
-    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
+    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
 
     if (insn & (1U << 31)) {
         /* BL Branch with link */
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
     sf = extract32(insn, 31, 1);
     op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
     rt = extract32(insn, 0, 5);
-    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
 
     tcg_cmp = read_cpu_reg(s, rt, sf);
     label_match = gen_new_label();
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 
     bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
     op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
-    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
+    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
     rt = extract32(insn, 0, 5);
 
     tcg_cmp = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
         unallocated_encoding(s);
         return;
     }
-    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
     cond = extract32(insn, 0, 4);
 
     reset_btype(s);
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         TCGv_i32 tcg_syn, tcg_isread;
         uint32_t syndrome;
 
-        gen_a64_set_pc_im(s->pc - 4);
+        gen_a64_set_pc_im(s->pc_curr);
         tmpptr = tcg_const_ptr(ri);
         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
         tcg_syn = tcg_const_i32(syndrome);
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
         /* The pre HVC helper handles cases when HVC gets trapped
          * as an undefined insn by runtime configuration.
          */
-        gen_a64_set_pc_im(s->pc - 4);
+        gen_a64_set_pc_im(s->pc_curr);
         gen_helper_pre_hvc(cpu_env);
         gen_ss_advance(s);
         gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             break;
         }
-        gen_a64_set_pc_im(s->pc - 4);
+        gen_a64_set_pc_im(s->pc_curr);
         tmp = tcg_const_i32(syn_aa64_smc(imm16));
         gen_helper_pre_smc(cpu_env, tmp);
         tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
 
     tcg_rt = cpu_reg(s, rt);
 
-    clean_addr = tcg_const_i64((s->pc - 4) + imm);
+    clean_addr = tcg_const_i64(s->pc_curr + imm);
     if (is_vector) {
         do_fp_ld(s, rt, clean_addr, size);
     } else {
@@ -XXX,XX +XXX,XX @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
     offset = sextract64(insn, 5, 19);
     offset = offset << 2 | extract32(insn, 29, 2);
     rd = extract32(insn, 0, 5);
-    base = s->pc - 4;
+    base = s->pc_curr;
 
     if (page) {
         /* ADRP (page based) */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
         break;
     default:
         fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
-                __func__, insn, fpopcode, s->pc);
+                __func__, insn, fpopcode, s->pc_curr);
         g_assert_not_reached();
     }
 
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
 {
     uint32_t insn;
 
+    s->pc_curr = s->pc;
     insn = arm_ldl_code(env, s->pc, s->sctlr_b);
     s->insn = insn;
     s->pc += 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * as an undefined insn by runtime configuration (ie before
      * the insn really executes).
      */
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     gen_helper_pre_hvc(cpu_env);
     /* Otherwise we will treat this as a real exception which
      * happens after execution of the insn. (The distinction matters
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
      */
     TCGv_i32 tmp;
 
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     tmp = tcg_const_i32(syn_aa32_smc());
     gen_helper_pre_smc(cpu_env, tmp);
     tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
 
     /* Sync state because msr_banked() can raise exceptions */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     tcg_reg = load_reg(s, rn);
     tcg_tgtmode = tcg_const_i32(tgtmode);
     tcg_regno = tcg_const_i32(regno);
@@ -XXX,XX +XXX,XX @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
 
     /* Sync state because mrs_banked() can raise exceptions */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     tcg_reg = tcg_temp_new_i32();
     tcg_tgtmode = tcg_const_i32(tgtmode);
     tcg_regno = tcg_const_i32(regno);
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
     }
 
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     tmpptr = tcg_const_ptr(ri);
     tcg_syn = tcg_const_i32(syndrome);
     tcg_isread = tcg_const_i32(isread);
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
     tmp = tcg_const_i32(mode);
     /* get_r13_banked() will raise an exception if called from System mode */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - 4);
+    gen_set_pc_im(s, s->pc_curr);
     gen_helper_get_r13_banked(addr, cpu_env, tmp);
     tcg_temp_free_i32(tmp);
     switch (amode) {
@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         return;
     }
 
+    dc->pc_curr = dc->pc;
     insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
     dc->insn = insn;
     dc->pc += 4;
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         return;
     }
 
+    dc->pc_curr = dc->pc;
     insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
     is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
     dc->pc += 2;
--
2.20.1
229
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to memory_region_access_valid().
2
3
Its callers either have an attrs value to hand, or don't care
3
We currently have 3 different ways of computing the architectural
4
and can use MEMTXATTRS_UNSPECIFIED.
4
value of "PC" as seen in the ARM ARM.
5
5
6
The callsite in flatview_access_valid() is part of a recursive
6
The value of s->pc has been incremented past the current insn,
7
loop flatview_access_valid() -> memory_region_access_valid() ->
7
but that is all. Thus for a32, PC = s->pc + 4; for t32, PC = s->pc;
8
subpage_accepts() -> flatview_access_valid(); we make it pass
8
for t16, PC = s->pc + 2. These differing computations make it
9
MEMTXATTRS_UNSPECIFIED for now, until the next several commits
9
impossible at present to unify the various code paths.
10
have plumbed an attrs parameter through the rest of the loop
10
11
and we can add an attrs parameter to flatview_access_valid().
11
With the newly introduced s->pc_curr, we can compute the correct
12
12
value for all cases, using the formula given in the ARM ARM.
13
14
This changes the behaviour for load_reg() and load_reg_var()
15
when called with reg==15 from a 32-bit Thumb instruction:
16
previously they would have returned the incorrect value
17
of pc_curr + 6, and now they will return the architecturally
18
correct value of PC, which is pc_curr + 4. This will not
19
affect well-behaved guest software, because all of the places
20
we call these functions from T32 code are instructions where
21
using r15 is UNPREDICTABLE. Using the architectural PC value
22
here is more consistent with the T16 and A32 behaviour.
23
24
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
25
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
26
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
27
Message-id: 20190807045335.1361-4-richard.henderson@linaro.org
28
[PMM: added commit message note about UNPREDICTABLE T32 cases]
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
29
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20180521140402.23318-8-peter.maydell@linaro.org
17
---
30
---
18
include/exec/memory-internal.h | 3 ++-
31
target/arm/translate.c | 59 ++++++++++++++++--------------------------
19
exec.c | 4 +++-
32
1 file changed, 23 insertions(+), 36 deletions(-)
20
hw/s390x/s390-pci-inst.c | 3 ++-
33
21
memory.c | 7 ++++---
34
diff --git a/target/arm/translate.c b/target/arm/translate.c
22
4 files changed, 11 insertions(+), 6 deletions(-)
23
24
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
25
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
26
--- a/include/exec/memory-internal.h
36
--- a/target/arm/translate.c
27
+++ b/include/exec/memory-internal.h
37
+++ b/target/arm/translate.c
28
@@ -XXX,XX +XXX,XX @@ void flatview_unref(FlatView *view);
38
@@ -XXX,XX +XXX,XX @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
29
extern const MemoryRegionOps unassigned_mem_ops;
39
#define store_cpu_field(var, name) \
30
40
store_cpu_offset(var, offsetof(CPUARMState, name))
31
bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
41
32
- unsigned size, bool is_write);
42
+/* The architectural value of PC. */
33
+ unsigned size, bool is_write,
43
+static uint32_t read_pc(DisasContext *s)
34
+ MemTxAttrs attrs);
44
+{
35
45
+ return s->pc_curr + (s->thumb ? 4 : 8);
36
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
46
+}
37
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
47
+
38
diff --git a/exec.c b/exec.c
48
/* Set a variable to the value of a CPU register. */
39
index XXXXXXX..XXXXXXX 100644
49
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
40
--- a/exec.c
50
{
41
+++ b/exec.c
51
if (reg == 15) {
42
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
52
- uint32_t addr;
43
mr = flatview_translate(fv, addr, &xlat, &l, is_write);
53
- /* normally, since we updated PC, we need only to add one insn */
44
if (!memory_access_is_direct(mr, is_write)) {
54
- if (s->thumb)
45
l = memory_access_size(mr, l, addr);
55
- addr = (long)s->pc + 2;
46
- if (!memory_region_access_valid(mr, xlat, l, is_write)) {
56
- else
47
+ /* When our callers all have attrs we'll pass them through here */
57
- addr = (long)s->pc + 4;
48
+ if (!memory_region_access_valid(mr, xlat, l, is_write,
58
- tcg_gen_movi_i32(var, addr);
49
+ MEMTXATTRS_UNSPECIFIED)) {
59
+ tcg_gen_movi_i32(var, read_pc(s));
50
return false;
60
} else {
61
tcg_gen_mov_i32(var, cpu_R[reg]);
62
}
63
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
64
/* branch link and change to thumb (blx <offset>) */
65
int32_t offset;
66
67
- val = (uint32_t)s->pc;
68
tmp = tcg_temp_new_i32();
69
- tcg_gen_movi_i32(tmp, val);
70
+ tcg_gen_movi_i32(tmp, s->pc);
71
store_reg(s, 14, tmp);
72
/* Sign-extend the 24-bit offset */
73
offset = (((int32_t)insn) << 8) >> 8;
74
+ val = read_pc(s);
75
/* offset * 4 + bit24 * 2 + (thumb bit) */
76
val += (offset << 2) | ((insn >> 23) & 2) | 1;
77
- /* pipeline offset */
78
- val += 4;
79
/* protected by ARCH(5); above, near the start of uncond block */
80
gen_bx_im(s, val);
81
return;
82
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
83
} else {
84
/* store */
85
if (i == 15) {
86
- /* special case: r15 = PC + 8 */
87
- val = (long)s->pc + 4;
88
tmp = tcg_temp_new_i32();
89
- tcg_gen_movi_i32(tmp, val);
90
+ tcg_gen_movi_i32(tmp, read_pc(s));
91
} else if (user) {
92
tmp = tcg_temp_new_i32();
93
tmp2 = tcg_const_i32(i);
94
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
95
int32_t offset;
96
97
/* branch (and link) */
98
- val = (int32_t)s->pc;
99
if (insn & (1 << 24)) {
100
tmp = tcg_temp_new_i32();
101
- tcg_gen_movi_i32(tmp, val);
102
+ tcg_gen_movi_i32(tmp, s->pc);
103
store_reg(s, 14, tmp);
104
}
105
offset = sextract32(insn << 2, 0, 26);
106
- val += offset + 4;
107
- gen_jmp(s, val);
108
+ gen_jmp(s, read_pc(s) + offset);
51
}
109
}
110
break;
111
case 0xc:
112
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
113
tcg_temp_free_i32(addr);
114
} else if ((insn & (7 << 5)) == 0) {
115
/* Table Branch. */
116
- if (rn == 15) {
117
- addr = tcg_temp_new_i32();
118
- tcg_gen_movi_i32(addr, s->pc);
119
- } else {
120
- addr = load_reg(s, rn);
121
- }
122
+ addr = load_reg(s, rn);
123
tmp = load_reg(s, rm);
124
tcg_gen_add_i32(addr, addr, tmp);
125
if (insn & (1 << 4)) {
126
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
127
}
128
tcg_temp_free_i32(addr);
129
tcg_gen_shli_i32(tmp, tmp, 1);
130
- tcg_gen_addi_i32(tmp, tmp, s->pc);
131
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
132
store_reg(s, 15, tmp);
133
} else {
134
bool is_lasr = false;
135
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
136
tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
137
}
138
139
- offset += s->pc;
140
+ offset += read_pc(s);
141
if (insn & (1 << 12)) {
142
/* b/bl */
143
gen_jmp(s, offset);
144
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
145
offset |= (insn & (1 << 11)) << 8;
146
147
/* jump to the offset */
148
- gen_jmp(s, s->pc + offset);
149
+ gen_jmp(s, read_pc(s) + offset);
150
}
151
} else {
152
/*
153
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
154
if (insn & (1 << 11)) {
155
rd = (insn >> 8) & 7;
156
/* load pc-relative. Bit 1 of PC is ignored. */
157
- val = s->pc + 2 + ((insn & 0xff) * 4);
158
+ val = read_pc(s) + ((insn & 0xff) * 4);
159
val &= ~(uint32_t)2;
160
addr = tcg_temp_new_i32();
161
tcg_gen_movi_i32(addr, val);
162
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
163
} else {
164
/* PC. bit 1 is ignored. */
165
tmp = tcg_temp_new_i32();
166
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
167
+ tcg_gen_movi_i32(tmp, read_pc(s) & ~(uint32_t)2);
52
}
168
}
53
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
169
val = (insn & 0xff) * 4;
54
index XXXXXXX..XXXXXXX 100644
170
tcg_gen_addi_i32(tmp, tmp, val);
55
--- a/hw/s390x/s390-pci-inst.c
171
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
56
+++ b/hw/s390x/s390-pci-inst.c
172
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
57
@@ -XXX,XX +XXX,XX @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
173
tcg_temp_free_i32(tmp);
58
mr = s390_get_subregion(mr, offset, len);
174
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
59
offset -= mr->addr;
175
- val = (uint32_t)s->pc + 2;
60
176
- val += offset;
61
- if (!memory_region_access_valid(mr, offset, len, true)) {
177
- gen_jmp(s, val);
62
+ if (!memory_region_access_valid(mr, offset, len, true,
178
+ gen_jmp(s, read_pc(s) + offset);
63
+ MEMTXATTRS_UNSPECIFIED)) {
179
break;
64
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
180
65
return 0;
181
case 15: /* IT, nop-hint. */
66
}
182
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
67
diff --git a/memory.c b/memory.c
183
arm_skip_unless(s, cond);
68
index XXXXXXX..XXXXXXX 100644
184
69
--- a/memory.c
185
/* jump to the offset */
70
+++ b/memory.c
186
- val = (uint32_t)s->pc + 2;
71
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps ram_device_mem_ops = {
187
+ val = read_pc(s);
72
bool memory_region_access_valid(MemoryRegion *mr,
188
offset = ((int32_t)insn << 24) >> 24;
73
hwaddr addr,
189
val += offset << 1;
74
unsigned size,
190
gen_jmp(s, val);
75
- bool is_write)
191
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
76
+ bool is_write,
192
break;
77
+ MemTxAttrs attrs)
193
}
78
{
194
/* unconditional branch */
79
int access_size_min, access_size_max;
195
- val = (uint32_t)s->pc;
80
int access_size, i;
196
+ val = read_pc(s);
81
@@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
197
offset = ((int32_t)insn << 21) >> 21;
82
{
198
- val += (offset << 1) + 2;
83
MemTxResult r;
199
+ val += offset << 1;
84
200
gen_jmp(s, val);
85
- if (!memory_region_access_valid(mr, addr, size, false)) {
201
break;
86
+ if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
202
87
*pval = unassigned_mem_read(mr, addr, size);
203
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
88
return MEMTX_DECODE_ERROR;
204
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
89
}
205
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
90
@@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
206
91
unsigned size,
207
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
92
MemTxAttrs attrs)
208
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
93
{
209
}
94
- if (!memory_region_access_valid(mr, addr, size, true)) {
210
break;
95
+ if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
96
unassigned_mem_write(mr, addr, data, size);
97
return MEMTX_DECODE_ERROR;
98
}
211
}
99
--
212
--
100
2.17.1
213
2.20.1
101
214
102
215
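Not part of the series: a standalone C sketch of the read_pc() rule the
patch above adopts, with plain integers standing in for the translator
state. The architectural PC visible to an instruction is its own address
plus the legacy pipeline offset, 8 for A32 and 4 for Thumb.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_pc(uint32_t pc_curr, bool thumb)
{
    return pc_curr + (thumb ? 4 : 8);
}

int main(void)
{
    /* An A32 "b ." at 0x1000 encodes offset -8, because the target is
     * computed as PC + offset and PC reads as the insn address + 8. */
    printf("A32 PC for insn at 0x1000: 0x%" PRIx32 "\n",
           read_pc(0x1000, false));
    printf("T32 PC for insn at 0x1000: 0x%" PRIx32 "\n",
           read_pc(0x1000, true));
    return 0;
}

With pc_curr as the single reference point, the same formula serves the
a32, t32 and t16 paths that previously each applied their own fixup.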
From: Richard Henderson <richard.henderson@linaro.org>

Provide a common routine for the places that require ALIGN(PC, 4)
as the base address as opposed to plain PC. The two are always
the same for A32, but the difference is meaningful for thumb mode.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-vfp.inc.c |  38 ++------
 target/arm/translate.c         | 166 +++++++++++++++------------------
 2 files changed, 82 insertions(+), 122 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         offset = -offset;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
-    tcg_gen_addi_i32(addr, addr, offset);
+    /* For thumb, use of PC is UNPREDICTABLE.  */
+    addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i32();
     if (a->l) {
         gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
         offset = -offset;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
-    tcg_gen_addi_i32(addr, addr, offset);
+    /* For thumb, use of PC is UNPREDICTABLE.  */
+    addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i64();
     if (a->l) {
         gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
         return true;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
+    /* For thumb, use of PC is UNPREDICTABLE.  */
+    addr = add_reg_for_lit(s, a->rn, 0);
     if (a->p) {
         /* pre-decrement */
         tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
         return true;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
+    /* For thumb, use of PC is UNPREDICTABLE.  */
+    addr = add_reg_for_lit(s, a->rn, 0);
     if (a->p) {
         /* pre-decrement */
         tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
     return tmp;
 }
 
+/*
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
+ * This is used for load/store for which use of PC implies (literal),
+ * or ADD that implies ADR.
+ */
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    if (reg == 15) {
+        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+    } else {
+        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+    }
+    return tmp;
+}
+
 /* Set a CPU register.  The source must be a temporary and will be
    marked as dead. */
 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
              */
             bool wback = extract32(insn, 21, 1);
 
-            if (rn == 15) {
-                if (insn & (1 << 21)) {
-                    /* UNPREDICTABLE */
-                    goto illegal_op;
-                }
-                addr = tcg_temp_new_i32();
-                tcg_gen_movi_i32(addr, s->pc & ~3);
-            } else {
-                addr = load_reg(s, rn);
+            if (rn == 15 && (insn & (1 << 21))) {
+                /* UNPREDICTABLE */
+                goto illegal_op;
             }
+
+            addr = add_reg_for_lit(s, rn, 0);
             offset = (insn & 0xff) * 4;
             if ((insn & (1 << 23)) == 0) {
                 offset = -offset;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                 store_reg(s, rd, tmp);
             } else {
                 /* Add/sub 12-bit immediate.  */
-                if (rn == 15) {
-                    offset = s->pc & ~(uint32_t)3;
-                    if (insn & (1 << 23))
-                        offset -= imm;
-                    else
-                        offset += imm;
-                    tmp = tcg_temp_new_i32();
-                    tcg_gen_movi_i32(tmp, offset);
-                    store_reg(s, rd, tmp);
+                if (insn & (1 << 23)) {
+                    imm = -imm;
+                }
+                tmp = add_reg_for_lit(s, rn, imm);
+                if (rn == 13 && rd == 13) {
+                    /* ADD SP, SP, imm or SUB SP, SP, imm */
+                    store_sp_checked(s, tmp);
                 } else {
-                    tmp = load_reg(s, rn);
-                    if (insn & (1 << 23))
-                        tcg_gen_subi_i32(tmp, tmp, imm);
-                    else
-                        tcg_gen_addi_i32(tmp, tmp, imm);
-                    if (rn == 13 && rd == 13) {
-                        /* ADD SP, SP, imm or SUB SP, SP, imm */
-                        store_sp_checked(s, tmp);
-                    } else {
-                        store_reg(s, rd, tmp);
-                    }
+                    store_reg(s, rd, tmp);
                 }
             }
         }
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             }
         }
         memidx = get_mem_index(s);
-        if (rn == 15) {
-            addr = tcg_temp_new_i32();
-            /* PC relative.  */
-            /* s->pc has already been incremented by 4.  */
-            imm = s->pc & 0xfffffffc;
-            if (insn & (1 << 23))
-                imm += insn & 0xfff;
-            else
-                imm -= insn & 0xfff;
-            tcg_gen_movi_i32(addr, imm);
+        imm = insn & 0xfff;
+        if (insn & (1 << 23)) {
+            /* PC relative or Positive offset.  */
+            addr = add_reg_for_lit(s, rn, imm);
+        } else if (rn == 15) {
+            /* PC relative with negative offset.  */
+            addr = add_reg_for_lit(s, rn, -imm);
         } else {
             addr = load_reg(s, rn);
-            if (insn & (1 << 23)) {
-                /* Positive offset.  */
-                imm = insn & 0xfff;
-                tcg_gen_addi_i32(addr, addr, imm);
-            } else {
-                imm = insn & 0xff;
-                switch ((insn >> 8) & 0xf) {
-                case 0x0: /* Shifted Register.  */
-                    shift = (insn >> 4) & 0xf;
-                    if (shift > 3) {
-                        tcg_temp_free_i32(addr);
-                        goto illegal_op;
-                    }
-                    tmp = load_reg(s, rm);
-                    if (shift)
-                        tcg_gen_shli_i32(tmp, tmp, shift);
-                    tcg_gen_add_i32(addr, addr, tmp);
-                    tcg_temp_free_i32(tmp);
-                    break;
-                case 0xc: /* Negative offset.  */
-                    tcg_gen_addi_i32(addr, addr, -imm);
-                    break;
-                case 0xe: /* User privilege.  */
-                    tcg_gen_addi_i32(addr, addr, imm);
-                    memidx = get_a32_user_mem_index(s);
-                    break;
-                case 0x9: /* Post-decrement.  */
-                    imm = -imm;
-                    /* Fall through.  */
-                case 0xb: /* Post-increment.  */
-                    postinc = 1;
-                    writeback = 1;
-                    break;
-                case 0xd: /* Pre-decrement.  */
-                    imm = -imm;
-                    /* Fall through.  */
-                case 0xf: /* Pre-increment.  */
-                    writeback = 1;
-                    break;
-                default:
+            imm = insn & 0xff;
+            switch ((insn >> 8) & 0xf) {
+            case 0x0: /* Shifted Register.  */
+                shift = (insn >> 4) & 0xf;
+                if (shift > 3) {
                     tcg_temp_free_i32(addr);
                     goto illegal_op;
                 }
+                tmp = load_reg(s, rm);
+                if (shift) {
+                    tcg_gen_shli_i32(tmp, tmp, shift);
+                }
+                tcg_gen_add_i32(addr, addr, tmp);
+                tcg_temp_free_i32(tmp);
+                break;
+            case 0xc: /* Negative offset.  */
+                tcg_gen_addi_i32(addr, addr, -imm);
+                break;
+            case 0xe: /* User privilege.  */
+                tcg_gen_addi_i32(addr, addr, imm);
+                memidx = get_a32_user_mem_index(s);
+                break;
+            case 0x9: /* Post-decrement.  */
+                imm = -imm;
+                /* Fall through.  */
+            case 0xb: /* Post-increment.  */
+                postinc = 1;
+                writeback = 1;
+                break;
+            case 0xd: /* Pre-decrement.  */
+                imm = -imm;
+                /* Fall through.  */
+            case 0xf: /* Pre-increment.  */
+                writeback = 1;
+                break;
+            default:
+                tcg_temp_free_i32(addr);
+                goto illegal_op;
             }
         }
 
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
         if (insn & (1 << 11)) {
             rd = (insn >> 8) & 7;
             /* load pc-relative.  Bit 1 of PC is ignored.  */
-            val = read_pc(s) + ((insn & 0xff) * 4);
-            val &= ~(uint32_t)2;
-            addr = tcg_temp_new_i32();
-            tcg_gen_movi_i32(addr, val);
+            addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
             tmp = tcg_temp_new_i32();
             gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                rd | ISSIs16Bit);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
          * - Add PC/SP (immediate)
          */
         rd = (insn >> 8) & 7;
-        if (insn & (1 << 11)) {
-            /* SP */
-            tmp = load_reg(s, 13);
-        } else {
-            /* PC. bit 1 is ignored.  */
-            tmp = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp, read_pc(s) & ~(uint32_t)2);
-        }
         val = (insn & 0xff) * 4;
-        tcg_gen_addi_i32(tmp, tmp, val);
+        tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
         store_reg(s, rd, tmp);
         break;
 
--
2.20.1
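Not part of the series: a standalone C sketch of the add_reg_for_lit() base
rule above, with plain integers standing in for the translator state.
Literal loads and ADR use ALIGN(PC, 4) as the base, which only differs from
PC in Thumb mode, where PC can be halfword-aligned.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lit_base(uint32_t pc_curr, bool thumb, int32_t ofs)
{
    uint32_t pc = pc_curr + (thumb ? 4 : 8); /* architectural PC */
    return (pc & ~3u) + ofs;                 /* ALIGN(PC, 4) + offset */
}

int main(void)
{
    /* Thumb "LDR r0, [pc, #8]" at 0x1002: PC reads as 0x1006, the
     * base aligns down to 0x1004, so the literal sits at 0x100c. */
    printf("literal address: 0x%" PRIx32 "\n", lit_base(0x1002, true, 8));
    return 0;
}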
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to flatview_do_translate().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-13-peter.maydell@linaro.org
---
 exec.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ unassigned:
  * @is_write: whether the translation operation is for write
  * @is_mmio: whether this can be MMIO, set true if it can
  * @target_as: the address space targeted by the IOMMU
+ * @attrs: memory transaction attributes
  *
  * This function is called from RCU critical section
  */
@@ -XXX,XX +XXX,XX @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                  hwaddr *page_mask_out,
                                                  bool is_write,
                                                  bool is_mmio,
-                                                 AddressSpace **target_as)
+                                                 AddressSpace **target_as,
+                                                 MemTxAttrs attrs)
 {
     MemoryRegionSection *section;
     IOMMUMemoryRegion *iommu_mr;
@@ -XXX,XX +XXX,XX @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
      * but page mask.
      */
     section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
-                                    NULL, &page_mask, is_write, false, &as);
+                                    NULL, &page_mask, is_write, false, &as,
+                                    attrs);
 
     /* Illegal translation */
     if (section.mr == &io_mem_unassigned) {
@@ -XXX,XX +XXX,XX @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
 
     /* This can be MMIO, so setup MMIO bit. */
     section = flatview_do_translate(fv, addr, xlat, plen, NULL,
-                                    is_write, true, &as);
+                                    is_write, true, &as, attrs);
     mr = section.mr;
 
     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

The thumb bit has already been removed from s->pc, and is always even.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
 /* Force a TB lookup after an instruction that changes the CPU state.  */
 static inline void gen_lookup_tb(DisasContext *s)
 {
-    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
+    tcg_gen_movi_i32(cpu_R[15], s->pc);
     s->base.is_jmp = DISAS_EXIT;
 }
 
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              * self-modifying code correctly and also to take
              * any pending interrupts immediately.
              */
-            gen_goto_tb(s, 0, s->pc & ~1);
+            gen_goto_tb(s, 0, s->pc);
             return;
         case 7: /* sb */
             if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              * for TCG; MB and end the TB instead.
              */
             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-            gen_goto_tb(s, 0, s->pc & ~1);
+            gen_goto_tb(s, 0, s->pc);
             return;
         default:
             goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                  * and also to take any pending interrupts
                  * immediately.
                  */
-                gen_goto_tb(s, 0, s->pc & ~1);
+                gen_goto_tb(s, 0, s->pc);
                 break;
             case 7: /* sb */
                 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                  * for TCG; MB and end the TB instead.
                  */
                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-                gen_goto_tb(s, 0, s->pc & ~1);
+                gen_goto_tb(s, 0, s->pc);
                 break;
             default:
                 goto illegal_op;
--
2.20.1
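Not part of the series: a standalone C sketch of why the "& ~1" masking
removed above had become redundant. In ARM interworking, bit 0 of a
BX-style target selects the Thumb state and is not part of the address;
once s->pc is kept without that bit it is always even.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t target = 0x8001;     /* address 0x8000, Thumb state bit set */
    uint32_t addr = target & ~1u; /* strip the state bit once, on entry */
    printf("addr 0x%" PRIx32 ", thumb bit %" PRIu32 "\n", addr, target & 1u);
    /* addr is now always even, so a second "& ~1" cannot change it */
    printf("re-masking is a no-op: %d\n", addr == (addr & ~1u));
    return 0;
}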
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to address_space_get_iotlb_entry().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-12-peter.maydell@linaro.org
---
 include/exec/memory.h | 2 +-
 exec.c                | 2 +-
 hw/virtio/vhost.c     | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ void address_space_cache_destroy(MemoryRegionCache *cache);
  * entry. Should be called from an RCU critical section.
  */
 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
-                                            bool is_write);
+                                            bool is_write, MemTxAttrs attrs);
 
 /* address_space_translate: translate an address range into an address space
  * into a MemoryRegion and an address range into that section.  Should be
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
 
 /* Called from RCU critical section */
 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
-                                            bool is_write)
+                                            bool is_write, MemTxAttrs attrs)
 {
     MemoryRegionSection section;
     hwaddr xlat, page_mask;
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -XXX,XX +XXX,XX @@ int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
     trace_vhost_iotlb_miss(dev, 1);
 
     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
-                                          iova, write);
+                                          iova, write,
+                                          MEMTXATTRS_UNSPECIFIED);
     if (iotlb.target_as != NULL) {
         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                          &uaddr, &len);
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

We must update s->base.pc_next when we return from the translate_insn
hook to the main translator loop. By incrementing s->base.pc_next
immediately after reading the insn word, "pc_next" contains the address
of the next instruction throughout translation.

All remaining uses of s->pc are referencing the address of the next insn,
so this is now a simple global replacement. Remove the "s->pc" field.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h     |   1 -
 target/arm/translate-a64.c |  51 +++++++++---------
 target/arm/translate.c     | 103 ++++++++++++++++++-------------------
 3 files changed, 72 insertions(+), 83 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     DisasContextBase base;
     const ARMISARegisters *isar;
 
-    target_ulong pc;
     /* The address of the current instruction being translated. */
     target_ulong pc_curr;
     target_ulong page_start;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
 
 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
-    gen_a64_set_pc_im(s->pc - offset);
+    gen_a64_set_pc_im(s->base.pc_next - offset);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 static void gen_exception_insn(DisasContext *s, int offset, int excp,
                                uint32_t syndrome, uint32_t target_el)
 {
-    gen_a64_set_pc_im(s->pc - offset);
+    gen_a64_set_pc_im(s->base.pc_next - offset);
     gen_exception(excp, syndrome, target_el);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset,
 {
     TCGv_i32 tcg_syn;
 
-    gen_a64_set_pc_im(s->pc - offset);
+    gen_a64_set_pc_im(s->base.pc_next - offset);
     tcg_syn = tcg_const_i32(syndrome);
     gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
     tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 
     if (insn & (1U << 31)) {
         /* BL Branch with link */
-        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
     }
 
     /* B Branch / BL Branch with link */
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                         tcg_cmp, 0, label_match);
 
-    gen_goto_tb(s, 0, s->pc);
+    gen_goto_tb(s, 0, s->base.pc_next);
     gen_set_label(label_match);
     gen_goto_tb(s, 1, addr);
 }
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                         tcg_cmp, 0, label_match);
     tcg_temp_free_i64(tcg_cmp);
-    gen_goto_tb(s, 0, s->pc);
+    gen_goto_tb(s, 0, s->base.pc_next);
     gen_set_label(label_match);
     gen_goto_tb(s, 1, addr);
 }
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
         /* genuinely conditional branches */
         TCGLabel *label_match = gen_new_label();
         arm_gen_test_cc(cond, label_match);
-        gen_goto_tb(s, 0, s->pc);
+        gen_goto_tb(s, 0, s->base.pc_next);
         gen_set_label(label_match);
         gen_goto_tb(s, 1, addr);
     } else {
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * any pending interrupts immediately.
          */
         reset_btype(s);
-        gen_goto_tb(s, 0, s->pc);
+        gen_goto_tb(s, 0, s->base.pc_next);
         return;
 
     case 7: /* SB */
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * MB and end the TB instead.
          */
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-        gen_goto_tb(s, 0, s->pc);
+        gen_goto_tb(s, 0, s->base.pc_next);
         return;
 
     default:
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         gen_a64_set_pc(s, dst);
         /* BLR also needs to load return address */
         if (opc == 1) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
         }
         break;
 
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         gen_a64_set_pc(s, dst);
         /* BLRAA also needs to load return address */
         if (opc == 9) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
         }
         break;
 
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
 {
     uint32_t insn;
 
-    s->pc_curr = s->pc;
-    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
+    s->pc_curr = s->base.pc_next;
+    insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
     s->insn = insn;
-    s->pc += 4;
+    s->base.pc_next += 4;
 
     s->fp_access_checked = false;
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     int bound, core_mmu_idx;
 
     dc->isar = &arm_cpu->isar;
-    dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
 
     dc->aarch64 = 1;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    tcg_gen_insn_start(dc->pc, 0, 0);
+    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
     dc->insn_start = tcg_last_op();
 }
 
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
     if (bp->flags & BP_CPU) {
-        gen_a64_set_pc_im(dc->pc);
+        gen_a64_set_pc_im(dc->base.pc_next);
         gen_helper_check_breakpoints(cpu_env);
         /* End the TB early; it likely won't be executed */
         dc->base.is_jmp = DISAS_TOO_MANY;
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
            to for it to be properly cleared -- thus we
            increment the PC here so that the logic setting
            tb->size below does the right thing. */
-        dc->pc += 4;
+        dc->base.pc_next += 4;
         dc->base.is_jmp = DISAS_NORETURN;
     }
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         disas_a64_insn(env, dc);
     }
 
-    dc->base.pc_next = dc->pc;
     translator_loop_temp_check(&dc->base);
 }
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
      */
     switch (dc->base.is_jmp) {
     default:
-        gen_a64_set_pc_im(dc->pc);
+        gen_a64_set_pc_im(dc->base.pc_next);
         /* fall through */
     case DISAS_EXIT:
     case DISAS_JUMP:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         switch (dc->base.is_jmp) {
         case DISAS_NEXT:
         case DISAS_TOO_MANY:
-            gen_goto_tb(dc, 1, dc->pc);
+            gen_goto_tb(dc, 1, dc->base.pc_next);
             break;
         default:
         case DISAS_UPDATE:
-            gen_a64_set_pc_im(dc->pc);
+            gen_a64_set_pc_im(dc->base.pc_next);
             /* fall through */
         case DISAS_EXIT:
             tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         case DISAS_SWI:
             break;
         case DISAS_WFE:
-            gen_a64_set_pc_im(dc->pc);
+            gen_a64_set_pc_im(dc->base.pc_next);
             gen_helper_wfe(cpu_env);
             break;
         case DISAS_YIELD:
-            gen_a64_set_pc_im(dc->pc);
+            gen_a64_set_pc_im(dc->base.pc_next);
             gen_helper_yield(cpu_env);
             break;
         case DISAS_WFI:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
              */
             TCGv_i32 tmp = tcg_const_i32(4);
 
-            gen_a64_set_pc_im(dc->pc);
+            gen_a64_set_pc_im(dc->base.pc_next);
             gen_helper_wfi(cpu_env, tmp);
             tcg_temp_free_i32(tmp);
             /* The helper doesn't necessarily throw an exception, but we
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 }
-
-    /* Functions above can change dc->pc, so re-align db->pc_next */
-    dc->base.pc_next = dc->pc;
 }
 
 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_blxns(DisasContext *s, int rm)
      * We do however need to set the PC, because the blxns helper reads it.
      * The blxns helper may throw an exception.
      */
-    gen_set_pc_im(s, s->pc);
+    gen_set_pc_im(s, s->base.pc_next);
     gen_helper_v7m_blxns(cpu_env, var);
     tcg_temp_free_i32(var);
     s->base.is_jmp = DISAS_EXIT;
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * for single stepping.)
      */
     s->svc_imm = imm16;
-    gen_set_pc_im(s, s->pc);
+    gen_set_pc_im(s, s->base.pc_next);
     s->base.is_jmp = DISAS_HVC;
 }
 
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
     tmp = tcg_const_i32(syn_aa32_smc());
     gen_helper_pre_smc(cpu_env, tmp);
     tcg_temp_free_i32(tmp);
-    gen_set_pc_im(s, s->pc);
+    gen_set_pc_im(s, s->base.pc_next);
     s->base.is_jmp = DISAS_SMC;
 }
 
 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - offset);
+    gen_set_pc_im(s, s->base.pc_next - offset);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
                                int syn, uint32_t target_el)
 {
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - offset);
+    gen_set_pc_im(s, s->base.pc_next - offset);
     gen_exception(excp, syn, target_el);
     s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
     TCGv_i32 tcg_syn;
 
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc - offset);
+    gen_set_pc_im(s, s->base.pc_next - offset);
     tcg_syn = tcg_const_i32(syn);
     gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
     tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
 /* Force a TB lookup after an instruction that changes the CPU state.  */
 static inline void gen_lookup_tb(DisasContext *s)
 {
-    tcg_gen_movi_i32(cpu_R[15], s->pc);
+    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
     s->base.is_jmp = DISAS_EXIT;
 }
 
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
 {
 #ifndef CONFIG_USER_ONLY
     return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
-           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
 #else
     return true;
 #endif
@@ -XXX,XX +XXX,XX @@ static void gen_nop_hint(DisasContext *s, int val)
      */
     case 1: /* yield */
         if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-            gen_set_pc_im(s, s->pc);
+            gen_set_pc_im(s, s->base.pc_next);
             s->base.is_jmp = DISAS_YIELD;
         }
         break;
     case 3: /* wfi */
-        gen_set_pc_im(s, s->pc);
+        gen_set_pc_im(s, s->base.pc_next);
         s->base.is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
         if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-            gen_set_pc_im(s, s->pc);
+            gen_set_pc_im(s, s->base.pc_next);
             s->base.is_jmp = DISAS_WFE;
         }
         break;
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
             if (isread) {
                 return 1;
             }
-            gen_set_pc_im(s, s->pc);
+            gen_set_pc_im(s, s->base.pc_next);
             s->base.is_jmp = DISAS_WFI;
             return 0;
         default:
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              * self-modifying code correctly and also to take
              * any pending interrupts immediately.
              */
-            gen_goto_tb(s, 0, s->pc);
+            gen_goto_tb(s, 0, s->base.pc_next);
             return;
         case 7: /* sb */
             if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              * for TCG; MB and end the TB instead.
              */
             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-            gen_goto_tb(s, 0, s->pc);
+            gen_goto_tb(s, 0, s->base.pc_next);
             return;
         default:
             goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             int32_t offset;
 
             tmp = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp, s->pc);
+            tcg_gen_movi_i32(tmp, s->base.pc_next);
             store_reg(s, 14, tmp);
             /* Sign-extend the 24-bit offset */
             offset = (((int32_t)insn) << 8) >> 8;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                 /* branch link/exchange thumb (blx) */
                 tmp = load_reg(s, rm);
                 tmp2 = tcg_temp_new_i32();
-                tcg_gen_movi_i32(tmp2, s->pc);
+                tcg_gen_movi_i32(tmp2, s->base.pc_next);
                 store_reg(s, 14, tmp2);
                 gen_bx(s, tmp);
                 break;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             /* branch (and link) */
             if (insn & (1 << 24)) {
                 tmp = tcg_temp_new_i32();
-                tcg_gen_movi_i32(tmp, s->pc);
+                tcg_gen_movi_i32(tmp, s->base.pc_next);
                 store_reg(s, 14, tmp);
             }
             offset = sextract32(insn << 2, 0, 26);
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             break;
         case 0xf:
             /* swi */
-            gen_set_pc_im(s, s->pc);
+            gen_set_pc_im(s, s->base.pc_next);
             s->svc_imm = extract32(insn, 0, 24);
             s->base.is_jmp = DISAS_SWI;
             break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 
             if (insn & (1 << 14)) {
                 /* Branch and link.  */
-                tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+                tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
             }
 
             offset += read_pc(s);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                  * and also to take any pending interrupts
                  * immediately.
                  */
-                gen_goto_tb(s, 0, s->pc);
+                gen_goto_tb(s, 0, s->base.pc_next);
                 break;
             case 7: /* sb */
                 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                  * for TCG; MB and end the TB instead.
                  */
                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-                gen_goto_tb(s, 0, s->pc);
+                gen_goto_tb(s, 0, s->base.pc_next);
                 break;
             default:
                 goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
                 /* BLX/BX */
                 tmp = load_reg(s, rm);
                 if (link) {
-                    val = (uint32_t)s->pc | 1;
+                    val = (uint32_t)s->base.pc_next | 1;
                     tmp2 = tcg_temp_new_i32();
                     tcg_gen_movi_i32(tmp2, val);
                     store_reg(s, 14, tmp2);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 
         if (cond == 0xf) {
             /* swi */
-            gen_set_pc_im(s, s->pc);
+            gen_set_pc_im(s, s->base.pc_next);
             s->svc_imm = extract32(insn, 0, 8);
             s->base.is_jmp = DISAS_SWI;
             break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
             tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
 
             tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
             store_reg(s, 14, tmp2);
             gen_bx(s, tmp);
             break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
             tcg_gen_addi_i32(tmp, tmp, offset);
 
             tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
             store_reg(s, 14, tmp2);
             gen_bx(s, tmp);
         } else {
@@ -XXX,XX +XXX,XX @@ undef:
 
 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
 {
-    /* Return true if the insn at dc->pc might cross a page boundary.
+    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
      * (False positives are OK, false negatives are not.)
      * We know this is a Thumb insn, and our caller ensures we are
-     * only called if dc->pc is less than 4 bytes from the page
+     * only called if dc->base.pc_next is less than 4 bytes from the page
      * boundary, so we cross the page if the first 16 bits indicate
      * that this is a 32 bit insn.
      */
-    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+    uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
 
-    return !thumb_insn_is_16bit(s, s->pc, insn);
+    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
 }
 
 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     uint32_t condexec, core_mmu_idx;
 
     dc->isar = &cpu->isar;
-    dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
 
     dc->aarch64 = 0;
501
@@ -XXX,XX +XXX,XX @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
502
{
503
DisasContext *dc = container_of(dcbase, DisasContext, base);
504
505
- tcg_gen_insn_start(dc->pc,
506
+ tcg_gen_insn_start(dc->base.pc_next,
507
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
508
0);
509
dc->insn_start = tcg_last_op();
510
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
511
512
if (bp->flags & BP_CPU) {
513
gen_set_condexec(dc);
514
- gen_set_pc_im(dc, dc->pc);
515
+ gen_set_pc_im(dc, dc->base.pc_next);
516
gen_helper_check_breakpoints(cpu_env);
517
/* End the TB early; it's likely not going to be executed */
518
dc->base.is_jmp = DISAS_TOO_MANY;
519
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
520
tb->size below does the right thing. */
521
/* TODO: Advance PC by correct instruction length to
522
* avoid disassembler error messages */
523
- dc->pc += 2;
524
+ dc->base.pc_next += 2;
525
dc->base.is_jmp = DISAS_NORETURN;
526
}
527
528
@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
529
{
530
#ifdef CONFIG_USER_ONLY
531
/* Intercept jump to the magic kernel page. */
532
- if (dc->pc >= 0xffff0000) {
533
+ if (dc->base.pc_next >= 0xffff0000) {
534
/* We always get here via a jump, so know we are not in a
535
conditional execution block. */
536
gen_exception_internal(EXCP_KERNEL_TRAP);
537
@@ -XXX,XX +XXX,XX @@ static void arm_post_translate_insn(DisasContext *dc)
538
gen_set_label(dc->condlabel);
539
dc->condjmp = 0;
540
}
541
- dc->base.pc_next = dc->pc;
542
translator_loop_temp_check(&dc->base);
543
}
544
545
@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
546
return;
547
}
548
549
- dc->pc_curr = dc->pc;
550
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
551
+ dc->pc_curr = dc->base.pc_next;
552
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
553
dc->insn = insn;
554
- dc->pc += 4;
555
+ dc->base.pc_next += 4;
556
disas_arm_insn(dc, insn);
557
558
arm_post_translate_insn(dc);
559
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
560
return;
561
}
562
563
- dc->pc_curr = dc->pc;
564
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
565
- is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
566
- dc->pc += 2;
567
+ dc->pc_curr = dc->base.pc_next;
568
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
569
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
570
+ dc->base.pc_next += 2;
571
if (!is_16bit) {
572
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
573
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
574
575
insn = insn << 16 | insn2;
576
- dc->pc += 2;
577
+ dc->base.pc_next += 2;
578
}
579
dc->insn = insn;
580
581
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
582
* but isn't very efficient).
583
*/
584
if (dc->base.is_jmp == DISAS_NEXT
585
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
586
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
587
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
588
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
589
&& insn_crosses_page(env, dc)))) {
590
dc->base.is_jmp = DISAS_TOO_MANY;
591
}
592
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
593
case DISAS_NEXT:
594
case DISAS_TOO_MANY:
595
case DISAS_UPDATE:
596
- gen_set_pc_im(dc, dc->pc);
597
+ gen_set_pc_im(dc, dc->base.pc_next);
598
/* fall through */
599
default:
600
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
601
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
602
switch(dc->base.is_jmp) {
603
case DISAS_NEXT:
604
case DISAS_TOO_MANY:
605
- gen_goto_tb(dc, 1, dc->pc);
606
+ gen_goto_tb(dc, 1, dc->base.pc_next);
607
break;
608
case DISAS_JUMP:
609
gen_goto_ptr();
610
break;
611
case DISAS_UPDATE:
612
- gen_set_pc_im(dc, dc->pc);
613
+ gen_set_pc_im(dc, dc->base.pc_next);
614
/* fall through */
615
default:
616
/* indicate that the hash table must be used to find the next TB */
617
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
618
gen_set_label(dc->condlabel);
619
gen_set_condexec(dc);
620
if (unlikely(is_singlestepping(dc))) {
621
- gen_set_pc_im(dc, dc->pc);
622
+ gen_set_pc_im(dc, dc->base.pc_next);
623
gen_singlestep_exception(dc);
624
} else {
625
- gen_goto_tb(dc, 1, dc->pc);
626
+ gen_goto_tb(dc, 1, dc->base.pc_next);
627
}
628
}
629
-
630
- /* Functions above can change dc->pc, so re-align db->pc_next */
631
- dc->base.pc_next = dc->pc;
632
}
633
634
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
54
--
635
--
55
2.17.1
636
2.20.1
56
637
57
638
diff view generated by jsdifflib
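The shape of this refactoring is easier to see with the two fields side by side. A minimal sketch (not QEMU code; the struct and field names below are simplified stand-ins for DisasContextBase/DisasContext) of the before/after bookkeeping:

#include <stdint.h>

typedef struct {
    uint64_t pc_first;   /* PC of the first insn in this block */
    uint64_t pc_next;    /* PC of the next insn to translate */
} DisasContextBase;

typedef struct {
    DisasContextBase base;
    /* before this patch there was a separate "uint32_t pc;" here that
     * had to be manually re-synced with base.pc_next after each insn */
} DisasContext;

static void translate_one(DisasContext *dc, int insn_len)
{
    /* after the patch: one field, no re-alignment step needed */
    dc->base.pc_next += insn_len;
}

Keeping a single copy of the program counter removes the "re-align db->pc_next" step deleted at the end of the diff above.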
From: Richard Henderson <richard.henderson@linaro.org>

The offset is variable depending on the instruction set, whereas
we have stored values for the current pc and the next pc. Passing
in the actual value is clearer in intent.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c     | 25 ++++++++++++++-----------
 target/arm/translate-vfp.inc.c |  6 +++---
 target/arm/translate.c         | 31 ++++++++++++++++---------------
 3 files changed, 33 insertions(+), 29 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
    s->base.is_jmp = DISAS_NORETURN;
 }

-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                               uint32_t syndrome, uint32_t target_el)
 {
-    gen_a64_set_pc_im(s->base.pc_next - offset);
+    gen_a64_set_pc_im(pc);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
 void unallocated_encoding(DisasContext *s)
 {
    /* Unallocated and reserved encodings are uncategorized */
-    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
 }

@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
        return true;
    }

-    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
-                       s->fp_excp_el);
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                       syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
    return false;
 }

@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
 bool sve_access_check(DisasContext *s)
 {
    if (s->sve_excp_el) {
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
-            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
-                               default_exception_el(s));
+            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
+                               syn_aa64_svc(imm16), default_exception_el(s));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
-            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
+            gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
+                               syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
-            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
+            gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
+                               syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
    if (s->btype != 0
        && s->guarded_page
        && !btype_destination_ok(insn, s->bt, s->btype)) {
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_btitrap(s->btype),
                           default_exception_el(s));
        return;
    }
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
 {
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
-            gen_exception_insn(s, 4, EXCP_UDEF,
+            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
    s->base.is_jmp = DISAS_NORETURN;
 }

-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
 {
    gen_set_condexec(s);
-    gen_set_pc_im(s, s->base.pc_next - offset);
+    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
        return;
    }

-    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
 }

@@ -XXX,XX +XXX,XX @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,

 undef:
    /* If we get here then some access check did not pass */
-    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                       syn_uncategorized(), exc_target);
    return false;
 }

@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
-        gen_exception_insn(s, 4, EXCP_UDEF,
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
-        gen_exception_insn(s, 4, EXCP_UDEF,
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
    }

    if (s->fp_excp_el) {
-        gen_exception_insn(s, 4, EXCP_UDEF,
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
        off_rm = vfp_reg_offset(0, rm);
    }
    if (s->fp_excp_el) {
-        gen_exception_insn(s, 4, EXCP_UDEF,
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
    }

    if (undef) {
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     * UsageFault exception.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
+        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
        break;
    default:
    illegal_op:
-        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        break;
    }
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
            }

            /* All other insns: NOCP */
-            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
    }
    return;
 illegal_op:
-    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
 }

@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
    return;
 illegal_op:
 undef:
-    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
 }

--
2.20.1

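The interface change in this patch is small but worth spelling out: callers used to pass a byte offset back from pc_next (2 for Thumb, 4 for A32/A64) and the callee reconstructed the faulting PC; now they pass the intended PC directly. A hypothetical illustration (simplified stand-ins, not the QEMU prototypes):

#include <stdint.h>

static uint64_t pc_next = 0x1004;  /* stand-ins for the DisasContext fields */
static uint64_t pc_curr = 0x1000;
static uint64_t stored_pc;

static void set_pc(uint64_t pc) { stored_pc = pc; }

/* old style: the callee must know how the offset was derived */
static void exception_old(int offset) { set_pc(pc_next - offset); }

/* new style: the intended PC is explicit at the call site */
static void exception_new(uint64_t pc) { set_pc(pc); }

int main(void)
{
    exception_old(4);        /* which insn? depends on Thumb vs A32 vs A64 */
    exception_new(pc_curr);  /* unambiguous */
    return (int)stored_pc;
}

With pc_curr and base.pc_next both recorded, the call sites can say exactly which of the two they mean.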
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to address_space_map().
Its callers either have an attrs value to hand, or don't care
and can use MEMTXATTRS_UNSPECIFIED.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-5-peter.maydell@linaro.org
---
 include/exec/memory.h   | 3 ++-
 include/sysemu/dma.h    | 3 ++-
 exec.c                  | 6 ++++--
 target/ppc/mmu-hash64.c | 3 ++-
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
 */
 void *address_space_map(AddressSpace *as, hwaddr addr,
-                        hwaddr *plen, bool is_write);
+                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -XXX,XX +XXX,XX @@ static inline void *dma_memory_map(AddressSpace *as,
    hwaddr xlen = *len;
    void *p;

-    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
+    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
+                          MEMTXATTRS_UNSPECIFIED);
    *len = xlen;
    return p;
 }
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
 void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
-                        bool is_write)
+                        bool is_write,
+                        MemTxAttrs attrs)
 {
    hwaddr len = *plen;
    hwaddr l, xlat;
@@ -XXX,XX +XXX,XX @@ void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
 {
-    return address_space_map(&address_space_memory, addr, plen, is_write);
+    return address_space_map(&address_space_memory, addr, plen, is_write,
+                             MEMTXATTRS_UNSPECIFIED);
 }

 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -XXX,XX +XXX,XX @@ const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
        return NULL;
    }

-    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false);
+    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
+                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

The offset is variable depending on the instruction set.
Passing in the actual value is clearer in intent.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 8 ++++----
 target/arm/translate.c     | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
    tcg_temp_free_i32(tcg_excp);
 }

-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
 {
-    gen_a64_set_pc_im(s->base.pc_next - offset);
+    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
            break;
        }
 #endif
-            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+            gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
-        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
    s->base.is_jmp = DISAS_SMC;
 }

-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
 {
    gen_set_condexec(s);
-    gen_set_pc_im(s, s->base.pc_next - offset);
+    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
        s->current_el != 0 &&
 #endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
-        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
        return;
    }

@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
-        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
--
2.20.1

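The MemTxAttrs plumbing follows one repeating pattern: an attributes struct is appended as a trailing parameter, and callers with no opinion pass an "unspecified" value. A compilable sketch of the pattern (simplified types and names, not the real QEMU prototypes):

#include <stdbool.h>
#include <stddef.h>

typedef struct { unsigned unspecified : 1; unsigned secure : 1; } MemTxAttrs;
#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs){ .unspecified = 1 })

/* stand-in for the real mapping function; only the signature shape matters */
static void *map_region(size_t addr, size_t *len, bool is_write, MemTxAttrs attrs)
{
    (void)addr; (void)len; (void)is_write; (void)attrs;
    return NULL;
}

static void *legacy_caller(size_t addr, size_t *len, bool is_write)
{
    /* callers with no attributes to hand pass the unspecified marker */
    return map_region(addr, len, is_write, MEMTXATTRS_UNSPECIFIED);
}

Threading the parameter through even where it is unused today is what lets a later patch make the IOMMU translate method attribute-aware without touching every caller again.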
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to flatview_access_valid().
Its callers now all have an attrs value to hand, so we can
correct our earlier temporary use of MEMTXATTRS_UNSPECIFIED.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-10-peter.maydell@linaro.org
---
 exec.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, int len);
 static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
-                                  bool is_write);
+                                  bool is_write, MemTxAttrs attrs);

 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
@@ -XXX,XX +XXX,XX @@ static bool subpage_accepts(void *opaque, hwaddr addr,
 #endif

    return flatview_access_valid(subpage->fv, addr + subpage->base,
-                                 len, is_write);
+                                 len, is_write, attrs);
 }

 static const MemoryRegionOps subpage_ops = {
@@ -XXX,XX +XXX,XX @@ static void cpu_notify_map_clients(void)
 }

 static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
-                                  bool is_write)
+                                  bool is_write, MemTxAttrs attrs)
 {
    MemoryRegion *mr;
    hwaddr l, xlat;
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
        mr = flatview_translate(fv, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
-            /* When our callers all have attrs we'll pass them through here */
-            if (!memory_region_access_valid(mr, xlat, l, is_write,
-                                            MEMTXATTRS_UNSPECIFIED)) {
+            if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
                return false;
            }
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,

    rcu_read_lock();
    fv = address_space_to_flatview(as);
-    result = flatview_access_valid(fv, addr, len, is_write);
+    result = flatview_access_valid(fv, addr, len, is_write, attrs);
    rcu_read_unlock();
    return result;
 }
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

Unlike the other more generic gen_exception{,_internal}_insn
interfaces, breakpoints always refer to the current instruction.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 7 +++----
 target/arm/translate.c     | 8 ++++----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
    s->base.is_jmp = DISAS_NORETURN;
 }

-static void gen_exception_bkpt_insn(DisasContext *s, int offset,
-                                    uint32_t syndrome)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
 {
    TCGv_i32 tcg_syn;

-    gen_a64_set_pc_im(s->base.pc_next - offset);
+    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
            break;
        }
        /* BRK */
-        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
+        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
    s->base.is_jmp = DISAS_NORETURN;
 }

-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
 {
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
-    gen_set_pc_im(s, s->base.pc_next - offset);
+    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
        case 1:
            /* bkpt */
            ARCH(5);
-            gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
+            gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
            break;
        case 2:
            /* Hypervisor call (v7) */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
-            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
+            gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
            break;
        }
--
2.20.1

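A toy model (hypothetical values, not QEMU code) of why the breakpoint helpers can drop the offset argument entirely: BKPT/BRK always report the address of the instruction itself, which the translator already records in pc_curr before decoding.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pc_curr = 0x1000;        /* insn currently being decoded */
    uint64_t pc_next = pc_curr + 4;   /* advanced during decode */

    /* old scheme: derive it back from pc_next and an ISA-dependent offset */
    uint64_t old_way = pc_next - 4;
    /* new scheme: just use the recorded value */
    uint64_t new_way = pc_curr;

    printf("%d\n", old_way == new_way); /* prints 1 */
    return 0;
}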
From: Richard Henderson <richard.henderson@linaro.org>

Depending on the host abi, float16, aka uint16_t, values are
passed and returned either zero-extended in the host register
or with garbage at the top of the host register.

The tcg code generator has so far been assuming garbage, as that
matches the x86 abi, but this is incorrect for other host abis.
Further, target/arm has so far been assuming zero-extended results,
so that it may store the 16-bit value into a 32-bit slot with the
high 16-bits already clear.

Rectify both problems by mapping "f16" in the helper definition
to uint32_t instead of (a typedef for) uint16_t.  This forces
the host compiler to assume garbage in the upper 16 bits on input
and to zero-extend the result on output.

Cc: qemu-stable@nongnu.org
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20180522175629.24932-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/exec/helper-head.h |  2 +-
 target/arm/helper-a64.c    | 35 +++++++++--------
 target/arm/helper.c        | 80 +++++++++++++++++++-------------------
 3 files changed, 59 insertions(+), 58 deletions(-)

diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h
@@ -XXX,XX +XXX,XX @@
 #define dh_ctype_int int
 #define dh_ctype_i64 uint64_t
 #define dh_ctype_s64 int64_t
-#define dh_ctype_f16 float16
+#define dh_ctype_f16 uint32_t
 #define dh_ctype_f32 float32
 #define dh_ctype_f64 float64
 #define dh_ctype_ptr void *
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float_rel_to_flags(int res)
    return flags;
 }

-uint64_t HELPER(vfp_cmph_a64)(float16 x, float16 y, void *fp_status)
+uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
 {
    return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
 }

-uint64_t HELPER(vfp_cmpeh_a64)(float16 x, float16 y, void *fp_status)
+uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
 {
    return float_rel_to_flags(float16_compare(x, y, fp_status));
 }
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
 #define float64_three make_float64(0x4008000000000000ULL)
 #define float64_one_point_five make_float64(0x3FF8000000000000ULL)

-float16 HELPER(recpsf_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;

@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
    return float64_muladd(a, b, float64_two, 0, fpst);
 }

-float16 HELPER(rsqrtsf_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;

@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_addlp_u16)(uint64_t a)
 }

 /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
-float16 HELPER(frecpx_f16)(float16 a, void *fpstp)
+uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
 {
    float_status *fpst = fpstp;
    uint16_t val16, sbit;
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
 #define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))

 #define ADVSIMD_HALFOP(name) \
-float16 ADVSIMD_HELPER(name, h)(float16 a, float16 b, void *fpstp) \
+uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
 { \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst);    \
@@ -XXX,XX +XXX,XX @@ ADVSIMD_HALFOP(mulx)
 ADVSIMD_TWOHALFOP(mulx)

 /* fused multiply-accumulate */
-float16 HELPER(advsimd_muladdh)(float16 a, float16 b, float16 c, void *fpstp)
+uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
+                                 void *fpstp)
 {
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,

 #define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0

-uint32_t HELPER(advsimd_ceq_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;
    int compare = float16_compare_quiet(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_equal);
 }

-uint32_t HELPER(advsimd_cge_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_cge_f16)(float16 a, float16 b, void *fpstp)
                          compare == float_relation_equal);
 }

-uint32_t HELPER(advsimd_cgt_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
 }

-uint32_t HELPER(advsimd_acge_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_acge_f16)(float16 a, float16 b, void *fpstp)
                          compare == float_relation_equal);
 }

-uint32_t HELPER(advsimd_acgt_f16)(float16 a, float16 b, void *fpstp)
+uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_acgt_f16)(float16 a, float16 b, void *fpstp)
 }

 /* round to integral */
-float16 HELPER(advsimd_rinth_exact)(float16 x, void *fp_status)
+uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
 {
    return float16_round_to_int(x, fp_status);
 }

-float16 HELPER(advsimd_rinth)(float16 x, void *fp_status)
+uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
 {
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float16 ret;
@@ -XXX,XX +XXX,XX @@ float16 HELPER(advsimd_rinth)(float16 x, void *fp_status)
 * setting the mode appropriately before calling the helper.
 */

-uint32_t HELPER(advsimd_f16tosinth)(float16 a, void *fpstp)
+uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
 {
    float_status *fpst = fpstp;

@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_f16tosinth)(float16 a, void *fpstp)
    return float16_to_int16(a, fpst);
 }

-uint32_t HELPER(advsimd_f16touinth)(float16 a, void *fpstp)
+uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
 {
    float_status *fpst = fpstp;

@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_f16touinth)(float16 a, void *fpstp)
 * Square Root and Reciprocal square root
 */

-float16 HELPER(sqrt_f16)(float16 a, void *fpstp)
+uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
 {
    float_status *s = fpstp;

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ DO_VFP_cmp(d, float64)

 /* Integer to float and float to integer conversions */

-#define CONV_ITOF(name, fsz, sign) \
-    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
-{ \
-    float_status *fpst = fpstp; \
-    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
+#define CONV_ITOF(name, ftype, fsz, sign)                            \
+ftype HELPER(name)(uint32_t x, void *fpstp)                          \
+{                                                                    \
+    float_status *fpst = fpstp;                                      \
+    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);      \
 }

-#define CONV_FTOI(name, fsz, sign, round) \
-uint32_t HELPER(name)(float##fsz x, void *fpstp) \
-{ \
-    float_status *fpst = fpstp; \
-    if (float##fsz##_is_any_nan(x)) { \
-        float_raise(float_flag_invalid, fpst); \
-        return 0; \
-    } \
-    return float##fsz##_to_##sign##int32##round(x, fpst); \
+#define CONV_FTOI(name, ftype, fsz, sign, round)                     \
+uint32_t HELPER(name)(ftype x, void *fpstp)                          \
+{                                                                    \
+    float_status *fpst = fpstp;                                      \
+    if (float##fsz##_is_any_nan(x)) {                                \
+        float_raise(float_flag_invalid, fpst);                       \
+        return 0;                                                    \
+    }                                                                \
+    return float##fsz##_to_##sign##int32##round(x, fpst);            \
 }

-#define FLOAT_CONVS(name, p, fsz, sign) \
-CONV_ITOF(vfp_##name##to##p, fsz, sign) \
-CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
-CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
+#define FLOAT_CONVS(name, p, ftype, fsz, sign)                       \
+    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)                   \
+    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )                   \
+    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

-FLOAT_CONVS(si, h, 16, )
-FLOAT_CONVS(si, s, 32, )
-FLOAT_CONVS(si, d, 64, )
-FLOAT_CONVS(ui, h, 16, u)
-FLOAT_CONVS(ui, s, 32, u)
-FLOAT_CONVS(ui, d, 64, u)
+FLOAT_CONVS(si, h, uint32_t, 16, )
+FLOAT_CONVS(si, s, float32, 32, )
+FLOAT_CONVS(si, d, float64, 64, )
+FLOAT_CONVS(ui, h, uint32_t, 16, u)
+FLOAT_CONVS(ui, s, float32, 32, u)
+FLOAT_CONVS(ui, d, float64, 64, u)

 #undef CONV_ITOF
 #undef CONV_FTOI
@@ -XXX,XX +XXX,XX @@ static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
 }

-float16 HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
 }

-float16 HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
 }

-float16 HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
 {
    return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
 }

-float16 HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
 {
    return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
 }
@@ -XXX,XX +XXX,XX @@ static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
    }
 }

-uint32_t HELPER(vfp_toshh)(float16 x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
 }

-uint32_t HELPER(vfp_touhh)(float16 x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
 }

-uint32_t HELPER(vfp_toslh)(float16 x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
 }

-uint32_t HELPER(vfp_toulh)(float16 x, uint32_t shift, void *fpst)
+uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
 }

-uint64_t HELPER(vfp_tosqh)(float16 x, uint32_t shift, void *fpst)
+uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
 }

-uint64_t HELPER(vfp_touqh)(float16 x, uint32_t shift, void *fpst)
+uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
 {
    return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
 }
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
 }

 /* Half precision conversions. */
-float32 HELPER(vfp_fcvt_f16_to_f32)(float16 a, void *fpstp, uint32_t ahp_mode)
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 {
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
@@ -XXX,XX +XXX,XX @@ float32 HELPER(vfp_fcvt_f16_to_f32)(float16 a, void *fpstp, uint32_t ahp_mode)
    return r;
 }

-float16 HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
 {
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
@@ -XXX,XX +XXX,XX @@ float16 HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
    return r;
 }

-float64 HELPER(vfp_fcvt_f16_to_f64)(float16 a, void *fpstp, uint32_t ahp_mode)
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 {
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
@@ -XXX,XX +XXX,XX @@ float64 HELPER(vfp_fcvt_f16_to_f64)(float16 a, void *fpstp, uint32_t ahp_mode)
    return r;
 }

-float16 HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
 {
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
@@ -XXX,XX +XXX,XX @@ static bool round_to_inf(float_status *fpst, bool sign_bit)
    g_assert_not_reached();
 }

-float16 HELPER(recpe_f16)(float16 input, void *fpstp)
+uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
 {
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
@@ -XXX,XX +XXX,XX @@ static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
    return extract64(estimate, 0, 8) << 44;
 }

-float16 HELPER(rsqrte_f16)(float16 input, void *fpstp)
+uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
 {
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

Promote this function from aarch64 to fully general use.
Use it to unify the code sequences for generating illegal
opcode exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h     |  2 --
 target/arm/translate.h         |  2 ++
 target/arm/translate-a64.c     |  7 -------
 target/arm/translate-vfp.inc.c |  3 +--
 target/arm/translate.c         | 22 ++++++++++----------
 5 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@
 #ifndef TARGET_ARM_TRANSLATE_A64_H
 #define TARGET_ARM_TRANSLATE_A64_H

-void unallocated_encoding(DisasContext *s);
-
 #define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasCompare {
    bool value_global;
 } DisasCompare;

+void unallocated_encoding(DisasContext *s);
+
 /* Share the TCG temporaries common between 32 and 64 bit modes.  */
 extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
 extern TCGv_i64 cpu_exclusive_addr;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
        }
    }
 }

-void unallocated_encoding(DisasContext *s)
-{
-    /* Unallocated and reserved encodings are uncategorized */
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
-}
-
 static void init_tmp_a64_array(DisasContext *s)
 {
 #ifdef CONFIG_DEBUG_TCG
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                           default_exception_el(s));
+        unallocated_encoding(s);
        return false;
    }

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
    s->base.is_jmp = DISAS_NORETURN;
 }

+void unallocated_encoding(DisasContext *s)
+{
+    /* Unallocated and reserved encodings are uncategorized */
+    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+                       default_exception_el(s));
+}
+
 /* Force a TB lookup after an instruction that changes the CPU state. */
 static inline void gen_lookup_tb(DisasContext *s)
 {
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
        return;
    }

-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
+    unallocated_encoding(s);
 }

 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
    }

    if (undef) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                           default_exception_el(s));
+        unallocated_encoding(s);
        return;
    }

@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
        break;
    default:
    illegal_op:
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                           default_exception_el(s));
+        unallocated_encoding(s);
        break;
    }
 }
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
    }
    return;
 illegal_op:
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
+    unallocated_encoding(s);
 }

 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
    return;
 illegal_op:
 undef:
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
+    unallocated_encoding(s);
 }

 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
--
2.20.1

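The float16 ABI hazard fixed by the first patch above can be reproduced outside QEMU. A hypothetical, host-ABI-dependent demonstration: if a function returns uint16_t, some calling conventions leave the upper bits of the return register unspecified, so a caller that stores the raw 32-bit register into a vector slot must not assume they are zero. Widening the declared return type to uint32_t makes the callee perform the zero-extension.

#include <stdint.h>

uint16_t ret_f16_narrow(uint32_t x)
{
    return (uint16_t)x;  /* upper 16 bits of the return register:
                          * unspecified on some host ABIs */
}

uint32_t ret_f16_wide(uint32_t x)
{
    return (uint16_t)x;  /* the implicit uint16_t -> uint32_t conversion
                          * forces a zero-extension in the callee */
}

The same reasoning applies in the argument direction: declaring the parameter uint32_t makes the helper assume garbage above bit 15 and mask it itself.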
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to flatview_extend_translation().
Its callers either have an attrs value to hand, or don't care
and can use MEMTXATTRS_UNSPECIFIED.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-7-peter.maydell@linaro.org
---
 exec.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,

 static hwaddr
 flatview_extend_translation(FlatView *fv, hwaddr addr,
-                            hwaddr target_len,
-                            MemoryRegion *mr, hwaddr base, hwaddr len,
-                            bool is_write)
+                            hwaddr target_len,
+                            MemoryRegion *mr, hwaddr base, hwaddr len,
+                            bool is_write, MemTxAttrs attrs)
 {
    hwaddr done = 0;
    hwaddr xlat;
@@ -XXX,XX +XXX,XX @@ void *address_space_map(AddressSpace *as,

    memory_region_ref(mr);
    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
-                                        l, is_write);
+                                        l, is_write, attrs);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
    rcu_read_unlock();

@@ -XXX,XX +XXX,XX @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
    mr = cache->mrs.mr;
    memory_region_ref(mr);
    if (memory_access_is_direct(mr, is_write)) {
+        /* We don't care about the memory attributes here as we're only
+         * doing this if we found actual RAM, which behaves the same
+         * regardless of attributes; so UNSPECIFIED is fine.
+         */
        l = flatview_extend_translation(cache->fv, addr, len, mr,
-                                        cache->xlat, l, is_write);
+                                        cache->xlat, l, is_write,
+                                        MEMTXATTRS_UNSPECIFIED);
        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
    } else {
        cache->ptr = NULL;
--
2.17.1


From: Richard Henderson <richard.henderson@linaro.org>

Replace x = double_saturate(y) with x = add_saturate(y, y).
There is no need for a separate more specialized helper.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h    |  1 -
 target/arm/op_helper.c | 15 ---------------
 target/arm/translate.c |  4 ++--
 3 files changed, 2 insertions(+), 18 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
 DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
 DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
 DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
-DEF_HELPER_2(double_saturate, i32, env, s32)
 DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
 DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
    return res;
 }

-uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
-{
-    uint32_t res;
-    if (val >= 0x40000000) {
-        res = ~SIGNBIT;
-        env->QF = 1;
-    } else if (val <= (int32_t)0xc0000000) {
-        res = SIGNBIT;
-        env->QF = 1;
-    } else {
-        res = val << 1;
-    }
-    return res;
-}
-
 uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 {
    uint32_t res = a + b;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
-                gen_helper_double_saturate(tmp2, cpu_env, tmp2);
+                gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            else
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if (op & 1)
-                gen_helper_double_saturate(tmp, cpu_env, tmp);
+                gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
            if (op & 2)
                gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
            else
--
2.20.1

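Why the dedicated helper is redundant: saturating doubling is just a saturating add of a value to itself. A minimal signed 32-bit sketch (standalone C, not the QEMU helpers, which also thread CPUARMState through for the Q flag):

#include <stdint.h>

static int32_t add_saturate(int32_t a, int32_t b, int *qflag)
{
    int64_t sum = (int64_t)a + b;  /* widen so the overflow is visible */
    if (sum > INT32_MAX) { *qflag = 1; return INT32_MAX; }
    if (sum < INT32_MIN) { *qflag = 1; return INT32_MIN; }
    return (int32_t)sum;
}

static int32_t double_saturate(int32_t v, int *qflag)
{
    return add_saturate(v, v, qflag);  /* x = add_saturate(y, y) */
}

With the general helper expressing the doubling case, the specialized one can be deleted and the QDADD/QDSUB code paths reuse the existing saturation logic.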
Add more detail to the documentation for memory_region_init_iommu()
and other IOMMU-related functions and data structures.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20180521140402.23318-2-peter.maydell@linaro.org
---
 include/exec/memory.h | 105 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 95 insertions(+), 10 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
 };

+/**
+ * IOMMUMemoryRegionClass:
+ *
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
+ * and provide an implementation of at least the @translate method here
+ * to handle requests to the memory region. Other methods are optional.
+ *
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
+ * to report whenever mappings are changed, by calling
+ * memory_region_notify_iommu() (or, if necessary, by calling
+ * memory_region_notify_one() for each registered notifier).
+ */
 typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
-     * Return a TLB entry that contains a given address. Flag should
-     * be the access permission of this translation operation. We can
-     * set flag to IOMMU_NONE to mean that we don't need any
-     * read/write permission checks, like, when for region replay.
+     * Return a TLB entry that contains a given address.
+     *
+     * The IOMMUAccessFlags indicated via @flag are optional and may
+     * be specified as IOMMU_NONE to indicate that the caller needs
+     * the full translation information for both reads and writes. If
+     * the access flags are specified then the IOMMU implementation
+     * may use this as an optimization, to stop doing a page table
+     * walk as soon as it knows that the requested permissions are not
+     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
+     * full page table walk and report the permissions in the returned
+     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
+     * return different mappings for reads and writes.)
+     *
+     * The returned information remains valid while the caller is
+     * holding the big QEMU lock or is inside an RCU critical section;
+     * if the caller wishes to cache the mapping beyond that it must
+     * register an IOMMU notifier so it can invalidate its cached
+     * information when the IOMMU mapping changes.
+     *
+     * @iommu: the IOMMUMemoryRegion
+     * @hwaddr: address to be translated within the memory region


From: Andrew Jones <drjones@redhat.com>

If -cpu <cpu>,aarch64=off is used then KVM must also be used, and it
and the host must support running the vcpu in 32-bit mode. Also, if
-cpu <cpu>,aarch64=on is used, then it doesn't matter if kvm is
enabled or not.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm_arm.h | 14 ++++++++++++++
 target/arm/cpu64.c   | 12 ++++++------
 target/arm/kvm64.c   |  9 +++++++++
 3 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
 */
 void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);

+/**
+ * kvm_arm_aarch32_supported:
+ * @cs: CPUState
+ *
+ * Returns: true if the KVM VCPU can enable AArch32 mode
+ * and false otherwise.
+ */
+bool kvm_arm_aarch32_supported(CPUState *cs);
+
 /**
 * kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
 * IPA address space supported by KVM
@@ -XXX,XX +XXX,XX @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
    cpu->host_cpu_probe_failed = true;
 }

+static inline bool kvm_arm_aarch32_supported(CPUState *cs)
+{
+    return false;
+}
+
 static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
 {
    return -ENOENT;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
     * restriction allows us to avoid fixing up functionality that assumes a
     * uniform execution state like do_interrupt.
56
+ * holding the big QEMU lock or is inside an RCU critical section;
55
* uniform execution state like do_interrupt.
57
+ * if the caller wishes to cache the mapping beyond that it must
58
+ * register an IOMMU notifier so it can invalidate its cached
59
+ * information when the IOMMU mapping changes.
60
+ *
61
+ * @iommu: the IOMMUMemoryRegion
62
+ * @hwaddr: address to be translated within the memory region
63
+ * @flag: requested access permissions
64
*/
56
*/
65
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
57
- if (!kvm_enabled()) {
66
IOMMUAccessFlags flag);
58
- error_setg(errp, "'aarch64' feature cannot be disabled "
67
- /* Returns minimum supported page size */
59
- "unless KVM is enabled");
68
+ /* Returns minimum supported page size in bytes.
60
- return;
69
+ * If this method is not provided then the minimum is assumed to
61
- }
70
+ * be TARGET_PAGE_SIZE.
62
-
71
+ *
63
if (value == false) {
72
+ * @iommu: the IOMMUMemoryRegion
64
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
73
+ */
65
+ error_setg(errp, "'aarch64' feature cannot be disabled "
74
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
66
+ "unless KVM is enabled and 32-bit EL1 "
75
- /* Called when IOMMU Notifier flag changed */
67
+ "is supported");
76
+ /* Called when IOMMU Notifier flag changes (ie when the set of
68
+ return;
77
+ * events which IOMMU users are requesting notification for changes).
69
+ }
78
+ * Optional method -- need not be provided if the IOMMU does not
70
unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
79
+ * need to know exactly which events must be notified.
71
} else {
80
+ *
72
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
81
+ * @iommu: the IOMMUMemoryRegion
73
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
82
+ * @old_flags: events which previously needed to be notified
74
index XXXXXXX..XXXXXXX 100644
83
+ * @new_flags: events which now need to be notified
75
--- a/target/arm/kvm64.c
84
+ */
76
+++ b/target/arm/kvm64.c
85
void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
77
@@ -XXX,XX +XXX,XX @@
86
IOMMUNotifierFlag old_flags,
78
#include "exec/gdbstub.h"
87
IOMMUNotifierFlag new_flags);
79
#include "sysemu/sysemu.h"
88
- /* Set this up to provide customized IOMMU replay function */
80
#include "sysemu/kvm.h"
89
+ /* Called to handle memory_region_iommu_replay().
81
+#include "sysemu/kvm_int.h"
90
+ *
82
#include "kvm_arm.h"
91
+ * The default implementation of memory_region_iommu_replay() is to
83
+#include "hw/boards.h"
92
+ * call the IOMMU translate method for every page in the address space
84
#include "internals.h"
93
+ * with flag == IOMMU_NONE and then call the notifier if translate
85
94
+ * returns a valid mapping. If this method is implemented then it
86
static bool have_guest_debug;
95
+ * overrides the default behaviour, and must provide the full semantics
87
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
96
+ * of memory_region_iommu_replay(), by calling @notifier for every
88
return true;
97
+ * translation present in the IOMMU.
89
}
98
+ *
90
99
+ * Optional method -- an IOMMU only needs to provide this method
91
+bool kvm_arm_aarch32_supported(CPUState *cpu)
100
+ * if the default is inefficient or produces undesirable side effects.
92
+{
101
+ *
93
+ KVMState *s = KVM_STATE(current_machine->accelerator);
102
+ * Note: this is not related to record-and-replay functionality.
94
+
103
+ */
95
+ return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
104
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
96
+}
105
97
+
106
- /* Get IOMMU misc attributes */
98
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
107
- int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr,
99
108
+ /* Get IOMMU misc attributes. This is an optional method that
100
int kvm_arch_init_vcpu(CPUState *cs)
109
+ * can be used to allow users of the IOMMU to get implementation-specific
110
+ * information. The IOMMU implements this method to handle calls
111
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
112
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
113
+ * the IOMMU supports. If the method is unimplemented then
114
+ * memory_region_iommu_get_attr() will always return -EINVAL.
115
+ *
116
+ * @iommu: the IOMMUMemoryRegion
117
+ * @attr: attribute being queried
118
+ * @data: memory to fill in with the attribute data
119
+ *
120
+ * Returns 0 on success, or a negative errno; in particular
121
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
122
+ */
123
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
124
void *data);
125
} IOMMUMemoryRegionClass;
126
127
@@ -XXX,XX +XXX,XX @@ static inline void memory_region_init_reservation(MemoryRegion *mr,
128
* An IOMMU region translates addresses and forwards accesses to a target
129
* memory region.
130
*
131
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
132
+ * @_iommu_mr should be a pointer to enough memory for an instance of
133
+ * that subclass, @instance_size is the size of that subclass, and
134
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
135
+ * instance of the subclass, and its methods will then be called to handle
136
+ * accesses to the memory region. See the documentation of
137
+ * #IOMMUMemoryRegionClass for further details.
138
+ *
139
* @_iommu_mr: the #IOMMUMemoryRegion to be initialized
140
* @instance_size: the IOMMUMemoryRegion subclass instance size
141
* @mrtypename: the type name of the #IOMMUMemoryRegion
142
@@ -XXX,XX +XXX,XX @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
143
* a notifier with the minimum page granularity returned by
144
* mr->iommu_ops->get_page_size().
145
*
146
+ * Note: this is not related to record-and-replay functionality.
147
+ *
148
* @iommu_mr: the memory region to observe
149
* @n: the notifier to which to replay iommu mappings
150
*/
151
@@ -XXX,XX +XXX,XX @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
152
* memory_region_iommu_replay_all: replay existing IOMMU translations
153
* to all the notifiers registered.
154
*
155
+ * Note: this is not related to record-and-replay functionality.
156
+ *
157
* @iommu_mr: the memory region to observe
158
*/
159
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
160
@@ -XXX,XX +XXX,XX @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
161
* memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
162
* defined on the IOMMU.
163
*
164
- * Returns 0 if succeded, error code otherwise.
165
+ * Returns 0 on success, or a negative errno otherwise. In particular,
166
+ * -EINVAL indicates that the IOMMU does not support the requested
167
+ * attribute.
168
*
169
* @iommu_mr: the memory region
170
* @attr: the requested attribute
171
--
101
--
172
2.17.1
102
2.20.1
173
103
174
104
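To make the IOMMUMemoryRegionClass documentation above concrete, here is a
minimal, hypothetical subclass sketch. The "mydev" type name and the identity
4K mapping are invented for illustration, only the translate hook is
mandatory, and this fragment compiles only against QEMU's own headers:

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    #define TYPE_MYDEV_IOMMU_MEMORY_REGION "mydev-iommu-memory-region"

    /* Mandatory hook: produce a TLB entry covering @addr. This sketch
     * identity-maps 4K pages read/write; a real IOMMU would walk its
     * page tables here and could use @flag as an early-out optimisation. */
    static IOMMUTLBEntry mydev_iommu_translate(IOMMUMemoryRegion *iommu,
                                               hwaddr addr,
                                               IOMMUAccessFlags flag)
    {
        IOMMUTLBEntry entry = {
            .target_as = &address_space_memory,
            .iova = addr & ~0xfffULL,
            .translated_addr = addr & ~0xfffULL,
            .addr_mask = 0xfff,
            .perm = IOMMU_RW,
        };
        return entry;
    }

    static void mydev_iommu_class_init(ObjectClass *klass, void *data)
    {
        IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

        imrc->translate = mydev_iommu_translate;
        /* get_min_page_size, notify_flag_changed, replay and get_attr are
         * all optional and keep their documented default behaviour. */
    }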
From: Shannon Zhao <zhaoshenglong@huawei.com>

acpi_data_push uses g_array_set_size to resize the memory size. If there
is not enough contiguous memory, the address will change, so the previous
pointer can no longer be used. We must update the pointer and use the new
one.

Also, the previous code wrongly used the le32 conversion of
iort->node_offset for subsequent computations, which will produce an
incorrect value if the host is not little endian. So use the
non-converted value instead.

Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 1527663951-14552-1-git-send-email-zhaoshenglong@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/virt-acpi-build.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
     AcpiIortItsGroup *its;
     AcpiIortTable *iort;
     AcpiIortSmmu3 *smmu;
-    size_t node_size, iort_length, smmu_offset = 0;
+    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
     AcpiIortRC *rc;
 
     iort = acpi_data_push(table_data, sizeof(*iort));
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
 
     iort_length = sizeof(*iort);
     iort->node_count = cpu_to_le32(nb_nodes);
-    iort->node_offset = cpu_to_le32(sizeof(*iort));
+    /*
+     * Use a copy in case table_data->data moves during acpi_data_push
+     * operations.
+     */
+    iort_node_offset = sizeof(*iort);
+    iort->node_offset = cpu_to_le32(iort_node_offset);
 
     /* ITS group node */
     node_size = sizeof(*its) + sizeof(uint32_t);
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
         int irq = vms->irqmap[VIRT_SMMU];
 
         /* SMMUv3 node */
-        smmu_offset = iort->node_offset + node_size;
+        smmu_offset = iort_node_offset + node_size;
         node_size = sizeof(*smmu) + sizeof(*idmap);
         iort_length += node_size;
         smmu = acpi_data_push(table_data, node_size);
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
         idmap->id_count = cpu_to_le32(0xFFFF);
         idmap->output_base = 0;
         /* output IORT node is the ITS group node (the first node) */
-        idmap->output_reference = cpu_to_le32(iort->node_offset);
+        idmap->output_reference = cpu_to_le32(iort_node_offset);
     }
 
     /* Root Complex Node */
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
         idmap->output_reference = cpu_to_le32(smmu_offset);
     } else {
         /* output IORT node is the ITS group node (the first node) */
-        idmap->output_reference = cpu_to_le32(iort->node_offset);
+        idmap->output_reference = cpu_to_le32(iort_node_offset);
     }
 
+    /*
+     * Update the pointer address in case table_data->data moves during above
+     * acpi_data_push operations.
+     */
+    iort = (AcpiIortTable *)(table_data->data + iort_start);
     iort->length = cpu_to_le32(iort_length);
 
     build_header(linker, table_data, (void *)(table_data->data + iort_start),
--
2.17.1

From: Andrew Jones <drjones@redhat.com>

We first convert the pmu property from a static property to one with
its own accessors. Then we use the set accessor to check if the PMU is
supported when using KVM. Indeed a 32-bit KVM host does not support
the PMU, so this check will catch an attempt to use it at property-set
time.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm_arm.h | 14 ++++++++++++++
 target/arm/cpu.c     | 30 +++++++++++++++++++++++++-----
 target/arm/kvm.c     |  7 +++++++
 3 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
  */
 bool kvm_arm_aarch32_supported(CPUState *cs);
 
+/**
+ * kvm_arm_pmu_supported:
+ * @cs: CPUState
+ *
+ * Returns: true if the KVM VCPU can enable its PMU
+ * and false otherwise.
+ */
+bool kvm_arm_pmu_supported(CPUState *cs);
+
 /**
  * kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
  * IPA address space supported by KVM
@@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_aarch32_supported(CPUState *cs)
     return false;
 }
 
+static inline bool kvm_arm_pmu_supported(CPUState *cs)
+{
+    return false;
+}
+
 static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
 {
     return -ENOENT;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_has_el3_property =
 static Property arm_cpu_cfgend_property =
             DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
 
-/* use property name "pmu" to match other archs and virt tools */
-static Property arm_cpu_has_pmu_property =
-            DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
-
 static Property arm_cpu_has_vfp_property =
             DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
 
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_pmsav7_dregion_property =
                                            pmsav7_dregion,
                                            qdev_prop_uint32, uint32_t);
 
+static bool arm_get_pmu(Object *obj, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    return cpu->has_pmu;
+}
+
+static void arm_set_pmu(Object *obj, bool value, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    if (value) {
+        if (kvm_enabled() && !kvm_arm_pmu_supported(CPU(cpu))) {
+            error_setg(errp, "'pmu' feature not supported by KVM on this host");
+            return;
+        }
+        set_feature(&cpu->env, ARM_FEATURE_PMU);
+    } else {
+        unset_feature(&cpu->env, ARM_FEATURE_PMU);
+    }
+    cpu->has_pmu = value;
+}
+
 static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
 {
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
     }
 
     if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
-        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
+        cpu->has_pmu = true;
+        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu,
                                  &error_abort);
     }
 
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
     env->features = arm_host_cpu_features.features;
 }
 
+bool kvm_arm_pmu_supported(CPUState *cpu)
+{
+    KVMState *s = KVM_STATE(current_machine->accelerator);
+
+    return kvm_check_extension(s, KVM_CAP_ARM_PMU_V3);
+}
+
 int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
 {
     KVMState *s = KVM_STATE(ms->accelerator);
--
2.20.1
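The realloc hazard the virt-acpi-build fix above addresses is easy to
reproduce with glib directly; a small standalone illustration (not QEMU
code):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GArray *table = g_array_new(FALSE, TRUE, 1);
        g_array_set_size(table, 64);
        char *iort = table->data;         /* pointer into the array's storage */

        g_array_set_size(table, 1 << 20); /* may g_realloc() and move the data */

        /* 'iort' may now dangle; re-derive it from table->data before use,
         * exactly as the patch does with (table_data->data + iort_start). */
        printf("storage moved: %s\n", iort == table->data ? "no" : "yes");

        g_array_free(table, TRUE);
        return 0;
    }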
In commit f0aff255700 we made cpacr_write() enforce that some CPACR
bits are RAZ/WI and some are RAO/WI for ARMv7 cores. Unfortunately
we forgot to also update the register's reset value. The effect
was that (a) a guest that read CPACR on reset would not see ones in
the RAO bits, and (b) if you did a migration before the guest did
a write to the CPACR then the migration would fail because the
destination would enforce the RAO bits and then complain that they
didn't match the zero value from the source.

Implement reset for the CPACR using a custom reset function
that just calls cpacr_write(), to avoid having to duplicate
the logic for which bits are RAO.

This bug would affect migration for TCG CPUs which are ARMv7
with VFP but without one of Neon or VFPv3.

Reported-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180522173713.26282-1-peter.maydell@linaro.org
---
 target/arm/helper.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     env->cp15.cpacr_el1 = value;
 }
 
+static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    /* Call cpacr_write() so that we reset with the correct RAO bits set
+     * for our CPU features.
+     */
+    cpacr_write(env, ri, 0);
+}
+
 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
 {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
     { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
-      .resetvalue = 0, .writefn = cpacr_write },
+      .resetfn = cpacr_reset, .writefn = cpacr_write },
     REGINFO_SENTINEL
 };
 
--
2.17.1

From: Andrew Jones <drjones@redhat.com>

The current implementation of ZCR_ELx matches the architecture, only
implementing the lower four bits, with the rest RAZ/WI. This puts
a strict limit on ARM_MAX_VQ of 16. Make sure we don't let ARM_MAX_VQ
grow without a corresponding update here.

Suggested-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     int new_len;
 
     /* Bits other than [3:0] are RAZ/WI.  */
+    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
     raw_write(env, ri, value & 0xf);
 
     /*
--
2.20.1
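A short standalone sketch of the arithmetic behind the new build-time
assertion: the ZCR_ELx LEN field is four bits, and the selected vector
length is LEN+1 quadwords, so 16 is the hard ceiling:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint64_t zcr = 0; zcr < 256; zcr++) {
            unsigned len = zcr & 0xf;  /* bits other than [3:0] are RAZ/WI */
            unsigned vq = len + 1;     /* vector length in 128-bit quadwords */
            assert(vq <= 16);          /* hence QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16) */
        }
        return 0;
    }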
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to address_space_translate()
and address_space_translate_cached(). Callers either have an
attrs value to hand, or don't care and can use MEMTXATTRS_UNSPECIFIED.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-4-peter.maydell@linaro.org
---
 include/exec/memory.h     |  4 +++-
 accel/tcg/translate-all.c |  2 +-
 exec.c                    | 14 +++++++++-----
 hw/vfio/common.c          |  3 ++-
 memory_ldst.inc.c         | 18 +++++++++---------
 target/riscv/helper.c     |  2 +-
 6 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
  * #MemoryRegion.
  * @len: pointer to length
  * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
  */
 MemoryRegion *flatview_translate(FlatView *fv,
                                  hwaddr addr, hwaddr *xlat,
@@ -XXX,XX +XXX,XX @@ MemoryRegion *flatview_translate(FlatView *fv,
 
 static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                     hwaddr addr, hwaddr *xlat,
-                                                    hwaddr *len, bool is_write)
+                                                    hwaddr *len, bool is_write,
+                                                    MemTxAttrs attrs)
 {
     return flatview_translate(address_space_to_flatview(as),
                               addr, xlat, len, is_write);
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
     hwaddr l = 1;
 
     rcu_read_lock();
-    mr = address_space_translate(as, addr, &addr, &l, false);
+    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
     if (!(memory_region_is_ram(mr)
           || memory_region_is_romd(mr))) {
         rcu_read_unlock();
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
     rcu_read_lock();
     while (len > 0) {
         l = len;
-        mr = address_space_translate(as, addr, &addr1, &l, true);
+        mr = address_space_translate(as, addr, &addr1, &l, true,
+                                     MEMTXATTRS_UNSPECIFIED);
 
         if (!(memory_region_is_ram(mr) ||
               memory_region_is_romd(mr))) {
@@ -XXX,XX +XXX,XX @@ void address_space_cache_destroy(MemoryRegionCache *cache)
  */
 static inline MemoryRegion *address_space_translate_cached(
     MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
-    hwaddr *plen, bool is_write)
+    hwaddr *plen, bool is_write, MemTxAttrs attrs)
 {
     MemoryRegionSection section;
     MemoryRegion *mr;
@@ -XXX,XX +XXX,XX @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     MemoryRegion *mr;
 
     l = len;
-    mr = address_space_translate_cached(cache, addr, &addr1, &l, false);
+    mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
+                                        MEMTXATTRS_UNSPECIFIED);
     flatview_read_continue(cache->fv,
                            addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                            addr1, l, mr);
@@ -XXX,XX +XXX,XX @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     MemoryRegion *mr;
 
     l = len;
-    mr = address_space_translate_cached(cache, addr, &addr1, &l, true);
+    mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
+                                        MEMTXATTRS_UNSPECIFIED);
     flatview_write_continue(cache->fv,
                             addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                             addr1, l, mr);
@@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
 
     rcu_read_lock();
     mr = address_space_translate(&address_space_memory,
-                                 phys_addr, &phys_addr, &l, false);
+                                 phys_addr, &phys_addr, &l, false,
+                                 MEMTXATTRS_UNSPECIFIED);
 
     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
     rcu_read_unlock();
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -XXX,XX +XXX,XX @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
      */
     mr = address_space_translate(&address_space_memory,
                                  iotlb->translated_addr,
-                                 &xlat, &len, writable);
+                                 &xlat, &len, writable,
+                                 MEMTXATTRS_UNSPECIFIED);
     if (!memory_region_is_ram(mr)) {
         error_report("iommu map to non memory area %"HWADDR_PRIx"",
                      xlat);
diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/memory_ldst.inc.c
+++ b/memory_ldst.inc.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, false);
+    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
     if (l < 4 || !IS_DIRECT(mr, false)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, false);
+    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
     if (l < 8 || !IS_DIRECT(mr, false)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, false);
+    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
     if (!IS_DIRECT(mr, false)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, false);
+    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
     if (l < 2 || !IS_DIRECT(mr, false)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, true);
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 4 || !IS_DIRECT(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, true);
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 4 || !IS_DIRECT(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, true);
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (!IS_DIRECT(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
         r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, true);
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 2 || !IS_DIRECT(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
 
@@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
     bool release_lock = false;
 
     RCU_READ_LOCK();
-    mr = TRANSLATE(addr, &addr1, &l, true);
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 8 || !IS_DIRECT(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
 
diff --git a/target/riscv/helper.c b/target/riscv/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/helper.c
+++ b/target/riscv/helper.c
@@ -XXX,XX +XXX,XX @@ restart:
     MemoryRegion *mr;
     hwaddr l = sizeof(target_ulong), addr1;
     mr = address_space_translate(cs->as, pte_addr,
-                                 &addr1, &l, false);
+                                 &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
     if (memory_access_is_direct(mr, true)) {
         target_ulong *pte_pa =
             qemu_map_ram_ptr(mr->ram_block, addr1);
--
2.17.1

From: Andrew Jones <drjones@redhat.com>

Unless we're guaranteed to always increase ARM_MAX_VQ by a multiple of
four, we should use DIV_ROUND_UP to ensure we get an appropriate
array size.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVectorReg {
 #ifdef TARGET_AARCH64
 /* In AArch32 mode, predicate registers do not exist at all.  */
 typedef struct ARMPredicateReg {
-    uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
+    uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
 } ARMPredicateReg;
 
 /* In AArch32 mode, PAC keys do not exist at all.  */
--
2.20.1
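DIV_ROUND_UP here is the usual ceiling-division macro (as defined in
qemu/osdep.h); a tiny standalone illustration of why plain division would
under-allocate if ARM_MAX_VQ ever stopped being a multiple of four:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* With ARM_MAX_VQ = 16 both forms give 4, but for a hypothetical
         * ARM_MAX_VQ = 17 plain division would under-allocate: */
        printf("plain: %d  rounded up: %d\n", 2 * 17 / 8, DIV_ROUND_UP(2 * 17, 8));
        return 0;   /* prints "plain: 4  rounded up: 5" */
    }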
As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to flatview_translate(); all its
callers now have attrs available.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-11-peter.maydell@linaro.org
---
 include/exec/memory.h |  7 ++++---
 exec.c                | 17 +++++++++--------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
  */
 MemoryRegion *flatview_translate(FlatView *fv,
                                  hwaddr addr, hwaddr *xlat,
-                                 hwaddr *len, bool is_write);
+                                 hwaddr *len, bool is_write,
+                                 MemTxAttrs attrs);
 
 static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                     hwaddr addr, hwaddr *xlat,
@@ -XXX,XX +XXX,XX @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                     MemTxAttrs attrs)
 {
     return flatview_translate(address_space_to_flatview(as),
-                              addr, xlat, len, is_write);
+                              addr, xlat, len, is_write, attrs);
 }
 
 /* address_space_access_valid: check for validity of accessing an address
@@ -XXX,XX +XXX,XX @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
         rcu_read_lock();
         fv = address_space_to_flatview(as);
         l = len;
-        mr = flatview_translate(fv, addr, &addr1, &l, false);
+        mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
         if (len == l && memory_access_is_direct(mr, false)) {
             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
             memcpy(buf, ptr, len);
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ iotlb_fail:
 
 /* Called from RCU critical section */
 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
-                                 hwaddr *plen, bool is_write)
+                                 hwaddr *plen, bool is_write,
+                                 MemTxAttrs attrs)
 {
     MemoryRegion *mr;
     MemoryRegionSection section;
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
         }
 
         l = len;
-        mr = flatview_translate(fv, addr, &addr1, &l, true);
+        mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
     }
 
     return result;
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
     MemTxResult result = MEMTX_OK;
 
     l = len;
-    mr = flatview_translate(fv, addr, &addr1, &l, true);
+    mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
     result = flatview_write_continue(fv, addr, attrs, buf, len,
                                      addr1, l, mr);
 
@@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
         }
 
         l = len;
-        mr = flatview_translate(fv, addr, &addr1, &l, false);
+        mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
     }
 
     return result;
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
     MemoryRegion *mr;
 
     l = len;
-    mr = flatview_translate(fv, addr, &addr1, &l, false);
+    mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
     return flatview_read_continue(fv, addr, attrs, buf, len,
                                   addr1, l, mr);
 }
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
 
     while (len > 0) {
         l = len;
-        mr = flatview_translate(fv, addr, &xlat, &l, is_write);
+        mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
         if (!memory_access_is_direct(mr, is_write)) {
             l = memory_access_size(mr, l, addr);
             if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
@@ -XXX,XX +XXX,XX @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
 
         len = target_len;
         this_mr = flatview_translate(fv, addr, &xlat,
-                                     &len, is_write);
+                                     &len, is_write, attrs);
         if (this_mr != mr || xlat != base + done) {
             return done;
         }
@@ -XXX,XX +XXX,XX @@ void *address_space_map(AddressSpace *as,
     l = len;
     rcu_read_lock();
     fv = address_space_to_flatview(as);
-    mr = flatview_translate(fv, addr, &xlat, &l, is_write);
+    mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
 
     if (!memory_access_is_direct(mr, is_write)) {
         if (atomic_xchg(&bounce.in_use, true)) {
--
2.17.1

From: Andrew Jones <drjones@redhat.com>

A couple return -EINVAL's forgot their '-'s.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm64.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
     write_cpustate_to_list(cpu, true);
 
     if (!write_list_to_kvmstate(cpu, level)) {
-        return EINVAL;
+        return -EINVAL;
     }
 
     kvm_arm_sync_mpstate_to_kvm(cpu);
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
     }
 
     if (!write_kvmstate_to_list(cpu)) {
-        return EINVAL;
+        return -EINVAL;
     }
     /* Note that it's OK to have registers which aren't in CPUState,
      * so we can ignore a failure return here.
--
2.20.1
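The '-EINVAL' fix above matters because QEMU, like the kernel, signals
failure with negative errno values; a compact standalone reminder:

    #include <errno.h>
    #include <stdio.h>

    static int write_regs(int fail)
    {
        return fail ? -EINVAL : 0;   /* negative errno on failure */
    }

    int main(void)
    {
        /* A bare 'return EINVAL' is positive, so 'ret < 0' checks miss it: */
        printf("EINVAL=%d, -EINVAL=%d\n", EINVAL, -EINVAL);
        return write_regs(0) < 0;
    }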
From: Andrew Jones <drjones@redhat.com>

Move the getting/putting of the fpsimd registers out of
kvm_arch_get/put_registers() into their own helper functions
to prepare for alternatively getting/putting SVE registers.

No functional change.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm64.c | 148 +++++++++++++++++++++++++++------------------
 1 file changed, 88 insertions(+), 60 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arm_cpreg_level(uint64_t regidx)
 #define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
 
+static int kvm_arch_put_fpsimd(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    struct kvm_one_reg reg;
+    uint32_t fpr;
+    int i, ret;
+
+    for (i = 0; i < 32; i++) {
+        uint64_t *q = aa64_vfp_qreg(env, i);
+#ifdef HOST_WORDS_BIGENDIAN
+        uint64_t fp_val[2] = { q[1], q[0] };
+        reg.addr = (uintptr_t)fp_val;
+#else
+        reg.addr = (uintptr_t)q;
+#endif
+        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    reg.addr = (uintptr_t)(&fpr);
+    fpr = vfp_get_fpsr(env);
+    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    if (ret) {
+        return ret;
+    }
+
+    reg.addr = (uintptr_t)(&fpr);
+    fpr = vfp_get_fpcr(env);
+    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    if (ret) {
+        return ret;
+    }
+
+    return 0;
+}
+
 int kvm_arch_put_registers(CPUState *cs, int level)
 {
     struct kvm_one_reg reg;
-    uint32_t fpr;
     uint64_t val;
-    int i;
-    int ret;
+    int i, ret;
     unsigned int el;
 
     ARMCPU *cpu = ARM_CPU(cs);
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
         }
     }
 
-    /* Advanced SIMD and FP registers. */
-    for (i = 0; i < 32; i++) {
-        uint64_t *q = aa64_vfp_qreg(env, i);
-#ifdef HOST_WORDS_BIGENDIAN
-        uint64_t fp_val[2] = { q[1], q[0] };
-        reg.addr = (uintptr_t)fp_val;
-#else
-        reg.addr = (uintptr_t)q;
-#endif
-        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-        if (ret) {
-            return ret;
-        }
-    }
-
-    reg.addr = (uintptr_t)(&fpr);
-    fpr = vfp_get_fpsr(env);
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
-    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-    if (ret) {
-        return ret;
-    }
-
-    fpr = vfp_get_fpcr(env);
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
-    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    ret = kvm_arch_put_fpsimd(cs);
     if (ret) {
         return ret;
     }
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
     return ret;
 }
 
+static int kvm_arch_get_fpsimd(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    struct kvm_one_reg reg;
+    uint32_t fpr;
+    int i, ret;
+
+    for (i = 0; i < 32; i++) {
+        uint64_t *q = aa64_vfp_qreg(env, i);
+        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+        reg.addr = (uintptr_t)q;
+        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        if (ret) {
+            return ret;
+        } else {
+#ifdef HOST_WORDS_BIGENDIAN
+            uint64_t t;
+            t = q[0], q[0] = q[1], q[1] = t;
+#endif
+        }
+    }
+
+    reg.addr = (uintptr_t)(&fpr);
+    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (ret) {
+        return ret;
+    }
+    vfp_set_fpsr(env, fpr);
+
+    reg.addr = (uintptr_t)(&fpr);
+    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (ret) {
+        return ret;
+    }
+    vfp_set_fpcr(env, fpr);
+
+    return 0;
+}
+
 int kvm_arch_get_registers(CPUState *cs)
 {
     struct kvm_one_reg reg;
     uint64_t val;
-    uint32_t fpr;
     unsigned int el;
-    int i;
-    int ret;
+    int i, ret;
 
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
         env->spsr = env->banked_spsr[i];
     }
 
-    /* Advanced SIMD and FP registers */
-    for (i = 0; i < 32; i++) {
-        uint64_t *q = aa64_vfp_qreg(env, i);
-        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
-        reg.addr = (uintptr_t)q;
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
-        if (ret) {
-            return ret;
-        } else {
-#ifdef HOST_WORDS_BIGENDIAN
-            uint64_t t;
-            t = q[0], q[0] = q[1], q[1] = t;
-#endif
-        }
-    }
-
-    reg.addr = (uintptr_t)(&fpr);
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_arch_get_fpsimd(cs);
     if (ret) {
         return ret;
     }
-    vfp_set_fpsr(env, fpr);
-
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
-    if (ret) {
-        return ret;
-    }
-    vfp_set_fpcr(env, fpr);
 
     ret = kvm_get_vcpu_events(cpu);
     if (ret) {
--
2.20.1
From: Francisco Iglesias <frasse.iglesias@gmail.com>

Coverity found that the string returned by 'object_get_canonical_path' was
not being freed at two locations in the model (CID 1391294 and CID 1391293)
and also that memset was being called with a value greater than the maximum
of a byte as its second argument (CID 1391286). This patch corrects this by
freeing the strings and also changing the memset to fill with zeroes instead
on descriptor unaligned errors.

Signed-off-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180528184859.3530-1-frasse.iglesias@gmail.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/dma/xlnx-zdma.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -XXX,XX +XXX,XX @@ static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
         qemu_log_mask(LOG_GUEST_ERROR,
                       "zdma: unaligned descriptor at %" PRIx64,
                       addr);
-        memset(buf, 0xdeadbeef, sizeof(XlnxZDMADescr));
+        memset(buf, 0x0, sizeof(XlnxZDMADescr));
         s->error = true;
         return false;
     }
@@ -XXX,XX +XXX,XX @@ static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
     RegisterInfo *r = &s->regs_info[addr / 4];
 
     if (!r->data) {
+        gchar *path = object_get_canonical_path(OBJECT(s));
         qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
-                 object_get_canonical_path(OBJECT(s)),
+                 path,
                  addr);
+        g_free(path);
         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
         zdma_ch_imr_update_irq(s);
         return 0;
@@ -XXX,XX +XXX,XX @@ static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
     RegisterInfo *r = &s->regs_info[addr / 4];
 
     if (!r->data) {
+        gchar *path = object_get_canonical_path(OBJECT(s));
         qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
-                 object_get_canonical_path(OBJECT(s)),
+                 path,
                  addr, value);
+        g_free(path);
         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
         zdma_ch_imr_update_irq(s);
         return;
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

Extract is a compact combination of shift + and.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
 
 static void shifter_out_im(TCGv_i32 var, int shift)
 {
-    if (shift == 0) {
-        tcg_gen_andi_i32(cpu_CF, var, 1);
-    } else {
-        tcg_gen_shri_i32(cpu_CF, var, shift);
-        if (shift != 31) {
-            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
-        }
-    }
+    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
 }
 
 /* Shift by immediate.  Includes special handling for shift == 0.  */
--
2.20.1
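tcg_gen_extract_i32(dst, src, pos, len) computes the same shift-plus-mask
combination in one operation; a plain-C model (mirroring extract32() from
qemu/bitops.h) of the single-bit case now used in shifter_out_im():

    #include <assert.h>
    #include <stdint.h>

    /* Plain-C model of tcg_gen_extract_i32(dst, src, pos, len). */
    static uint32_t extract32_ref(uint32_t value, int pos, int len)
    {
        return (value >> pos) & (~0u >> (32 - len));
    }

    int main(void)
    {
        uint32_t var = 0x80000001;
        /* shifter_out_im() now needs just one one-bit extract: */
        assert(extract32_ref(var, 0, 1) == (var & 1));
        assert(extract32_ref(var, 31, 1) == (var >> 31));
        assert(extract32_ref(var, 7, 1) == ((var >> 7) & 1));
        return 0;
    }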
From: Paolo Bonzini <pbonzini@redhat.com>

cpregs_keys is an uint32_t* so the allocation should use uint32_t.
g_new is even better because it is type-safe.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_xml(CPUState *cs)
     RegisterSysregXmlParam param = {cs, s};
 
     cpu->dyn_xml.num_cpregs = 0;
-    cpu->dyn_xml.cpregs_keys = g_malloc(sizeof(uint32_t *) *
-                                        g_hash_table_size(cpu->cp_regs));
+    cpu->dyn_xml.cpregs_keys = g_new(uint32_t, g_hash_table_size(cpu->cp_regs));
     g_string_printf(s, "<?xml version=\"1.0\"?>");
     g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
     g_string_append_printf(s, "<feature name=\"org.qemu.gdb.arm.sys.regs\">");
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

Use deposit as the composite operation to merge the
bits from the two inputs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                 shift = (insn >> 7) & 0x1f;
                 if (insn & (1 << 6)) {
                     /* pkhtb */
-                    if (shift == 0)
+                    if (shift == 0) {
                         shift = 31;
+                    }
                     tcg_gen_sari_i32(tmp2, tmp2, shift);
-                    tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
-                    tcg_gen_ext16u_i32(tmp2, tmp2);
+                    tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
                 } else {
                     /* pkhbt */
-                    if (shift)
-                        tcg_gen_shli_i32(tmp2, tmp2, shift);
-                    tcg_gen_ext16u_i32(tmp, tmp);
-                    tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+                    tcg_gen_shli_i32(tmp2, tmp2, shift);
+                    tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
                 }
-                tcg_gen_or_i32(tmp, tmp, tmp2);
                 tcg_temp_free_i32(tmp2);
                 store_reg(s, rd, tmp);
             } else if ((insn & 0x00200020) == 0x00200000) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
             if (insn & (1 << 5)) {
                 /* pkhtb */
-                if (shift == 0)
+                if (shift == 0) {
                     shift = 31;
+                }
                 tcg_gen_sari_i32(tmp2, tmp2, shift);
-                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
-                tcg_gen_ext16u_i32(tmp2, tmp2);
+                tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
             } else {
                 /* pkhbt */
-                if (shift)
-                    tcg_gen_shli_i32(tmp2, tmp2, shift);
-                tcg_gen_ext16u_i32(tmp, tmp);
-                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+                tcg_gen_shli_i32(tmp2, tmp2, shift);
+                tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
             }
-            tcg_gen_or_i32(tmp, tmp, tmp2);
             tcg_temp_free_i32(tmp2);
             store_reg(s, rd, tmp);
         } else {
--
2.20.1
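Deposit writes a len-bit field of one value into another, which is exactly
the pkhbt/pkhtb merge above; a plain-C model (mirroring deposit32() from
qemu/bitops.h):

    #include <assert.h>
    #include <stdint.h>

    /* Plain-C model of tcg_gen_deposit_i32(dst, a, b, pos, len). */
    static uint32_t deposit32_ref(uint32_t a, int pos, int len, uint32_t b)
    {
        uint32_t mask = (~0u >> (32 - len)) << pos;
        return (a & ~mask) | ((b << pos) & mask);
    }

    int main(void)
    {
        uint32_t hi = 0xaaaa1234, lo = 0x5678bbbb;
        /* pkh-style merge: low halfword from 'lo', high halfword from 'hi',
         * replacing the old and/ext16u/or three-op sequence: */
        assert(deposit32_ref(hi, 0, 16, lo) == 0xaaaabbbb);
        return 0;
    }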
From: Shannon Zhao <zhaoshenglong@huawei.com>

kvm_dist_putbmp() forgot to increase clroffset during the loop, so it
only cleared the first 4 bytes.

Fixes: 367b9f527becdd20ddf116e17a3c0c2bbc486920
Cc: qemu-stable@nongnu.org
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 1527047633-12368-1-git-send-email-zhaoshenglong@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/intc/arm_gicv3_kvm.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -XXX,XX +XXX,XX @@ static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
         if (clroffset != 0) {
             reg = 0;
             kvm_gicd_access(s, clroffset, &reg, true);
+            clroffset += 4;
         }
         reg = *gic_bmp_ptr32(bmp, irq);
         kvm_gicd_access(s, offset, &reg, true);
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

The immediate shift generator functions already test for,
and eliminate, the case of a shift by zero.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                 shift = (insn >> 10) & 3;
                 /* ??? In many cases it's not necessary to do a
                    rotate, a shift is sufficient.  */
-                if (shift != 0)
-                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                 op1 = (insn >> 20) & 7;
                 switch (op1) {
                 case 0: gen_sxtb16(tmp);   break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             shift = (insn >> 4) & 3;
             /* ??? In many cases it's not necessary to do a
                rotate, a shift is sufficient.  */
-            if (shift != 0)
-                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
             op = (insn >> 20) & 7;
             switch (op) {
             case 0: gen_sxth(tmp);   break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                 case 7:
                     goto illegal_op;
                 default: /* Saturate.  */
-                    if (shift) {
-                        if (op & 1)
-                            tcg_gen_sari_i32(tmp, tmp, shift);
-                        else
-                            tcg_gen_shli_i32(tmp, tmp, shift);
+                    if (op & 1) {
+                        tcg_gen_sari_i32(tmp, tmp, shift);
+                    } else {
+                        tcg_gen_shli_i32(tmp, tmp, shift);
                     }
                     tmp2 = tcg_const_i32(imm);
                     if (op & 4) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                 goto illegal_op;
             }
             tmp = load_reg(s, rm);
-            if (shift) {
-                tcg_gen_shli_i32(tmp, tmp, shift);
-            }
+            tcg_gen_shli_i32(tmp, tmp, shift);
             tcg_gen_add_i32(addr, addr, tmp);
             tcg_temp_free_i32(tmp);
             break;
--
2.20.1
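The clroffset fix above is the classic forgotten loop-cursor increment; a
standalone illustration (not the real GIC code) of why only the first 32-bit
register got cleared:

    #include <stdio.h>
    #include <string.h>

    #define NWORDS 4

    int main(void)
    {
        unsigned dist[NWORDS];
        unsigned clroffset;

        /* Buggy loop: the write target never advances, so only word 0
         * is cleared, as in the pre-fix kvm_dist_putbmp(): */
        memset(dist, 0xff, sizeof(dist));
        clroffset = 0;
        for (int i = 0; i < NWORDS; i++) {
            dist[clroffset / 4] = 0;
        }
        printf("buggy: %08x %08x %08x %08x\n", dist[0], dist[1], dist[2], dist[3]);

        /* Fixed loop advances by 4 bytes per 32-bit register: */
        memset(dist, 0xff, sizeof(dist));
        clroffset = 0;
        for (int i = 0; i < NWORDS; i++) {
            dist[clroffset / 4] = 0;
            clroffset += 4;
        }
        printf("fixed: %08x %08x %08x %08x\n", dist[0], dist[1], dist[2], dist[3]);
        return 0;
    }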
From: Igor Mammedov <imammedo@redhat.com>

When QEMU is started with the following CLI
  -machine virt,gic-version=3,accel=kvm -cpu host -bios AAVMF_CODE.fd
it crashes with an abort at
  accel/kvm/kvm-all.c:2164:
  KVM_SET_DEVICE_ATTR failed: Group 6 attr 0x000000000000c665: Invalid argument

This is caused by an implicit dependency of kvm_arm_gicv3_reset() on
arm_gicv3_icc_reset(), where the latter is called by the CPU reset
callback.

However commit:
   3b77f6c arm/boot: split load_dtb() from arm_load_kernel()
broke CPU reset callback registration in the case where the

   arm_load_kernel()
   ...
   if (!info->kernel_filename || info->firmware_loaded)

branch is taken, i.e. it is sufficient to provide firmware
or not provide a kernel on the CLI to skip CPU reset callback
registration, whereas before the offending commit the callback
was registered unconditionally.

Fix it by registering the callback right at the beginning of
arm_load_kernel() unconditionally instead of doing it at the end.

NOTE:
We probably should eliminate that dependency anyway, as well as
separate the arch CPU reset parts from arm_load_kernel() into the
CPU itself, but that is refactoring I will probably have to do
later anyway for CPU hotplug to work.

Reported-by: Auger Eric <eric.auger@redhat.com>
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Message-id: 1527070950-208350-1-git-send-email-imammedo@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/boot.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
     static const ARMInsnFixup *primary_loader;
     AddressSpace *as = arm_boot_address_space(cpu, info);
 
+    /* CPU objects (unlike devices) are not automatically reset on system
+     * reset, so we must always register a handler to do so. If we're
+     * actually loading a kernel, the handler is also responsible for
+     * arranging that we start it correctly.
+     */
+    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
+        qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
+    }
+
     /* The board code is not supposed to set secure_board_setup unless
      * running its code in secure mode is actually possible, and KVM
      * doesn't support secure.
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
         ARM_CPU(cs)->env.boot_info = info;
     }
 
-    /* CPU objects (unlike devices) are not automatically reset on system
-     * reset, so we must always register a handler to do so. If we're
-     * actually loading a kernel, the handler is also responsible for
-     * arranging that we start it correctly.
-     */
-    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
-        qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
-    }
-
     if (!info->skip_dtb_autoload && have_dtb(info)) {
         if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
             exit(1);
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

The helper function is more documentary, and also already
handles the case of rotate by zero.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             /* CPSR = immediate */
             val = insn & 0xff;
             shift = ((insn >> 8) & 0xf) * 2;
-            if (shift)
-                val = (val >> shift) | (val << (32 - shift));
+            val = ror32(val, shift);
             i = ((insn & (1 << 22)) != 0);
             if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
                                i, val)) {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             /* immediate operand */
             val = insn & 0xff;
             shift = ((insn >> 8) & 0xf) * 2;
-            if (shift) {
-                val = (val >> shift) | (val << (32 - shift));
-            }
+            val = ror32(val, shift);
             tmp2 = tcg_temp_new_i32();
             tcg_gen_movi_i32(tmp2, val);
             if (logic_cc && shift) {
--
2.20.1
1
From: Jan Kiszka <jan.kiszka@siemens.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
There was a nasty flip in identifying which register group an access is
3
Rotate is the more compact and obvious way to swap 16-bit
4
targeting. The issue caused spuriously raised priorities of the guest
4
elements of a 32-bit word.
5
when handing CPUs over in the Jailhouse hypervisor.
6
5
7
Cc: qemu-stable@nongnu.org
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
7
Message-id: 20190808202616.13782-6-richard.henderson@linaro.org
9
Message-id: 28b927d3-da58-bce4-cc13-bfec7f9b1cb9@siemens.com
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
hw/intc/arm_gicv3_cpuif.c | 12 ++++++------
11
target/arm/translate.c | 6 +-----
14
1 file changed, 6 insertions(+), 6 deletions(-)
12
1 file changed, 1 insertion(+), 5 deletions(-)
15
13
16
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/intc/arm_gicv3_cpuif.c
16
--- a/target/arm/translate.c
19
+++ b/hw/intc/arm_gicv3_cpuif.c
17
+++ b/target/arm/translate.c
20
@@ -XXX,XX +XXX,XX @@ static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
19
/* Swap low and high halfwords. */
20
static void gen_swap_half(TCGv_i32 var)
21
{
21
{
22
GICv3CPUState *cs = icc_cs_from_env(env);
22
- TCGv_i32 tmp = tcg_temp_new_i32();
23
int regno = ri->opc2 & 3;
23
- tcg_gen_shri_i32(tmp, var, 16);
24
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
24
- tcg_gen_shli_i32(var, var, 16);
25
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
25
- tcg_gen_or_i32(var, var, tmp);
26
uint64_t value = cs->ich_apr[grp][regno];
26
- tcg_temp_free_i32(tmp);
27
27
+ tcg_gen_rotri_i32(var, var, 16);
28
trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
28
}
29
@@ -XXX,XX +XXX,XX @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
29
30
{
30
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
31
GICv3CPUState *cs = icc_cs_from_env(env);
32
int regno = ri->opc2 & 3;
33
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
34
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
35
36
trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
37
38
@@ -XXX,XX +XXX,XX @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
39
uint64_t value;
40
41
int regno = ri->opc2 & 3;
42
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
43
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
44
45
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
46
return icv_ap_read(env, ri);
47
@@ -XXX,XX +XXX,XX @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
48
GICv3CPUState *cs = icc_cs_from_env(env);
49
50
int regno = ri->opc2 & 3;
51
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
52
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
53
54
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
55
icv_ap_write(env, ri, value);
56
@@ -XXX,XX +XXX,XX @@ static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
57
{
58
GICv3CPUState *cs = icc_cs_from_env(env);
59
int regno = ri->opc2 & 3;
60
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
61
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
62
uint64_t value;
63
64
value = cs->ich_apr[grp][regno];
65
@@ -XXX,XX +XXX,XX @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
66
{
67
GICv3CPUState *cs = icc_cs_from_env(env);
68
int regno = ri->opc2 & 3;
69
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
70
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
71
72
trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
73
74
--

From: Richard Henderson <richard.henderson@linaro.org>

Rotate is the more compact and obvious way to swap 16-bit
elements of a 32-bit word.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
 /* Swap low and high halfwords. */
 static void gen_swap_half(TCGv_i32 var)
 {
-    TCGv_i32 tmp = tcg_temp_new_i32();
-    tcg_gen_shri_i32(tmp, var, 16);
-    tcg_gen_shli_i32(var, var, 16);
-    tcg_gen_or_i32(var, var, tmp);
-    tcg_temp_free_i32(tmp);
+    tcg_gen_rotri_i32(var, var, 16);
 }
 
 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
--
2.20.1
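
A quick host-side check (plain C, not TCG opcodes) that the single
rotate computes the same permutation as the old shift/shift/or
sequence, so the change is a pure strength reduction:

#include <assert.h>
#include <stdint.h>

/* Old gen_swap_half() sequence, modeled with host arithmetic:
 * three value ops plus a temporary. */
static uint32_t swap_shift_or(uint32_t x)
{
    uint32_t tmp = x >> 16;      /* tcg_gen_shri_i32 */
    uint32_t lo  = x << 16;      /* tcg_gen_shli_i32 */
    return lo | tmp;             /* tcg_gen_or_i32   */
}

/* New form: one rotate. For a fixed count of 16, rotating right
 * and rotating left are the same permutation. */
static uint32_t swap_rotr16(uint32_t x)
{
    return (x >> 16) | (x << 16);
}

int main(void)
{
    uint32_t samples[] = { 0x12345678u, 0xffff0000u, 0x00000001u, 0u };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        assert(swap_shift_or(samples[i]) == swap_rotr16(samples[i]));
    }
    return 0;
}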
diff view generated by jsdifflib

Add entries to MAINTAINERS to cover the newer MPS2 boards and
the new devices they use.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180518153157.14899-1-peter.maydell@linaro.org
---
 MAINTAINERS | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: hw/timer/cmsdk-apb-timer.c
 F: include/hw/timer/cmsdk-apb-timer.h
 F: hw/char/cmsdk-apb-uart.c
 F: include/hw/char/cmsdk-apb-uart.h
+F: hw/misc/tz-ppc.c
+F: include/hw/misc/tz-ppc.h
 
 ARM cores
 M: Peter Maydell <peter.maydell@linaro.org>
@@ -XXX,XX +XXX,XX @@ M: Peter Maydell <peter.maydell@linaro.org>
 L: qemu-arm@nongnu.org
 S: Maintained
 F: hw/arm/mps2.c
-F: hw/misc/mps2-scc.c
-F: include/hw/misc/mps2-scc.h
+F: hw/arm/mps2-tz.c
+F: hw/misc/mps2-*.c
+F: include/hw/misc/mps2-*.h
+F: hw/arm/iotkit.c
+F: include/hw/arm/iotkit.h
 
 Musicpal
 M: Jan Kiszka <jan.kiszka@web.de>
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

All of the inputs to these instructions are 32 bits. Rather than
extend each input to 64 bits and then extract the high 32 bits of
the output, use tcg_gen_muls2_i32 and other 32-bit generator functions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 72 +++++++++++++++---------------------------
 1 file changed, 26 insertions(+), 46 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_revsh(TCGv_i32 var)
     tcg_gen_ext16s_i32(var, var);
 }
 
-/* Return (b << 32) + a. Mark inputs as dead */
-static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
-{
-    TCGv_i64 tmp64 = tcg_temp_new_i64();
-
-    tcg_gen_extu_i32_i64(tmp64, b);
-    tcg_temp_free_i32(b);
-    tcg_gen_shli_i64(tmp64, tmp64, 32);
-    tcg_gen_add_i64(a, tmp64, a);
-
-    tcg_temp_free_i64(tmp64);
-    return a;
-}
-
-/* Return (b << 32) - a. Mark inputs as dead. */
-static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
-{
-    TCGv_i64 tmp64 = tcg_temp_new_i64();
-
-    tcg_gen_extu_i32_i64(tmp64, b);
-    tcg_temp_free_i32(b);
-    tcg_gen_shli_i64(tmp64, tmp64, 32);
-    tcg_gen_sub_i64(a, tmp64, a);
-
-    tcg_temp_free_i64(tmp64);
-    return a;
-}
-
 /* 32x32->64 multiply. Marks inputs as dead. */
 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
 {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                                         (SMMUL, SMMLA, SMMLS) */
                         tmp = load_reg(s, rm);
                         tmp2 = load_reg(s, rs);
-                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
+                        tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
 
                         if (rd != 15) {
-                            tmp = load_reg(s, rd);
+                            tmp3 = load_reg(s, rd);
                             if (insn & (1 << 6)) {
-                                tmp64 = gen_subq_msw(tmp64, tmp);
+                                tcg_gen_sub_i32(tmp, tmp, tmp3);
                             } else {
-                                tmp64 = gen_addq_msw(tmp64, tmp);
+                                tcg_gen_add_i32(tmp, tmp, tmp3);
                             }
+                            tcg_temp_free_i32(tmp3);
                         }
                         if (insn & (1 << 5)) {
-                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+                            /*
+                             * Adding 0x80000000 to the 64-bit quantity
+                             * means that we have carry in to the high
+                             * word when the low word has the high bit set.
+                             */
+                            tcg_gen_shri_i32(tmp2, tmp2, 31);
+                            tcg_gen_add_i32(tmp, tmp, tmp2);
                         }
-                        tcg_gen_shri_i64(tmp64, tmp64, 32);
-                        tmp = tcg_temp_new_i32();
-                        tcg_gen_extrl_i64_i32(tmp, tmp64);
-                        tcg_temp_free_i64(tmp64);
+                        tcg_temp_free_i32(tmp2);
                         store_reg(s, rn, tmp);
                         break;
                     case 0:
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             }
             break;
         case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
-            tmp64 = gen_muls_i64_i32(tmp, tmp2);
+            tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
             if (rs != 15) {
-                tmp = load_reg(s, rs);
+                tmp3 = load_reg(s, rs);
                 if (insn & (1 << 20)) {
-                    tmp64 = gen_addq_msw(tmp64, tmp);
+                    tcg_gen_add_i32(tmp, tmp, tmp3);
                 } else {
-                    tmp64 = gen_subq_msw(tmp64, tmp);
+                    tcg_gen_sub_i32(tmp, tmp, tmp3);
                 }
+                tcg_temp_free_i32(tmp3);
             }
             if (insn & (1 << 4)) {
-                tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+                /*
+                 * Adding 0x80000000 to the 64-bit quantity
+                 * means that we have carry in to the high
+                 * word when the low word has the high bit set.
+                 */
+                tcg_gen_shri_i32(tmp2, tmp2, 31);
+                tcg_gen_add_i32(tmp, tmp, tmp2);
             }
-            tcg_gen_shri_i64(tmp64, tmp64, 32);
-            tmp = tcg_temp_new_i32();
-            tcg_gen_extrl_i64_i32(tmp, tmp64);
-            tcg_temp_free_i64(tmp64);
+            tcg_temp_free_i32(tmp2);
             break;
         case 7: /* Unsigned sum of absolute differences. */
             gen_helper_usad8(tmp, tmp, tmp2);
--
2.20.1
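
The only subtle step above is the rounding add: since only the high
word of the product survives, adding 0x80000000 to the 64-bit value is
the same as adding bit 31 of the low word into the high word. A plain-C
spot check of that identity (smmlr_64()/smmlr_32() are hypothetical
names, host arithmetic rather than TCG):

#include <assert.h>
#include <stdint.h>

/* Old form: round the full 64-bit product, keep the high word. */
static int32_t smmlr_64(int32_t a, int32_t b)
{
    uint64_t p = (uint64_t)((int64_t)a * b) + 0x80000000u;
    return (int32_t)(uint32_t)(p >> 32);
}

/* New form: work on 32-bit halves; the rounding add can only change
 * the high word via a carry, which is exactly bit 31 of the low word. */
static int32_t smmlr_32(int32_t a, int32_t b)
{
    int64_t p = (int64_t)a * b;
    uint32_t lo = (uint32_t)p;                            /* muls2 low half  */
    int32_t hi = (int32_t)(uint32_t)((uint64_t)p >> 32);  /* muls2 high half */

    return hi + (int32_t)(lo >> 31);     /* tcg_gen_shri_i32 + tcg_gen_add_i32 */
}

int main(void)
{
    int32_t v[] = { 0, 1, -1, 0x7fffffff, (int32_t)0x80000000u, 12345678 };

    for (unsigned int i = 0; i < 6; i++) {
        for (unsigned int j = 0; j < 6; j++) {
            assert(smmlr_64(v[i], v[j]) == smmlr_32(v[i], v[j]));
        }
    }
    return 0;
}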
diff view generated by jsdifflib

As part of plumbing MemTxAttrs down to the IOMMU translate method,
add MemTxAttrs as an argument to address_space_translate_iommu().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180521140402.23318-14-peter.maydell@linaro.org
---
 exec.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
  * @is_write: whether the translation operation is for write
  * @is_mmio: whether this can be MMIO, set true if it can
  * @target_as: the address space targeted by the IOMMU
+ * @attrs: transaction attributes
  *
  * This function is called from RCU critical section. It is the common
  * part of flatview_do_translate and address_space_translate_cached.
@@ -XXX,XX +XXX,XX @@ static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iomm
                                                          hwaddr *page_mask_out,
                                                          bool is_write,
                                                          bool is_mmio,
-                                                         AddressSpace **target_as)
+                                                         AddressSpace **target_as,
+                                                         MemTxAttrs attrs)
 {
     MemoryRegionSection *section;
     hwaddr page_mask = (hwaddr)-1;
@@ -XXX,XX +XXX,XX @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
         return address_space_translate_iommu(iommu_mr, xlat,
                                              plen_out, page_mask_out,
                                              is_write, is_mmio,
-                                             target_as);
+                                             target_as, attrs);
     }
     if (page_mask_out) {
         /* Not behind an IOMMU, use default page size. */
@@ -XXX,XX +XXX,XX @@ static inline MemoryRegion *address_space_translate_cached(
 
     section = address_space_translate_iommu(iommu_mr, xlat, plen,
                                             NULL, is_write, true,
-                                            &target_as);
+                                            &target_as, attrs);
     return section.mr;
 }
 
--
2.17.1

From: Richard Henderson <richard.henderson@linaro.org>

Separate shift + extract low will result in one extra insn
for hosts like RISC-V, MIPS, and Sparc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190808202616.13782-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         if (insn & ARM_CP_RW_BIT) {                     /* TMRRC */
             iwmmxt_load_reg(cpu_V0, wrd);
             tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
-            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+            tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
         } else {                                        /* TMCRR */
             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
             iwmmxt_store_reg(cpu_V0, wrd);
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
         if (insn & ARM_CP_RW_BIT) {                     /* MRA */
             iwmmxt_load_reg(cpu_V0, acc);
             tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
-            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+            tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
             tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
         } else {                                        /* MAR */
             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                 break;
             case 2:
-                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-                tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+                tcg_gen_extrh_i64_i32(tmp, cpu_V0);
                 break;
             default: abort();
             }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 break;
             case 2:
                 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
-                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-                tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+                tcg_gen_extrh_i64_i32(tmp, cpu_V0);
                 break;
             default: abort();
             }
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                 tmp = tcg_temp_new_i32();
                 tcg_gen_extrl_i64_i32(tmp, tmp64);
                 store_reg(s, rt, tmp);
-                tcg_gen_shri_i64(tmp64, tmp64, 32);
                 tmp = tcg_temp_new_i32();
-                tcg_gen_extrl_i64_i32(tmp, tmp64);
+                tcg_gen_extrh_i64_i32(tmp, tmp64);
                 tcg_temp_free_i64(tmp64);
                 store_reg(s, rt2, tmp);
             } else {
@@ -XXX,XX +XXX,XX @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
     tcg_gen_extrl_i64_i32(tmp, val);
     store_reg(s, rlow, tmp);
     tmp = tcg_temp_new_i32();
-    tcg_gen_shri_i64(val, val, 32);
-    tcg_gen_extrl_i64_i32(tmp, val);
+    tcg_gen_extrh_i64_i32(tmp, val);
     store_reg(s, rhigh, tmp);
 }
 
--
2.20.1
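
The win is in generated code: tcg_gen_extrh_i64_i32 is a single op that
a backend can often emit as one host instruction, while the old
shift-then-extract-low pair costs two on the hosts named in the commit
message. A host-level analogue of the two extractions gen_storeq_reg()
performs (the model functions are hypothetical, plain C rather than TCG
opcodes):

#include <assert.h>
#include <stdint.h>

static uint32_t extrl_model(uint64_t x) { return (uint32_t)x; }         /* extrl */
static uint32_t extrh_model(uint64_t x) { return (uint32_t)(x >> 32); } /* extrh */

int main(void)
{
    /* gen_storeq_reg() splits one 64-bit value across two registers. */
    uint64_t val = 0x1122334455667788ull;

    assert(extrl_model(val) == 0x55667788u);   /* rlow  */
    assert(extrh_model(val) == 0x11223344u);   /* rhigh */
    return 0;
}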
diff view generated by jsdifflib