First arm pullreq of 4.2...

thanks
-- PMM

The following changes since commit 27608c7c66bd923eb5e5faab80e795408cbe2b51:

  Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20190814a' into staging (2019-08-16 12:00:18 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190816

for you to fetch changes up to 664b7e3b97d6376f3329986c465b3782458b0f8b:

  target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word (2019-08-16 14:02:53 +0100)

----------------------------------------------------------------
target-arm queue:
 * target/arm: generate a custom MIDR for -cpu max
 * hw/misc/zynq_slcr: refactor to use standard register definition
 * Set ENET_BD_BDU in I.MX FEC controller
 * target/arm: Fix routing of singlestep exceptions
 * refactor a32/t32 decoder handling of PC
 * minor optimisations/cleanups of some a32/t32 codegen
 * target/arm/cpu64: Ensure kvm really supports aarch64=off
 * target/arm/cpu: Ensure we can use the pmu with kvm
 * target/arm: Minor cleanups preparatory to KVM SVE support

----------------------------------------------------------------
Aaron Hill (1):
      Set ENET_BD_BDU in I.MX FEC controller

Alex Bennée (1):
      target/arm: generate a custom MIDR for -cpu max

Andrew Jones (6):
      target/arm/cpu64: Ensure kvm really supports aarch64=off
      target/arm/cpu: Ensure we can use the pmu with kvm
      target/arm/helper: zcr: Add build bug next to value range assumption
      target/arm/cpu: Use div-round-up to determine predicate register array size
      target/arm/kvm64: Fix error returns
      target/arm/kvm64: Move the get/put of fpsimd registers out

Damien Hedde (1):
      hw/misc/zynq_slcr: use standard register definition

Peter Maydell (2):
      target/arm: Factor out 'generate singlestep exception' function
      target/arm: Fix routing of singlestep exceptions

Richard Henderson (18):
      target/arm: Pass in pc to thumb_insn_is_16bit
      target/arm: Introduce pc_curr
      target/arm: Introduce read_pc
      target/arm: Introduce add_reg_for_lit
      target/arm: Remove redundant s->pc & ~1
      target/arm: Replace s->pc with s->base.pc_next
      target/arm: Replace offset with pc in gen_exception_insn
      target/arm: Replace offset with pc in gen_exception_internal_insn
      target/arm: Remove offset argument to gen_exception_bkpt_insn
      target/arm: Use unallocated_encoding for aarch32
      target/arm: Remove helper_double_saturate
      target/arm: Use tcg_gen_extract_i32 for shifter_out_im
      target/arm: Use tcg_gen_deposit_i32 for PKHBT, PKHTB
      target/arm: Remove redundant shift tests
      target/arm: Use ror32 instead of open-coding the operation
      target/arm: Use tcg_gen_rotri_i32 for gen_swap_half
      target/arm: Simplify SMMLA, SMMLAR, SMMLS, SMMLSR
      target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word

 target/arm/cpu.h               |  13 +-
 target/arm/helper.h            |   1 -
 target/arm/kvm_arm.h           |  28 ++
 target/arm/translate-a64.h     |   4 +-
 target/arm/translate.h         |  39 ++-
 hw/misc/zynq_slcr.c            | 450 ++++++++++++++++----------------
 hw/net/imx_fec.c               |   4 +
 target/arm/cpu.c               |  30 ++-
 target/arm/cpu64.c             |  31 ++-
 target/arm/helper.c            |   7 +
 target/arm/kvm.c               |   7 +
 target/arm/kvm64.c             | 161 +++++++-----
 target/arm/op_helper.c         |  15 --
 target/arm/translate-a64.c     | 130 ++++------
 target/arm/translate-vfp.inc.c |  45 +---
 target/arm/translate.c         | 572 +++++++++++++++++------------------
 16 files changed, 771 insertions(+), 766 deletions(-)

From: Alex Bennée <alex.bennee@linaro.org>

While most features are now detected by probing the ID_* registers,
kernels can (and do) use MIDR_EL1 for working out if they have to
apply errata. This can trip up warnings in the kernel as it tries to
work out if it should apply workarounds to features that don't
actually exist in the reported CPU type.

Avoid this problem by synthesising our own MIDR value.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190726113950.7499-1-alex.bennee@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h   |  6 ++++++
 target/arm/cpu64.c | 19 +++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
 /*
  * System register ID fields.
  */
+FIELD(MIDR_EL1, REVISION, 0, 4)
+FIELD(MIDR_EL1, PARTNUM, 4, 12)
+FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
+FIELD(MIDR_EL1, VARIANT, 20, 4)
+FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
+
 FIELD(ID_ISAR0, SWAP, 0, 4)
 FIELD(ID_ISAR0, BITCOUNT, 4, 4)
 FIELD(ID_ISAR0, BITFIELD, 8, 4)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     uint32_t u;
     aarch64_a57_initfn(obj);
 
+    /*
+     * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
+     * one and try to apply errata workarounds or use impdef features we
+     * don't provide.
+     * An IMPLEMENTER field of 0 means "reserved for software use";
+     * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
+     * to see which features are present";
+     * the VARIANT, PARTNUM and REVISION fields are all implementation
+     * defined and we choose to define PARTNUM just in case guest
+     * code needs to distinguish this QEMU CPU from other software
+     * implementations, though this shouldn't be needed.
+     */
+    t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
+    t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
+    t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
+    t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
+    t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
+    cpu->midr = t;
+
     t = cpu->isar.id_aa64isar0;
     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
--
2.20.1

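For readers following the FIELD()/FIELD_DP64() usage above: those macros
just name the architectural bit ranges of MIDR_EL1. A minimal sketch of
how guest code might decode the value that 'max' now reports (this is an
illustration of the architectural layout, not QEMU code; the helper name
is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode MIDR_EL1 using the same field layout as the patch above. */
    static void decode_midr(uint64_t midr)
    {
        unsigned revision     = midr & 0xf;           /* bits [3:0]   */
        unsigned partnum      = (midr >> 4) & 0xfff;  /* bits [15:4]  */
        unsigned architecture = (midr >> 16) & 0xf;   /* bits [19:16] */
        unsigned variant      = (midr >> 20) & 0xf;   /* bits [23:20] */
        unsigned implementer  = (midr >> 24) & 0xff;  /* bits [31:24] */

        printf("impl=0x%x arch=0x%x part=0x%x var=%u rev=%u\n",
               implementer, architecture, partnum, variant, revision);
    }

    int main(void)
    {
        /* IMPLEMENTER=0, ARCHITECTURE=0xf, PARTNUM='Q' (0x51),
         * VARIANT=0, REVISION=0 composes to 0x000f0510.
         */
        decode_midr(0x000f0510);
        return 0;
    }
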
From: Damien Hedde <damien.hedde@greensocs.com>

Replace the zynq_slcr register enum and hand-rolled macros with
definitions using the hw/registerfields.h macros.

Signed-off-by: Damien Hedde <damien.hedde@greensocs.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20190729145654.14644-30-damien.hedde@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/zynq_slcr.c | 450 ++++++++++++++++++++++----------------------
 1 file changed, 225 insertions(+), 225 deletions(-)

diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/sysemu.h"
 #include "qemu/log.h"
 #include "qemu/module.h"
+#include "hw/registerfields.h"
 
 #ifndef ZYNQ_SLCR_ERR_DEBUG
 #define ZYNQ_SLCR_ERR_DEBUG 0
@@ -XXX,XX +XXX,XX @@
 #define XILINX_LOCK_KEY 0x767b
 #define XILINX_UNLOCK_KEY 0xdf0d
 
-#define R_PSS_RST_CTRL_SOFT_RST 0x1
+REG32(SCL, 0x000)
+REG32(LOCK, 0x004)
+REG32(UNLOCK, 0x008)
+REG32(LOCKSTA, 0x00c)
 
-enum {
-    SCL = 0x000 / 4,
-    LOCK,
-    UNLOCK,
-    LOCKSTA,
+REG32(ARM_PLL_CTRL, 0x100)
+REG32(DDR_PLL_CTRL, 0x104)
+REG32(IO_PLL_CTRL, 0x108)
+REG32(PLL_STATUS, 0x10c)
+REG32(ARM_PLL_CFG, 0x110)
+REG32(DDR_PLL_CFG, 0x114)
+REG32(IO_PLL_CFG, 0x118)
 
-    ARM_PLL_CTRL = 0x100 / 4,
-    DDR_PLL_CTRL,
-    IO_PLL_CTRL,
-    PLL_STATUS,
-    ARM_PLL_CFG,
-    DDR_PLL_CFG,
-    IO_PLL_CFG,
-
-    ARM_CLK_CTRL = 0x120 / 4,
-    DDR_CLK_CTRL,
-    DCI_CLK_CTRL,
-    APER_CLK_CTRL,
-    USB0_CLK_CTRL,
-    USB1_CLK_CTRL,
-    GEM0_RCLK_CTRL,
-    GEM1_RCLK_CTRL,
-    GEM0_CLK_CTRL,
-    GEM1_CLK_CTRL,
-    SMC_CLK_CTRL,
-    LQSPI_CLK_CTRL,
-    SDIO_CLK_CTRL,
-    UART_CLK_CTRL,
-    SPI_CLK_CTRL,
-    CAN_CLK_CTRL,
-    CAN_MIOCLK_CTRL,
-    DBG_CLK_CTRL,
-    PCAP_CLK_CTRL,
-    TOPSW_CLK_CTRL,
+REG32(ARM_CLK_CTRL, 0x120)
+REG32(DDR_CLK_CTRL, 0x124)
+REG32(DCI_CLK_CTRL, 0x128)
+REG32(APER_CLK_CTRL, 0x12c)
+REG32(USB0_CLK_CTRL, 0x130)
+REG32(USB1_CLK_CTRL, 0x134)
+REG32(GEM0_RCLK_CTRL, 0x138)
+REG32(GEM1_RCLK_CTRL, 0x13c)
+REG32(GEM0_CLK_CTRL, 0x140)
+REG32(GEM1_CLK_CTRL, 0x144)
+REG32(SMC_CLK_CTRL, 0x148)
+REG32(LQSPI_CLK_CTRL, 0x14c)
+REG32(SDIO_CLK_CTRL, 0x150)
+REG32(UART_CLK_CTRL, 0x154)
+REG32(SPI_CLK_CTRL, 0x158)
+REG32(CAN_CLK_CTRL, 0x15c)
+REG32(CAN_MIOCLK_CTRL, 0x160)
+REG32(DBG_CLK_CTRL, 0x164)
+REG32(PCAP_CLK_CTRL, 0x168)
+REG32(TOPSW_CLK_CTRL, 0x16c)
 
 #define FPGA_CTRL_REGS(n, start) \
-    FPGA ## n ## _CLK_CTRL = (start) / 4, \
-    FPGA ## n ## _THR_CTRL, \
-    FPGA ## n ## _THR_CNT, \
-    FPGA ## n ## _THR_STA,
-    FPGA_CTRL_REGS(0, 0x170)
-    FPGA_CTRL_REGS(1, 0x180)
-    FPGA_CTRL_REGS(2, 0x190)
-    FPGA_CTRL_REGS(3, 0x1a0)
+    REG32(FPGA ## n ## _CLK_CTRL, (start)) \
+    REG32(FPGA ## n ## _THR_CTRL, (start) + 0x4)\
+    REG32(FPGA ## n ## _THR_CNT, (start) + 0x8)\
+    REG32(FPGA ## n ## _THR_STA, (start) + 0xc)
+FPGA_CTRL_REGS(0, 0x170)
+FPGA_CTRL_REGS(1, 0x180)
+FPGA_CTRL_REGS(2, 0x190)
+FPGA_CTRL_REGS(3, 0x1a0)
 
-    BANDGAP_TRIP = 0x1b8 / 4,
-    PLL_PREDIVISOR = 0x1c0 / 4,
-    CLK_621_TRUE,
+REG32(BANDGAP_TRIP, 0x1b8)
+REG32(PLL_PREDIVISOR, 0x1c0)
+REG32(CLK_621_TRUE, 0x1c4)
 
-    PSS_RST_CTRL = 0x200 / 4,
-    DDR_RST_CTRL,
-    TOPSW_RESET_CTRL,
-    DMAC_RST_CTRL,
-    USB_RST_CTRL,
-    GEM_RST_CTRL,
-    SDIO_RST_CTRL,
-    SPI_RST_CTRL,
-    CAN_RST_CTRL,
-    I2C_RST_CTRL,
-    UART_RST_CTRL,
-    GPIO_RST_CTRL,
-    LQSPI_RST_CTRL,
-    SMC_RST_CTRL,
-    OCM_RST_CTRL,
-    FPGA_RST_CTRL = 0x240 / 4,
-    A9_CPU_RST_CTRL,
+REG32(PSS_RST_CTRL, 0x200)
+    FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1)
+REG32(DDR_RST_CTRL, 0x204)
+REG32(TOPSW_RESET_CTRL, 0x208)
+REG32(DMAC_RST_CTRL, 0x20c)
+REG32(USB_RST_CTRL, 0x210)
+REG32(GEM_RST_CTRL, 0x214)
+REG32(SDIO_RST_CTRL, 0x218)
+REG32(SPI_RST_CTRL, 0x21c)
+REG32(CAN_RST_CTRL, 0x220)
+REG32(I2C_RST_CTRL, 0x224)
+REG32(UART_RST_CTRL, 0x228)
+REG32(GPIO_RST_CTRL, 0x22c)
+REG32(LQSPI_RST_CTRL, 0x230)
+REG32(SMC_RST_CTRL, 0x234)
+REG32(OCM_RST_CTRL, 0x238)
+REG32(FPGA_RST_CTRL, 0x240)
+REG32(A9_CPU_RST_CTRL, 0x244)
 
-    RS_AWDT_CTRL = 0x24c / 4,
-    RST_REASON,
+REG32(RS_AWDT_CTRL, 0x24c)
+REG32(RST_REASON, 0x250)
 
-    REBOOT_STATUS = 0x258 / 4,
-    BOOT_MODE,
+REG32(REBOOT_STATUS, 0x258)
+REG32(BOOT_MODE, 0x25c)
 
-    APU_CTRL = 0x300 / 4,
-    WDT_CLK_SEL,
+REG32(APU_CTRL, 0x300)
+REG32(WDT_CLK_SEL, 0x304)
 
-    TZ_DMA_NS = 0x440 / 4,
-    TZ_DMA_IRQ_NS,
-    TZ_DMA_PERIPH_NS,
+REG32(TZ_DMA_NS, 0x440)
+REG32(TZ_DMA_IRQ_NS, 0x444)
+REG32(TZ_DMA_PERIPH_NS, 0x448)
 
-    PSS_IDCODE = 0x530 / 4,
+REG32(PSS_IDCODE, 0x530)
 
-    DDR_URGENT = 0x600 / 4,
-    DDR_CAL_START = 0x60c / 4,
-    DDR_REF_START = 0x614 / 4,
-    DDR_CMD_STA,
-    DDR_URGENT_SEL,
-    DDR_DFI_STATUS,
+REG32(DDR_URGENT, 0x600)
+REG32(DDR_CAL_START, 0x60c)
+REG32(DDR_REF_START, 0x614)
+REG32(DDR_CMD_STA, 0x618)
+REG32(DDR_URGENT_SEL, 0x61c)
+REG32(DDR_DFI_STATUS, 0x620)
 
-    MIO = 0x700 / 4,
+REG32(MIO, 0x700)
 #define MIO_LENGTH 54
 
-    MIO_LOOPBACK = 0x804 / 4,
-    MIO_MST_TRI0,
-    MIO_MST_TRI1,
+REG32(MIO_LOOPBACK, 0x804)
+REG32(MIO_MST_TRI0, 0x808)
+REG32(MIO_MST_TRI1, 0x80c)
 
-    SD0_WP_CD_SEL = 0x830 / 4,
-    SD1_WP_CD_SEL,
+REG32(SD0_WP_CD_SEL, 0x830)
+REG32(SD1_WP_CD_SEL, 0x834)
 
-    LVL_SHFTR_EN = 0x900 / 4,
-    OCM_CFG = 0x910 / 4,
+REG32(LVL_SHFTR_EN, 0x900)
+REG32(OCM_CFG, 0x910)
 
-    CPU_RAM = 0xa00 / 4,
+REG32(CPU_RAM, 0xa00)
 
-    IOU = 0xa30 / 4,
+REG32(IOU, 0xa30)
 
-    DMAC_RAM = 0xa50 / 4,
+REG32(DMAC_RAM, 0xa50)
 
-    AFI0 = 0xa60 / 4,
-    AFI1 = AFI0 + 3,
-    AFI2 = AFI1 + 3,
-    AFI3 = AFI2 + 3,
+REG32(AFI0, 0xa60)
+REG32(AFI1, 0xa6c)
+REG32(AFI2, 0xa78)
+REG32(AFI3, 0xa84)
 #define AFI_LENGTH 3
 
-    OCM = 0xa90 / 4,
+REG32(OCM, 0xa90)
 
-    DEVCI_RAM = 0xaa0 / 4,
+REG32(DEVCI_RAM, 0xaa0)
 
-    CSG_RAM = 0xab0 / 4,
+REG32(CSG_RAM, 0xab0)
 
-    GPIOB_CTRL = 0xb00 / 4,
-    GPIOB_CFG_CMOS18,
-    GPIOB_CFG_CMOS25,
-    GPIOB_CFG_CMOS33,
-    GPIOB_CFG_HSTL = 0xb14 / 4,
-    GPIOB_DRVR_BIAS_CTRL,
+REG32(GPIOB_CTRL, 0xb00)
+REG32(GPIOB_CFG_CMOS18, 0xb04)
+REG32(GPIOB_CFG_CMOS25, 0xb08)
+REG32(GPIOB_CFG_CMOS33, 0xb0c)
+REG32(GPIOB_CFG_HSTL, 0xb14)
+REG32(GPIOB_DRVR_BIAS_CTRL, 0xb18)
 
-    DDRIOB = 0xb40 / 4,
+REG32(DDRIOB, 0xb40)
 #define DDRIOB_LENGTH 14
-};
 
 #define ZYNQ_SLCR_MMIO_SIZE 0x1000
 #define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4)
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_reset(DeviceState *d)
 
     DB_PRINT("RESET\n");
 
-    s->regs[LOCKSTA] = 1;
+    s->regs[R_LOCKSTA] = 1;
     /* 0x100 - 0x11C */
-    s->regs[ARM_PLL_CTRL] = 0x0001A008;
-    s->regs[DDR_PLL_CTRL] = 0x0001A008;
-    s->regs[IO_PLL_CTRL] = 0x0001A008;
-    s->regs[PLL_STATUS] = 0x0000003F;
-    s->regs[ARM_PLL_CFG] = 0x00014000;
-    s->regs[DDR_PLL_CFG] = 0x00014000;
-    s->regs[IO_PLL_CFG] = 0x00014000;
+    s->regs[R_ARM_PLL_CTRL] = 0x0001A008;
+    s->regs[R_DDR_PLL_CTRL] = 0x0001A008;
+    s->regs[R_IO_PLL_CTRL] = 0x0001A008;
+    s->regs[R_PLL_STATUS] = 0x0000003F;
+    s->regs[R_ARM_PLL_CFG] = 0x00014000;
+    s->regs[R_DDR_PLL_CFG] = 0x00014000;
+    s->regs[R_IO_PLL_CFG] = 0x00014000;
 
     /* 0x120 - 0x16C */
-    s->regs[ARM_CLK_CTRL] = 0x1F000400;
-    s->regs[DDR_CLK_CTRL] = 0x18400003;
-    s->regs[DCI_CLK_CTRL] = 0x01E03201;
-    s->regs[APER_CLK_CTRL] = 0x01FFCCCD;
-    s->regs[USB0_CLK_CTRL] = s->regs[USB1_CLK_CTRL] = 0x00101941;
-    s->regs[GEM0_RCLK_CTRL] = s->regs[GEM1_RCLK_CTRL] = 0x00000001;
-    s->regs[GEM0_CLK_CTRL] = s->regs[GEM1_CLK_CTRL] = 0x00003C01;
-    s->regs[SMC_CLK_CTRL] = 0x00003C01;
-    s->regs[LQSPI_CLK_CTRL] = 0x00002821;
-    s->regs[SDIO_CLK_CTRL] = 0x00001E03;
-    s->regs[UART_CLK_CTRL] = 0x00003F03;
-    s->regs[SPI_CLK_CTRL] = 0x00003F03;
-    s->regs[CAN_CLK_CTRL] = 0x00501903;
-    s->regs[DBG_CLK_CTRL] = 0x00000F03;
-    s->regs[PCAP_CLK_CTRL] = 0x00000F01;
+    s->regs[R_ARM_CLK_CTRL] = 0x1F000400;
+    s->regs[R_DDR_CLK_CTRL] = 0x18400003;
+    s->regs[R_DCI_CLK_CTRL] = 0x01E03201;
+    s->regs[R_APER_CLK_CTRL] = 0x01FFCCCD;
+    s->regs[R_USB0_CLK_CTRL] = s->regs[R_USB1_CLK_CTRL] = 0x00101941;
+    s->regs[R_GEM0_RCLK_CTRL] = s->regs[R_GEM1_RCLK_CTRL] = 0x00000001;
+    s->regs[R_GEM0_CLK_CTRL] = s->regs[R_GEM1_CLK_CTRL] = 0x00003C01;
+    s->regs[R_SMC_CLK_CTRL] = 0x00003C01;
+    s->regs[R_LQSPI_CLK_CTRL] = 0x00002821;
+    s->regs[R_SDIO_CLK_CTRL] = 0x00001E03;
+    s->regs[R_UART_CLK_CTRL] = 0x00003F03;
+    s->regs[R_SPI_CLK_CTRL] = 0x00003F03;
+    s->regs[R_CAN_CLK_CTRL] = 0x00501903;
+    s->regs[R_DBG_CLK_CTRL] = 0x00000F03;
+    s->regs[R_PCAP_CLK_CTRL] = 0x00000F01;
 
     /* 0x170 - 0x1AC */
-    s->regs[FPGA0_CLK_CTRL] = s->regs[FPGA1_CLK_CTRL] = s->regs[FPGA2_CLK_CTRL]
-                            = s->regs[FPGA3_CLK_CTRL] = 0x00101800;
-    s->regs[FPGA0_THR_STA] = s->regs[FPGA1_THR_STA] = s->regs[FPGA2_THR_STA]
-                           = s->regs[FPGA3_THR_STA] = 0x00010000;
+    s->regs[R_FPGA0_CLK_CTRL] = s->regs[R_FPGA1_CLK_CTRL]
+                              = s->regs[R_FPGA2_CLK_CTRL]
+                              = s->regs[R_FPGA3_CLK_CTRL] = 0x00101800;
+    s->regs[R_FPGA0_THR_STA] = s->regs[R_FPGA1_THR_STA]
+                             = s->regs[R_FPGA2_THR_STA]
+                             = s->regs[R_FPGA3_THR_STA] = 0x00010000;
 
     /* 0x1B0 - 0x1D8 */
-    s->regs[BANDGAP_TRIP] = 0x0000001F;
-    s->regs[PLL_PREDIVISOR] = 0x00000001;
-    s->regs[CLK_621_TRUE] = 0x00000001;
+    s->regs[R_BANDGAP_TRIP] = 0x0000001F;
+    s->regs[R_PLL_PREDIVISOR] = 0x00000001;
+    s->regs[R_CLK_621_TRUE] = 0x00000001;
 
     /* 0x200 - 0x25C */
-    s->regs[FPGA_RST_CTRL] = 0x01F33F0F;
-    s->regs[RST_REASON] = 0x00000040;
+    s->regs[R_FPGA_RST_CTRL] = 0x01F33F0F;
+    s->regs[R_RST_REASON] = 0x00000040;
 
-    s->regs[BOOT_MODE] = 0x00000001;
+    s->regs[R_BOOT_MODE] = 0x00000001;
 
     /* 0x700 - 0x7D4 */
     for (i = 0; i < 54; i++) {
-        s->regs[MIO + i] = 0x00001601;
+        s->regs[R_MIO + i] = 0x00001601;
     }
     for (i = 2; i <= 8; i++) {
-        s->regs[MIO + i] = 0x00000601;
+        s->regs[R_MIO + i] = 0x00000601;
     }
 
-    s->regs[MIO_MST_TRI0] = s->regs[MIO_MST_TRI1] = 0xFFFFFFFF;
+    s->regs[R_MIO_MST_TRI0] = s->regs[R_MIO_MST_TRI1] = 0xFFFFFFFF;
 
-    s->regs[CPU_RAM + 0] = s->regs[CPU_RAM + 1] = s->regs[CPU_RAM + 3]
-                         = s->regs[CPU_RAM + 4] = s->regs[CPU_RAM + 7]
-                         = 0x00010101;
-    s->regs[CPU_RAM + 2] = s->regs[CPU_RAM + 5] = 0x01010101;
-    s->regs[CPU_RAM + 6] = 0x00000001;
+    s->regs[R_CPU_RAM + 0] = s->regs[R_CPU_RAM + 1] = s->regs[R_CPU_RAM + 3]
+                           = s->regs[R_CPU_RAM + 4] = s->regs[R_CPU_RAM + 7]
+                           = 0x00010101;
+    s->regs[R_CPU_RAM + 2] = s->regs[R_CPU_RAM + 5] = 0x01010101;
+    s->regs[R_CPU_RAM + 6] = 0x00000001;
 
-    s->regs[IOU + 0] = s->regs[IOU + 1] = s->regs[IOU + 2] = s->regs[IOU + 3]
-                     = 0x09090909;
-    s->regs[IOU + 4] = s->regs[IOU + 5] = 0x00090909;
-    s->regs[IOU + 6] = 0x00000909;
+    s->regs[R_IOU + 0] = s->regs[R_IOU + 1] = s->regs[R_IOU + 2]
+                       = s->regs[R_IOU + 3] = 0x09090909;
+    s->regs[R_IOU + 4] = s->regs[R_IOU + 5] = 0x00090909;
+    s->regs[R_IOU + 6] = 0x00000909;
 
-    s->regs[DMAC_RAM] = 0x00000009;
+    s->regs[R_DMAC_RAM] = 0x00000009;
 
-    s->regs[AFI0 + 0] = s->regs[AFI0 + 1] = 0x09090909;
-    s->regs[AFI1 + 0] = s->regs[AFI1 + 1] = 0x09090909;
-    s->regs[AFI2 + 0] = s->regs[AFI2 + 1] = 0x09090909;
-    s->regs[AFI3 + 0] = s->regs[AFI3 + 1] = 0x09090909;
-    s->regs[AFI0 + 2] = s->regs[AFI1 + 2] = s->regs[AFI2 + 2]
-                      = s->regs[AFI3 + 2] = 0x00000909;
+    s->regs[R_AFI0 + 0] = s->regs[R_AFI0 + 1] = 0x09090909;
+    s->regs[R_AFI1 + 0] = s->regs[R_AFI1 + 1] = 0x09090909;
+    s->regs[R_AFI2 + 0] = s->regs[R_AFI2 + 1] = 0x09090909;
+    s->regs[R_AFI3 + 0] = s->regs[R_AFI3 + 1] = 0x09090909;
+    s->regs[R_AFI0 + 2] = s->regs[R_AFI1 + 2] = s->regs[R_AFI2 + 2]
+                        = s->regs[R_AFI3 + 2] = 0x00000909;
 
-    s->regs[OCM + 0] = 0x01010101;
-    s->regs[OCM + 1] = s->regs[OCM + 2] = 0x09090909;
+    s->regs[R_OCM + 0] = 0x01010101;
+    s->regs[R_OCM + 1] = s->regs[R_OCM + 2] = 0x09090909;
 
-    s->regs[DEVCI_RAM] = 0x00000909;
-    s->regs[CSG_RAM] = 0x00000001;
+    s->regs[R_DEVCI_RAM] = 0x00000909;
+    s->regs[R_CSG_RAM] = 0x00000001;
 
-    s->regs[DDRIOB + 0] = s->regs[DDRIOB + 1] = s->regs[DDRIOB + 2]
-                        = s->regs[DDRIOB + 3] = 0x00000e00;
-    s->regs[DDRIOB + 4] = s->regs[DDRIOB + 5] = s->regs[DDRIOB + 6]
-                        = 0x00000e00;
-    s->regs[DDRIOB + 12] = 0x00000021;
+    s->regs[R_DDRIOB + 0] = s->regs[R_DDRIOB + 1] = s->regs[R_DDRIOB + 2]
+                          = s->regs[R_DDRIOB + 3] = 0x00000e00;
+    s->regs[R_DDRIOB + 4] = s->regs[R_DDRIOB + 5] = s->regs[R_DDRIOB + 6]
+                          = 0x00000e00;
+    s->regs[R_DDRIOB + 12] = 0x00000021;
 }
 
 
 static bool zynq_slcr_check_offset(hwaddr offset, bool rnw)
 {
     switch (offset) {
-    case LOCK:
-    case UNLOCK:
-    case DDR_CAL_START:
-    case DDR_REF_START:
+    case R_LOCK:
+    case R_UNLOCK:
+    case R_DDR_CAL_START:
+    case R_DDR_REF_START:
         return !rnw; /* Write only */
-    case LOCKSTA:
-    case FPGA0_THR_STA:
-    case FPGA1_THR_STA:
-    case FPGA2_THR_STA:
-    case FPGA3_THR_STA:
-    case BOOT_MODE:
-    case PSS_IDCODE:
-    case DDR_CMD_STA:
-    case DDR_DFI_STATUS:
-    case PLL_STATUS:
+    case R_LOCKSTA:
+    case R_FPGA0_THR_STA:
+    case R_FPGA1_THR_STA:
+    case R_FPGA2_THR_STA:
+    case R_FPGA3_THR_STA:
+    case R_BOOT_MODE:
+    case R_PSS_IDCODE:
+    case R_DDR_CMD_STA:
+    case R_DDR_DFI_STATUS:
+    case R_PLL_STATUS:
         return rnw;/* read only */
-    case SCL:
-    case ARM_PLL_CTRL ... IO_PLL_CTRL:
-    case ARM_PLL_CFG ... IO_PLL_CFG:
-    case ARM_CLK_CTRL ... TOPSW_CLK_CTRL:
-    case FPGA0_CLK_CTRL ... FPGA0_THR_CNT:
-    case FPGA1_CLK_CTRL ... FPGA1_THR_CNT:
-    case FPGA2_CLK_CTRL ... FPGA2_THR_CNT:
-    case FPGA3_CLK_CTRL ... FPGA3_THR_CNT:
-    case BANDGAP_TRIP:
-    case PLL_PREDIVISOR:
-    case CLK_621_TRUE:
-    case PSS_RST_CTRL ... A9_CPU_RST_CTRL:
-    case RS_AWDT_CTRL:
-    case RST_REASON:
-    case REBOOT_STATUS:
-    case APU_CTRL:
-    case WDT_CLK_SEL:
-    case TZ_DMA_NS ... TZ_DMA_PERIPH_NS:
-    case DDR_URGENT:
-    case DDR_URGENT_SEL:
-    case MIO ... MIO + MIO_LENGTH - 1:
-    case MIO_LOOPBACK ... MIO_MST_TRI1:
-    case SD0_WP_CD_SEL:
-    case SD1_WP_CD_SEL:
-    case LVL_SHFTR_EN:
-    case OCM_CFG:
-    case CPU_RAM:
-    case IOU:
-    case DMAC_RAM:
-    case AFI0 ... AFI3 + AFI_LENGTH - 1:
-    case OCM:
-    case DEVCI_RAM:
-    case CSG_RAM:
-    case GPIOB_CTRL ... GPIOB_CFG_CMOS33:
-    case GPIOB_CFG_HSTL:
-    case GPIOB_DRVR_BIAS_CTRL:
-    case DDRIOB ... DDRIOB + DDRIOB_LENGTH - 1:
+    case R_SCL:
+    case R_ARM_PLL_CTRL ... R_IO_PLL_CTRL:
+    case R_ARM_PLL_CFG ... R_IO_PLL_CFG:
+    case R_ARM_CLK_CTRL ... R_TOPSW_CLK_CTRL:
+    case R_FPGA0_CLK_CTRL ... R_FPGA0_THR_CNT:
+    case R_FPGA1_CLK_CTRL ... R_FPGA1_THR_CNT:
+    case R_FPGA2_CLK_CTRL ... R_FPGA2_THR_CNT:
+    case R_FPGA3_CLK_CTRL ... R_FPGA3_THR_CNT:
+    case R_BANDGAP_TRIP:
+    case R_PLL_PREDIVISOR:
+    case R_CLK_621_TRUE:
+    case R_PSS_RST_CTRL ... R_A9_CPU_RST_CTRL:
+    case R_RS_AWDT_CTRL:
+    case R_RST_REASON:
+    case R_REBOOT_STATUS:
+    case R_APU_CTRL:
+    case R_WDT_CLK_SEL:
+    case R_TZ_DMA_NS ... R_TZ_DMA_PERIPH_NS:
+    case R_DDR_URGENT:
+    case R_DDR_URGENT_SEL:
+    case R_MIO ... R_MIO + MIO_LENGTH - 1:
+    case R_MIO_LOOPBACK ... R_MIO_MST_TRI1:
+    case R_SD0_WP_CD_SEL:
+    case R_SD1_WP_CD_SEL:
+    case R_LVL_SHFTR_EN:
+    case R_OCM_CFG:
+    case R_CPU_RAM:
+    case R_IOU:
+    case R_DMAC_RAM:
+    case R_AFI0 ... R_AFI3 + AFI_LENGTH - 1:
+    case R_OCM:
+    case R_DEVCI_RAM:
+    case R_CSG_RAM:
+    case R_GPIOB_CTRL ... R_GPIOB_CFG_CMOS33:
+    case R_GPIOB_CFG_HSTL:
+    case R_GPIOB_DRVR_BIAS_CTRL:
+    case R_DDRIOB ... R_DDRIOB + DDRIOB_LENGTH - 1:
         return true;
     default:
         return false;
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
     }
 
     switch (offset) {
-    case SCL:
-        s->regs[SCL] = val & 0x1;
+    case R_SCL:
+        s->regs[R_SCL] = val & 0x1;
         return;
-    case LOCK:
+    case R_LOCK:
         if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
             DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
                      (unsigned)val & 0xFFFF);
-            s->regs[LOCKSTA] = 1;
+            s->regs[R_LOCKSTA] = 1;
         } else {
             DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
                      (int)offset, (unsigned)val & 0xFFFF);
         }
         return;
-    case UNLOCK:
+    case R_UNLOCK:
         if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
             DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
                      (unsigned)val & 0xFFFF);
-            s->regs[LOCKSTA] = 0;
+            s->regs[R_LOCKSTA] = 0;
         } else {
             DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
                      (int)offset, (unsigned)val & 0xFFFF);
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
         return;
     }
 
-    if (s->regs[LOCKSTA]) {
+    if (s->regs[R_LOCKSTA]) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "SCLR registers are locked. Unlock them first\n");
         return;
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
     s->regs[offset] = val;
 
     switch (offset) {
-    case PSS_RST_CTRL:
-        if (val & R_PSS_RST_CTRL_SOFT_RST) {
+    case R_PSS_RST_CTRL:
+        if (FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST)) {
             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
         }
         break;
--
2.20.1

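For readers who haven't met hw/registerfields.h: REG32(NAME, offset)
defines both an address constant A_NAME and a word index R_NAME, which is
why every regs[] access above gains an R_ prefix, and FIELD()/FIELD_EX32()
replace hand-rolled mask macros like the old R_PSS_RST_CTRL_SOFT_RST. A
simplified sketch of the idea (the real macros in
include/hw/registerfields.h are more general; this is only illustrative):

    #include <stdint.h>

    /* Cut-down equivalents of the hw/registerfields.h helpers. */
    #define REG32(reg, addr)                  \
        enum { A_ ## reg = (addr) };          \
        enum { R_ ## reg = (addr) / 4 };

    #define FIELD(reg, field, shift, length)                     \
        enum { R_ ## reg ## _ ## field ## _SHIFT = (shift),      \
               R_ ## reg ## _ ## field ## _LENGTH = (length) };

    #define FIELD_EX32(value, reg, field)                        \
        (((value) >> R_ ## reg ## _ ## field ## _SHIFT) &        \
         ((1u << R_ ## reg ## _ ## field ## _LENGTH) - 1))

    REG32(PSS_RST_CTRL, 0x200)            /* R_PSS_RST_CTRL == 0x80 */
    FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1)

    /* Mirrors the test in zynq_slcr_write() above. */
    static int soft_reset_requested(uint32_t val)
    {
        return FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST);
    }
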
From: Aaron Hill <aa1ronham@gmail.com>

This commit properly sets the ENET_BD_BDU flag once the emulated FEC controller
has finished processing the last descriptor. This is done for both transmit
and receive descriptors.

This allows the QNX 7.0.0 BSP for the Sabrelite board (which can be
found at http://blackberry.qnx.com/en/developers/bsp) to properly
control the FEC. Without this patch, the BSP ethernet driver will never
re-use FEC descriptors, as the unset ENET_BD_BDU flag will cause
it to believe that the descriptors are still in use by the NIC.

Note that Linux does not appear to use this field at all, and is
unaffected by this patch.

Without this patch, QNX will think that the NIC is still processing its
transaction descriptors, and won't send any more data over the network.

For reference:

On page 1192 of the I.MX 6DQ reference manual revision (Rev. 5, 06/2018),
which can be found at https://www.nxp.com/products/processors-and-microcontrollers/arm-based-processors-and-mcus/i.mx-applications-processors/i.mx-6-processors/i.mx-6quad-processors-high-performance-3d-graphics-hd-video-arm-cortex-a9-core:i.MX6Q?&tab=Documentation_Tab&linkline=Application-Note

the 'BDU' field is described as follows for the 'Enhanced transmit
buffer descriptor':

'Last buffer descriptor update done. Indicates that the last BD data has been updated by
uDMA. This field is written by the user (=0) and uDMA (=1).'

The same description is used for the receive buffer descriptor.

Signed-off-by: Aaron Hill <aa1ronham@gmail.com>
Message-id: 20190805142417.10433-1-aaron.hill@alertinnovation.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/net/imx_fec.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -XXX,XX +XXX,XX @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
         if (bd.option & ENET_BD_TX_INT) {
             s->regs[ENET_EIR] |= int_txf;
         }
+        /* Indicate that we've updated the last buffer descriptor. */
+        bd.last_buffer = ENET_BD_BDU;
     }
     if (bd.option & ENET_BD_TX_INT) {
         s->regs[ENET_EIR] |= int_txb;
@@ -XXX,XX +XXX,XX @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
             /* Last buffer in frame. */
             bd.flags |= flags | ENET_BD_L;
             FEC_PRINTF("rx frame flags %04x\n", bd.flags);
+            /* Indicate that we've updated the last buffer descriptor. */
+            bd.last_buffer = ENET_BD_BDU;
             if (bd.option & ENET_BD_RX_INT) {
                 s->regs[ENET_EIR] |= ENET_INT_RXF;
             }
--
2.20.1

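To see why a missing BDU bit wedges a driver: descriptor-recycling code
typically treats BDU as "the DMA engine has finished with this BD" and
refuses to reuse a descriptor until it is set. A hypothetical driver-side
sketch (struct layout, field names and the bit position are invented for
illustration and do not match the real FEC hardware or the QNX BSP):

    #include <stdbool.h>
    #include <stdint.h>

    #define ENET_BD_BDU (1u << 15)      /* illustrative bit position */

    struct enet_bd {
        uint16_t flags;
        uint16_t length;
        uint32_t buf_addr;
        uint16_t option;
        uint16_t status;                /* BDU lives in the enhanced BD area */
    };

    /*
     * Reclaim a transmit descriptor only once the controller marks it
     * done. If the emulated NIC never sets BDU, this returns false
     * forever and the driver runs out of descriptors -- exactly the
     * symptom this patch fixes.
     */
    static bool try_reclaim(volatile struct enet_bd *bd)
    {
        if (!(bd->status & ENET_BD_BDU)) {
            return false;               /* still owned by the uDMA engine */
        }
        bd->status &= ~ENET_BD_BDU;     /* hand it back to software (=0) */
        return true;
    }
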
Factor out code to 'generate a singlestep exception', which is
currently repeated in four places.

To do this we need to also pull the identical copies of the
gen_exception() function out of translate-a64.c and translate.c
into translate.h.

(There is a bug in the code: we're taking the exception to the wrong
target EL. This will be simpler to fix if there's only one place to
do it.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20190805130952.4415-2-peter.maydell@linaro.org
---
 target/arm/translate.h     | 23 +++++++++++++++++++++++
 target/arm/translate-a64.c | 19 ++-----------------
 target/arm/translate.c     | 20 ++------------------
 3 files changed, 27 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_ARM_TRANSLATE_H
 
 #include "exec/translator.h"
+#include "internals.h"
 
 
 /* internal defines */
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
     }
 }
 
+static inline void gen_exception(int excp, uint32_t syndrome,
+                                 uint32_t target_el)
+{
+    TCGv_i32 tcg_excp = tcg_const_i32(excp);
+    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+    TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
+    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+                                       tcg_syn, tcg_el);
+
+    tcg_temp_free_i32(tcg_el);
+    tcg_temp_free_i32(tcg_syn);
+    tcg_temp_free_i32(tcg_excp);
+}
+
+/* Generate an architectural singlestep exception */
+static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
+{
+    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
+                  default_exception_el(s));
+}
+
 /*
  * Given a VFP floating point constant encoded into an 8 bit immediate in an
  * instruction, expand it to the actual constant value of the specified
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     tcg_temp_free_i32(tcg_excp);
 }
 
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
-{
-    TCGv_i32 tcg_excp = tcg_const_i32(excp);
-    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
-    TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
-    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
-                                       tcg_syn, tcg_el);
-    tcg_temp_free_i32(tcg_el);
-    tcg_temp_free_i32(tcg_syn);
-    tcg_temp_free_i32(tcg_excp);
-}
-
 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
     gen_a64_set_pc_im(s->pc - offset);
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
      * of the exception, and our syndrome information is always correct.
      */
     gen_ss_advance(s);
-    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
-                  default_exception_el(s));
+    gen_swstep_exception(s, 1, s->is_ldex);
     s->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * bits should be zero.
          */
         assert(dc->base.num_insns == 1);
-        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
-                      default_exception_el(dc));
+        gen_swstep_exception(dc, 0, 0);
         dc->base.is_jmp = DISAS_NORETURN;
     } else {
         disas_a64_insn(env, dc);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     tcg_temp_free_i32(tcg_excp);
 }
 
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
-{
-    TCGv_i32 tcg_excp = tcg_const_i32(excp);
-    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
-    TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
-    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
-                                       tcg_syn, tcg_el);
-
-    tcg_temp_free_i32(tcg_el);
-    tcg_temp_free_i32(tcg_syn);
-    tcg_temp_free_i32(tcg_excp);
-}
-
 static void gen_step_complete_exception(DisasContext *s)
 {
     /* We just completed step of an insn. Move from Active-not-pending
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
      * of the exception, and our syndrome information is always correct.
      */
     gen_ss_advance(s);
-    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
-                  default_exception_el(s));
+    gen_swstep_exception(s, 1, s->is_ldex);
     s->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
          * bits should be zero.
          */
         assert(dc->base.num_insns == 1);
-        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
-                      default_exception_el(dc));
+        gen_swstep_exception(dc, 0, 0);
         dc->base.is_jmp = DISAS_NORETURN;
         return true;
     }
--
2.20.1

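The syn_swstep() value used here follows the architectural ESR_ELx layout
for software-step debug exceptions: EC in bits [31:26] (0x32 when taken
from a lower EL, 0x33 from the same EL), IL at bit 25, ISV at bit 24, EX
at bit 6 and an IFSC of 0b100010. A standalone sketch of that encoding
(written from the Arm ARM layout; QEMU's helper in target/arm/internals.h
is the authoritative version):

    #include <stdint.h>

    /* Compose an ESR_ELx syndrome for a software-step exception. */
    static uint32_t swstep_syndrome(int same_el, int isv, int ex)
    {
        uint32_t ec = same_el ? 0x33 : 0x32;   /* EC, bits [31:26] */

        return (ec << 26)
             | (1u << 25)                      /* IL: 32-bit insn length */
             | ((uint32_t)isv << 24)           /* ISV: EX bit is valid */
             | ((uint32_t)ex << 6)             /* EX: stepped a load-exclusive */
             | 0x22;                           /* IFSC 0b100010: debug exception */
    }
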
When generating an architectural single-step exception we were
routing it to the "default exception level", which is to say
the same exception level we execute at except that EL0 exceptions
go to EL1. This is incorrect because the debug exception level
can be configured by the guest for situations such as single
stepping of EL0 and EL1 code by EL2.

We have to track the target debug exception level in the TB
flags, because it is dependent on CPU state like HCR_EL2.TGE
and MDCR_EL2.TDE. (That we were previously calling the
arm_debug_target_el() function to determine dc->ss_same_el
is itself a bug, though one that would only have manifested
as incorrect syndrome information.) Since we are out of TB
flag bits unless we want to expand into the cs_base field,
we share some bits with the M-profile only HANDLER and
STACKCHECK bits, since only A-profile has this singlestep.

Fixes: https://bugs.launchpad.net/qemu/+bug/1838913
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20190805130952.4415-3-peter.maydell@linaro.org
---
 target/arm/cpu.h           |  5 +++++
 target/arm/translate.h     | 15 +++++++++++----
 target/arm/helper.c        |  6 ++++++
 target/arm/translate-a64.c |  2 +-
 target/arm/translate.c     |  4 +++-
 5 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
 /* Target EL if we take a floating-point-disabled exception */
 FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
 FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
+/*
+ * For A-profile only, target EL for debug exceptions.
+ * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
+ */
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
 
 /* Bit usage when in AArch32 state: */
 FIELD(TBFLAG_A32, THUMB, 0, 1)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     uint32_t svc_imm;
     int aarch64;
     int current_el;
+    /* Debug target exception level for single-step exceptions */
+    int debug_target_el;
     GHashTable *cp_regs;
     uint64_t features; /* CPU features bits */
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
      * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
      */
     bool is_ldex;
-    /* True if a single-step exception will be taken to the current EL */
-    bool ss_same_el;
     /* True if v8.3-PAuth is active. */
     bool pauth_active;
     /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
@@ -XXX,XX +XXX,XX @@ static inline void gen_exception(int excp, uint32_t syndrome,
 /* Generate an architectural singlestep exception */
 static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
 {
-    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
-                  default_exception_el(s));
+    bool same_el = (s->debug_target_el == s->current_el);
+
+    /*
+     * If singlestep is targeting a lower EL than the current one,
+     * then s->ss_active must be false and we can never get here.
+     */
+    assert(s->debug_target_el >= s->current_el);
+
+    gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
 }
 
 /*
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         }
     }
 
+    if (!arm_feature(env, ARM_FEATURE_M)) {
+        int target_el = arm_debug_target_el(env);
+
+        flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
+    }
+
     *pflags = flags;
     *cs_base = 0;
 }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+    dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
 
     /* Bound the number of insns to execute to those left on the page. */
     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+    if (!arm_feature(env, ARM_FEATURE_M)) {
+        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+    }
 
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
--
2.20.1

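The value cached in the new DEBUG_TARGET_EL TB flag field comes from the
usual v8-A routing rule: debug exceptions from Non-secure EL0/EL1 go to
EL2 when HCR_EL2.TGE or MDCR_EL2.TDE is set, and otherwise to EL1. A
rough sketch of that decision (hand-written from the architecture rules;
QEMU's arm_debug_target_el() is the authoritative implementation and also
handles the Secure/EL3 cases):

    #include <stdbool.h>

    /* Where should a debug exception be taken? (simplified) */
    static int debug_target_el(bool have_el2, bool secure,
                               bool hcr_tge, bool mdcr_tde)
    {
        if (have_el2 && !secure && (hcr_tge || mdcr_tde)) {
            return 2;   /* EL2 has claimed debug exceptions */
        }
        return 1;       /* default: EL1 */
    }
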
From: Richard Henderson <richard.henderson@linaro.org>

This function is used in two different contexts, and it will be
clearer if the function is given the address to which it applies.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     }
 }
 
-static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
 {
-    /* Return true if this is a 16 bit instruction. We must be precise
-     * about this (matching the decode).  We assume that s->pc still
-     * points to the first 16 bits of the insn.
+    /*
+     * Return true if this is a 16 bit instruction. We must be precise
+     * about this (matching the decode).
      */
     if ((insn >> 11) < 0x1d) {
         /* Definitely a 16-bit instruction */
@@ -XXX,XX +XXX,XX @@ static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
         return false;
     }
 
-    if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
         /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
          * is not on the next page; we merge this into a 32-bit
          * insn.
@@ -XXX,XX +XXX,XX @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
      */
     uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
 
-    return !thumb_insn_is_16bit(s, insn);
+    return !thumb_insn_is_16bit(s, s->pc, insn);
 }
 
 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
 
     insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
-    is_16bit = thumb_insn_is_16bit(dc, insn);
+    is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
     dc->pc += 2;
     if (!is_16bit) {
         uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
--
2.20.1

1
From: Richard Henderson <richard.henderson@linaro.org>

For same-sign saturation, we have tcg vector operations. We can
compute the QC bit by comparing the saturated value against the
unsaturated value.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190209033847.9014-12-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h | 33 +++++++
 target/arm/translate.h | 4 +
 target/arm/translate-a64.c | 36 ++++----
 target/arm/translate.c | 172 +++++++++++++++++++++++++++++++------
 target/arm/vec_helper.c | 130 ++++++++++++++++++++++++++++
 5 files changed, 331 insertions(+), 44 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Add a new field to retain the address of the instruction currently
being translated. The 32-bit uses are all within subroutines used
by a32 and t32. This will become less obvious when t16 support is
merged with a32+t32, and having a clear definition will help.

Convert aarch64 as well for consistency. Note that there is one
instance of a pre-assert fprintf that used the wrong value for the
address of the current instruction.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h | 2 +-
 target/arm/translate.h | 2 ++
 target/arm/translate-a64.c | 21 +++++++++++----------
 target/arm/translate.c | 14 ++++++++------
 4 files changed, 22 insertions(+), 17 deletions(-)
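To make the QC computation described in the first patch concrete, here
is a scalar sketch of the same idea for one unsigned byte lane
(illustrative only; the patch below implements it with TCG vector ops
and per-lane gvec helpers):

    /* Saturation happened iff the saturating result differs from the
     * plain modular result; QC accumulates stickily across lanes.
     */
    static uint8_t uqadd_byte(uint8_t a, uint8_t b, bool *qc)
    {
        uint8_t wrapped = a + b;                     /* modular add */
        uint8_t sat = (wrapped < a) ? UINT8_MAX : wrapped;

        *qc |= (sat != wrapped);
        return sat;
    }
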
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
19
diff --git a/target/arm/helper.h b/target/arm/helper.h
25
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-a64.h
21
--- a/target/arm/helper.h
27
+++ b/target/arm/translate-a64.h
22
+++ b/target/arm/helper.h
28
@@ -XXX,XX +XXX,XX @@ void unallocated_encoding(DisasContext *s);
23
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
29
qemu_log_mask(LOG_UNIMP, \
24
DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
30
"%s:%d: unsupported instruction encoding 0x%08x " \
25
void, ptr, ptr, ptr, ptr, ptr, i32)
31
"at pc=%016" PRIx64 "\n", \
26
32
- __FILE__, __LINE__, insn, s->pc - 4); \
27
+DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
33
+ __FILE__, __LINE__, insn, s->pc_curr); \
28
+ void, ptr, ptr, ptr, ptr, i32)
34
unallocated_encoding(s); \
29
+DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
35
} while (0)
30
+ void, ptr, ptr, ptr, ptr, i32)
36
31
+DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
32
+ void, ptr, ptr, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
34
+ void, ptr, ptr, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
36
+ void, ptr, ptr, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
38
+ void, ptr, ptr, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
40
+ void, ptr, ptr, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
42
+ void, ptr, ptr, ptr, ptr, i32)
43
+DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
44
+ void, ptr, ptr, ptr, ptr, i32)
45
+DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
46
+ void, ptr, ptr, ptr, ptr, i32)
47
+DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
48
+ void, ptr, ptr, ptr, ptr, i32)
49
+DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
50
+ void, ptr, ptr, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
52
+ void, ptr, ptr, ptr, ptr, i32)
53
+DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
54
+ void, ptr, ptr, ptr, ptr, i32)
55
+DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
56
+ void, ptr, ptr, ptr, ptr, i32)
57
+DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
58
+ void, ptr, ptr, ptr, ptr, i32)
59
+
60
#ifdef TARGET_AARCH64
61
#include "helper-a64.h"
62
#include "helper-sve.h"
63
diff --git a/target/arm/translate.h b/target/arm/translate.h
37
diff --git a/target/arm/translate.h b/target/arm/translate.h
64
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/translate.h
39
--- a/target/arm/translate.h
66
+++ b/target/arm/translate.h
40
+++ b/target/arm/translate.h
67
@@ -XXX,XX +XXX,XX @@ extern const GVecGen2i ssra_op[4];
41
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
68
extern const GVecGen2i usra_op[4];
42
const ARMISARegisters *isar;
69
extern const GVecGen2i sri_op[4];
43
70
extern const GVecGen2i sli_op[4];
44
target_ulong pc;
71
+extern const GVecGen4 uqadd_op[4];
45
+ /* The address of the current instruction being translated. */
72
+extern const GVecGen4 sqadd_op[4];
46
+ target_ulong pc_curr;
73
+extern const GVecGen4 uqsub_op[4];
47
target_ulong page_start;
74
+extern const GVecGen4 sqsub_op[4];
48
uint32_t insn;
75
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
49
/* Nonzero if this instruction has been conditionally skipped. */
76
77
/*
78
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
50
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
79
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
80
--- a/target/arm/translate-a64.c
52
--- a/target/arm/translate-a64.c
81
+++ b/target/arm/translate-a64.c
53
+++ b/target/arm/translate-a64.c
82
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
54
@@ -XXX,XX +XXX,XX @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
55
*/
56
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
57
{
58
- uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
59
+ uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
60
61
if (insn & (1U << 31)) {
62
/* BL Branch with link */
63
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
64
sf = extract32(insn, 31, 1);
65
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
66
rt = extract32(insn, 0, 5);
67
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
68
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
69
70
tcg_cmp = read_cpu_reg(s, rt, sf);
71
label_match = gen_new_label();
72
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
73
74
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
75
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
76
- addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
77
+ addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
78
rt = extract32(insn, 0, 5);
79
80
tcg_cmp = tcg_temp_new_i64();
81
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
82
unallocated_encoding(s);
83
return;
83
}
84
}
84
85
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
85
switch (opcode) {
86
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
86
+ case 0x01: /* SQADD, UQADD */
87
cond = extract32(insn, 0, 4);
87
+ tcg_gen_gvec_4(vec_full_reg_offset(s, rd),
88
88
+ offsetof(CPUARMState, vfp.qc),
89
reset_btype(s);
89
+ vec_full_reg_offset(s, rn),
90
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
90
+ vec_full_reg_offset(s, rm),
91
TCGv_i32 tcg_syn, tcg_isread;
91
+ is_q ? 16 : 8, vec_full_reg_size(s),
92
uint32_t syndrome;
92
+ (u ? uqadd_op : sqadd_op) + size);
93
93
+ return;
94
- gen_a64_set_pc_im(s->pc - 4);
94
+ case 0x05: /* SQSUB, UQSUB */
95
+ gen_a64_set_pc_im(s->pc_curr);
95
+ tcg_gen_gvec_4(vec_full_reg_offset(s, rd),
96
tmpptr = tcg_const_ptr(ri);
96
+ offsetof(CPUARMState, vfp.qc),
97
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
97
+ vec_full_reg_offset(s, rn),
98
tcg_syn = tcg_const_i32(syndrome);
98
+ vec_full_reg_offset(s, rm),
99
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
99
+ is_q ? 16 : 8, vec_full_reg_size(s),
100
/* The pre HVC helper handles cases when HVC gets trapped
100
+ (u ? uqsub_op : sqsub_op) + size);
101
* as an undefined insn by runtime configuration.
101
+ return;
102
*/
102
case 0x0c: /* SMAX, UMAX */
103
- gen_a64_set_pc_im(s->pc - 4);
103
if (u) {
104
+ gen_a64_set_pc_im(s->pc_curr);
104
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
105
gen_helper_pre_hvc(cpu_env);
105
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
106
gen_ss_advance(s);
106
genfn = fns[size][u];
107
gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
108
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
109
unallocated_encoding(s);
107
break;
110
break;
108
}
111
}
109
- case 0x1: /* SQADD, UQADD */
112
- gen_a64_set_pc_im(s->pc - 4);
110
- {
113
+ gen_a64_set_pc_im(s->pc_curr);
111
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
114
tmp = tcg_const_i32(syn_aa64_smc(imm16));
112
- { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
115
gen_helper_pre_smc(cpu_env, tmp);
113
- { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
116
tcg_temp_free_i32(tmp);
114
- { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
117
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
115
- };
118
116
- genenvfn = fns[size][u];
119
tcg_rt = cpu_reg(s, rt);
117
- break;
120
118
- }
121
- clean_addr = tcg_const_i64((s->pc - 4) + imm);
119
case 0x2: /* SRHADD, URHADD */
122
+ clean_addr = tcg_const_i64(s->pc_curr + imm);
120
{
123
if (is_vector) {
121
static NeonGenTwoOpFn * const fns[3][2] = {
124
do_fp_ld(s, rt, clean_addr, size);
122
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
125
} else {
123
genfn = fns[size][u];
126
@@ -XXX,XX +XXX,XX @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
127
offset = sextract64(insn, 5, 19);
128
offset = offset << 2 | extract32(insn, 29, 2);
129
rd = extract32(insn, 0, 5);
130
- base = s->pc - 4;
131
+ base = s->pc_curr;
132
133
if (page) {
134
/* ADRP (page based) */
135
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
124
break;
136
break;
137
default:
138
fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
139
- __func__, insn, fpopcode, s->pc);
140
+ __func__, insn, fpopcode, s->pc_curr);
141
g_assert_not_reached();
125
}
142
}
126
- case 0x5: /* SQSUB, UQSUB */
143
127
- {
144
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
128
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
145
{
129
- { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
146
uint32_t insn;
130
- { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
147
131
- { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
148
+ s->pc_curr = s->pc;
132
- };
149
insn = arm_ldl_code(env, s->pc, s->sctlr_b);
133
- genenvfn = fns[size][u];
150
s->insn = insn;
134
- break;
151
s->pc += 4;
135
- }
136
case 0x8: /* SSHL, USHL */
137
{
138
static NeonGenTwoOpFn * const fns[3][2] = {
139
diff --git a/target/arm/translate.c b/target/arm/translate.c
152
diff --git a/target/arm/translate.c b/target/arm/translate.c
140
index XXXXXXX..XXXXXXX 100644
153
index XXXXXXX..XXXXXXX 100644
141
--- a/target/arm/translate.c
154
--- a/target/arm/translate.c
142
+++ b/target/arm/translate.c
155
+++ b/target/arm/translate.c
143
@@ -XXX,XX +XXX,XX @@ const GVecGen3 cmtst_op[4] = {
156
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
144
.vece = MO_64 },
157
* as an undefined insn by runtime configuration (ie before
145
};
158
* the insn really executes).
146
159
*/
147
+static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
160
- gen_set_pc_im(s, s->pc - 4);
148
+ TCGv_vec a, TCGv_vec b)
161
+ gen_set_pc_im(s, s->pc_curr);
149
+{
162
gen_helper_pre_hvc(cpu_env);
150
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
163
/* Otherwise we will treat this as a real exception which
151
+ tcg_gen_add_vec(vece, x, a, b);
164
* happens after execution of the insn. (The distinction matters
152
+ tcg_gen_usadd_vec(vece, t, a, b);
165
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
153
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
166
*/
154
+ tcg_gen_or_vec(vece, sat, sat, x);
167
TCGv_i32 tmp;
155
+ tcg_temp_free_vec(x);
168
156
+}
169
- gen_set_pc_im(s, s->pc - 4);
157
+
170
+ gen_set_pc_im(s, s->pc_curr);
158
+const GVecGen4 uqadd_op[4] = {
171
tmp = tcg_const_i32(syn_aa32_smc());
159
+ { .fniv = gen_uqadd_vec,
172
gen_helper_pre_smc(cpu_env, tmp);
160
+ .fno = gen_helper_gvec_uqadd_b,
173
tcg_temp_free_i32(tmp);
161
+ .opc = INDEX_op_usadd_vec,
174
@@ -XXX,XX +XXX,XX @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
162
+ .write_aofs = true,
175
163
+ .vece = MO_8 },
176
/* Sync state because msr_banked() can raise exceptions */
164
+ { .fniv = gen_uqadd_vec,
177
gen_set_condexec(s);
165
+ .fno = gen_helper_gvec_uqadd_h,
178
- gen_set_pc_im(s, s->pc - 4);
166
+ .opc = INDEX_op_usadd_vec,
179
+ gen_set_pc_im(s, s->pc_curr);
167
+ .write_aofs = true,
180
tcg_reg = load_reg(s, rn);
168
+ .vece = MO_16 },
181
tcg_tgtmode = tcg_const_i32(tgtmode);
169
+ { .fniv = gen_uqadd_vec,
182
tcg_regno = tcg_const_i32(regno);
170
+ .fno = gen_helper_gvec_uqadd_s,
183
@@ -XXX,XX +XXX,XX @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
171
+ .opc = INDEX_op_usadd_vec,
184
172
+ .write_aofs = true,
185
/* Sync state because mrs_banked() can raise exceptions */
173
+ .vece = MO_32 },
186
gen_set_condexec(s);
174
+ { .fniv = gen_uqadd_vec,
187
- gen_set_pc_im(s, s->pc - 4);
175
+ .fno = gen_helper_gvec_uqadd_d,
188
+ gen_set_pc_im(s, s->pc_curr);
176
+ .opc = INDEX_op_usadd_vec,
189
tcg_reg = tcg_temp_new_i32();
177
+ .write_aofs = true,
190
tcg_tgtmode = tcg_const_i32(tgtmode);
178
+ .vece = MO_64 },
191
tcg_regno = tcg_const_i32(regno);
179
+};
192
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
180
+
181
+static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
182
+ TCGv_vec a, TCGv_vec b)
183
+{
184
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
185
+ tcg_gen_add_vec(vece, x, a, b);
186
+ tcg_gen_ssadd_vec(vece, t, a, b);
187
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
188
+ tcg_gen_or_vec(vece, sat, sat, x);
189
+ tcg_temp_free_vec(x);
190
+}
191
+
192
+const GVecGen4 sqadd_op[4] = {
193
+ { .fniv = gen_sqadd_vec,
194
+ .fno = gen_helper_gvec_sqadd_b,
195
+ .opc = INDEX_op_ssadd_vec,
196
+ .write_aofs = true,
197
+ .vece = MO_8 },
198
+ { .fniv = gen_sqadd_vec,
199
+ .fno = gen_helper_gvec_sqadd_h,
200
+ .opc = INDEX_op_ssadd_vec,
201
+ .write_aofs = true,
202
+ .vece = MO_16 },
203
+ { .fniv = gen_sqadd_vec,
204
+ .fno = gen_helper_gvec_sqadd_s,
205
+ .opc = INDEX_op_ssadd_vec,
206
+ .write_aofs = true,
207
+ .vece = MO_32 },
208
+ { .fniv = gen_sqadd_vec,
209
+ .fno = gen_helper_gvec_sqadd_d,
210
+ .opc = INDEX_op_ssadd_vec,
211
+ .write_aofs = true,
212
+ .vece = MO_64 },
213
+};
214
+
215
+static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
216
+ TCGv_vec a, TCGv_vec b)
217
+{
218
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
219
+ tcg_gen_sub_vec(vece, x, a, b);
220
+ tcg_gen_ussub_vec(vece, t, a, b);
221
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
222
+ tcg_gen_or_vec(vece, sat, sat, x);
223
+ tcg_temp_free_vec(x);
224
+}
225
+
226
+const GVecGen4 uqsub_op[4] = {
227
+ { .fniv = gen_uqsub_vec,
228
+ .fno = gen_helper_gvec_uqsub_b,
229
+ .opc = INDEX_op_ussub_vec,
230
+ .write_aofs = true,
231
+ .vece = MO_8 },
232
+ { .fniv = gen_uqsub_vec,
233
+ .fno = gen_helper_gvec_uqsub_h,
234
+ .opc = INDEX_op_ussub_vec,
235
+ .write_aofs = true,
236
+ .vece = MO_16 },
237
+ { .fniv = gen_uqsub_vec,
238
+ .fno = gen_helper_gvec_uqsub_s,
239
+ .opc = INDEX_op_ussub_vec,
240
+ .write_aofs = true,
241
+ .vece = MO_32 },
242
+ { .fniv = gen_uqsub_vec,
243
+ .fno = gen_helper_gvec_uqsub_d,
244
+ .opc = INDEX_op_ussub_vec,
245
+ .write_aofs = true,
246
+ .vece = MO_64 },
247
+};
248
+
249
+static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
250
+ TCGv_vec a, TCGv_vec b)
251
+{
252
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
253
+ tcg_gen_sub_vec(vece, x, a, b);
254
+ tcg_gen_sssub_vec(vece, t, a, b);
255
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
256
+ tcg_gen_or_vec(vece, sat, sat, x);
257
+ tcg_temp_free_vec(x);
258
+}
259
+
260
+const GVecGen4 sqsub_op[4] = {
261
+ { .fniv = gen_sqsub_vec,
262
+ .fno = gen_helper_gvec_sqsub_b,
263
+ .opc = INDEX_op_sssub_vec,
264
+ .write_aofs = true,
265
+ .vece = MO_8 },
266
+ { .fniv = gen_sqsub_vec,
267
+ .fno = gen_helper_gvec_sqsub_h,
268
+ .opc = INDEX_op_sssub_vec,
269
+ .write_aofs = true,
270
+ .vece = MO_16 },
271
+ { .fniv = gen_sqsub_vec,
272
+ .fno = gen_helper_gvec_sqsub_s,
273
+ .opc = INDEX_op_sssub_vec,
274
+ .write_aofs = true,
275
+ .vece = MO_32 },
276
+ { .fniv = gen_sqsub_vec,
277
+ .fno = gen_helper_gvec_sqsub_d,
278
+ .opc = INDEX_op_sssub_vec,
279
+ .write_aofs = true,
280
+ .vece = MO_64 },
281
+};
282
+
283
/* Translate a NEON data processing instruction. Return nonzero if the
284
instruction is invalid.
285
We process data in a mixture of 32-bit and 64-bit chunks.
286
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
287
}
193
}
288
return 0;
194
289
195
gen_set_condexec(s);
290
+ case NEON_3R_VQADD:
196
- gen_set_pc_im(s, s->pc - 4);
291
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
197
+ gen_set_pc_im(s, s->pc_curr);
292
+ rn_ofs, rm_ofs, vec_size, vec_size,
198
tmpptr = tcg_const_ptr(ri);
293
+ (u ? uqadd_op : sqadd_op) + size);
199
tcg_syn = tcg_const_i32(syndrome);
294
+ break;
200
tcg_isread = tcg_const_i32(isread);
295
+
201
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
296
+ case NEON_3R_VQSUB:
202
tmp = tcg_const_i32(mode);
297
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
203
/* get_r13_banked() will raise an exception if called from System mode */
298
+ rn_ofs, rm_ofs, vec_size, vec_size,
204
gen_set_condexec(s);
299
+ (u ? uqsub_op : sqsub_op) + size);
205
- gen_set_pc_im(s, s->pc - 4);
300
+ break;
206
+ gen_set_pc_im(s, s->pc_curr);
301
+
207
gen_helper_get_r13_banked(addr, cpu_env, tmp);
302
case NEON_3R_VMUL: /* VMUL */
208
tcg_temp_free_i32(tmp);
303
if (u) {
209
switch (amode) {
304
/* Polynomial case allows only P8 and is handled below. */
210
@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
305
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
211
return;
306
neon_load_reg64(cpu_V0, rn + pass);
212
}
307
neon_load_reg64(cpu_V1, rm + pass);
213
308
switch (op) {
214
+ dc->pc_curr = dc->pc;
309
- case NEON_3R_VQADD:
215
insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
310
- if (u) {
216
dc->insn = insn;
311
- gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
217
dc->pc += 4;
312
- cpu_V0, cpu_V1);
218
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
313
- } else {
219
return;
314
- gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
220
}
315
- cpu_V0, cpu_V1);
221
316
- }
222
+ dc->pc_curr = dc->pc;
317
- break;
223
insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
318
- case NEON_3R_VQSUB:
224
is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
319
- if (u) {
225
dc->pc += 2;
320
- gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
321
- cpu_V0, cpu_V1);
322
- } else {
323
- gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
324
- cpu_V0, cpu_V1);
325
- }
326
- break;
327
case NEON_3R_VSHL:
328
if (u) {
329
gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
330
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
331
case NEON_3R_VHADD:
332
GEN_NEON_INTEGER_OP(hadd);
333
break;
334
- case NEON_3R_VQADD:
335
- GEN_NEON_INTEGER_OP_ENV(qadd);
336
- break;
337
case NEON_3R_VRHADD:
338
GEN_NEON_INTEGER_OP(rhadd);
339
break;
340
case NEON_3R_VHSUB:
341
GEN_NEON_INTEGER_OP(hsub);
342
break;
343
- case NEON_3R_VQSUB:
344
- GEN_NEON_INTEGER_OP_ENV(qsub);
345
- break;
346
case NEON_3R_VSHL:
347
GEN_NEON_INTEGER_OP(shl);
348
break;
349
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
350
index XXXXXXX..XXXXXXX 100644
351
--- a/target/arm/vec_helper.c
352
+++ b/target/arm/vec_helper.c
353
@@ -XXX,XX +XXX,XX @@ DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
354
DO_FMLA_IDX(gvec_fmla_idx_d, float64, )
355
356
#undef DO_FMLA_IDX
357
+
358
+#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \
359
+void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \
360
+{ \
361
+ intptr_t i, oprsz = simd_oprsz(desc); \
362
+ TYPEN *d = vd, *n = vn; TYPEM *m = vm; \
363
+ bool q = false; \
364
+ for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \
365
+ WTYPE dd = (WTYPE)n[i] OP m[i]; \
366
+ if (dd < MIN) { \
367
+ dd = MIN; \
368
+ q = true; \
369
+ } else if (dd > MAX) { \
370
+ dd = MAX; \
371
+ q = true; \
372
+ } \
373
+ d[i] = dd; \
374
+ } \
375
+ if (q) { \
376
+ uint32_t *qc = vq; \
377
+ qc[0] = 1; \
378
+ } \
379
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
380
+}
381
+
382
+DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
383
+DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
384
+DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)
385
+
386
+DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
387
+DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
388
+DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)
389
+
390
+DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
391
+DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
392
+DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)
393
+
394
+DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
395
+DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
396
+DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)
397
+
398
+#undef DO_SAT
399
+
400
+void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
401
+ void *vm, uint32_t desc)
402
+{
403
+ intptr_t i, oprsz = simd_oprsz(desc);
404
+ uint64_t *d = vd, *n = vn, *m = vm;
405
+ bool q = false;
406
+
407
+ for (i = 0; i < oprsz / 8; i++) {
408
+ uint64_t nn = n[i], mm = m[i], dd = nn + mm;
409
+ if (dd < nn) {
410
+ dd = UINT64_MAX;
411
+ q = true;
412
+ }
413
+ d[i] = dd;
414
+ }
415
+ if (q) {
416
+ uint32_t *qc = vq;
417
+ qc[0] = 1;
418
+ }
419
+ clear_tail(d, oprsz, simd_maxsz(desc));
420
+}
421
+
422
+void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
423
+ void *vm, uint32_t desc)
424
+{
425
+ intptr_t i, oprsz = simd_oprsz(desc);
426
+ uint64_t *d = vd, *n = vn, *m = vm;
427
+ bool q = false;
428
+
429
+ for (i = 0; i < oprsz / 8; i++) {
430
+ uint64_t nn = n[i], mm = m[i], dd = nn - mm;
431
+ if (nn < mm) {
432
+ dd = 0;
433
+ q = true;
434
+ }
435
+ d[i] = dd;
436
+ }
437
+ if (q) {
438
+ uint32_t *qc = vq;
439
+ qc[0] = 1;
440
+ }
441
+ clear_tail(d, oprsz, simd_maxsz(desc));
442
+}
443
+
444
+void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
445
+ void *vm, uint32_t desc)
446
+{
447
+ intptr_t i, oprsz = simd_oprsz(desc);
448
+ int64_t *d = vd, *n = vn, *m = vm;
449
+ bool q = false;
450
+
451
+ for (i = 0; i < oprsz / 8; i++) {
452
+ int64_t nn = n[i], mm = m[i], dd = nn + mm;
453
+ if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
454
+ dd = (nn >> 63) ^ ~INT64_MIN;
455
+ q = true;
456
+ }
457
+ d[i] = dd;
458
+ }
459
+ if (q) {
460
+ uint32_t *qc = vq;
461
+ qc[0] = 1;
462
+ }
463
+ clear_tail(d, oprsz, simd_maxsz(desc));
464
+}
465
+
466
+void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
467
+ void *vm, uint32_t desc)
468
+{
469
+ intptr_t i, oprsz = simd_oprsz(desc);
470
+ int64_t *d = vd, *n = vn, *m = vm;
471
+ bool q = false;
472
+
473
+ for (i = 0; i < oprsz / 8; i++) {
474
+ int64_t nn = n[i], mm = m[i], dd = nn - mm;
475
+ if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
476
+ dd = (nn >> 63) ^ ~INT64_MIN;
477
+ q = true;
478
+ }
479
+ d[i] = dd;
480
+ }
481
+ if (q) {
482
+ uint32_t *qc = vq;
483
+ qc[0] = 1;
484
+ }
485
+ clear_tail(d, oprsz, simd_maxsz(desc));
486
+}
487
--
2.20.1

The code for handling the NVIC SHPR1 register intends to permit
byte and halfword accesses (as the architecture requires). However
the 'case' line for it only lists the base address of the
register, so attempts to access bytes other than the first one
end up in the "bad write" default logic. This bug was added
accidentally when we split out the SHPR1 logic from SHPR2 and
SHPR3 to support v6M.

Fixes: 7c9140afd594 ("nvic: Handle ARMv6-M SCS reserved registers")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
The Zephyr RTOS happens to access SHPR1 byte at a time,
which is how I spotted this.
---
 hw/intc/armv7m_nvic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

We currently have 3 different ways of computing the architectural
value of "PC" as seen in the ARM ARM.

The value of s->pc has been incremented past the current insn,
but that is all. Thus for a32, PC = s->pc + 4; for t32, PC = s->pc;
for t16, PC = s->pc + 2. These differing computations make it
impossible at present to unify the various code paths.

With the newly introduced s->pc_curr, we can compute the correct
value for all cases, using the formula given in the ARM ARM.

This changes the behaviour for load_reg() and load_reg_var()
when called with reg==15 from a 32-bit Thumb instruction:
previously they would have returned the incorrect value
of pc_curr + 6, and now they will return the architecturally
correct value of PC, which is pc_curr + 4. This will not
affect well-behaved guest software, because all of the places
we call these functions from T32 code are instructions where
using r15 is UNPREDICTABLE. Using the architectural PC value
here is more consistent with the T16 and A32 behaviour.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-4-richard.henderson@linaro.org
[PMM: added commit message note about UNPREDICTABLE T32 cases]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 59 ++++++++++++++++--------------------------
 1 file changed, 23 insertions(+), 36 deletions(-)
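A worked example of the unification described above: an A32 insn at
0x1000 reads PC as 0x1008, while a Thumb insn at 0x1000 reads PC as
0x1004. With s->pc_curr holding the address of the current insn, the
three old computations collapse into the single helper this patch adds
to translate.c:

    /* Architectural PC per the ARM ARM: insn address + 8 for A32,
     * insn address + 4 for T32/T16.
     */
    static uint32_t read_pc(DisasContext *s)
    {
        return s->pc_curr + (s->thumb ? 4 : 8);
    }
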
diff --git a/target/arm/translate.c b/target/arm/translate.c
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
20
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/intc/armv7m_nvic.c
36
--- a/target/arm/translate.c
22
+++ b/hw/intc/armv7m_nvic.c
37
+++ b/target/arm/translate.c
23
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
38
@@ -XXX,XX +XXX,XX @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
39
#define store_cpu_field(var, name) \
40
store_cpu_offset(var, offsetof(CPUARMState, name))
41
42
+/* The architectural value of PC. */
43
+static uint32_t read_pc(DisasContext *s)
44
+{
45
+ return s->pc_curr + (s->thumb ? 4 : 8);
46
+}
47
+
48
/* Set a variable to the value of a CPU register. */
49
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
50
{
51
if (reg == 15) {
52
- uint32_t addr;
53
- /* normally, since we updated PC, we need only to add one insn */
54
- if (s->thumb)
55
- addr = (long)s->pc + 2;
56
- else
57
- addr = (long)s->pc + 4;
58
- tcg_gen_movi_i32(var, addr);
59
+ tcg_gen_movi_i32(var, read_pc(s));
60
} else {
61
tcg_gen_mov_i32(var, cpu_R[reg]);
62
}
63
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
64
/* branch link and change to thumb (blx <offset>) */
65
int32_t offset;
66
67
- val = (uint32_t)s->pc;
68
tmp = tcg_temp_new_i32();
69
- tcg_gen_movi_i32(tmp, val);
70
+ tcg_gen_movi_i32(tmp, s->pc);
71
store_reg(s, 14, tmp);
72
/* Sign-extend the 24-bit offset */
73
offset = (((int32_t)insn) << 8) >> 8;
74
+ val = read_pc(s);
75
/* offset * 4 + bit24 * 2 + (thumb bit) */
76
val += (offset << 2) | ((insn >> 23) & 2) | 1;
77
- /* pipeline offset */
78
- val += 4;
79
/* protected by ARCH(5); above, near the start of uncond block */
80
gen_bx_im(s, val);
81
return;
82
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
83
} else {
84
/* store */
85
if (i == 15) {
86
- /* special case: r15 = PC + 8 */
87
- val = (long)s->pc + 4;
88
tmp = tcg_temp_new_i32();
89
- tcg_gen_movi_i32(tmp, val);
90
+ tcg_gen_movi_i32(tmp, read_pc(s));
91
} else if (user) {
92
tmp = tcg_temp_new_i32();
93
tmp2 = tcg_const_i32(i);
94
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
95
int32_t offset;
96
97
/* branch (and link) */
98
- val = (int32_t)s->pc;
99
if (insn & (1 << 24)) {
100
tmp = tcg_temp_new_i32();
101
- tcg_gen_movi_i32(tmp, val);
102
+ tcg_gen_movi_i32(tmp, s->pc);
103
store_reg(s, 14, tmp);
104
}
105
offset = sextract32(insn << 2, 0, 26);
106
- val += offset + 4;
107
- gen_jmp(s, val);
108
+ gen_jmp(s, read_pc(s) + offset);
24
}
109
}
110
break;
111
case 0xc:
112
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
113
tcg_temp_free_i32(addr);
114
} else if ((insn & (7 << 5)) == 0) {
115
/* Table Branch. */
116
- if (rn == 15) {
117
- addr = tcg_temp_new_i32();
118
- tcg_gen_movi_i32(addr, s->pc);
119
- } else {
120
- addr = load_reg(s, rn);
121
- }
122
+ addr = load_reg(s, rn);
123
tmp = load_reg(s, rm);
124
tcg_gen_add_i32(addr, addr, tmp);
125
if (insn & (1 << 4)) {
126
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
127
}
128
tcg_temp_free_i32(addr);
129
tcg_gen_shli_i32(tmp, tmp, 1);
130
- tcg_gen_addi_i32(tmp, tmp, s->pc);
131
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
132
store_reg(s, 15, tmp);
133
} else {
134
bool is_lasr = false;
135
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
136
tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
137
}
138
139
- offset += s->pc;
140
+ offset += read_pc(s);
141
if (insn & (1 << 12)) {
142
/* b/bl */
143
gen_jmp(s, offset);
144
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
145
offset |= (insn & (1 << 11)) << 8;
146
147
/* jump to the offset */
148
- gen_jmp(s, s->pc + offset);
149
+ gen_jmp(s, read_pc(s) + offset);
150
}
151
} else {
152
/*
153
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
154
if (insn & (1 << 11)) {
155
rd = (insn >> 8) & 7;
156
/* load pc-relative. Bit 1 of PC is ignored. */
157
- val = s->pc + 2 + ((insn & 0xff) * 4);
158
+ val = read_pc(s) + ((insn & 0xff) * 4);
159
val &= ~(uint32_t)2;
160
addr = tcg_temp_new_i32();
161
tcg_gen_movi_i32(addr, val);
162
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
163
} else {
164
/* PC. bit 1 is ignored. */
165
tmp = tcg_temp_new_i32();
166
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
167
+ tcg_gen_movi_i32(tmp, read_pc(s) & ~(uint32_t)2);
168
}
169
val = (insn & 0xff) * 4;
170
tcg_gen_addi_i32(tmp, tmp, val);
171
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
172
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
173
tcg_temp_free_i32(tmp);
174
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
175
- val = (uint32_t)s->pc + 2;
176
- val += offset;
177
- gen_jmp(s, val);
178
+ gen_jmp(s, read_pc(s) + offset);
179
break;
180
181
case 15: /* IT, nop-hint. */
182
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
183
arm_skip_unless(s, cond);
184
185
/* jump to the offset */
186
- val = (uint32_t)s->pc + 2;
187
+ val = read_pc(s);
188
offset = ((int32_t)insn << 24) >> 24;
189
val += offset << 1;
190
gen_jmp(s, val);
191
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
192
break;
193
}
194
/* unconditional branch */
195
- val = (uint32_t)s->pc;
196
+ val = read_pc(s);
197
offset = ((int32_t)insn << 21) >> 21;
198
- val += (offset << 1) + 2;
199
+ val += offset << 1;
200
gen_jmp(s, val);
201
break;
202
203
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
204
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
205
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
206
207
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
208
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
25
}
209
}
26
break;
210
break;
27
- case 0xd18: /* System Handler Priority (SHPR1) */
211
}
28
+ case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
29
if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
30
val = 0;
31
break;
32
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
33
}
34
nvic_irq_update(s);
35
return MEMTX_OK;
36
- case 0xd18: /* System Handler Priority (SHPR1) */
37
+ case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
38
if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
39
return MEMTX_OK;
40
}
41
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Given that we mask bits properly on set, there is no reason
to mask them again on get. We failed to clear the exception
status bits, 0x9f, which means that the wrong value would be
returned on get, except in the (probably normal) case in which
the set clears all of the bits.

Simplify the code in set to also clear the RES0 bits.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190209033847.9014-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Provide a common routine for the places that require ALIGN(PC, 4)
as the base address as opposed to plain PC. The two are always
the same for A32, but the difference is meaningful for thumb mode.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-vfp.inc.c | 38 ++------
 target/arm/translate.c | 166 +++++++++++++++------------------
 2 files changed, 82 insertions(+), 122 deletions(-)
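Why ALIGN(PC, 4) differs from PC in Thumb mode, with illustrative
numbers: a T32 literal load at 0x1002 sees PC = 0x1002 + 4 = 0x1006,
which is not word aligned, but the base the architecture specifies for
the literal is ALIGN(PC, 4) = 0x1004. In A32 the insn address is
already word aligned, so PC (insn + 8) is too and the alignment is a
no-op. In terms of the helpers this series adds (a sketch, not the
verbatim implementation):

    /* Word-aligned base for LDR (literal), ADR, etc. */
    uint32_t base = read_pc(s) & ~3u;    /* cf. add_reg_for_lit() below */
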
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
17
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper.c
19
--- a/target/arm/translate-vfp.inc.c
22
+++ b/target/arm/helper.c
20
+++ b/target/arm/translate-vfp.inc.c
23
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
21
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
24
int i;
22
offset = -offset;
25
uint32_t fpscr;
23
}
26
24
27
- fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
25
- if (s->thumb && a->rn == 15) {
28
+ fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
26
- /* This is actually UNPREDICTABLE */
29
| (env->vfp.vec_len << 16)
27
- addr = tcg_temp_new_i32();
30
| (env->vfp.vec_stride << 20);
28
- tcg_gen_movi_i32(addr, s->pc & ~2);
31
29
- } else {
32
@@ -XXX,XX +XXX,XX @@ static inline int vfp_exceptbits_to_host(int target_bits)
30
- addr = load_reg(s, a->rn);
33
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
31
- }
34
{
32
- tcg_gen_addi_i32(addr, addr, offset);
35
int i;
33
+ /* For thumb, use of PC is UNPREDICTABLE. */
36
- uint32_t changed;
34
+ addr = add_reg_for_lit(s, a->rn, offset);
37
+ uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
35
tmp = tcg_temp_new_i32();
38
36
if (a->l) {
39
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
37
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
40
if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
41
@@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
39
offset = -offset;
42
40
}
43
/*
41
44
* We don't implement trapped exception handling, so the
42
- if (s->thumb && a->rn == 15) {
45
- * trap enable bits are all RAZ/WI (not RES0!)
43
- /* This is actually UNPREDICTABLE */
46
+ * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
44
- addr = tcg_temp_new_i32();
47
+ *
45
- tcg_gen_movi_i32(addr, s->pc & ~2);
48
+ * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
46
- } else {
49
+ * (which are stored in fp_status), and the other RES0 bits
47
- addr = load_reg(s, a->rn);
50
+ * in between, then we clear all of the low 16 bits.
48
- }
51
*/
49
- tcg_gen_addi_i32(addr, addr, offset);
52
- val &= ~(FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE);
50
+ /* For thumb, use of PC is UNPREDICTABLE. */
53
-
51
+ addr = add_reg_for_lit(s, a->rn, offset);
54
- changed = env->vfp.xregs[ARM_VFP_FPSCR];
52
tmp = tcg_temp_new_i64();
55
- env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
53
if (a->l) {
56
+ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xffc80000;
54
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
57
env->vfp.vec_len = (val >> 16) & 7;
55
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
58
env->vfp.vec_stride = (val >> 20) & 3;
56
return true;
57
}
58
59
- if (s->thumb && a->rn == 15) {
60
- /* This is actually UNPREDICTABLE */
61
- addr = tcg_temp_new_i32();
62
- tcg_gen_movi_i32(addr, s->pc & ~2);
63
- } else {
64
- addr = load_reg(s, a->rn);
65
- }
66
+ /* For thumb, use of PC is UNPREDICTABLE. */
67
+ addr = add_reg_for_lit(s, a->rn, 0);
68
if (a->p) {
69
/* pre-decrement */
70
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
71
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
72
return true;
73
}
74
75
- if (s->thumb && a->rn == 15) {
76
- /* This is actually UNPREDICTABLE */
77
- addr = tcg_temp_new_i32();
78
- tcg_gen_movi_i32(addr, s->pc & ~2);
79
- } else {
80
- addr = load_reg(s, a->rn);
81
- }
82
+ /* For thumb, use of PC is UNPREDICTABLE. */
83
+ addr = add_reg_for_lit(s, a->rn, 0);
84
if (a->p) {
85
/* pre-decrement */
86
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
87
diff --git a/target/arm/translate.c b/target/arm/translate.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/translate.c
90
+++ b/target/arm/translate.c
91
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
92
return tmp;
93
}
94
95
+/*
96
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
97
+ * This is used for load/store for which use of PC implies (literal),
98
+ * or ADD that implies ADR.
99
+ */
100
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
101
+{
102
+ TCGv_i32 tmp = tcg_temp_new_i32();
103
+
104
+ if (reg == 15) {
105
+ tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
106
+ } else {
107
+ tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
108
+ }
109
+ return tmp;
110
+}
111
+
112
/* Set a CPU register. The source must be a temporary and will be
113
marked as dead. */
114
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
115
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
116
*/
117
bool wback = extract32(insn, 21, 1);
118
119
- if (rn == 15) {
120
- if (insn & (1 << 21)) {
121
- /* UNPREDICTABLE */
122
- goto illegal_op;
123
- }
124
- addr = tcg_temp_new_i32();
125
- tcg_gen_movi_i32(addr, s->pc & ~3);
126
- } else {
127
- addr = load_reg(s, rn);
128
+ if (rn == 15 && (insn & (1 << 21))) {
129
+ /* UNPREDICTABLE */
130
+ goto illegal_op;
131
}
132
+
133
+ addr = add_reg_for_lit(s, rn, 0);
134
offset = (insn & 0xff) * 4;
135
if ((insn & (1 << 23)) == 0) {
136
offset = -offset;
137
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
138
store_reg(s, rd, tmp);
139
} else {
140
/* Add/sub 12-bit immediate. */
141
- if (rn == 15) {
142
- offset = s->pc & ~(uint32_t)3;
143
- if (insn & (1 << 23))
144
- offset -= imm;
145
- else
146
- offset += imm;
147
- tmp = tcg_temp_new_i32();
148
- tcg_gen_movi_i32(tmp, offset);
149
- store_reg(s, rd, tmp);
150
+ if (insn & (1 << 23)) {
151
+ imm = -imm;
152
+ }
153
+ tmp = add_reg_for_lit(s, rn, imm);
154
+ if (rn == 13 && rd == 13) {
155
+ /* ADD SP, SP, imm or SUB SP, SP, imm */
156
+ store_sp_checked(s, tmp);
157
} else {
158
- tmp = load_reg(s, rn);
159
- if (insn & (1 << 23))
160
- tcg_gen_subi_i32(tmp, tmp, imm);
161
- else
162
- tcg_gen_addi_i32(tmp, tmp, imm);
163
- if (rn == 13 && rd == 13) {
164
- /* ADD SP, SP, imm or SUB SP, SP, imm */
165
- store_sp_checked(s, tmp);
166
- } else {
167
- store_reg(s, rd, tmp);
168
- }
169
+ store_reg(s, rd, tmp);
170
}
171
}
172
}
173
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
174
}
175
}
176
memidx = get_mem_index(s);
177
- if (rn == 15) {
178
- addr = tcg_temp_new_i32();
179
- /* PC relative. */
180
- /* s->pc has already been incremented by 4. */
181
- imm = s->pc & 0xfffffffc;
182
- if (insn & (1 << 23))
183
- imm += insn & 0xfff;
184
- else
185
- imm -= insn & 0xfff;
186
- tcg_gen_movi_i32(addr, imm);
187
+ imm = insn & 0xfff;
188
+ if (insn & (1 << 23)) {
189
+ /* PC relative or Positive offset. */
190
+ addr = add_reg_for_lit(s, rn, imm);
191
+ } else if (rn == 15) {
192
+ /* PC relative with negative offset. */
193
+ addr = add_reg_for_lit(s, rn, -imm);
194
} else {
195
addr = load_reg(s, rn);
196
- if (insn & (1 << 23)) {
197
- /* Positive offset. */
198
- imm = insn & 0xfff;
199
- tcg_gen_addi_i32(addr, addr, imm);
200
- } else {
201
- imm = insn & 0xff;
202
- switch ((insn >> 8) & 0xf) {
203
- case 0x0: /* Shifted Register. */
204
- shift = (insn >> 4) & 0xf;
205
- if (shift > 3) {
206
- tcg_temp_free_i32(addr);
207
- goto illegal_op;
208
- }
209
- tmp = load_reg(s, rm);
210
- if (shift)
211
- tcg_gen_shli_i32(tmp, tmp, shift);
212
- tcg_gen_add_i32(addr, addr, tmp);
213
- tcg_temp_free_i32(tmp);
214
- break;
215
- case 0xc: /* Negative offset. */
216
- tcg_gen_addi_i32(addr, addr, -imm);
217
- break;
218
- case 0xe: /* User privilege. */
219
- tcg_gen_addi_i32(addr, addr, imm);
220
- memidx = get_a32_user_mem_index(s);
221
- break;
222
- case 0x9: /* Post-decrement. */
223
- imm = -imm;
224
- /* Fall through. */
225
- case 0xb: /* Post-increment. */
226
- postinc = 1;
227
- writeback = 1;
228
- break;
229
- case 0xd: /* Pre-decrement. */
230
- imm = -imm;
231
- /* Fall through. */
232
- case 0xf: /* Pre-increment. */
233
- writeback = 1;
234
- break;
235
- default:
236
+ imm = insn & 0xff;
237
+ switch ((insn >> 8) & 0xf) {
238
+ case 0x0: /* Shifted Register. */
239
+ shift = (insn >> 4) & 0xf;
240
+ if (shift > 3) {
241
tcg_temp_free_i32(addr);
242
goto illegal_op;
243
}
244
+ tmp = load_reg(s, rm);
245
+ if (shift) {
246
+ tcg_gen_shli_i32(tmp, tmp, shift);
247
+ }
248
+ tcg_gen_add_i32(addr, addr, tmp);
249
+ tcg_temp_free_i32(tmp);
250
+ break;
251
+ case 0xc: /* Negative offset. */
252
+ tcg_gen_addi_i32(addr, addr, -imm);
253
+ break;
254
+ case 0xe: /* User privilege. */
255
+ tcg_gen_addi_i32(addr, addr, imm);
256
+ memidx = get_a32_user_mem_index(s);
257
+ break;
258
+ case 0x9: /* Post-decrement. */
259
+ imm = -imm;
260
+ /* Fall through. */
261
+ case 0xb: /* Post-increment. */
262
+ postinc = 1;
263
+ writeback = 1;
264
+ break;
265
+ case 0xd: /* Pre-decrement. */
266
+ imm = -imm;
267
+ /* Fall through. */
268
+ case 0xf: /* Pre-increment. */
269
+ writeback = 1;
270
+ break;
271
+ default:
272
+ tcg_temp_free_i32(addr);
273
+ goto illegal_op;
274
}
275
}
276
277
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
278
if (insn & (1 << 11)) {
279
rd = (insn >> 8) & 7;
280
/* load pc-relative. Bit 1 of PC is ignored. */
281
- val = read_pc(s) + ((insn & 0xff) * 4);
282
- val &= ~(uint32_t)2;
283
- addr = tcg_temp_new_i32();
284
- tcg_gen_movi_i32(addr, val);
285
+ addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
286
tmp = tcg_temp_new_i32();
287
gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
288
rd | ISSIs16Bit);
289
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
290
* - Add PC/SP (immediate)
291
*/
292
rd = (insn >> 8) & 7;
293
- if (insn & (1 << 11)) {
294
- /* SP */
295
- tmp = load_reg(s, 13);
296
- } else {
297
- /* PC. bit 1 is ignored. */
298
- tmp = tcg_temp_new_i32();
299
- tcg_gen_movi_i32(tmp, read_pc(s) & ~(uint32_t)2);
300
- }
301
val = (insn & 0xff) * 4;
302
- tcg_gen_addi_i32(tmp, tmp, val);
303
+ tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
304
store_reg(s, rd, tmp);
305
break;
59
306
60
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190209033847.9014-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

The thumb bit has already been removed from s->pc, and is always even.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
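The invariant behind the second patch, spelled out (a sketch; the
assertion is illustrative and not part of the patch):

    /* s->pc is a code address with the Thumb bit already stripped,
     * so bit 0 is always clear and "s->pc & ~1" equals "s->pc".
     */
    assert((s->pc & 1) == 0);
    gen_goto_tb(s, 0, s->pc);    /* same destination as s->pc & ~1 */
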
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
16
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
17
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
18
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
16
tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
19
/* Force a TB lookup after an instruction that changes the CPU state. */
17
rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
20
static inline void gen_lookup_tb(DisasContext *s)
18
return 0;
21
{
19
+
22
- tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
20
+ case NEON_3R_VMAX:
23
+ tcg_gen_movi_i32(cpu_R[15], s->pc);
21
+ if (u) {
24
s->base.is_jmp = DISAS_EXIT;
22
+ tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
25
}
23
+ vec_size, vec_size);
26
24
+ } else {
27
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
25
+ tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
28
* self-modifying code correctly and also to take
26
+ vec_size, vec_size);
29
* any pending interrupts immediately.
27
+ }
30
*/
28
+ return 0;
31
- gen_goto_tb(s, 0, s->pc & ~1);
29
+ case NEON_3R_VMIN:
32
+ gen_goto_tb(s, 0, s->pc);
30
+ if (u) {
33
return;
31
+ tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
34
case 7: /* sb */
32
+ vec_size, vec_size);
35
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
33
+ } else {
36
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
34
+ tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
37
* for TCG; MB and end the TB instead.
35
+ vec_size, vec_size);
38
*/
36
+ }
39
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
37
+ return 0;
40
- gen_goto_tb(s, 0, s->pc & ~1);
38
}
41
+ gen_goto_tb(s, 0, s->pc);
39
42
return;
40
if (size == 3) {
43
default:
41
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
44
goto illegal_op;
42
case NEON_3R_VQRSHL:
45
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
43
GEN_NEON_INTEGER_OP_ENV(qrshl);
46
* and also to take any pending interrupts
44
break;
47
* immediately.
45
- case NEON_3R_VMAX:
48
*/
46
- GEN_NEON_INTEGER_OP(max);
49
- gen_goto_tb(s, 0, s->pc & ~1);
47
- break;
50
+ gen_goto_tb(s, 0, s->pc);
48
- case NEON_3R_VMIN:
51
break;
49
- GEN_NEON_INTEGER_OP(min);
52
case 7: /* sb */
50
- break;
53
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
51
case NEON_3R_VABD:
54
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
52
GEN_NEON_INTEGER_OP(abd);
55
* for TCG; MB and end the TB instead.
53
break;
56
*/
57
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
58
- gen_goto_tb(s, 0, s->pc & ~1);
59
+ gen_goto_tb(s, 0, s->pc);
60
break;
61
default:
62
goto illegal_op;
54
--
2.20.1

Peter Crosthwaite hasn't had the bandwidth to do code review or
other QEMU work for some time now -- remove his email address
from MAINTAINERS file entries so we don't bombard him with
patch emails.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20190207181422.4907-1-peter.maydell@linaro.org
---
 MAINTAINERS | 4 ----
 1 file changed, 4 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

We must update s->base.pc_next when we return from the translate_insn
hook to the main translator loop. By incrementing s->base.pc_next
immediately after reading the insn word, "pc_next" contains the address
of the next instruction throughout translation.

All remaining uses of s->pc are referencing the address of the next insn,
so this is now a simple global replacement. Remove the "s->pc" field.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h | 1 -
 target/arm/translate-a64.c | 51 +++++++++---------
 target/arm/translate.c | 103 ++++++++++++++++++-------------------
 3 files changed, 72 insertions(+), 83 deletions(-)
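The pc_next discipline described above looks like this at the top of
the a64 decode step (taken from the change below; the a32/t32 paths
follow the same pattern with a 2- or 4-byte step):

    s->pc_curr = s->base.pc_next;              /* address of this insn */
    insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next += 4;                      /* now: next insn */
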
diff --git a/MAINTAINERS b/MAINTAINERS
22
diff --git a/target/arm/translate.h b/target/arm/translate.h
13
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
14
--- a/MAINTAINERS
24
--- a/target/arm/translate.h
15
+++ b/MAINTAINERS
25
+++ b/target/arm/translate.h
16
@@ -XXX,XX +XXX,XX @@ Guest CPU cores (TCG):
26
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
DisasContextBase base;
const ARMISARegisters *isar;

- target_ulong pc;
/* The address of the current instruction being translated. */
target_ulong pc_curr;
target_ulong page_start;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(s->base.pc_next - offset);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
static void gen_exception_insn(DisasContext *s, int offset, int excp,
uint32_t syndrome, uint32_t target_el)
{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(s->base.pc_next - offset);
gen_exception(excp, syndrome, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset,
{
TCGv_i32 tcg_syn;

- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(s->base.pc_next - offset);
tcg_syn = tcg_const_i32(syndrome);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)

if (insn & (1U << 31)) {
/* BL Branch with link */
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}

/* B Branch / BL Branch with link */
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);

- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);
tcg_temp_free_i64(tcg_cmp);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
/* genuinely conditional branches */
TCGLabel *label_match = gen_new_label();
arm_gen_test_cc(cond, label_match);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
} else {
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
* any pending interrupts immediately.
*/
reset_btype(s);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;

case 7: /* SB */
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
* MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;

default:
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
gen_a64_set_pc(s, dst);
/* BLR also needs to load return address */
if (opc == 1) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;

@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
gen_a64_set_pc(s, dst);
/* BLRAA also needs to load return address */
if (opc == 9) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;

@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
uint32_t insn;

- s->pc_curr = s->pc;
- insn = arm_ldl_code(env, s->pc, s->sctlr_b);
+ s->pc_curr = s->base.pc_next;
+ insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
s->insn = insn;
- s->pc += 4;
+ s->base.pc_next += 4;

s->fp_access_checked = false;

@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
int bound, core_mmu_idx;

dc->isar = &arm_cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;

dc->aarch64 = 1;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);

- tcg_gen_insn_start(dc->pc, 0, 0);
+ tcg_gen_insn_start(dc->base.pc_next, 0, 0);
dc->insn_start = tcg_last_op();
}

@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
DisasContext *dc = container_of(dcbase, DisasContext, base);

if (bp->flags & BP_CPU) {
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
to for it to be properly cleared -- thus we
increment the PC here so that the logic setting
tb->size below does the right thing. */
- dc->pc += 4;
+ dc->base.pc_next += 4;
dc->base.is_jmp = DISAS_NORETURN;
}

@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
disas_a64_insn(env, dc);
}

- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}

@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
*/
switch (dc->base.is_jmp) {
default:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
case DISAS_JUMP:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
default:
case DISAS_UPDATE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
case DISAS_SWI:
break;
case DISAS_WFE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfe(cpu_env);
break;
case DISAS_YIELD:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_yield(cpu_env);
break;
case DISAS_WFI:
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
*/
TCGv_i32 tmp = tcg_const_i32(4);

- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfi(cpu_env, tmp);
tcg_temp_free_i32(tmp);
/* The helper doesn't necessarily throw an exception, but we
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
}
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_blxns(DisasContext *s, int rm)
* We do however need to set the PC, because the blxns helper reads it.
* The blxns helper may throw an exception.
*/
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
gen_helper_v7m_blxns(cpu_env, var);
tcg_temp_free_i32(var);
s->base.is_jmp = DISAS_EXIT;
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
* for single stepping.)
*/
s->svc_imm = imm16;
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_HVC;
}

@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
tmp = tcg_const_i32(syn_aa32_smc());
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->base.pc_next - offset);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
int syn, uint32_t target_el)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->base.pc_next - offset);
gen_exception(excp, syn, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
TCGv_i32 tcg_syn;

gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->base.pc_next - offset);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_R[15], s->pc);
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
s->base.is_jmp = DISAS_EXIT;
}

@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
- ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+ ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
@@ -XXX,XX +XXX,XX @@ static void gen_nop_hint(DisasContext *s, int val)
*/
case 1: /* yield */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFE;
}
break;
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
if (isread) {
return 1;
}
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
return 0;
default:
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* self-modifying code correctly and also to take
* any pending interrupts immediately.
*/
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
int32_t offset;

tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, s->pc);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
offset = (((int32_t)insn) << 8) >> 8;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* branch link/exchange thumb (blx) */
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* branch (and link) */
if (insn & (1 << 24)) {
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, s->pc);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
}
offset = sextract32(insn << 2, 0, 26);
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
break;
case 0xf:
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 24);
s->base.is_jmp = DISAS_SWI;
break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)

if (insn & (1 << 14)) {
/* Branch and link. */
- tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
}

offset += read_pc(s);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* and also to take any pending interrupts
* immediately.
*/
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
default:
goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
/* BLX/BX */
tmp = load_reg(s, rm);
if (link) {
- val = (uint32_t)s->pc | 1;
+ val = (uint32_t)s->base.pc_next | 1;
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
store_reg(s, 14, tmp2);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)

if (cond == 0xf) {
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 8);
s->base.is_jmp = DISAS_SWI;
break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(tmp, tmp, offset);

tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
} else {
@@ -XXX,XX +XXX,XX @@ undef:

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
- /* Return true if the insn at dc->pc might cross a page boundary.
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
* (False positives are OK, false negatives are not.)
* We know this is a Thumb insn, and our caller ensures we are
- * only called if dc->pc is less than 4 bytes from the page
+ * only called if dc->base.pc_next is less than 4 bytes from the page
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

- return !thumb_insn_is_16bit(s, s->pc, insn);
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
uint32_t condexec, core_mmu_idx;

dc->isar = &cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;

dc->aarch64 = 0;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);

- tcg_gen_insn_start(dc->pc,
+ tcg_gen_insn_start(dc->base.pc_next,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
dc->insn_start = tcg_last_op();
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,

if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
tb->size below does the right thing. */
/* TODO: Advance PC by correct instruction length to
* avoid disassembler error messages */
- dc->pc += 2;
+ dc->base.pc_next += 2;
dc->base.is_jmp = DISAS_NORETURN;
}

@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
+ if (dc->base.pc_next >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_KERNEL_TRAP);
@@ -XXX,XX +XXX,XX @@ static void arm_post_translate_insn(DisasContext *dc)
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}

@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
return;
}

- dc->pc_curr = dc->pc;
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
- dc->pc += 4;
+ dc->base.pc_next += 4;
disas_arm_insn(dc, insn);

arm_post_translate_insn(dc);
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
return;
}

- dc->pc_curr = dc->pc;
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
- is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
- dc->pc += 2;
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
+ dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

insn = insn << 16 | insn2;
- dc->pc += 2;
+ dc->base.pc_next += 2;
}
dc->insn = insn;

@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* but isn't very efficient).
*/
if (dc->base.is_jmp == DISAS_NEXT
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
&& insn_crosses_page(env, dc)))) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
case DISAS_NEXT:
case DISAS_TOO_MANY:
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
switch(dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
case DISAS_JUMP:
gen_goto_ptr();
break;
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* indicate that the hash table must be used to find the next TB */
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
gen_set_label(dc->condlabel);
gen_set_condexec(dc);
if (unlikely(is_singlestepping(dc))) {
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_singlestep_exception(dc);
} else {
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
--
2.20.1
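
The whole of the patch above is mechanical once the idea is clear: the target-specific dc->pc shadowed the dc->base.pc_next that the generic translator loop already maintains, and the two had to be re-synced by hand after every instruction. A minimal sketch of the before/after pattern, using simplified stand-in types and a hypothetical fetch helper rather than QEMU's real definitions:

    #include <stdint.h>

    typedef struct DisasContextBase {
        uint64_t pc_first;  /* PC of the first insn in the TB */
        uint64_t pc_next;   /* PC of the next insn to translate */
    } DisasContextBase;

    typedef struct DisasContext {
        DisasContextBase base;
        uint64_t pc_curr;   /* PC of the insn currently being translated */
        /* Before the patch an extra "uint64_t pc;" shadow lived here and
         * had to be copied back into base.pc_next by hand. */
    } DisasContext;

    /* Hypothetical stand-in for arm_ldl_code(). */
    static uint32_t fetch32(uint64_t addr)
    {
        (void)addr;
        return 0;
    }

    static void translate_insn(DisasContext *dc)
    {
        /* After the patch the next-PC lives only in the shared base... */
        dc->pc_curr = dc->base.pc_next;
        uint32_t insn = fetch32(dc->base.pc_next);
        dc->base.pc_next += 4;
        (void)insn;
        /* ...so the old trailing fixup "dc->base.pc_next = dc->pc;"
         * simply disappears. */
    }
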
From: Richard Henderson <richard.henderson@linaro.org>

The offset is variable depending on the instruction set, whereas
we have stored values for the current pc and the next pc. Passing
in the actual value is clearer in intent.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 25 ++++++++++++++-----------
target/arm/translate-vfp.inc.c | 6 +++---
target/arm/translate.c | 31 ++++++++++++++++---------------
3 files changed, 33 insertions(+), 29 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
s->base.is_jmp = DISAS_NORETURN;
}

-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
uint32_t syndrome, uint32_t target_el)
{
- gen_a64_set_pc_im(s->base.pc_next - offset);
+ gen_a64_set_pc_im(pc);
gen_exception(excp, syndrome, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
void unallocated_encoding(DisasContext *s)
{
/* Unallocated and reserved encodings are uncategorized */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}

@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
return true;
}

- gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
- s->fp_excp_el);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return false;
}

@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
bool sve_access_check(DisasContext *s)
{
if (s->sve_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
s->sve_excp_el);
return false;
}
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
switch (op2_ll) {
case 1: /* SVC */
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
- default_exception_el(s));
+ gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
+ syn_aa64_svc(imm16), default_exception_el(s));
break;
case 2: /* HVC */
if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
gen_a64_set_pc_im(s->pc_curr);
gen_helper_pre_hvc(cpu_env);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
+ gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
+ syn_aa64_hvc(imm16), 2);
break;
case 3: /* SMC */
if (s->current_el == 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
+ gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
+ syn_aa64_smc(imm16), 3);
break;
default:
unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
if (s->btype != 0
&& s->guarded_page
&& !btype_destination_ok(insn, s->bt, s->btype)) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_btitrap(s->btype),
default_exception_el(s));
return;
}
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
if (s->fp_excp_el) {
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
s->fp_excp_el);
} else {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_fp_access_trap(1, 0xe, false),
s->fp_excp_el);
}
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)

if (!s->vfp_enabled && !ignore_vfp_enabled) {
assert(!arm_dc_feature(s, ARM_FEATURE_M));
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
return false;
}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
s->base.is_jmp = DISAS_NORETURN;
}

-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
int syn, uint32_t target_el)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->base.pc_next - offset);
+ gen_set_pc_im(s, pc);
gen_exception(excp, syn, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
return;
}

- gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}

@@ -XXX,XX +XXX,XX @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,

undef:
/* If we get here then some access check did not pass */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_uncategorized(), exc_target);
return false;
}

@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
}

if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
off_rm = vfp_reg_offset(0, rm);
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
* For the UNPREDICTABLE cases we choose to UNDEF.
*/
if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
return;
}

@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
}

if (undef) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
return;
}
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* UsageFault exception.
*/
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
default_exception_el(s));
return;
}
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
break;
}
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}

/* All other insns: NOCP */
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
break;
}
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
return;
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}
--
2.20.1
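
The shape of the interface change above is easiest to see as a before/after pair. A hedged sketch of the calling convention only, with simplified signatures (the real functions also emit the exception itself and set is_jmp):

    #include <stdint.h>

    /* Stand-in for the code generator's "write the PC back" step. */
    static void set_pc(uint64_t pc)
    {
        (void)pc;
    }

    /* Before: callers passed an offset back from the next PC, and had
     * to know whether the faulting insn was 2 or 4 bytes (T32 vs
     * A32/A64). */
    static void gen_exception_old(uint64_t pc_next, int offset)
    {
        set_pc(pc_next - offset);
    }

    /* After: callers pass the PC they actually mean, pc_curr for a
     * fault on the current insn, or pc_next for SVC/HVC/SMC, which
     * resume after it. */
    static void gen_exception_new(uint64_t pc)
    {
        set_pc(pc);
    }
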
From: Richard Henderson <richard.henderson@linaro.org>

The offset is variable depending on the instruction set.
Passing in the actual value is clearer in intent.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 8 ++++----
target/arm/translate.c | 8 ++++----
2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
tcg_temp_free_i32(tcg_excp);
}

-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
- gen_a64_set_pc_im(s->base.pc_next - offset);
+ gen_a64_set_pc_im(pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
#endif
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
} else {
unsupported_encoding(s, insn);
}
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
to for it to be properly cleared -- thus we
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
s->base.is_jmp = DISAS_SMC;
}

-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->base.pc_next - offset);
+ gen_set_pc_im(s, pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
s->current_el != 0 &&
#endif
(imm == (s->thumb ? 0x3c : 0xf000))) {
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
return;
}

@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
to for it to be properly cleared -- thus we
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

Unlike the other more generic gen_exception{,_internal}_insn
interfaces, breakpoints always refer to the current instruction.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 7 +++----
target/arm/translate.c | 8 ++++----
2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
s->base.is_jmp = DISAS_NORETURN;
}

-static void gen_exception_bkpt_insn(DisasContext *s, int offset,
- uint32_t syndrome)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
TCGv_i32 tcg_syn;

- gen_a64_set_pc_im(s->base.pc_next - offset);
+ gen_a64_set_pc_im(s->pc_curr);
tcg_syn = tcg_const_i32(syndrome);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* BRK */
- gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
break;
case 2:
if (op2_ll != 0) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
s->base.is_jmp = DISAS_NORETURN;
}

-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
TCGv_i32 tcg_syn;

gen_set_condexec(s);
- gen_set_pc_im(s, s->base.pc_next - offset);
+ gen_set_pc_im(s, s->pc_curr);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 1:
/* bkpt */
ARCH(5);
- gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
break;
case 2:
/* Hypervisor call (v7) */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
break;
}

--
2.20.1
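
Because a BKPT/BRK exception always reports the instruction itself, the helper needs no caller-supplied PC or offset at all; it can read pc_curr from the context. A minimal sketch under simplified stand-in types, not QEMU's real API:

    #include <stdint.h>

    typedef struct DisasContext {
        uint64_t pc_curr;   /* insn being translated */
        uint64_t pc_next;   /* following insn */
    } DisasContext;

    static void set_pc(uint64_t pc)
    {
        (void)pc;
    }

    /* A breakpoint exception is pinned to the current instruction by
     * definition, so no argument can ever disagree with it. */
    static void gen_exception_bkpt(DisasContext *s)
    {
        set_pc(s->pc_curr);   /* always the current insn, never pc_next */
        /* ... raise the debug exception ... */
    }
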
From: Richard Henderson <richard.henderson@linaro.org>

Promote this function from aarch64 to fully general use.
Use it to unify the code sequences for generating illegal
opcode exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190807045335.1361-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.h | 2 --
target/arm/translate.h | 2 ++
target/arm/translate-a64.c | 7 -------
target/arm/translate-vfp.inc.c | 3 +--
target/arm/translate.c | 22 ++++++++++++----------
5 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@
#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H

-void unallocated_encoding(DisasContext *s);
-
#define unsupported_encoding(s, insn) \
do { \
qemu_log_mask(LOG_UNIMP, \
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasCompare {
bool value_global;
} DisasCompare;

+void unallocated_encoding(DisasContext *s);
+
/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
}
}

-void unallocated_encoding(DisasContext *s)
-{
- /* Unallocated and reserved encodings are uncategorized */
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
-}
-
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)

if (!s->vfp_enabled && !ignore_vfp_enabled) {
assert(!arm_dc_feature(s, ARM_FEATURE_M));
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return false;
}

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
s->base.is_jmp = DISAS_NORETURN;
}

+void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
return;
}

- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
}

if (undef) {
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return;
}

@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
break;
default:
illegal_op:
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
break;
}
}
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
return;
illegal_op:
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
return;
illegal_op:
undef:
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
--
2.20.1
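
The unification above is small but worth making concrete: one shared helper replaces the open-coded UDEF sequences that the A32, T32 and A64 decoders each carried. A hedged sketch with stand-in names (the reserved-pattern test below is hypothetical, purely for illustration):

    #include <stdint.h>

    typedef struct DisasContext {
        uint64_t pc_curr;
    } DisasContext;

    /* Stand-in for gen_exception_insn(s, pc, EXCP_UDEF, syndrome, el). */
    static void raise_udef_at(uint64_t pc)
    {
        (void)pc;
    }

    /* The single shared entry point, usable from any decoder. */
    static void unallocated_encoding(DisasContext *s)
    {
        raise_udef_at(s->pc_curr);
    }

    static void decode_example(DisasContext *s, uint32_t insn)
    {
        if ((insn & 0xf0000000) == 0xf0000000) { /* hypothetical reserved pattern */
            unallocated_encoding(s);
        }
    }
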
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
These are now unused.
3
Replace x = double_saturate(y) with x = add_saturate(y, y).
4
There is no need for a separate more specialized helper.
4
5
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20190209033847.9014-6-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20190807045335.1361-12-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
11
---
10
target/arm/helper.h | 12 ------------
12
target/arm/helper.h | 1 -
11
target/arm/neon_helper.c | 12 ------------
13
target/arm/op_helper.c | 15 ---------------
12
2 files changed, 24 deletions(-)
14
target/arm/translate.c | 4 ++--
15
3 files changed, 2 insertions(+), 18 deletions(-)
13
16
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
17
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
19
--- a/target/arm/helper.h
17
+++ b/target/arm/helper.h
20
+++ b/target/arm/helper.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(neon_cge_s16, i32, i32, i32)
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
19
DEF_HELPER_2(neon_cge_u32, i32, i32, i32)
22
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
20
DEF_HELPER_2(neon_cge_s32, i32, i32, i32)
23
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
21
24
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
22
-DEF_HELPER_2(neon_min_u8, i32, i32, i32)
25
-DEF_HELPER_2(double_saturate, i32, env, s32)
23
-DEF_HELPER_2(neon_min_s8, i32, i32, i32)
26
DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
24
-DEF_HELPER_2(neon_min_u16, i32, i32, i32)
27
DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
25
-DEF_HELPER_2(neon_min_s16, i32, i32, i32)
28
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
26
-DEF_HELPER_2(neon_min_u32, i32, i32, i32)
29
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
27
-DEF_HELPER_2(neon_min_s32, i32, i32, i32)
28
-DEF_HELPER_2(neon_max_u8, i32, i32, i32)
29
-DEF_HELPER_2(neon_max_s8, i32, i32, i32)
30
-DEF_HELPER_2(neon_max_u16, i32, i32, i32)
31
-DEF_HELPER_2(neon_max_s16, i32, i32, i32)
32
-DEF_HELPER_2(neon_max_u32, i32, i32, i32)
33
-DEF_HELPER_2(neon_max_s32, i32, i32, i32)
34
DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
35
DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
36
DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
37
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
38
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/neon_helper.c
31
--- a/target/arm/op_helper.c
40
+++ b/target/arm/neon_helper.c
32
+++ b/target/arm/op_helper.c
41
@@ -XXX,XX +XXX,XX @@ NEON_VOP(cge_u32, neon_u32, 1)
33
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
42
#undef NEON_FN
34
return res;
43
35
}
44
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
36
45
-NEON_VOP(min_s8, neon_s8, 4)
37
-uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
46
-NEON_VOP(min_u8, neon_u8, 4)
38
-{
47
-NEON_VOP(min_s16, neon_s16, 2)
39
- uint32_t res;
48
-NEON_VOP(min_u16, neon_u16, 2)
40
- if (val >= 0x40000000) {
49
-NEON_VOP(min_s32, neon_s32, 1)
41
- res = ~SIGNBIT;
50
-NEON_VOP(min_u32, neon_u32, 1)
42
- env->QF = 1;
51
NEON_POP(pmin_s8, neon_s8, 4)
43
- } else if (val <= (int32_t)0xc0000000) {
52
NEON_POP(pmin_u8, neon_u8, 4)
44
- res = SIGNBIT;
53
NEON_POP(pmin_s16, neon_s16, 2)
45
- env->QF = 1;
54
@@ -XXX,XX +XXX,XX @@ NEON_POP(pmin_u16, neon_u16, 2)
46
- } else {
55
#undef NEON_FN
47
- res = val << 1;
56
48
- }
57
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
49
- return res;
58
-NEON_VOP(max_s8, neon_s8, 4)
50
-}
59
-NEON_VOP(max_u8, neon_u8, 4)
51
-
60
-NEON_VOP(max_s16, neon_s16, 2)
52
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
61
-NEON_VOP(max_u16, neon_u16, 2)
53
{
62
-NEON_VOP(max_s32, neon_s32, 1)
54
uint32_t res = a + b;
63
-NEON_VOP(max_u32, neon_u32, 1)
55
diff --git a/target/arm/translate.c b/target/arm/translate.c
64
NEON_POP(pmax_s8, neon_s8, 4)
56
index XXXXXXX..XXXXXXX 100644
65
NEON_POP(pmax_u8, neon_u8, 4)
57
--- a/target/arm/translate.c
66
NEON_POP(pmax_s16, neon_s16, 2)
58
+++ b/target/arm/translate.c
59
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
60
tmp = load_reg(s, rm);
61
tmp2 = load_reg(s, rn);
62
if (op1 & 2)
63
- gen_helper_double_saturate(tmp2, cpu_env, tmp2);
64
+ gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
65
if (op1 & 1)
66
gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
67
else
68
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
69
tmp = load_reg(s, rn);
70
tmp2 = load_reg(s, rm);
71
if (op & 1)
72
- gen_helper_double_saturate(tmp, cpu_env, tmp);
73
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
74
if (op & 2)
75
gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
76
else
67
--
77
--
68
2.20.1
78
2.20.1
69
79
70
80
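
The equivalence this patch relies on is easy to check in isolation. Below is
a standalone C sketch, not QEMU's code: SIGNBIT and the QF flag mirror the
helpers, and double_saturate follows the removed helper shown above.

    #include <assert.h>
    #include <stdint.h>

    #define SIGNBIT 0x80000000u

    static int QF; /* stand-in for the cumulative saturation flag env->QF */

    /* Signed saturating add, per the generic helper's semantics. */
    static uint32_t add_saturate(uint32_t a, uint32_t b)
    {
        uint32_t res = a + b;
        if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
            QF = 1;
            res = ~(((int32_t)a >> 31) ^ SIGNBIT);
        }
        return res;
    }

    /* The removed helper: double, saturating at INT32_MIN/INT32_MAX. */
    static uint32_t double_saturate(int32_t val)
    {
        if (val >= 0x40000000) {
            QF = 1;
            return ~SIGNBIT;
        } else if (val <= (int32_t)0xc0000000) {
            QF = 1;
            return SIGNBIT;
        }
        return (uint32_t)val << 1;
    }

    int main(void)
    {
        for (int64_t v = INT32_MIN; v <= INT32_MAX; v += 0x10001) {
            assert(add_saturate((uint32_t)v, (uint32_t)v) ==
                   double_saturate((int32_t)v));
        }
        return 0;
    }
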
New patch
1
From: Andrew Jones <drjones@redhat.com>
1
2
3
If -cpu <cpu>,aarch64=off is used then KVM must also be used, and both it
4
and the host must support running the vcpu in 32-bit mode. Also, if
5
-cpu <cpu>,aarch64=on is used, then it doesn't matter if KVM is
6
enabled or not.
7
8
Signed-off-by: Andrew Jones <drjones@redhat.com>
9
Reviewed-by: Eric Auger <eric.auger@redhat.com>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/kvm_arm.h | 14 ++++++++++++++
13
target/arm/cpu64.c | 12 ++++++------
14
target/arm/kvm64.c | 9 +++++++++
15
3 files changed, 29 insertions(+), 6 deletions(-)
16
17
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/kvm_arm.h
20
+++ b/target/arm/kvm_arm.h
21
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
22
*/
23
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
24
25
+/**
26
+ * kvm_arm_aarch32_supported:
27
+ * @cs: CPUState
28
+ *
29
+ * Returns: true if the KVM VCPU can enable AArch32 mode
30
+ * and false otherwise.
31
+ */
32
+bool kvm_arm_aarch32_supported(CPUState *cs);
33
+
34
/**
35
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
36
* IPA address space supported by KVM
37
@@ -XXX,XX +XXX,XX @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
38
cpu->host_cpu_probe_failed = true;
39
}
40
41
+static inline bool kvm_arm_aarch32_supported(CPUState *cs)
42
+{
43
+ return false;
44
+}
45
+
46
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
47
{
48
return -ENOENT;
49
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/cpu64.c
52
+++ b/target/arm/cpu64.c
53
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
54
* restriction allows us to avoid fixing up functionality that assumes a
55
* uniform execution state like do_interrupt.
56
*/
57
- if (!kvm_enabled()) {
58
- error_setg(errp, "'aarch64' feature cannot be disabled "
59
- "unless KVM is enabled");
60
- return;
61
- }
62
-
63
if (value == false) {
64
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
65
+ error_setg(errp, "'aarch64' feature cannot be disabled "
66
+ "unless KVM is enabled and 32-bit EL1 "
67
+ "is supported");
68
+ return;
69
+ }
70
unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
71
} else {
72
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
73
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/kvm64.c
76
+++ b/target/arm/kvm64.c
77
@@ -XXX,XX +XXX,XX @@
78
#include "exec/gdbstub.h"
79
#include "sysemu/sysemu.h"
80
#include "sysemu/kvm.h"
81
+#include "sysemu/kvm_int.h"
82
#include "kvm_arm.h"
83
+#include "hw/boards.h"
84
#include "internals.h"
85
86
static bool have_guest_debug;
87
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
88
return true;
89
}
90
91
+bool kvm_arm_aarch32_supported(CPUState *cpu)
92
+{
93
+ KVMState *s = KVM_STATE(current_machine->accelerator);
94
+
95
+ return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
96
+}
97
+
98
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
99
100
int kvm_arch_init_vcpu(CPUState *cs)
101
--
102
2.20.1
103
104
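
kvm_check_extension() is a thin wrapper around the KVM_CHECK_EXTENSION ioctl
on the KVM system fd. For reference, a minimal userspace sketch of the same
probe; it assumes an arm64 host whose <linux/kvm.h> defines
KVM_CAP_ARM_EL1_32BIT:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        /* > 0 means the host can run vcpus in AArch32 state at EL1. */
        int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_EL1_32BIT);
        printf("KVM_CAP_ARM_EL1_32BIT: %s\n",
               ret > 0 ? "supported" : "not supported");
        close(kvm);
        return 0;
    }
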
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From: Andrew Jones <drjones@redhat.com>
2
2
3
As this is a single register, we could expose it with a simple ifdef
3
We first convert the pmu property from a static property to one with
4
but we use the existing modify_arm_cp_regs mechanism for consistency.
4
its own accessors. Then we use the set accessor to check if the PMU is
5
supported when using KVM. Indeed, a 32-bit KVM host does not support
6
the PMU, so this check will catch an attempt to use it at property-set
7
time.
5
8
6
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Andrew Jones <drjones@redhat.com>
7
Message-id: 20190205190224.2198-4-alex.bennee@linaro.org
10
Reviewed-by: Eric Auger <eric.auger@redhat.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
target/arm/helper.c | 21 ++++++++++++++-------
13
target/arm/kvm_arm.h | 14 ++++++++++++++
12
1 file changed, 14 insertions(+), 7 deletions(-)
14
target/arm/cpu.c | 30 +++++++++++++++++++++++++-----
15
target/arm/kvm.c | 7 +++++++
16
3 files changed, 46 insertions(+), 5 deletions(-)
13
17
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
20
--- a/target/arm/kvm_arm.h
17
+++ b/target/arm/helper.c
21
+++ b/target/arm/kvm_arm.h
18
@@ -XXX,XX +XXX,XX @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
22
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
19
return mpidr_read_val(env);
23
*/
24
bool kvm_arm_aarch32_supported(CPUState *cs);
25
26
+/**
27
+ * kvm_arm_pmu_supported:
28
+ * @cs: CPUState
29
+ *
30
+ * Returns: true if the KVM VCPU can enable its PMU
31
+ * and false otherwise.
32
+ */
33
+bool kvm_arm_pmu_supported(CPUState *cs);
34
+
35
/**
36
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
37
* IPA address space supported by KVM
38
@@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_aarch32_supported(CPUState *cs)
39
return false;
20
}
40
}
21
41
22
-static const ARMCPRegInfo mpidr_cp_reginfo[] = {
42
+static inline bool kvm_arm_pmu_supported(CPUState *cs)
23
- { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
43
+{
24
- .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
44
+ return false;
25
- .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
45
+}
26
- REGINFO_SENTINEL
46
+
27
-};
47
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
48
{
49
return -ENOENT;
50
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/cpu.c
53
+++ b/target/arm/cpu.c
54
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_has_el3_property =
55
static Property arm_cpu_cfgend_property =
56
DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
57
58
-/* use property name "pmu" to match other archs and virt tools */
59
-static Property arm_cpu_has_pmu_property =
60
- DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
28
-
61
-
29
static const ARMCPRegInfo lpae_cp_reginfo[] = {
62
static Property arm_cpu_has_vfp_property =
30
/* NOP AMAIR0/1 */
63
DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
31
{ .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
64
32
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
65
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_pmsav7_dregion_property =
66
pmsav7_dregion,
67
qdev_prop_uint32, uint32_t);
68
69
+static bool arm_get_pmu(Object *obj, Error **errp)
70
+{
71
+ ARMCPU *cpu = ARM_CPU(obj);
72
+
73
+ return cpu->has_pmu;
74
+}
75
+
76
+static void arm_set_pmu(Object *obj, bool value, Error **errp)
77
+{
78
+ ARMCPU *cpu = ARM_CPU(obj);
79
+
80
+ if (value) {
81
+ if (kvm_enabled() && !kvm_arm_pmu_supported(CPU(cpu))) {
82
+ error_setg(errp, "'pmu' feature not supported by KVM on this host");
83
+ return;
84
+ }
85
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
86
+ } else {
87
+ unset_feature(&cpu->env, ARM_FEATURE_PMU);
88
+ }
89
+ cpu->has_pmu = value;
90
+}
91
+
92
static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
93
void *opaque, Error **errp)
94
{
95
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
33
}
96
}
34
97
35
if (arm_feature(env, ARM_FEATURE_MPIDR)) {
98
if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
36
+ ARMCPRegInfo mpidr_cp_reginfo[] = {
99
- qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
37
+ { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
100
+ cpu->has_pmu = true;
38
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
101
+ object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu,
39
+ .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
102
&error_abort);
40
+ REGINFO_SENTINEL
41
+ };
42
+#ifdef CONFIG_USER_ONLY
43
+ ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
44
+ { .name = "MPIDR_EL1",
45
+ .fixed_bits = 0x0000000080000000 },
46
+ REGUSERINFO_SENTINEL
47
+ };
48
+ modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
49
+#endif
50
define_arm_cp_regs(cpu, mpidr_cp_reginfo);
51
}
103
}
52
104
105
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/kvm.c
108
+++ b/target/arm/kvm.c
109
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
110
env->features = arm_host_cpu_features.features;
111
}
112
113
+bool kvm_arm_pmu_supported(CPUState *cpu)
114
+{
115
+ KVMState *s = KVM_STATE(current_machine->accelerator);
116
+
117
+ return kvm_check_extension(s, KVM_CAP_ARM_PMU_V3);
118
+}
119
+
120
int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
121
{
122
KVMState *s = KVM_STATE(ms->accelerator);
53
--
123
--
54
2.20.1
124
2.20.1
55
125
56
126
1
From: Aaron Lindsay OS <aaron@os.amperecomputing.com>
1
From: Andrew Jones <drjones@redhat.com>
2
2
3
This bug was introduced in:
3
The current implementation of ZCR_ELx matches the architecture, only
4
commit 5ecdd3e47cadae83a62dc92b472f1fe163b56f59
4
implementing the lower four bits, with the rest RAZ/WI. This puts
5
target/arm: Finish implementation of PM[X]EVCNTR and PM[X]EVTYPER
5
a strict limit on ARM_MAX_VQ of 16. Make sure we don't let ARM_MAX_VQ
6
grow without a corresponding update here.
6
7
7
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
8
Suggested-by: Dave Martin <Dave.Martin@arm.com>
8
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
9
Signed-off-by: Andrew Jones <drjones@redhat.com>
9
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20190205135129.19338-1-aaron@os.amperecomputing.com
11
Reviewed-by: Eric Auger <eric.auger@redhat.com>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
---
13
target/arm/helper.c | 8 ++++----
14
target/arm/helper.c | 1 +
14
1 file changed, 4 insertions(+), 4 deletions(-)
15
1 file changed, 1 insertion(+)
15
16
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper.c
19
--- a/target/arm/helper.c
19
+++ b/target/arm/helper.c
20
+++ b/target/arm/helper.c
20
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
21
@@ -XXX,XX +XXX,XX @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
21
char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
22
int new_len;
22
char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
23
23
ARMCPRegInfo pmev_regs[] = {
24
/* Bits other than [3:0] are RAZ/WI. */
24
- { .name = pmevcntr_name, .cp = 15, .crn = 15,
25
+ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
25
+ { .name = pmevcntr_name, .cp = 15, .crn = 14,
26
raw_write(env, ri, value & 0xf);
26
.crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
27
27
.access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
28
/*
28
.readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
29
.accessfn = pmreg_access },
30
{ .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
31
- .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 8 | (3 & (i >> 3)),
32
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
33
.opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
34
.type = ARM_CP_IO,
35
.readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
36
.raw_readfn = pmevcntr_rawread,
37
.raw_writefn = pmevcntr_rawwrite },
38
- { .name = pmevtyper_name, .cp = 15, .crn = 15,
39
+ { .name = pmevtyper_name, .cp = 15, .crn = 14,
40
.crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
41
.access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
42
.readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
43
.accessfn = pmreg_access },
44
{ .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
45
- .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 12 | (3 & (i >> 3)),
46
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
47
.opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
48
.type = ARM_CP_IO,
49
.readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
50
--
29
--
51
2.20.1
30
2.20.1
52
31
53
32
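
The QEMU_BUILD_BUG_ON() added above turns the RAZ/WI assumption into a build
failure rather than a silent guest bug. A minimal sketch of the same guard
using C11's _Static_assert (the ARM_MAX_VQ value here is assumed from
target/arm/cpu.h):

    #include <assert.h>   /* static_assert */

    #define ARM_MAX_VQ 16

    /* Breaks the build if ARM_MAX_VQ outgrows the 4-bit ZCR_ELx.LEN
     * field that zcr_write() implements. */
    static_assert(ARM_MAX_VQ <= 16,
                  "zcr_write() only implements ZCR_ELx.LEN[3:0]");

    int main(void) { return 0; }
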
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From: Andrew Jones <drjones@redhat.com>
2
2
3
Although technically not visible to userspace, the kernel does make
3
Unless we're guaranteed to always increase ARM_MAX_VQ by a multiple of
4
them visible via a trap-and-emulate ABI. We provide a new permission
4
four, we should use DIV_ROUND_UP to ensure we get an appropriate
5
mask (PL0U_R) which maps to PL0_R for CONFIG_USER builds and adjust
5
array size.
6
the minimum permission check accordingly.
7
6
8
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Andrew Jones <drjones@redhat.com>
9
Message-id: 20190205190224.2198-2-alex.bennee@linaro.org
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
target/arm/cpu.h | 12 ++++++++++++
11
target/arm/cpu.h | 2 +-
14
target/arm/helper.c | 6 +++++-
12
1 file changed, 1 insertion(+), 1 deletion(-)
15
2 files changed, 17 insertions(+), 1 deletion(-)
16
13
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
16
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ static inline bool cptype_valid(int cptype)
18
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVectorReg {
22
#define PL0_R (0x02 | PL1_R)
19
#ifdef TARGET_AARCH64
23
#define PL0_W (0x01 | PL1_W)
20
/* In AArch32 mode, predicate registers do not exist at all. */
24
21
typedef struct ARMPredicateReg {
25
+/*
22
- uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
26
+ * For user-mode some registers are accessible to EL0 via a kernel
23
+ uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
27
+ * trap-and-emulate ABI. In this case we define the read permissions
24
} ARMPredicateReg;
28
+ * as actually being PL0_R. However some bits of any given register
25
29
+ * may still be masked.
26
/* In AArch32 mode, PAC keys do not exist at all. */
30
+ */
31
+#ifdef CONFIG_USER_ONLY
32
+#define PL0U_R PL0_R
33
+#else
34
+#define PL0U_R PL1_R
35
+#endif
36
+
37
#define PL3_RW (PL3_R | PL3_W)
38
#define PL2_RW (PL2_R | PL2_W)
39
#define PL1_RW (PL1_R | PL1_W)
40
diff --git a/target/arm/helper.c b/target/arm/helper.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/helper.c
43
+++ b/target/arm/helper.c
44
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
45
if (r->state != ARM_CP_STATE_AA32) {
46
int mask = 0;
47
switch (r->opc1) {
48
- case 0: case 1: case 2:
49
+ case 0:
50
+ /* min_EL EL1, but some accessible to EL0 via kernel ABI */
51
+ mask = PL0U_R | PL1_RW;
52
+ break;
53
+ case 1: case 2:
54
/* min_EL EL1 */
55
mask = PL1_RW;
56
break;
57
--
27
--
58
2.20.1
28
2.20.1
59
29
60
30
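
The two expressions only diverge once 2 * ARM_MAX_VQ stops being a multiple
of eight; at the current value of 16 they happen to agree. A quick standalone
check, with DIV_ROUND_UP defined as in QEMU's headers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* A predicate register holds 2 * VQ bytes, stored as uint64_t
         * words, so the array wants ceil(2 * VQ / 8) elements. */
        for (int vq = 15; vq <= 17; vq++) {
            printf("VQ=%d: truncating=%d words, DIV_ROUND_UP=%d words\n",
                   vq, 2 * vq / 8, DIV_ROUND_UP(2 * vq, 8));
        }
        return 0;
    }
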
1
From: Catherine Ho <catherine.hecx@gmail.com>
1
From: Andrew Jones <drjones@redhat.com>
2
2
3
The lo, hi order is different from the comments, and commit
3
A couple return -EINVAL's forgot their '-'s.
4
1ec182c33379 ("target/arm: Convert to HAVE_CMPXCHG128") changed
5
the original code logic. Restore the old code logic from before this
6
commit:
7
do_paired_cmpxchg64_be():
8
cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
9
newv = int128_make128(new_hi, new_lo);
10
4
11
This fixes a bug that would only be visible for big-endian
5
Signed-off-by: Andrew Jones <drjones@redhat.com>
12
AArch64 guest code.
6
Reviewed-by: Eric Auger <eric.auger@redhat.com>
13
14
Fixes: 1ec182c33379 ("target/arm: Convert to HAVE_CMPXCHG128")
15
Signed-off-by: Catherine Ho <catherine.hecx@gmail.com>
16
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Message-id: 1548985244-24523-1-git-send-email-catherine.hecx@gmail.com
18
[PMM: added note that bug only affects BE guests]
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
9
---
21
target/arm/helper-a64.c | 4 ++--
10
target/arm/kvm64.c | 4 ++--
22
1 file changed, 2 insertions(+), 2 deletions(-)
11
1 file changed, 2 insertions(+), 2 deletions(-)
23
12
24
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
13
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
25
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/helper-a64.c
15
--- a/target/arm/kvm64.c
27
+++ b/target/arm/helper-a64.c
16
+++ b/target/arm/kvm64.c
28
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
17
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
29
* High and low need to be switched here because this is not actually a
18
write_cpustate_to_list(cpu, true);
30
* 128bit store but two doublewords stored consecutively
19
31
*/
20
if (!write_list_to_kvmstate(cpu, level)) {
32
- Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
21
- return EINVAL;
33
- Int128 newv = int128_make128(new_lo, new_hi);
22
+ return -EINVAL;
34
+ Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
23
}
35
+ Int128 newv = int128_make128(new_hi, new_lo);
24
36
Int128 oldv;
25
kvm_arm_sync_mpstate_to_kvm(cpu);
37
uintptr_t ra = GETPC();
26
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
38
uint64_t o0, o1;
27
}
28
29
if (!write_kvmstate_to_list(cpu)) {
30
- return EINVAL;
31
+ return -EINVAL;
32
}
33
/* Note that it's OK to have registers which aren't in CPUState,
34
* so we can ignore a failure return here.
39
--
35
--
40
2.20.1
36
2.20.1
41
37
42
38
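
The sign matters because callers follow the kernel convention that errors are
negative errno values; a bare EINVAL is positive and slips past a ret < 0
check. A hypothetical caller (names mine) illustrating the convention:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for a register sync operation that fails. */
    static int sync_regs(void)
    {
        return -EINVAL;   /* negative errno, never bare EINVAL */
    }

    int main(void)
    {
        int ret = sync_regs();
        if (ret < 0) {    /* a positive EINVAL would not be caught here */
            fprintf(stderr, "sync failed: %s\n", strerror(-ret));
            return 1;
        }
        return 0;
    }
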
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From: Andrew Jones <drjones@redhat.com>
2
2
3
Userspace programs should (in theory) query the ELF HWCAP before
3
Move the getting/putting of the fpsimd registers out of
4
probing these registers. Now that we have implemented them all, make it
4
kvm_arch_get/put_registers() into their own helper functions
5
public.
5
to prepare for alternatively getting/putting SVE registers.
6
6
7
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
7
No functional change.
8
9
Signed-off-by: Andrew Jones <drjones@redhat.com>
10
Reviewed-by: Eric Auger <eric.auger@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190205190224.2198-6-alex.bennee@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
13
---
12
linux-user/elfload.c | 1 +
14
target/arm/kvm64.c | 148 +++++++++++++++++++++++++++------------------
13
1 file changed, 1 insertion(+)
15
1 file changed, 88 insertions(+), 60 deletions(-)
14
16
15
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
17
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/elfload.c
19
--- a/target/arm/kvm64.c
18
+++ b/linux-user/elfload.c
20
+++ b/target/arm/kvm64.c
19
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
21
@@ -XXX,XX +XXX,XX @@ int kvm_arm_cpreg_level(uint64_t regidx)
20
22
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
21
hwcaps |= ARM_HWCAP_A64_FP;
23
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
22
hwcaps |= ARM_HWCAP_A64_ASIMD;
24
23
+ hwcaps |= ARM_HWCAP_A64_CPUID;
25
+static int kvm_arch_put_fpsimd(CPUState *cs)
24
26
+{
25
/* probe for the extra features */
27
+ ARMCPU *cpu = ARM_CPU(cs);
26
#define GET_FEATURE_ID(feat, hwcap) \
28
+ CPUARMState *env = &cpu->env;
29
+ struct kvm_one_reg reg;
30
+ uint32_t fpr;
31
+ int i, ret;
32
+
33
+ for (i = 0; i < 32; i++) {
34
+ uint64_t *q = aa64_vfp_qreg(env, i);
35
+#ifdef HOST_WORDS_BIGENDIAN
36
+ uint64_t fp_val[2] = { q[1], q[0] };
37
+ reg.addr = (uintptr_t)fp_val;
38
+#else
39
+ reg.addr = (uintptr_t)q;
40
+#endif
41
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
42
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
43
+ if (ret) {
44
+ return ret;
45
+ }
46
+ }
47
+
48
+ reg.addr = (uintptr_t)(&fpr);
49
+ fpr = vfp_get_fpsr(env);
50
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
51
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
52
+ if (ret) {
53
+ return ret;
54
+ }
55
+
56
+ reg.addr = (uintptr_t)(&fpr);
57
+ fpr = vfp_get_fpcr(env);
58
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
59
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
60
+ if (ret) {
61
+ return ret;
62
+ }
63
+
64
+ return 0;
65
+}
66
+
67
int kvm_arch_put_registers(CPUState *cs, int level)
68
{
69
struct kvm_one_reg reg;
70
- uint32_t fpr;
71
uint64_t val;
72
- int i;
73
- int ret;
74
+ int i, ret;
75
unsigned int el;
76
77
ARMCPU *cpu = ARM_CPU(cs);
78
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
79
}
80
}
81
82
- /* Advanced SIMD and FP registers. */
83
- for (i = 0; i < 32; i++) {
84
- uint64_t *q = aa64_vfp_qreg(env, i);
85
-#ifdef HOST_WORDS_BIGENDIAN
86
- uint64_t fp_val[2] = { q[1], q[0] };
87
- reg.addr = (uintptr_t)fp_val;
88
-#else
89
- reg.addr = (uintptr_t)q;
90
-#endif
91
- reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
92
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
93
- if (ret) {
94
- return ret;
95
- }
96
- }
97
-
98
- reg.addr = (uintptr_t)(&fpr);
99
- fpr = vfp_get_fpsr(env);
100
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
101
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
102
- if (ret) {
103
- return ret;
104
- }
105
-
106
- fpr = vfp_get_fpcr(env);
107
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
108
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
109
+ ret = kvm_arch_put_fpsimd(cs);
110
if (ret) {
111
return ret;
112
}
113
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
114
return ret;
115
}
116
117
+static int kvm_arch_get_fpsimd(CPUState *cs)
118
+{
119
+ ARMCPU *cpu = ARM_CPU(cs);
120
+ CPUARMState *env = &cpu->env;
121
+ struct kvm_one_reg reg;
122
+ uint32_t fpr;
123
+ int i, ret;
124
+
125
+ for (i = 0; i < 32; i++) {
126
+ uint64_t *q = aa64_vfp_qreg(env, i);
127
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
128
+ reg.addr = (uintptr_t)q;
129
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
130
+ if (ret) {
131
+ return ret;
132
+ } else {
133
+#ifdef HOST_WORDS_BIGENDIAN
134
+ uint64_t t;
135
+ t = q[0], q[0] = q[1], q[1] = t;
136
+#endif
137
+ }
138
+ }
139
+
140
+ reg.addr = (uintptr_t)(&fpr);
141
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
142
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
143
+ if (ret) {
144
+ return ret;
145
+ }
146
+ vfp_set_fpsr(env, fpr);
147
+
148
+ reg.addr = (uintptr_t)(&fpr);
149
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
150
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
151
+ if (ret) {
152
+ return ret;
153
+ }
154
+ vfp_set_fpcr(env, fpr);
155
+
156
+ return 0;
157
+}
158
+
159
int kvm_arch_get_registers(CPUState *cs)
160
{
161
struct kvm_one_reg reg;
162
uint64_t val;
163
- uint32_t fpr;
164
unsigned int el;
165
- int i;
166
- int ret;
167
+ int i, ret;
168
169
ARMCPU *cpu = ARM_CPU(cs);
170
CPUARMState *env = &cpu->env;
171
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
172
env->spsr = env->banked_spsr[i];
173
}
174
175
- /* Advanced SIMD and FP registers */
176
- for (i = 0; i < 32; i++) {
177
- uint64_t *q = aa64_vfp_qreg(env, i);
178
- reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
179
- reg.addr = (uintptr_t)q;
180
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
181
- if (ret) {
182
- return ret;
183
- } else {
184
-#ifdef HOST_WORDS_BIGENDIAN
185
- uint64_t t;
186
- t = q[0], q[0] = q[1], q[1] = t;
187
-#endif
188
- }
189
- }
190
-
191
- reg.addr = (uintptr_t)(&fpr);
192
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
193
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
194
+ ret = kvm_arch_get_fpsimd(cs);
195
if (ret) {
196
return ret;
197
}
198
- vfp_set_fpsr(env, fpr);
199
-
200
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
201
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
202
- if (ret) {
203
- return ret;
204
- }
205
- vfp_set_fpcr(env, fpr);
206
207
ret = kvm_get_vcpu_events(cpu);
208
if (ret) {
27
--
209
--
28
2.20.1
210
2.20.1
29
211
30
212
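
On the guest side, the HWCAP advertised above is probed the way real hardware
is: check AT_HWCAP for HWCAP_CPUID, then read the ID registers with mrs; the
read traps from EL0 and is emulated. A minimal aarch64-only sketch; the
fallback HWCAP_CPUID value is taken from the kernel ABI headers and is an
assumption for older toolchains:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_CPUID
    #define HWCAP_CPUID (1 << 11)
    #endif

    int main(void)
    {
        if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
            puts("ID register access not advertised; not probing");
            return 1;
        }
        uint64_t midr;
        /* Traps to EL1; the trap-and-emulate ABI fills in the value. */
        __asm__("mrs %0, midr_el1" : "=r"(midr));
        printf("MIDR_EL1 = 0x%016llx\n", (unsigned long long)midr);
        return 0;
    }
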
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Change the representation of this field such that it is easy
3
Extract is a compact combination of shift + and.
4
to set from vector code.
5
4
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190209033847.9014-11-richard.henderson@linaro.org
6
Message-id: 20190808202616.13782-2-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
9
---
11
target/arm/cpu.h | 5 ++++-
10
target/arm/translate.c | 9 +--------
12
target/arm/helper.c | 19 +++++++++++++++----
11
1 file changed, 1 insertion(+), 8 deletions(-)
13
target/arm/neon_helper.c | 2 +-
14
target/arm/vec_helper.c | 2 +-
15
4 files changed, 21 insertions(+), 7 deletions(-)
16
12
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
18
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
15
--- a/target/arm/translate.c
20
+++ b/target/arm/cpu.h
16
+++ b/target/arm/translate.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
17
@@ -XXX,XX +XXX,XX @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
22
ARMPredicateReg preg_tmp;
18
23
#endif
19
static void shifter_out_im(TCGv_i32 var, int shift)
24
25
- uint32_t xregs[16];
26
/* We store these fpcsr fields separately for convenience. */
27
+ uint32_t qc[4] QEMU_ALIGNED(16);
28
int vec_len;
29
int vec_stride;
30
31
+ uint32_t xregs[16];
32
+
33
/* Scratch space for aa32 neon expansion. */
34
uint32_t scratch[8];
35
36
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
37
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
38
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
39
#define FPCR_DN (1 << 25) /* Default NaN enable bit */
40
+#define FPCR_QC (1 << 27) /* Cumulative saturation bit */
41
42
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
43
{
20
{
44
diff --git a/target/arm/helper.c b/target/arm/helper.c
21
- if (shift == 0) {
45
index XXXXXXX..XXXXXXX 100644
22
- tcg_gen_andi_i32(cpu_CF, var, 1);
46
--- a/target/arm/helper.c
23
- } else {
47
+++ b/target/arm/helper.c
24
- tcg_gen_shri_i32(cpu_CF, var, shift);
48
@@ -XXX,XX +XXX,XX @@ static inline int vfp_exceptbits_from_host(int host_bits)
25
- if (shift != 31) {
49
26
- tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
50
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
27
- }
51
{
28
- }
52
- int i;
29
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
53
- uint32_t fpscr;
54
+ uint32_t i, fpscr;
55
56
fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
57
| (env->vfp.vec_len << 16)
58
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
59
/* FZ16 does not generate an input denormal exception. */
60
i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
61
& ~float_flag_input_denormal);
62
-
63
fpscr |= vfp_exceptbits_from_host(i);
64
+
65
+ i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
66
+ fpscr |= i ? FPCR_QC : 0;
67
+
68
return fpscr;
69
}
30
}
70
31
71
@@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
32
/* Shift by immediate. Includes special handling for shift == 0. */
72
* (which are stored in fp_status), and the other RES0 bits
73
* in between, then we clear all of the low 16 bits.
74
*/
75
- env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xffc80000;
76
+ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
77
env->vfp.vec_len = (val >> 16) & 7;
78
env->vfp.vec_stride = (val >> 20) & 3;
79
80
+ /*
81
+ * The bit we set within fpscr_q is arbitrary; the register as a
82
+ * whole being zero/non-zero is what counts.
83
+ */
84
+ env->vfp.qc[0] = val & FPCR_QC;
85
+ env->vfp.qc[1] = 0;
86
+ env->vfp.qc[2] = 0;
87
+ env->vfp.qc[3] = 0;
88
+
89
changed ^= val;
90
if (changed & (3 << 22)) {
91
i = (val >> 22) & 3;
92
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/neon_helper.c
95
+++ b/target/arm/neon_helper.c
96
@@ -XXX,XX +XXX,XX @@
97
#define SIGNBIT (uint32_t)0x80000000
98
#define SIGNBIT64 ((uint64_t)1 << 63)
99
100
-#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
101
+#define SET_QC() env->vfp.qc[0] = 1
102
103
#define NEON_TYPE1(name, type) \
104
typedef struct \
105
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/vec_helper.c
108
+++ b/target/arm/vec_helper.c
109
@@ -XXX,XX +XXX,XX @@
110
#define H4(x) (x)
111
#endif
112
113
-#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
114
+#define SET_QC() env->vfp.qc[0] = 1
115
116
static void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
117
{
118
--
33
--
119
2.20.1
34
2.20.1
120
35
121
36
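
tcg_gen_extract_i32(dst, src, pos, len) has the semantics of QEMU's
extract32(): a shift and a mask fused into one operation the backend can emit
efficiently. A standalone sketch of those semantics:

    #include <assert.h>
    #include <stdint.h>

    /* extract32(): LEN bits of VALUE starting at bit START (LEN >= 1). */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    int main(void)
    {
        /* shifter_out_im() wants bit 'shift' as the carry-out; a single
         * extract now covers both the shift == 31 and general cases. */
        assert(extract32(0x80000000u, 31, 1) == 1);
        assert(extract32(0x00000012u, 1, 1) == 1);
        assert(extract32(0xabcd1234u, 8, 8) == 0x12);
        return 0;
    }
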
1
From: Sandra Loosemore <sandra@codesourcery.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Per the GDB remote protocol documentation
3
Use deposit as the composite operation to merge the
4
bits from the two inputs.
4
5
5
https://sourceware.org/gdb/current/onlinedocs/gdb/Packets.html#index-vKill-packet
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
7
Message-id: 20190808202616.13782-3-richard.henderson@linaro.org
7
the debug stub is expected to send a reply to the 'vKill' packet. At
8
least some versions of GDB crash if the gdb stub simply exits without
9
sending a reply. This patch fixes QEMU's gdb stub to conform to the
10
expected behavior.
11
12
Note that QEMU's existing handling of the legacy 'k' packet is
13
correct: in that case GDB does not expect a reply, and QEMU does not
14
send one.
15
16
Signed-off-by: Sandra Loosemore <sandra@codesourcery.com>
17
Message-id: 1550008033-26540-1-git-send-email-sandra@codesourcery.com
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
10
---
21
gdbstub.c | 1 +
11
target/arm/translate.c | 26 ++++++++++----------------
22
1 file changed, 1 insertion(+)
12
1 file changed, 10 insertions(+), 16 deletions(-)
23
13
24
diff --git a/gdbstub.c b/gdbstub.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
25
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
26
--- a/gdbstub.c
16
--- a/target/arm/translate.c
27
+++ b/gdbstub.c
17
+++ b/target/arm/translate.c
28
@@ -XXX,XX +XXX,XX @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
18
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
29
break;
19
shift = (insn >> 7) & 0x1f;
30
} else if (strncmp(p, "Kill;", 5) == 0) {
20
if (insn & (1 << 6)) {
31
/* Kill the target */
21
/* pkhtb */
32
+ put_packet(s, "OK");
22
- if (shift == 0)
33
error_report("QEMU: Terminated via GDBstub");
23
+ if (shift == 0) {
34
exit(0);
24
shift = 31;
25
+ }
26
tcg_gen_sari_i32(tmp2, tmp2, shift);
27
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
28
- tcg_gen_ext16u_i32(tmp2, tmp2);
29
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
30
} else {
31
/* pkhbt */
32
- if (shift)
33
- tcg_gen_shli_i32(tmp2, tmp2, shift);
34
- tcg_gen_ext16u_i32(tmp, tmp);
35
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
36
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
37
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
38
}
39
- tcg_gen_or_i32(tmp, tmp, tmp2);
40
tcg_temp_free_i32(tmp2);
41
store_reg(s, rd, tmp);
42
} else if ((insn & 0x00200020) == 0x00200000) {
43
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
44
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
45
if (insn & (1 << 5)) {
46
/* pkhtb */
47
- if (shift == 0)
48
+ if (shift == 0) {
49
shift = 31;
50
+ }
51
tcg_gen_sari_i32(tmp2, tmp2, shift);
52
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
53
- tcg_gen_ext16u_i32(tmp2, tmp2);
54
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
55
} else {
56
/* pkhbt */
57
- if (shift)
58
- tcg_gen_shli_i32(tmp2, tmp2, shift);
59
- tcg_gen_ext16u_i32(tmp, tmp);
60
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
61
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
62
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
63
}
64
- tcg_gen_or_i32(tmp, tmp, tmp2);
65
tcg_temp_free_i32(tmp2);
66
store_reg(s, rd, tmp);
35
} else {
67
} else {
36
--
68
--
37
2.20.1
69
2.20.1
38
70
39
71
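
deposit32() (cf. include/qemu/bitops.h) overwrites one bitfield of a value
with the low bits of another, which is exactly the PKHBT/PKHTB data flow.
A standalone sketch of the semantics:

    #include <assert.h>
    #include <stdint.h>

    /* deposit32(): replace LEN bits of VALUE at bit START with FIELDVAL. */
    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        /* PKHBT-style merge: bottom half from t2, top half kept from t,
         * in one deposit instead of two ANDs plus an OR. */
        uint32_t t = 0xAAAA0000u, t2 = 0x1111BBBBu;
        assert(deposit32(t, 0, 16, t2) == 0xAAAABBBBu);
        return 0;
    }
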
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The components of this register are stored in several
3
The immediate shift generator functions already test for,
4
different locations.
4
and eliminate, the case of a shift by zero.
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190209033847.9014-7-richard.henderson@linaro.org
7
Message-id: 20190808202616.13782-4-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/helper.c | 4 ++--
11
target/arm/translate.c | 19 +++++++------------
12
1 file changed, 2 insertions(+), 2 deletions(-)
12
1 file changed, 7 insertions(+), 12 deletions(-)
13
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
16
--- a/target/arm/translate.c
17
+++ b/target/arm/helper.c
17
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
18
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
19
}
19
shift = (insn >> 10) & 3;
20
switch (reg - nregs) {
20
/* ??? In many cases it's not necessary to do a
21
case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
21
rotate, a shift is sufficient. */
22
- case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
22
- if (shift != 0)
23
+ case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
23
- tcg_gen_rotri_i32(tmp, tmp, shift * 8);
24
case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
24
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
25
}
25
op1 = (insn >> 20) & 7;
26
return 0;
26
switch (op1) {
27
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
27
case 0: gen_sxtb16(tmp); break;
28
}
28
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
29
switch (reg - nregs) {
29
shift = (insn >> 4) & 3;
30
case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
30
/* ??? In many cases it's not necessary to do a
31
- case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
31
rotate, a shift is sufficient. */
32
+ case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
32
- if (shift != 0)
33
case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
33
- tcg_gen_rotri_i32(tmp, tmp, shift * 8);
34
}
34
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
35
return 0;
35
op = (insn >> 20) & 7;
36
switch (op) {
37
case 0: gen_sxth(tmp); break;
38
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
39
case 7:
40
goto illegal_op;
41
default: /* Saturate. */
42
- if (shift) {
43
- if (op & 1)
44
- tcg_gen_sari_i32(tmp, tmp, shift);
45
- else
46
- tcg_gen_shli_i32(tmp, tmp, shift);
47
+ if (op & 1) {
48
+ tcg_gen_sari_i32(tmp, tmp, shift);
49
+ } else {
50
+ tcg_gen_shli_i32(tmp, tmp, shift);
51
}
52
tmp2 = tcg_const_i32(imm);
53
if (op & 4) {
54
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
55
goto illegal_op;
56
}
57
tmp = load_reg(s, rm);
58
- if (shift) {
59
- tcg_gen_shli_i32(tmp, tmp, shift);
60
- }
61
+ tcg_gen_shli_i32(tmp, tmp, shift);
62
tcg_gen_add_i32(addr, addr, tmp);
63
tcg_temp_free_i32(tmp);
64
break;
36
--
65
--
37
2.20.1
66
2.20.1
38
67
39
68
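
For reference, a simplified sketch of the recombination vfp_get_fpscr() has
to perform once the pieces live in separate fields (field positions follow
the FPSCR layout; exception flags omitted and the struct name is mine):

    #include <stdint.h>

    #define FPCR_QC (1u << 27)   /* cumulative saturation bit */

    struct vfp {
        uint32_t fpscr_raw;   /* bits stored directly */
        int vec_len;          /* LEN, FPSCR[18:16] */
        int vec_stride;       /* STRIDE, FPSCR[21:20] */
        uint32_t qc[4];       /* any non-zero lane means QC is set */
    };

    static uint32_t vfp_get_fpscr(const struct vfp *v)
    {
        uint32_t fpscr = v->fpscr_raw
                       | ((uint32_t)v->vec_len << 16)
                       | ((uint32_t)v->vec_stride << 20);
        if (v->qc[0] | v->qc[1] | v->qc[2] | v->qc[3]) {
            fpscr |= FPCR_QC;
        }
        return fpscr;
    }

    int main(void)
    {
        struct vfp v = { .vec_len = 3, .qc = { 0, 1, 0, 0 } };
        return vfp_get_fpscr(&v) == ((3u << 16) | FPCR_QC) ? 0 : 1;
    }
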
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Rather than a complex set of cases testing for writeback,
3
The helper function is more self-documenting, and also already
4
adjust DP after performing the operation.
4
handles the case of rotate by zero.
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190808202616.13782-5-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20190206052857.5077-2-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/translate.c | 32 ++++++++++++++++----------------
11
target/arm/translate.c | 7 ++-----
12
1 file changed, 16 insertions(+), 16 deletions(-)
12
1 file changed, 2 insertions(+), 5 deletions(-)
13
13
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
16
--- a/target/arm/translate.c
17
+++ b/target/arm/translate.c
17
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
18
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
19
tcg_gen_or_i32(tmp, tmp, tmp2);
19
/* CPSR = immediate */
20
tcg_temp_free_i32(tmp2);
20
val = insn & 0xff;
21
gen_vfp_msr(tmp);
21
shift = ((insn >> 8) & 0xf) * 2;
22
+ dp = 0; /* always a single precision result */
22
- if (shift)
23
break;
23
- val = (val >> shift) | (val << (32 - shift));
24
}
24
+ val = ror32(val, shift);
25
case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
25
i = ((insn & (1 << 22)) != 0);
26
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
26
if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
27
tcg_gen_or_i32(tmp, tmp, tmp2);
27
i, val)) {
28
tcg_temp_free_i32(tmp2);
28
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
29
gen_vfp_msr(tmp);
29
/* immediate operand */
30
+ dp = 0; /* always a single precision result */
30
val = insn & 0xff;
31
break;
31
shift = ((insn >> 8) & 0xf) * 2;
32
}
32
- if (shift) {
33
case 8: /* cmp */
33
- val = (val >> shift) | (val << (32 - shift));
34
gen_vfp_cmp(dp);
34
- }
35
+ dp = -1; /* no write back */
35
+ val = ror32(val, shift);
36
break;
36
tmp2 = tcg_temp_new_i32();
37
case 9: /* cmpe */
37
tcg_gen_movi_i32(tmp2, val);
38
gen_vfp_cmpe(dp);
38
if (logic_cc && shift) {
39
+ dp = -1; /* no write back */
40
break;
41
case 10: /* cmpz */
42
gen_vfp_cmp(dp);
43
+ dp = -1; /* no write back */
44
break;
45
case 11: /* cmpez */
46
gen_vfp_F1_ld0(dp);
47
gen_vfp_cmpe(dp);
48
+ dp = -1; /* no write back */
49
break;
50
case 12: /* vrintr */
51
{
52
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
53
break;
54
}
55
case 15: /* single<->double conversion */
56
- if (dp)
57
+ if (dp) {
58
gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
59
- else
60
+ } else {
61
gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
62
+ }
63
+ dp = !dp; /* result size is opposite */
64
break;
65
case 16: /* fuito */
66
gen_vfp_uito(dp, 0);
67
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
68
break;
69
case 24: /* ftoui */
70
gen_vfp_toui(dp, 0);
71
+ dp = 0; /* always an integer result */
72
break;
73
case 25: /* ftouiz */
74
gen_vfp_touiz(dp, 0);
75
+ dp = 0; /* always an integer result */
76
break;
77
case 26: /* ftosi */
78
gen_vfp_tosi(dp, 0);
79
+ dp = 0; /* always an integer result */
80
break;
81
case 27: /* ftosiz */
82
gen_vfp_tosiz(dp, 0);
83
+ dp = 0; /* always an integer result */
84
break;
85
case 28: /* ftosh */
86
if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
87
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
88
return 1;
89
}
90
91
- /* Write back the result. */
92
- if (op == 15 && (rn >= 8 && rn <= 11)) {
93
- /* Comparison, do nothing. */
94
- } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
95
- (rn & 0x1e) == 0x6)) {
96
- /* VCVT double to int: always integer result.
97
- * VCVT double to half precision is always a single
98
- * precision result.
99
- */
100
- gen_mov_vreg_F0(0, rd);
101
- } else if (op == 15 && rn == 15) {
102
- /* conversion */
103
- gen_mov_vreg_F0(!dp, rd);
104
- } else {
105
+ /* Write back the result, if any. */
106
+ if (dp >= 0) {
107
gen_mov_vreg_F0(dp, rd);
108
}
109
110
--
39
--
111
2.20.1
40
2.20.1
112
41
113
42
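
The rotate-by-zero point deserves spelling out: the open-coded form shifts by
32 when shift == 0, which is undefined behaviour in C. A sketch of ror32()
along the lines of QEMU's bitops:

    #include <assert.h>
    #include <stdint.h>

    /* The '& 31' terms keep both shift amounts in 0..31, so shift == 0
     * is well-defined; (w >> shift) | (w << (32 - shift)) is not. */
    static uint32_t ror32(uint32_t word, unsigned int shift)
    {
        return (word >> (shift & 31)) | (word << (-shift & 31));
    }

    int main(void)
    {
        assert(ror32(0x000000FFu, 8) == 0xFF000000u);
        assert(ror32(0x12345678u, 0) == 0x12345678u);  /* no UB */
        return 0;
    }
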
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Minimize the code within a macro by splitting out a helper function.
3
Rotate is the more compact and obvious way to swap 16-bit
4
Use deposit32 instead of manual bit manipulation.
4
elements of a 32-bit word.
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190209033847.9014-9-richard.henderson@linaro.org
7
Message-id: 20190808202616.13782-6-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/helper.c | 45 +++++++++++++++++++++++++++------------------
11
target/arm/translate.c | 6 +-----
12
1 file changed, 27 insertions(+), 18 deletions(-)
12
1 file changed, 1 insertion(+), 5 deletions(-)
13
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
16
--- a/target/arm/translate.c
17
+++ b/target/arm/helper.c
17
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
18
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
19
return float64_sqrt(a, &env->vfp.fp_status);
19
/* Swap low and high halfwords. */
20
static void gen_swap_half(TCGv_i32 var)
21
{
22
- TCGv_i32 tmp = tcg_temp_new_i32();
23
- tcg_gen_shri_i32(tmp, var, 16);
24
- tcg_gen_shli_i32(var, var, 16);
25
- tcg_gen_or_i32(var, var, tmp);
26
- tcg_temp_free_i32(tmp);
27
+ tcg_gen_rotri_i32(var, var, 16);
20
}
28
}
21
29
22
+static void softfloat_to_vfp_compare(CPUARMState *env, int cmp)
30
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
23
+{
24
+ uint32_t flags;
25
+ switch (cmp) {
26
+ case float_relation_equal:
27
+ flags = 0x6;
28
+ break;
29
+ case float_relation_less:
30
+ flags = 0x8;
31
+ break;
32
+ case float_relation_greater:
33
+ flags = 0x2;
34
+ break;
35
+ case float_relation_unordered:
36
+ flags = 0x3;
37
+ break;
38
+ default:
39
+ g_assert_not_reached();
40
+ }
41
+ env->vfp.xregs[ARM_VFP_FPSCR] =
42
+ deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags);
43
+}
44
+
45
/* XXX: check quiet/signaling case */
46
#define DO_VFP_cmp(p, type) \
47
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
48
{ \
49
- uint32_t flags; \
50
- switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
51
- case 0: flags = 0x6; break; \
52
- case -1: flags = 0x8; break; \
53
- case 1: flags = 0x2; break; \
54
- default: case 2: flags = 0x3; break; \
55
- } \
56
- env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
57
- | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
58
+ softfloat_to_vfp_compare(env, \
59
+ type ## _compare_quiet(a, b, &env->vfp.fp_status)); \
60
} \
61
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
62
{ \
63
- uint32_t flags; \
64
- switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
65
- case 0: flags = 0x6; break; \
66
- case -1: flags = 0x8; break; \
67
- case 1: flags = 0x2; break; \
68
- default: case 2: flags = 0x3; break; \
69
- } \
70
- env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
71
- | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
72
+ softfloat_to_vfp_compare(env, \
73
+ type ## _compare(a, b, &env->vfp.fp_status)); \
74
}
75
DO_VFP_cmp(s, float32)
76
DO_VFP_cmp(d, float64)
77
--
31
--
78
2.20.1
32
2.20.1
79
33
80
34
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The 32-bit PMIN/PMAX have been decomposed to scalars,
3
All of the inputs to these instructions are 32 bits. Rather than
4
and so can be trivially expanded inline.
4
extend each input to 64 bits and then extract the high 32 bits of
5
the output, use tcg_gen_muls2_i32 and other 32-bit generator functions.
5
6
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190209033847.9014-5-richard.henderson@linaro.org
8
Message-id: 20190808202616.13782-7-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
target/arm/translate.c | 8 ++++----
12
target/arm/translate.c | 72 +++++++++++++++---------------------------
12
1 file changed, 4 insertions(+), 4 deletions(-)
13
1 file changed, 26 insertions(+), 46 deletions(-)
13
14
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
17
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
19
@@ -XXX,XX +XXX,XX @@ static void gen_revsh(TCGv_i32 var)
20
tcg_gen_ext16s_i32(var, var);
19
}
21
}
20
22
21
/* 32-bit pairwise ops end up the same as the elementwise versions. */
23
-/* Return (b << 32) + a. Mark inputs as dead */
22
-#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
24
-static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
23
-#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
25
-{
24
-#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
26
- TCGv_i64 tmp64 = tcg_temp_new_i64();
25
-#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
27
-
26
+#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
28
- tcg_gen_extu_i32_i64(tmp64, b);
27
+#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
29
- tcg_temp_free_i32(b);
28
+#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
30
- tcg_gen_shli_i64(tmp64, tmp64, 32);
29
+#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
31
- tcg_gen_add_i64(a, tmp64, a);
30
32
-
31
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
33
- tcg_temp_free_i64(tmp64);
32
switch ((size << 1) | u) { \
34
- return a;
35
-}
36
-
37
-/* Return (b << 32) - a. Mark inputs as dead. */
38
-static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
39
-{
40
- TCGv_i64 tmp64 = tcg_temp_new_i64();
41
-
42
- tcg_gen_extu_i32_i64(tmp64, b);
43
- tcg_temp_free_i32(b);
44
- tcg_gen_shli_i64(tmp64, tmp64, 32);
45
- tcg_gen_sub_i64(a, tmp64, a);
46
-
47
- tcg_temp_free_i64(tmp64);
48
- return a;
49
-}
50
-
51
/* 32x32->64 multiply. Marks inputs as dead. */
52
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
53
{
54
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
55
(SMMUL, SMMLA, SMMLS) */
56
tmp = load_reg(s, rm);
57
tmp2 = load_reg(s, rs);
58
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
59
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
60
61
if (rd != 15) {
62
- tmp = load_reg(s, rd);
63
+ tmp3 = load_reg(s, rd);
64
if (insn & (1 << 6)) {
65
- tmp64 = gen_subq_msw(tmp64, tmp);
66
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
67
} else {
68
- tmp64 = gen_addq_msw(tmp64, tmp);
69
+ tcg_gen_add_i32(tmp, tmp, tmp3);
70
}
71
+ tcg_temp_free_i32(tmp3);
72
}
73
if (insn & (1 << 5)) {
74
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
75
+ /*
76
+ * Adding 0x80000000 to the 64-bit quantity
77
+ * means that we have carry in to the high
78
+ * word when the low word has the high bit set.
79
+ */
80
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
81
+ tcg_gen_add_i32(tmp, tmp, tmp2);
82
}
83
- tcg_gen_shri_i64(tmp64, tmp64, 32);
84
- tmp = tcg_temp_new_i32();
85
- tcg_gen_extrl_i64_i32(tmp, tmp64);
86
- tcg_temp_free_i64(tmp64);
87
+ tcg_temp_free_i32(tmp2);
88
store_reg(s, rn, tmp);
89
break;
90
case 0:
91
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
92
}
93
break;
94
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
95
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
96
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
97
if (rs != 15) {
98
- tmp = load_reg(s, rs);
99
+ tmp3 = load_reg(s, rs);
100
if (insn & (1 << 20)) {
101
- tmp64 = gen_addq_msw(tmp64, tmp);
102
+ tcg_gen_add_i32(tmp, tmp, tmp3);
103
} else {
104
- tmp64 = gen_subq_msw(tmp64, tmp);
105
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
106
}
107
+ tcg_temp_free_i32(tmp3);
108
}
109
if (insn & (1 << 4)) {
110
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
111
+ /*
112
+ * Adding 0x80000000 to the 64-bit quantity
113
+ * means that we have carry in to the high
114
+ * word when the low word has the high bit set.
115
+ */
116
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
117
+ tcg_gen_add_i32(tmp, tmp, tmp2);
118
}
119
- tcg_gen_shri_i64(tmp64, tmp64, 32);
120
- tmp = tcg_temp_new_i32();
121
- tcg_gen_extrl_i64_i32(tmp, tmp64);
122
- tcg_temp_free_i64(tmp64);
123
+ tcg_temp_free_i32(tmp2);
124
break;
125
case 7: /* Unsigned sum of absolute differences. */
126
gen_helper_usad8(tmp, tmp, tmp2);
33
--
127
--
34
2.20.1
128
2.20.1
35
129
36
130
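
The rounding comment above checks out in plain C: adding 0x80000000 to the
64-bit product carries into the high word exactly when the low half's top
bit is set, so hi + (lo >> 31) is the rounded high word. A standalone sketch
comparing the two formulations:

    #include <assert.h>
    #include <stdint.h>

    /* Rounded high-word multiply, computed the way the patch does it:
     * keep both 32-bit halves (what muls2 produces) and fold the carry. */
    static int32_t smmulr_halves(int32_t a, int32_t b)
    {
        int64_t prod = (int64_t)a * b;
        uint32_t lo = (uint32_t)prod;
        int32_t hi = (int32_t)(prod >> 32);
        return hi + (int32_t)(lo >> 31);
    }

    /* The old 64-bit formulation: add 0x80000000, take the high word. */
    static int32_t smmulr_wide(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b + 0x80000000ll) >> 32);
    }

    int main(void)
    {
        int32_t vals[] = { 3, -3, 0x40000000, -0x40000000, 12345, -98765 };
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 6; j++) {
                assert(smmulr_halves(vals[i], vals[j]) ==
                       smmulr_wide(vals[i], vals[j]));
            }
        }
        return 0;
    }
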
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Separate shift + extract low will result in one extra insn
4
for hosts like RISC-V, MIPS, and Sparc.
5
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20190209033847.9014-8-richard.henderson@linaro.org
7
Message-id: 20190808202616.13782-8-richard.henderson@linaro.org
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
10
---
8
target/arm/translate.c | 2 +-
11
target/arm/translate.c | 18 ++++++------------
9
1 file changed, 1 insertion(+), 1 deletion(-)
12
1 file changed, 6 insertions(+), 12 deletions(-)
10
13
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
16
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
17
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
18
@@ -XXX,XX +XXX,XX @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
16
i * 2 + 1, (uint32_t)(v >> 32),
19
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
17
i, v);
20
iwmmxt_load_reg(cpu_V0, wrd);
18
}
21
tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
19
- cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
22
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
20
+ cpu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
23
- tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
21
}
24
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
25
} else { /* TMCRR */
26
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
27
iwmmxt_store_reg(cpu_V0, wrd);
28
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
29
if (insn & ARM_CP_RW_BIT) { /* MRA */
30
iwmmxt_load_reg(cpu_V0, acc);
31
tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
32
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
33
- tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
34
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
35
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
36
} else { /* MAR */
37
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
38
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
39
gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
40
break;
41
case 2:
42
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
43
- tcg_gen_extrl_i64_i32(tmp, cpu_V0);
44
+ tcg_gen_extrh_i64_i32(tmp, cpu_V0);
45
break;
46
default: abort();
47
}
48
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
49
break;
50
case 2:
51
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
52
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
53
- tcg_gen_extrl_i64_i32(tmp, cpu_V0);
54
+ tcg_gen_extrh_i64_i32(tmp, cpu_V0);
55
break;
56
default: abort();
57
}
58
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
59
tmp = tcg_temp_new_i32();
60
tcg_gen_extrl_i64_i32(tmp, tmp64);
61
store_reg(s, rt, tmp);
62
- tcg_gen_shri_i64(tmp64, tmp64, 32);
63
tmp = tcg_temp_new_i32();
64
- tcg_gen_extrl_i64_i32(tmp, tmp64);
65
+ tcg_gen_extrh_i64_i32(tmp, tmp64);
66
tcg_temp_free_i64(tmp64);
67
store_reg(s, rt2, tmp);
68
} else {
69
@@ -XXX,XX +XXX,XX @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
70
tcg_gen_extrl_i64_i32(tmp, val);
71
store_reg(s, rlow, tmp);
72
tmp = tcg_temp_new_i32();
73
- tcg_gen_shri_i64(val, val, 32);
74
- tcg_gen_extrl_i64_i32(tmp, val);
75
+ tcg_gen_extrh_i64_i32(tmp, val);
76
store_reg(s, rhigh, tmp);
22
}
77
}
23
78
24
--
79
--
25
2.20.1
80
2.20.1
26
81
27
82