Small pile of bug fixes for rc1. I've included my patches to get
our docs building with Sphinx 3, just for convenience...

thanks
-- PMM

The following changes since commit b149dea55cce97cb226683d06af61984a1c11e96:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20201102' into staging (2020-11-02 10:57:48 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20201102

for you to fetch changes up to ffb4fbf90a2f63c9cb33e4bb9f854c79bf04ca4a:

  tests/qtest/npcm7xx_rng-test: Disable randomness tests (2020-11-02 16:52:18 +0000)

----------------------------------------------------------------
target-arm queue:
 * target/arm: Fix Neon emulation bugs on big-endian hosts
 * target/arm: fix handling of HCR.FB
 * target/arm: fix LORID_EL1 access check
 * disas/capstone: Fix monitor disassembly of >32 bytes
 * hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)
 * hw/arm/boot: fix SVE for EL3 direct kernel boot
 * hw/display/omap_lcdc: Fix potential NULL pointer dereference
 * hw/display/exynos4210_fimd: Fix potential NULL pointer dereference
 * target/arm: Get correct MMU index for other-security-state
 * configure: Test that gio libs from pkg-config work
 * hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
 * docs: Fix building with Sphinx 3
 * tests/qtest/npcm7xx_rng-test: Disable randomness tests

----------------------------------------------------------------
AlexChen (2):
      hw/display/omap_lcdc: Fix potential NULL pointer dereference
      hw/display/exynos4210_fimd: Fix potential NULL pointer dereference

Peter Maydell (9):
      target/arm: Fix float16 pairwise Neon ops on big-endian hosts
      target/arm: Fix VUDOT/VSDOT (scalar) on big-endian hosts
      disas/capstone: Fix monitor disassembly of >32 bytes
      target/arm: Get correct MMU index for other-security-state
      configure: Test that gio libs from pkg-config work
      hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
      scripts/kerneldoc: For Sphinx 3 use c:macro for macros with arguments
      qemu-option-trace.rst.inc: Don't use option:: markup
      tests/qtest/npcm7xx_rng-test: Disable randomness tests

Philippe Mathieu-Daudé (1):
      hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)

Richard Henderson (11):
      target/arm: Introduce neon_full_reg_offset
      target/arm: Move neon_element_offset to translate.c
      target/arm: Use neon_element_offset in neon_load/store_reg
      target/arm: Use neon_element_offset in vfp_reg_offset
      target/arm: Add read/write_neon_element32
      target/arm: Expand read/write_neon_element32 to all MemOp
      target/arm: Rename neon_load_reg32 to vfp_load_reg32
      target/arm: Add read/write_neon_element64
      target/arm: Rename neon_load_reg64 to vfp_load_reg64
      target/arm: Simplify do_long_3d and do_2scalar_long
      target/arm: Improve do_prewiden_3d

Rémi Denis-Courmont (3):
      target/arm: fix handling of HCR.FB
      target/arm: fix LORID_EL1 access check
      hw/arm/boot: fix SVE for EL3 direct kernel boot

 docs/qemu-option-trace.rst.inc     |   6 +-
 configure                          |  10 +-
 include/hw/intc/arm_gicv3_common.h |   1 -
 disas/capstone.c                   |   2 +-
 hw/arm/boot.c                      |   3 +
 hw/arm/smmuv3.c                    |   3 +-
 hw/display/exynos4210_fimd.c       |   4 +-
 hw/display/omap_lcdc.c             |  10 +-
 hw/intc/arm_gicv3_cpuif.c          |   5 +-
 target/arm/helper.c                |  24 +-
 target/arm/m_helper.c              |   3 +-
 target/arm/translate.c             | 153 +++++++++---
 target/arm/vec_helper.c            |  12 +-
 tests/qtest/npcm7xx_rng-test.c     |  14 +-
 scripts/kernel-doc                 |  18 +-
 target/arm/translate-neon.c.inc    | 472 ++++++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc     | 341 +++++++++++----------------
 17 files changed, 588 insertions(+), 493 deletions(-)
Deleted patch
----------------------------------------------------------------
From: Alex Bennée <alex.bennee@linaro.org>

While most features are now detected by probing the ID_* registers,
kernels can (and do) use MIDR_EL1 for working out if they have to
apply errata. This can trip up warnings in the kernel as it tries to
work out if it should apply workarounds to features that don't
actually exist in the reported CPU type.

Avoid this problem by synthesising our own MIDR value.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190726113950.7499-1-alex.bennee@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h   |  6 ++++++
 target/arm/cpu64.c | 19 +++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
 /*
  * System register ID fields.
  */
+FIELD(MIDR_EL1, REVISION, 0, 4)
+FIELD(MIDR_EL1, PARTNUM, 4, 12)
+FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
+FIELD(MIDR_EL1, VARIANT, 20, 4)
+FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
+
 FIELD(ID_ISAR0, SWAP, 0, 4)
 FIELD(ID_ISAR0, BITCOUNT, 4, 4)
 FIELD(ID_ISAR0, BITFIELD, 8, 4)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     uint32_t u;
     aarch64_a57_initfn(obj);

+    /*
+     * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
+     * one and try to apply errata workarounds or use impdef features we
+     * don't provide.
+     * An IMPLEMENTER field of 0 means "reserved for software use";
+     * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
+     * to see which features are present";
+     * the VARIANT, PARTNUM and REVISION fields are all implementation
+     * defined and we choose to define PARTNUM just in case guest
+     * code needs to distinguish this QEMU CPU from other software
+     * implementations, though this shouldn't be needed.
+     */
+    t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
+    t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
+    t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
+    t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
+    t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
+    cpu->midr = t;
+
     t = cpu->isar.id_aa64isar0;
     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
--
2.20.1
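As a quick cross-check of the field layout above, here is a small
standalone C program (my own sketch, not part of the patch or of QEMU)
that assembles the same MIDR value by hand; the shift constants mirror
the FIELD(MIDR_EL1, ...) definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Field positions, mirroring the FIELD(MIDR_EL1, ...) definitions above. */
enum {
    MIDR_REVISION_SHIFT     = 0,   /* 4 bits  */
    MIDR_PARTNUM_SHIFT      = 4,   /* 12 bits */
    MIDR_ARCHITECTURE_SHIFT = 16,  /* 4 bits  */
    MIDR_VARIANT_SHIFT      = 20,  /* 4 bits  */
    MIDR_IMPLEMENTER_SHIFT  = 24,  /* 8 bits  */
};

int main(void)
{
    uint64_t midr = 0;

    midr |= (uint64_t)0x0 << MIDR_IMPLEMENTER_SHIFT;  /* reserved for software use */
    midr |= (uint64_t)0xf << MIDR_ARCHITECTURE_SHIFT; /* "check the ID registers" */
    midr |= (uint64_t)'Q' << MIDR_PARTNUM_SHIFT;      /* 0x51, marks a QEMU CPU */
    midr |= (uint64_t)0x0 << MIDR_VARIANT_SHIFT;
    midr |= (uint64_t)0x0 << MIDR_REVISION_SHIFT;

    printf("MIDR = 0x%" PRIx64 "\n", midr);  /* prints 0xf0510 */
    return 0;
}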
From: Richard Henderson <richard.henderson@linaro.org>

This function makes it clear that we're talking about the whole
register, and not the 32-bit piece at index 0. This fixes a bug
when running on a big-endian host.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  8 ++++++
 target/arm/translate-neon.c.inc | 44 ++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc  |  2 +-
 3 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
     unallocated_encoding(s);
 }

+/*
+ * Return the offset of a "full" NEON Dreg.
+ */
+static long neon_full_reg_offset(unsigned reg)
+{
+    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+}
+
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ neon_element_offset(int reg, int element, MemOp size)
         ofs ^= 8 - element_size;
     }
 #endif
-    return neon_reg_offset(reg, 0) + ofs;
+    return neon_full_reg_offset(reg) + ofs;
 }

 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
                  * We cannot write 16 bytes at once because the
                  * destination is unaligned.
                  */
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      8, 8, tmp);
-                tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
-                                 neon_reg_offset(vd, 0), 8, 8);
+                tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
+                                 neon_full_reg_offset(vd), 8, 8);
             } else {
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      vec_size, vec_size, tmp);
             }
             tcg_gen_addi_i32(addr, addr, 1 << size);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
 static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
+    int rm_ofs = neon_full_reg_offset(a->vm);

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
 {
     /* Handle a 2-reg-shift insn which can be vectorized. */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
 {
     /* FP operations in 2-reg-and-shift group */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
     TCGv_ptr fpst;

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -XXX,XX +XXX,XX @@ static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
         return true;
     }

-    reg_ofs = neon_reg_offset(a->vd, 0);
+    reg_ofs = neon_full_reg_offset(a->vd);
     vec_size = a->q ? 16 : 8;
     imm = asimd_imm_const(a->imm, a->cmode, a->op);

@@ -XXX,XX +XXX,XX @@ static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
         return true;
     }

-    tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
-                       neon_reg_offset(a->vn, 0),
-                       neon_reg_offset(a->vm, 0),
+    tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
+                       neon_full_reg_offset(a->vn),
+                       neon_full_reg_offset(a->vm),
                        16, 16, 0, fn_gvec);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
 {
     /* Two registers and a scalar, using gvec */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
     int rm_ofs;
     int idx;
     TCGv_ptr fpstatus;
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
     /* a->vm is M:Vm, which encodes both register and index */
     idx = extract32(a->vm, a->size + 2, 2);
     a->vm = extract32(a->vm, 0, a->size + 2);
-    rm_ofs = neon_reg_offset(a->vm, 0);
+    rm_ofs = neon_full_reg_offset(a->vm);

     fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
     tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
         return true;
     }

-    tcg_gen_gvec_dup_mem(a->size, neon_reg_offset(a->vd, 0),
+    tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
                          neon_element_offset(a->vm, a->index, a->size),
                          a->q ? 16 : 8, a->q ? 16 : 8);
     return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
 static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
     }

     tmp = load_reg(s, a->rt);
-    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
+    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                          vec_size, vec_size, tmp);
     tcg_temp_free_i32(tmp);

--
2.20.1
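The big-endian bug this fixes boils down to where the low 32-bit word
of a 64-bit register slot lives in host memory. A minimal standalone C
illustration of the pitfall (my own sketch, independent of the QEMU
sources):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t dreg = 0x1122334455667788ULL;
    uint32_t piece0;

    /* Read the 32-bit piece that sits at byte offset 0 of the 64-bit slot. */
    memcpy(&piece0, &dreg, sizeof(piece0));

    /*
     * On a little-endian host this prints 0x55667788 (the low word), so
     * "offset of the whole register" and "offset of 32-bit element 0"
     * coincide. On a big-endian host it prints 0x11223344: element 0 is
     * at byte offset 4, not 0. Hence a helper that explicitly means
     * "the whole Dreg" (neon_full_reg_offset) rather than "piece 0".
     */
    printf("piece at offset 0 = 0x%08" PRIx32 "\n", piece0);
    return 0;
}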
From: Richard Henderson <richard.henderson@linaro.org>

This will shortly have users outside of translate-neon.c.inc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 20 ++++++++++++++++++++
 target/arm/translate-neon.c.inc | 19 -------------------
 2 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
     return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
 }

+/*
+ * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
+ * where 0 is the least significant end of the register.
+ */
+static long neon_element_offset(int reg, int element, MemOp size)
+{
+    int element_size = 1 << size;
+    int ofs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+    /*
+     * Calculate the offset assuming fully little-endian,
+     * then XOR to account for the order of the 8-byte units.
+     */
+    if (element_size < 8) {
+        ofs ^= 8 - element_size;
+    }
+#endif
+    return neon_full_reg_offset(reg) + ofs;
+}
+
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static inline int neon_3same_fp_size(DisasContext *s, int x)
 #include "decode-neon-ls.c.inc"
 #include "decode-neon-shared.c.inc"

-/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
- * where 0 is the least significant end of the register.
- */
-static inline long
-neon_element_offset(int reg, int element, MemOp size)
-{
-    int element_size = 1 << size;
-    int ofs = element * element_size;
-#ifdef HOST_WORDS_BIGENDIAN
-    /* Calculate the offset assuming fully little-endian,
-     * then XOR to account for the order of the 8-byte units.
-     */
-    if (element_size < 8) {
-        ofs ^= 8 - element_size;
-    }
-#endif
-    return neon_full_reg_offset(reg) + ofs;
-}
-
 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
 {
     long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
--
2.20.1
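A worked example of the XOR trick in neon_element_offset(): for a
16-bit element at index 1, the little-endian offset is 2, and on a
big-endian host 2 ^ (8 - 2) = 4, which is exactly where the
second-least-significant halfword of an 8-byte unit sits. A standalone
sketch of the same arithmetic (mine, with the host byte order made an
explicit parameter instead of an #ifdef):

#include <stdio.h>

/* Re-implementation of the offset arithmetic above, outside QEMU. */
static long element_offset(int element, int size_log2, int host_big_endian)
{
    int element_size = 1 << size_log2;
    int ofs = element * element_size;

    if (host_big_endian && element_size < 8) {
        /* Flip the position of the element within its 8-byte unit. */
        ofs ^= 8 - element_size;
    }
    return ofs;
}

int main(void)
{
    /* 16-bit elements of one 8-byte unit: LE 0,2,4,6 -> BE 6,4,2,0. */
    for (int ele = 0; ele < 4; ele++) {
        printf("element %d: LE offset %ld, BE offset %ld\n", ele,
               element_offset(ele, 1, 0), element_offset(ele, 1, 1));
    }
    return 0;
}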
From: Richard Henderson <richard.henderson@linaro.org>

These are the only users of neon_reg_offset, so remove that.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline long vfp_reg_offset(bool dp, unsigned reg)
     }
 }

-/* Return the offset of a 32-bit piece of a NEON register.
-   zero is the least significant end of the register.  */
-static inline long
-neon_reg_offset (int reg, int n)
-{
-    int sreg;
-    sreg = reg * 2 + n;
-    return vfp_reg_offset(0, sreg);
-}
-
 static TCGv_i32 neon_load_reg(int reg, int pass)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
-    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+    tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
     return tmp;
 }

 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
 {
-    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+    tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
     tcg_temp_free_i32(var);
 }
--
2.20.1
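The old and the new index calculation land on the same byte offsets: a
32-bit "pass" N of Dreg R was addressed as Sreg R*2+N, and
(R*2+N)*4 == R*8+N*4. A tiny standalone check of the little-endian
case (my sketch; the flat offsets relative to the start of the d[]
register file are an assumption to keep it self-contained):

#include <assert.h>

/* Old mapping: 32-bit pass N of Dreg R was addressed as Sreg R*2+N. */
static long old_neon_reg_offset(int reg, int n)
{
    return (reg * 2 + n) * 4;
}

/* New mapping on a little-endian host: element N, size MO_32, of Dreg R. */
static long new_neon_element_offset_le(int reg, int element)
{
    return reg * 8 + element * 4;
}

int main(void)
{
    for (int reg = 0; reg < 32; reg++) {
        for (int n = 0; n < 2; n++) {
            assert(old_neon_reg_offset(reg, n) ==
                   new_neon_element_offset_le(reg, n));
        }
    }
    return 0;
}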
From: Richard Henderson <richard.henderson@linaro.org>

This seems a bit more readable than using offsetof CPU_DoubleU.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static long neon_element_offset(int reg, int element, MemOp size)
     return neon_full_reg_offset(reg) + ofs;
 }

-static inline long vfp_reg_offset(bool dp, unsigned reg)
+/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
+static long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
-        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+        return neon_element_offset(reg, 0, MO_64);
     } else {
-        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
-        if (reg & 1) {
-            ofs += offsetof(CPU_DoubleU, l.upper);
-        } else {
-            ofs += offsetof(CPU_DoubleU, l.lower);
-        }
-        return ofs;
+        return neon_element_offset(reg >> 1, reg & 1, MO_32);
     }
 }
--
2.20.1
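The relation vfp_reg_offset() now encodes directly is that Sregs 2n and
2n+1 are the two 32-bit halves of Dreg n, which itself is one 64-bit
half of a 128-bit zreg. A little standalone table-printer for the
little-endian case (my sketch; the flat byte offsets are an assumption,
not QEMU code):

#include <stdio.h>

/* Little-endian flat byte offsets into the d[] register file:
 * Dreg n -> zregs[n >> 1].d[n & 1], i.e. offset n * 8
 * Sreg n -> 32-bit element (n & 1) of Dreg (n >> 1)
 */
static long dreg_offset(unsigned reg) { return reg * 8; }
static long sreg_offset(unsigned reg) { return dreg_offset(reg >> 1) + (reg & 1) * 4; }

int main(void)
{
    for (unsigned s = 0; s < 8; s++) {
        printf("s%-2u -> offset %2ld (within d%u)\n",
               s, sreg_offset(s), s >> 1);
    }
    return 0;
}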
From: Richard Henderson <richard.henderson@linaro.org>

Model these off the aa64 read/write_vec_element functions.
Use it within translate-neon.c.inc. The new functions do
not allocate or free temps, so this rearranges the calling
code a bit.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  26 ++++
 target/arm/translate-neon.c.inc | 256 ++++++++++++++++++++------------
 2 files changed, 183 insertions(+), 99 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
     tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
 }

+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
+{
+    long off = neon_element_offset(reg, ele, size);
+
+    switch (size) {
+    case MO_32:
+        tcg_gen_ld_i32(dest, cpu_env, off);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
+{
+    long off = neon_element_offset(reg, ele, size);
+
+    switch (size) {
+    case MO_32:
+        tcg_gen_st_i32(src, cpu_env, off);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
      * early. Since Q is 0 there are always just two passes, so instead
      * of a complicated loop over each pass we just unroll.
      */
-    tmp = neon_load_reg(a->vn, 0);
-    tmp2 = neon_load_reg(a->vn, 1);
+    tmp = tcg_temp_new_i32();
+    tmp2 = tcg_temp_new_i32();
+    tmp3 = tcg_temp_new_i32();
+
+    read_neon_element32(tmp, a->vn, 0, MO_32);
+    read_neon_element32(tmp2, a->vn, 1, MO_32);
     fn(tmp, tmp, tmp2);
-    tcg_temp_free_i32(tmp2);

-    tmp3 = neon_load_reg(a->vm, 0);
-    tmp2 = neon_load_reg(a->vm, 1);
+    read_neon_element32(tmp3, a->vm, 0, MO_32);
+    read_neon_element32(tmp2, a->vm, 1, MO_32);
     fn(tmp3, tmp3, tmp2);
-    tcg_temp_free_i32(tmp2);

-    neon_store_reg(a->vd, 0, tmp);
-    neon_store_reg(a->vd, 1, tmp3);
+    write_neon_element32(tmp, a->vd, 0, MO_32);
+    write_neon_element32(tmp3, a->vd, 1, MO_32);
+
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i32(tmp2);
+    tcg_temp_free_i32(tmp3);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
      * 2-reg-and-shift operations, size < 3 case, where the
      * helper needs to be passed cpu_env.
      */
-    TCGv_i32 constimm;
+    TCGv_i32 constimm, tmp;
     int pass;

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
      * by immediate using the variable shift operations.
      */
     constimm = tcg_const_i32(dup_const(a->size, a->shift));
+    tmp = tcg_temp_new_i32();

     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
-        TCGv_i32 tmp = neon_load_reg(a->vm, pass);
+        read_neon_element32(tmp, a->vm, pass, MO_32);
         fn(tmp, cpu_env, tmp, constimm);
-        neon_store_reg(a->vd, pass, tmp);
+        write_neon_element32(tmp, a->vd, pass, MO_32);
     }
+    tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(constimm);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
     constimm = tcg_const_i64(-a->shift);
     rm1 = tcg_temp_new_i64();
     rm2 = tcg_temp_new_i64();
+    rd = tcg_temp_new_i32();

     /* Load both inputs first to avoid potential overwrite if rm == rd */
     neon_load_reg64(rm1, a->vm);
     neon_load_reg64(rm2, a->vm + 1);

     shiftfn(rm1, rm1, constimm);
-    rd = tcg_temp_new_i32();
     narrowfn(rd, cpu_env, rm1);
-    neon_store_reg(a->vd, 0, rd);
+    write_neon_element32(rd, a->vd, 0, MO_32);

     shiftfn(rm2, rm2, constimm);
-    rd = tcg_temp_new_i32();
     narrowfn(rd, cpu_env, rm2);
-    neon_store_reg(a->vd, 1, rd);
+    write_neon_element32(rd, a->vd, 1, MO_32);

+    tcg_temp_free_i32(rd);
     tcg_temp_free_i64(rm1);
     tcg_temp_free_i64(rm2);
     tcg_temp_free_i64(constimm);
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
     constimm = tcg_const_i32(imm);

     /* Load all inputs first to avoid potential overwrite */
-    rm1 = neon_load_reg(a->vm, 0);
-    rm2 = neon_load_reg(a->vm, 1);
-    rm3 = neon_load_reg(a->vm + 1, 0);
-    rm4 = neon_load_reg(a->vm + 1, 1);
+    rm1 = tcg_temp_new_i32();
+    rm2 = tcg_temp_new_i32();
+    rm3 = tcg_temp_new_i32();
+    rm4 = tcg_temp_new_i32();
+    read_neon_element32(rm1, a->vm, 0, MO_32);
+    read_neon_element32(rm2, a->vm, 1, MO_32);
+    read_neon_element32(rm3, a->vm, 2, MO_32);
+    read_neon_element32(rm4, a->vm, 3, MO_32);
     rtmp = tcg_temp_new_i64();

     shiftfn(rm1, rm1, constimm);
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
     tcg_temp_free_i32(rm2);

     narrowfn(rm1, cpu_env, rtmp);
-    neon_store_reg(a->vd, 0, rm1);
+    write_neon_element32(rm1, a->vd, 0, MO_32);
+    tcg_temp_free_i32(rm1);

     shiftfn(rm3, rm3, constimm);
     shiftfn(rm4, rm4, constimm);
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,

     narrowfn(rm3, cpu_env, rtmp);
     tcg_temp_free_i64(rtmp);
-    neon_store_reg(a->vd, 1, rm3);
+    write_neon_element32(rm3, a->vd, 1, MO_32);
+    tcg_temp_free_i32(rm3);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
         widen_mask = dup_const(a->size + 1, widen_mask);
     }

-    rm0 = neon_load_reg(a->vm, 0);
-    rm1 = neon_load_reg(a->vm, 1);
+    rm0 = tcg_temp_new_i32();
+    rm1 = tcg_temp_new_i32();
+    read_neon_element32(rm0, a->vm, 0, MO_32);
+    read_neon_element32(rm1, a->vm, 1, MO_32);
     tmp = tcg_temp_new_i64();

     widenfn(tmp, rm0);
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     if (src1_wide) {
         neon_load_reg64(rn0_64, a->vn);
     } else {
-        TCGv_i32 tmp = neon_load_reg(a->vn, 0);
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vn, 0, MO_32);
         widenfn(rn0_64, tmp);
         tcg_temp_free_i32(tmp);
     }
-    rm = neon_load_reg(a->vm, 0);
+    rm = tcg_temp_new_i32();
+    read_neon_element32(rm, a->vm, 0, MO_32);

     widenfn(rm_64, rm);
     tcg_temp_free_i32(rm);
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     if (src1_wide) {
         neon_load_reg64(rn1_64, a->vn + 1);
     } else {
-        TCGv_i32 tmp = neon_load_reg(a->vn, 1);
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vn, 1, MO_32);
         widenfn(rn1_64, tmp);
         tcg_temp_free_i32(tmp);
     }
-    rm = neon_load_reg(a->vm, 1);
+    rm = tcg_temp_new_i32();
+    read_neon_element32(rm, a->vm, 1, MO_32);

     neon_store_reg64(rn0_64, a->vd);

@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,

     narrowfn(rd1, rn_64);

-    neon_store_reg(a->vd, 0, rd0);
-    neon_store_reg(a->vd, 1, rd1);
+    write_neon_element32(rd0, a->vd, 0, MO_32);
+    write_neon_element32(rd1, a->vd, 1, MO_32);

+    tcg_temp_free_i32(rd0);
+    tcg_temp_free_i32(rd1);
     tcg_temp_free_i64(rn_64);
     tcg_temp_free_i64(rm_64);

@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
     rd0 = tcg_temp_new_i64();
     rd1 = tcg_temp_new_i64();

-    rn = neon_load_reg(a->vn, 0);
-    rm = neon_load_reg(a->vm, 0);
+    rn = tcg_temp_new_i32();
+    rm = tcg_temp_new_i32();
+    read_neon_element32(rn, a->vn, 0, MO_32);
+    read_neon_element32(rm, a->vm, 0, MO_32);
     opfn(rd0, rn, rm);
-    tcg_temp_free_i32(rn);
-    tcg_temp_free_i32(rm);

-    rn = neon_load_reg(a->vn, 1);
-    rm = neon_load_reg(a->vm, 1);
+    read_neon_element32(rn, a->vn, 1, MO_32);
+    read_neon_element32(rm, a->vm, 1, MO_32);
     opfn(rd1, rn, rm);
     tcg_temp_free_i32(rn);
     tcg_temp_free_i32(rm);
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)

 static inline TCGv_i32 neon_get_scalar(int size, int reg)
 {
-    TCGv_i32 tmp;
-    if (size == 1) {
-        tmp = neon_load_reg(reg & 7, reg >> 4);
+    TCGv_i32 tmp = tcg_temp_new_i32();
+    if (size == MO_16) {
+        read_neon_element32(tmp, reg & 7, reg >> 4, MO_32);
         if (reg & 8) {
             gen_neon_dup_high16(tmp);
         } else {
             gen_neon_dup_low16(tmp);
         }
     } else {
-        tmp = neon_load_reg(reg & 15, reg >> 4);
+        read_neon_element32(tmp, reg & 15, reg >> 4, MO_32);
     }
     return tmp;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
      * perform an accumulation operation of that result into the
      * destination.
      */
-    TCGv_i32 scalar;
+    TCGv_i32 scalar, tmp;
     int pass;

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
     }

     scalar = neon_get_scalar(a->size, a->vm);
+    tmp = tcg_temp_new_i32();

     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
-        TCGv_i32 tmp = neon_load_reg(a->vn, pass);
+        read_neon_element32(tmp, a->vn, pass, MO_32);
         opfn(tmp, tmp, scalar);
         if (accfn) {
-            TCGv_i32 rd = neon_load_reg(a->vd, pass);
+            TCGv_i32 rd = tcg_temp_new_i32();
+            read_neon_element32(rd, a->vd, pass, MO_32);
             accfn(tmp, rd, tmp);
             tcg_temp_free_i32(rd);
         }
-        neon_store_reg(a->vd, pass, tmp);
+        write_neon_element32(tmp, a->vd, pass, MO_32);
     }
+    tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(scalar);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
      * performs a kind of fused op-then-accumulate using a helper
      * function that takes all of rd, rn and the scalar at once.
      */
-    TCGv_i32 scalar;
+    TCGv_i32 scalar, rn, rd;
     int pass;

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
     }

     scalar = neon_get_scalar(a->size, a->vm);
+    rn = tcg_temp_new_i32();
+    rd = tcg_temp_new_i32();

     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
-        TCGv_i32 rn = neon_load_reg(a->vn, pass);
-        TCGv_i32 rd = neon_load_reg(a->vd, pass);
+        read_neon_element32(rn, a->vn, pass, MO_32);
+        read_neon_element32(rd, a->vd, pass, MO_32);
         opfn(rd, cpu_env, rn, scalar, rd);
-        tcg_temp_free_i32(rn);
-        neon_store_reg(a->vd, pass, rd);
+        write_neon_element32(rd, a->vd, pass, MO_32);
     }
+    tcg_temp_free_i32(rn);
+    tcg_temp_free_i32(rd);
     tcg_temp_free_i32(scalar);

     return true;
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
     scalar = neon_get_scalar(a->size, a->vm);

     /* Load all inputs before writing any outputs, in case of overlap */
-    rn = neon_load_reg(a->vn, 0);
+    rn = tcg_temp_new_i32();
+    read_neon_element32(rn, a->vn, 0, MO_32);
     rn0_64 = tcg_temp_new_i64();
     opfn(rn0_64, rn, scalar);
-    tcg_temp_free_i32(rn);

-    rn = neon_load_reg(a->vn, 1);
+    read_neon_element32(rn, a->vn, 1, MO_32);
     rn1_64 = tcg_temp_new_i64();
     opfn(rn1_64, rn, scalar);
     tcg_temp_free_i32(rn);
@@ -XXX,XX +XXX,XX @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
         return false;
     }
     n <<= 3;
+    tmp = tcg_temp_new_i32();
     if (a->op) {
-        tmp = neon_load_reg(a->vd, 0);
+        read_neon_element32(tmp, a->vd, 0, MO_32);
     } else {
-        tmp = tcg_temp_new_i32();
         tcg_gen_movi_i32(tmp, 0);
     }
-    tmp2 = neon_load_reg(a->vm, 0);
+    tmp2 = tcg_temp_new_i32();
+    read_neon_element32(tmp2, a->vm, 0, MO_32);
     ptr1 = vfp_reg_ptr(true, a->vn);
     tmp4 = tcg_const_i32(n);
     gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4);
-    tcg_temp_free_i32(tmp);
+
     if (a->op) {
-        tmp = neon_load_reg(a->vd, 1);
+        read_neon_element32(tmp, a->vd, 1, MO_32);
     } else {
-        tmp = tcg_temp_new_i32();
         tcg_gen_movi_i32(tmp, 0);
     }
-    tmp3 = neon_load_reg(a->vm, 1);
+    tmp3 = tcg_temp_new_i32();
+    read_neon_element32(tmp3, a->vm, 1, MO_32);
     gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4);
+    tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(tmp4);
     tcg_temp_free_ptr(ptr1);
-    neon_store_reg(a->vd, 0, tmp2);
-    neon_store_reg(a->vd, 1, tmp3);
-    tcg_temp_free_i32(tmp);
+
+    write_neon_element32(tmp2, a->vd, 0, MO_32);
+    write_neon_element32(tmp3, a->vd, 1, MO_32);
+    tcg_temp_free_i32(tmp2);
+    tcg_temp_free_i32(tmp3);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
 static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
 {
     int pass, half;
+    TCGv_i32 tmp[2];

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
         return true;
     }

-    for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
-        TCGv_i32 tmp[2];
+    tmp[0] = tcg_temp_new_i32();
+    tmp[1] = tcg_temp_new_i32();

+    for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
         for (half = 0; half < 2; half++) {
-            tmp[half] = neon_load_reg(a->vm, pass * 2 + half);
+            read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
             switch (a->size) {
             case 0:
                 tcg_gen_bswap32_i32(tmp[half], tmp[half]);
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
                 g_assert_not_reached();
             }
         }
-        neon_store_reg(a->vd, pass * 2, tmp[1]);
-        neon_store_reg(a->vd, pass * 2 + 1, tmp[0]);
+        write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
+        write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
     }
+
+    tcg_temp_free_i32(tmp[0]);
+    tcg_temp_free_i32(tmp[1]);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
         rm0_64 = tcg_temp_new_i64();
         rm1_64 = tcg_temp_new_i64();
         rd_64 = tcg_temp_new_i64();
-        tmp = neon_load_reg(a->vm, pass * 2);
+
+        tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vm, pass * 2, MO_32);
         widenfn(rm0_64, tmp);
-        tcg_temp_free_i32(tmp);
-        tmp = neon_load_reg(a->vm, pass * 2 + 1);
+        read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
         widenfn(rm1_64, tmp);
         tcg_temp_free_i32(tmp);
+
         opfn(rd_64, rm0_64, rm1_64);
         tcg_temp_free_i64(rm0_64);
         tcg_temp_free_i64(rm1_64);
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
     narrowfn(rd0, cpu_env, rm);
     neon_load_reg64(rm, a->vm + 1);
     narrowfn(rd1, cpu_env, rm);
-    neon_store_reg(a->vd, 0, rd0);
-    neon_store_reg(a->vd, 1, rd1);
+    write_neon_element32(rd0, a->vd, 0, MO_32);
+    write_neon_element32(rd1, a->vd, 1, MO_32);
+    tcg_temp_free_i32(rd0);
+    tcg_temp_free_i32(rd1);
     tcg_temp_free_i64(rm);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
     }

     rd = tcg_temp_new_i64();
+    rm0 = tcg_temp_new_i32();
+    rm1 = tcg_temp_new_i32();

-    rm0 = neon_load_reg(a->vm, 0);
-    rm1 = neon_load_reg(a->vm, 1);
+    read_neon_element32(rm0, a->vm, 0, MO_32);
+    read_neon_element32(rm1, a->vm, 1, MO_32);

     widenfn(rd, rm0);
     tcg_gen_shli_i64(rd, rd, 8 << a->size);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)

     fpst = fpstatus_ptr(FPST_STD);
     ahp = get_ahp_flag();
-    tmp = neon_load_reg(a->vm, 0);
+    tmp = tcg_temp_new_i32();
498
+ read_neon_element32(tmp, a->vm, 0, MO_32);
499
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
500
- tmp2 = neon_load_reg(a->vm, 1);
501
+ tmp2 = tcg_temp_new_i32();
502
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
503
gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
504
tcg_gen_shli_i32(tmp2, tmp2, 16);
505
tcg_gen_or_i32(tmp2, tmp2, tmp);
506
- tcg_temp_free_i32(tmp);
507
- tmp = neon_load_reg(a->vm, 2);
508
+ read_neon_element32(tmp, a->vm, 2, MO_32);
509
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
510
- tmp3 = neon_load_reg(a->vm, 3);
511
- neon_store_reg(a->vd, 0, tmp2);
512
+ tmp3 = tcg_temp_new_i32();
513
+ read_neon_element32(tmp3, a->vm, 3, MO_32);
514
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
515
+ tcg_temp_free_i32(tmp2);
516
gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
517
tcg_gen_shli_i32(tmp3, tmp3, 16);
518
tcg_gen_or_i32(tmp3, tmp3, tmp);
519
- neon_store_reg(a->vd, 1, tmp3);
520
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
521
+ tcg_temp_free_i32(tmp3);
522
tcg_temp_free_i32(tmp);
523
tcg_temp_free_i32(ahp);
524
tcg_temp_free_ptr(fpst);
525
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
526
fpst = fpstatus_ptr(FPST_STD);
527
ahp = get_ahp_flag();
528
tmp3 = tcg_temp_new_i32();
529
- tmp = neon_load_reg(a->vm, 0);
530
- tmp2 = neon_load_reg(a->vm, 1);
531
+ tmp2 = tcg_temp_new_i32();
532
+ tmp = tcg_temp_new_i32();
533
+ read_neon_element32(tmp, a->vm, 0, MO_32);
534
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
535
tcg_gen_ext16u_i32(tmp3, tmp);
536
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
537
- neon_store_reg(a->vd, 0, tmp3);
538
+ write_neon_element32(tmp3, a->vd, 0, MO_32);
539
tcg_gen_shri_i32(tmp, tmp, 16);
540
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
541
- neon_store_reg(a->vd, 1, tmp);
542
- tmp3 = tcg_temp_new_i32();
543
+ write_neon_element32(tmp, a->vd, 1, MO_32);
544
+ tcg_temp_free_i32(tmp);
545
tcg_gen_ext16u_i32(tmp3, tmp2);
546
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
547
- neon_store_reg(a->vd, 2, tmp3);
548
+ write_neon_element32(tmp3, a->vd, 2, MO_32);
549
+ tcg_temp_free_i32(tmp3);
550
tcg_gen_shri_i32(tmp2, tmp2, 16);
551
gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
552
- neon_store_reg(a->vd, 3, tmp2);
553
+ write_neon_element32(tmp2, a->vd, 3, MO_32);
554
+ tcg_temp_free_i32(tmp2);
555
tcg_temp_free_i32(ahp);
556
tcg_temp_free_ptr(fpst);
557
558
@@ -XXX,XX +XXX,XX @@ DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)
559
560
static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
561
{
562
+ TCGv_i32 tmp;
563
int pass;
564
565
/* Handle a 2-reg-misc operation by iterating 32 bits at a time */
566
@@ -XXX,XX +XXX,XX @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
567
return true;
568
}
569
570
+ tmp = tcg_temp_new_i32();
571
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
572
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
573
+ read_neon_element32(tmp, a->vm, pass, MO_32);
574
fn(tmp, tmp);
575
- neon_store_reg(a->vd, pass, tmp);
576
+ write_neon_element32(tmp, a->vd, pass, MO_32);
577
}
578
+ tcg_temp_free_i32(tmp);
579
580
return true;
581
}
582
@@ -XXX,XX +XXX,XX @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a)
583
return true;
584
}
585
586
- if (a->size == 2) {
587
+ tmp = tcg_temp_new_i32();
588
+ tmp2 = tcg_temp_new_i32();
589
+ if (a->size == MO_32) {
590
for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
591
- tmp = neon_load_reg(a->vm, pass);
592
- tmp2 = neon_load_reg(a->vd, pass + 1);
593
- neon_store_reg(a->vm, pass, tmp2);
594
- neon_store_reg(a->vd, pass + 1, tmp);
595
+ read_neon_element32(tmp, a->vm, pass, MO_32);
596
+ read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
597
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
598
+ write_neon_element32(tmp, a->vd, pass + 1, MO_32);
599
}
600
} else {
601
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
602
- tmp = neon_load_reg(a->vm, pass);
603
- tmp2 = neon_load_reg(a->vd, pass);
604
- if (a->size == 0) {
605
+ read_neon_element32(tmp, a->vm, pass, MO_32);
606
+ read_neon_element32(tmp2, a->vd, pass, MO_32);
607
+ if (a->size == MO_8) {
608
gen_neon_trn_u8(tmp, tmp2);
609
} else {
610
gen_neon_trn_u16(tmp, tmp2);
611
}
612
- neon_store_reg(a->vm, pass, tmp2);
613
- neon_store_reg(a->vd, pass, tmp);
614
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
615
+ write_neon_element32(tmp, a->vd, pass, MO_32);
616
}
617
}
618
+ tcg_temp_free_i32(tmp);
619
+ tcg_temp_free_i32(tmp2);
620
return true;
621
}
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

All of the inputs to these instructions are 32 bits. Rather than
3
We can then use this to improve VMOV (scalar to gp) and
4
extend each input to 64 bits and then extract the high 32 bits of
4
VMOV (gp to scalar) so that we simply perform the memory
5
the output, use tcg_gen_muls2_i32 and other 32-bit generator functions.
5
operation that we wanted, rather than inserting or
6
extracting from a 32-bit quantity.
7
8
These were the last uses of neon_load/store_reg, so remove them.
6
9
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20190808202616.13782-7-richard.henderson@linaro.org
11
Message-id: 20201030022618.785675-7-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
14
---
12
target/arm/translate.c | 72 +++++++++++++++---------------------------
15
target/arm/translate.c | 50 +++++++++++++-----------
13
1 file changed, 26 insertions(+), 46 deletions(-)
16
target/arm/translate-vfp.c.inc | 71 +++++-----------------------------
17
2 files changed, 37 insertions(+), 84 deletions(-)
14
18
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
19
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.c
21
--- a/target/arm/translate.c
18
+++ b/target/arm/translate.c
22
+++ b/target/arm/translate.c
19
@@ -XXX,XX +XXX,XX @@ static void gen_revsh(TCGv_i32 var)
23
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
20
tcg_gen_ext16s_i32(var, var);
24
* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
21
}
25
* where 0 is the least significant end of the register.
22
26
*/
23
-/* Return (b << 32) + a. Mark inputs as dead */
27
-static long neon_element_offset(int reg, int element, MemOp size)
24
-static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
28
+static long neon_element_offset(int reg, int element, MemOp memop)
29
{
30
- int element_size = 1 << size;
31
+ int element_size = 1 << (memop & MO_SIZE);
32
int ofs = element * element_size;
33
#ifdef HOST_WORDS_BIGENDIAN
34
/*
35
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
36
}
37
}
38
39
-static TCGv_i32 neon_load_reg(int reg, int pass)
25
-{
40
-{
26
- TCGv_i64 tmp64 = tcg_temp_new_i64();
41
- TCGv_i32 tmp = tcg_temp_new_i32();
27
-
42
- tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
28
- tcg_gen_extu_i32_i64(tmp64, b);
43
- return tmp;
29
- tcg_temp_free_i32(b);
30
- tcg_gen_shli_i64(tmp64, tmp64, 32);
31
- tcg_gen_add_i64(a, tmp64, a);
32
-
33
- tcg_temp_free_i64(tmp64);
34
- return a;
35
-}
44
-}
36
-
45
-
37
-/* Return (b << 32) - a. Mark inputs as dead. */
46
-static void neon_store_reg(int reg, int pass, TCGv_i32 var)
38
-static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
39
-{
47
-{
40
- TCGv_i64 tmp64 = tcg_temp_new_i64();
48
- tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
41
-
49
- tcg_temp_free_i32(var);
42
- tcg_gen_extu_i32_i64(tmp64, b);
43
- tcg_temp_free_i32(b);
44
- tcg_gen_shli_i64(tmp64, tmp64, 32);
45
- tcg_gen_sub_i64(a, tmp64, a);
46
-
47
- tcg_temp_free_i64(tmp64);
48
- return a;
49
-}
50
-}
50
-
51
-
51
/* 32x32->64 multiply. Marks inputs as dead. */
52
static inline void neon_load_reg64(TCGv_i64 var, int reg)
52
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
53
{
53
{
54
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
54
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
55
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
55
(SMMUL, SMMLA, SMMLS) */
56
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
56
tmp = load_reg(s, rm);
57
}
57
tmp2 = load_reg(s, rs);
58
58
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
59
-static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
59
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
60
+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
60
61
{
61
if (rd != 15) {
62
- long off = neon_element_offset(reg, ele, size);
62
- tmp = load_reg(s, rd);
63
+ long off = neon_element_offset(reg, ele, memop);
63
+ tmp3 = load_reg(s, rd);
64
64
if (insn & (1 << 6)) {
65
- switch (size) {
65
- tmp64 = gen_subq_msw(tmp64, tmp);
66
- case MO_32:
66
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
67
+ switch (memop) {
67
} else {
68
+ case MO_SB:
68
- tmp64 = gen_addq_msw(tmp64, tmp);
69
+ tcg_gen_ld8s_i32(dest, cpu_env, off);
69
+ tcg_gen_add_i32(tmp, tmp, tmp3);
70
+ break;
70
}
71
+ case MO_UB:
71
+ tcg_temp_free_i32(tmp3);
72
+ tcg_gen_ld8u_i32(dest, cpu_env, off);
72
}
73
+ break;
73
if (insn & (1 << 5)) {
74
+ case MO_SW:
74
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
75
+ tcg_gen_ld16s_i32(dest, cpu_env, off);
75
+ /*
76
+ break;
76
+ * Adding 0x80000000 to the 64-bit quantity
77
+ case MO_UW:
77
+ * means that we have carry in to the high
78
+ tcg_gen_ld16u_i32(dest, cpu_env, off);
78
+ * word when the low word has the high bit set.
79
+ break;
79
+ */
80
+ case MO_UL:
80
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
81
+ case MO_SL:
81
+ tcg_gen_add_i32(tmp, tmp, tmp2);
82
tcg_gen_ld_i32(dest, cpu_env, off);
82
}
83
break;
83
- tcg_gen_shri_i64(tmp64, tmp64, 32);
84
default:
84
- tmp = tcg_temp_new_i32();
85
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
85
- tcg_gen_extrl_i64_i32(tmp, tmp64);
86
}
86
- tcg_temp_free_i64(tmp64);
87
}
87
+ tcg_temp_free_i32(tmp2);
88
88
store_reg(s, rn, tmp);
89
-static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
89
break;
90
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
90
case 0:
91
{
91
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
92
- long off = neon_element_offset(reg, ele, size);
92
}
93
+ long off = neon_element_offset(reg, ele, memop);
93
break;
94
94
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
95
- switch (size) {
95
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
96
+ switch (memop) {
96
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
97
+ case MO_8:
97
if (rs != 15) {
98
+ tcg_gen_st8_i32(src, cpu_env, off);
98
- tmp = load_reg(s, rs);
99
+ break;
99
+ tmp3 = load_reg(s, rs);
100
+ case MO_16:
100
if (insn & (1 << 20)) {
101
+ tcg_gen_st16_i32(src, cpu_env, off);
101
- tmp64 = gen_addq_msw(tmp64, tmp);
102
+ break;
102
+ tcg_gen_add_i32(tmp, tmp, tmp3);
103
case MO_32:
103
} else {
104
tcg_gen_st_i32(src, cpu_env, off);
104
- tmp64 = gen_subq_msw(tmp64, tmp);
105
break;
105
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
106
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
106
}
107
index XXXXXXX..XXXXXXX 100644
107
+ tcg_temp_free_i32(tmp3);
108
--- a/target/arm/translate-vfp.c.inc
108
}
109
+++ b/target/arm/translate-vfp.c.inc
109
if (insn & (1 << 4)) {
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
110
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
111
{
111
+ /*
112
/* VMOV scalar to general purpose register */
112
+ * Adding 0x80000000 to the 64-bit quantity
113
TCGv_i32 tmp;
113
+ * means that we have carry in to the high
114
- int pass;
114
+ * word when the low word has the high bit set.
115
- uint32_t offset;
115
+ */
116
116
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
117
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
117
+ tcg_gen_add_i32(tmp, tmp, tmp2);
118
- if (a->size == 2
118
}
119
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
119
- tcg_gen_shri_i64(tmp64, tmp64, 32);
120
+ if (a->size == MO_32
120
- tmp = tcg_temp_new_i32();
121
? !dc_isar_feature(aa32_fpsp_v2, s)
121
- tcg_gen_extrl_i64_i32(tmp, tmp64);
122
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
122
- tcg_temp_free_i64(tmp64);
123
return false;
123
+ tcg_temp_free_i32(tmp2);
124
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
124
break;
125
return false;
125
case 7: /* Unsigned sum of absolute differences. */
126
}
126
gen_helper_usad8(tmp, tmp, tmp2);
127
128
- offset = a->index << a->size;
129
- pass = extract32(offset, 2, 1);
130
- offset = extract32(offset, 0, 2) * 8;
131
-
132
if (!vfp_access_check(s)) {
133
return true;
134
}
135
136
- tmp = neon_load_reg(a->vn, pass);
137
- switch (a->size) {
138
- case 0:
139
- if (offset) {
140
- tcg_gen_shri_i32(tmp, tmp, offset);
141
- }
142
- if (a->u) {
143
- gen_uxtb(tmp);
144
- } else {
145
- gen_sxtb(tmp);
146
- }
147
- break;
148
- case 1:
149
- if (a->u) {
150
- if (offset) {
151
- tcg_gen_shri_i32(tmp, tmp, 16);
152
- } else {
153
- gen_uxth(tmp);
154
- }
155
- } else {
156
- if (offset) {
157
- tcg_gen_sari_i32(tmp, tmp, 16);
158
- } else {
159
- gen_sxth(tmp);
160
- }
161
- }
162
- break;
163
- case 2:
164
- break;
165
- }
166
+ tmp = tcg_temp_new_i32();
167
+ read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
168
store_reg(s, a->rt, tmp);
169
170
return true;
171
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
172
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
173
{
174
/* VMOV general purpose register to scalar */
175
- TCGv_i32 tmp, tmp2;
176
- int pass;
177
- uint32_t offset;
178
+ TCGv_i32 tmp;
179
180
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
181
- if (a->size == 2
182
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
183
+ if (a->size == MO_32
184
? !dc_isar_feature(aa32_fpsp_v2, s)
185
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
186
return false;
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
188
return false;
189
}
190
191
- offset = a->index << a->size;
192
- pass = extract32(offset, 2, 1);
193
- offset = extract32(offset, 0, 2) * 8;
194
-
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
199
tmp = load_reg(s, a->rt);
200
- switch (a->size) {
201
- case 0:
202
- tmp2 = neon_load_reg(a->vn, pass);
203
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
204
- tcg_temp_free_i32(tmp2);
205
- break;
206
- case 1:
207
- tmp2 = neon_load_reg(a->vn, pass);
208
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
209
- tcg_temp_free_i32(tmp2);
210
- break;
211
- case 2:
212
- break;
213
- }
214
- neon_store_reg(a->vn, pass, tmp);
215
+ write_neon_element32(tmp, a->vn, a->index, a->size);
216
+ tcg_temp_free_i32(tmp);
217
218
return true;
219
}
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Use deposit as the composite operation to merge the
3
The only uses of this function are for loading VFP
4
bits from the two inputs.
4
single-precision values, and have nothing to do with NEON.
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190808202616.13782-3-richard.henderson@linaro.org
7
Message-id: 20201030022618.785675-8-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/translate.c | 26 ++++++++++----------------
11
target/arm/translate.c | 4 +-
12
1 file changed, 10 insertions(+), 16 deletions(-)
12
target/arm/translate-vfp.c.inc | 184 ++++++++++++++++-----------------
13
2 files changed, 94 insertions(+), 94 deletions(-)
13
14
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
17
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
19
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
19
shift = (insn >> 7) & 0x1f;
20
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
20
if (insn & (1 << 6)) {
21
}
21
/* pkhtb */
22
22
- if (shift == 0)
23
-static inline void neon_load_reg32(TCGv_i32 var, int reg)
23
+ if (shift == 0) {
24
+static inline void vfp_load_reg32(TCGv_i32 var, int reg)
24
shift = 31;
25
{
25
+ }
26
tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
26
tcg_gen_sari_i32(tmp2, tmp2, shift);
27
}
27
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
28
28
- tcg_gen_ext16u_i32(tmp2, tmp2);
29
-static inline void neon_store_reg32(TCGv_i32 var, int reg)
29
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
30
+static inline void vfp_store_reg32(TCGv_i32 var, int reg)
30
} else {
31
{
31
/* pkhbt */
32
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
32
- if (shift)
33
}
33
- tcg_gen_shli_i32(tmp2, tmp2, shift);
34
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
34
- tcg_gen_ext16u_i32(tmp, tmp);
35
index XXXXXXX..XXXXXXX 100644
35
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
36
--- a/target/arm/translate-vfp.c.inc
36
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
37
+++ b/target/arm/translate-vfp.c.inc
37
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
38
}
39
frn = tcg_temp_new_i32();
39
- tcg_gen_or_i32(tmp, tmp, tmp2);
40
frm = tcg_temp_new_i32();
40
tcg_temp_free_i32(tmp2);
41
dest = tcg_temp_new_i32();
41
store_reg(s, rd, tmp);
42
- neon_load_reg32(frn, rn);
42
} else if ((insn & 0x00200020) == 0x00200000) {
43
- neon_load_reg32(frm, rm);
43
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
44
+ vfp_load_reg32(frn, rn);
44
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
45
+ vfp_load_reg32(frm, rm);
45
if (insn & (1 << 5)) {
46
switch (a->cc) {
46
/* pkhtb */
47
case 0: /* eq: Z */
47
- if (shift == 0)
48
tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
48
+ if (shift == 0) {
49
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
49
shift = 31;
50
if (sz == 1) {
50
+ }
51
tcg_gen_andi_i32(dest, dest, 0xffff);
51
tcg_gen_sari_i32(tmp2, tmp2, shift);
52
}
52
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
53
- neon_store_reg32(dest, rd);
53
- tcg_gen_ext16u_i32(tmp2, tmp2);
54
+ vfp_store_reg32(dest, rd);
54
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
55
tcg_temp_free_i32(frn);
55
} else {
56
tcg_temp_free_i32(frm);
56
/* pkhbt */
57
tcg_temp_free_i32(dest);
57
- if (shift)
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
58
- tcg_gen_shli_i32(tmp2, tmp2, shift);
59
TCGv_i32 tcg_res;
59
- tcg_gen_ext16u_i32(tmp, tmp);
60
tcg_op = tcg_temp_new_i32();
60
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
61
tcg_res = tcg_temp_new_i32();
61
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
62
- neon_load_reg32(tcg_op, rm);
62
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
63
+ vfp_load_reg32(tcg_op, rm);
64
if (sz == 1) {
65
gen_helper_rinth(tcg_res, tcg_op, fpst);
66
} else {
67
gen_helper_rints(tcg_res, tcg_op, fpst);
68
}
69
- neon_store_reg32(tcg_res, rd);
70
+ vfp_store_reg32(tcg_res, rd);
71
tcg_temp_free_i32(tcg_op);
72
tcg_temp_free_i32(tcg_res);
73
}
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
75
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
76
}
77
tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
78
- neon_store_reg32(tcg_tmp, rd);
79
+ vfp_store_reg32(tcg_tmp, rd);
80
tcg_temp_free_i32(tcg_tmp);
81
tcg_temp_free_i64(tcg_res);
82
tcg_temp_free_i64(tcg_double);
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
84
TCGv_i32 tcg_single, tcg_res;
85
tcg_single = tcg_temp_new_i32();
86
tcg_res = tcg_temp_new_i32();
87
- neon_load_reg32(tcg_single, rm);
88
+ vfp_load_reg32(tcg_single, rm);
89
if (sz == 1) {
90
if (is_signed) {
91
gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
93
gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
63
}
94
}
64
- tcg_gen_or_i32(tmp, tmp, tmp2);
95
}
65
tcg_temp_free_i32(tmp2);
96
- neon_store_reg32(tcg_res, rd);
66
store_reg(s, rd, tmp);
97
+ vfp_store_reg32(tcg_res, rd);
98
tcg_temp_free_i32(tcg_res);
99
tcg_temp_free_i32(tcg_single);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
102
if (a->l) {
103
/* VFP to general purpose register */
104
tmp = tcg_temp_new_i32();
105
- neon_load_reg32(tmp, a->vn);
106
+ vfp_load_reg32(tmp, a->vn);
107
tcg_gen_andi_i32(tmp, tmp, 0xffff);
108
store_reg(s, a->rt, tmp);
109
} else {
110
/* general purpose register to VFP */
111
tmp = load_reg(s, a->rt);
112
tcg_gen_andi_i32(tmp, tmp, 0xffff);
113
- neon_store_reg32(tmp, a->vn);
114
+ vfp_store_reg32(tmp, a->vn);
115
tcg_temp_free_i32(tmp);
116
}
117
118
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
119
if (a->l) {
120
/* VFP to general purpose register */
121
tmp = tcg_temp_new_i32();
122
- neon_load_reg32(tmp, a->vn);
123
+ vfp_load_reg32(tmp, a->vn);
124
if (a->rt == 15) {
125
/* Set the 4 flag bits in the CPSR. */
126
gen_set_nzcv(tmp);
127
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
128
} else {
129
/* general purpose register to VFP */
130
tmp = load_reg(s, a->rt);
131
- neon_store_reg32(tmp, a->vn);
132
+ vfp_store_reg32(tmp, a->vn);
133
tcg_temp_free_i32(tmp);
134
}
135
136
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
137
if (a->op) {
138
/* fpreg to gpreg */
139
tmp = tcg_temp_new_i32();
140
- neon_load_reg32(tmp, a->vm);
141
+ vfp_load_reg32(tmp, a->vm);
142
store_reg(s, a->rt, tmp);
143
tmp = tcg_temp_new_i32();
144
- neon_load_reg32(tmp, a->vm + 1);
145
+ vfp_load_reg32(tmp, a->vm + 1);
146
store_reg(s, a->rt2, tmp);
147
} else {
148
/* gpreg to fpreg */
149
tmp = load_reg(s, a->rt);
150
- neon_store_reg32(tmp, a->vm);
151
+ vfp_store_reg32(tmp, a->vm);
152
tcg_temp_free_i32(tmp);
153
tmp = load_reg(s, a->rt2);
154
- neon_store_reg32(tmp, a->vm + 1);
155
+ vfp_store_reg32(tmp, a->vm + 1);
156
tcg_temp_free_i32(tmp);
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
160
if (a->op) {
161
/* fpreg to gpreg */
162
tmp = tcg_temp_new_i32();
163
- neon_load_reg32(tmp, a->vm * 2);
164
+ vfp_load_reg32(tmp, a->vm * 2);
165
store_reg(s, a->rt, tmp);
166
tmp = tcg_temp_new_i32();
167
- neon_load_reg32(tmp, a->vm * 2 + 1);
168
+ vfp_load_reg32(tmp, a->vm * 2 + 1);
169
store_reg(s, a->rt2, tmp);
170
} else {
171
/* gpreg to fpreg */
172
tmp = load_reg(s, a->rt);
173
- neon_store_reg32(tmp, a->vm * 2);
174
+ vfp_store_reg32(tmp, a->vm * 2);
175
tcg_temp_free_i32(tmp);
176
tmp = load_reg(s, a->rt2);
177
- neon_store_reg32(tmp, a->vm * 2 + 1);
178
+ vfp_store_reg32(tmp, a->vm * 2 + 1);
179
tcg_temp_free_i32(tmp);
180
}
181
182
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
183
tmp = tcg_temp_new_i32();
184
if (a->l) {
185
gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
186
- neon_store_reg32(tmp, a->vd);
187
+ vfp_store_reg32(tmp, a->vd);
188
} else {
189
- neon_load_reg32(tmp, a->vd);
190
+ vfp_load_reg32(tmp, a->vd);
191
gen_aa32_st16(s, tmp, addr, get_mem_index(s));
192
}
193
tcg_temp_free_i32(tmp);
194
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
195
tmp = tcg_temp_new_i32();
196
if (a->l) {
197
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
198
- neon_store_reg32(tmp, a->vd);
199
+ vfp_store_reg32(tmp, a->vd);
200
} else {
201
- neon_load_reg32(tmp, a->vd);
202
+ vfp_load_reg32(tmp, a->vd);
203
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
204
}
205
tcg_temp_free_i32(tmp);
206
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
207
if (a->l) {
208
/* load */
209
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
210
- neon_store_reg32(tmp, a->vd + i);
211
+ vfp_store_reg32(tmp, a->vd + i);
67
} else {
212
} else {
213
/* store */
214
- neon_load_reg32(tmp, a->vd + i);
215
+ vfp_load_reg32(tmp, a->vd + i);
216
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
217
}
218
tcg_gen_addi_i32(addr, addr, offset);
219
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
220
fd = tcg_temp_new_i32();
221
fpst = fpstatus_ptr(FPST_FPCR);
222
223
- neon_load_reg32(f0, vn);
224
- neon_load_reg32(f1, vm);
225
+ vfp_load_reg32(f0, vn);
226
+ vfp_load_reg32(f1, vm);
227
228
for (;;) {
229
if (reads_vd) {
230
- neon_load_reg32(fd, vd);
231
+ vfp_load_reg32(fd, vd);
232
}
233
fn(fd, f0, f1, fpst);
234
- neon_store_reg32(fd, vd);
235
+ vfp_store_reg32(fd, vd);
236
237
if (veclen == 0) {
238
break;
239
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
240
veclen--;
241
vd = vfp_advance_sreg(vd, delta_d);
242
vn = vfp_advance_sreg(vn, delta_d);
243
- neon_load_reg32(f0, vn);
244
+ vfp_load_reg32(f0, vn);
245
if (delta_m) {
246
vm = vfp_advance_sreg(vm, delta_m);
247
- neon_load_reg32(f1, vm);
248
+ vfp_load_reg32(f1, vm);
249
}
250
}
251
252
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
253
fd = tcg_temp_new_i32();
254
fpst = fpstatus_ptr(FPST_FPCR_F16);
255
256
- neon_load_reg32(f0, vn);
257
- neon_load_reg32(f1, vm);
258
+ vfp_load_reg32(f0, vn);
259
+ vfp_load_reg32(f1, vm);
260
261
if (reads_vd) {
262
- neon_load_reg32(fd, vd);
263
+ vfp_load_reg32(fd, vd);
264
}
265
fn(fd, f0, f1, fpst);
266
- neon_store_reg32(fd, vd);
267
+ vfp_store_reg32(fd, vd);
268
269
tcg_temp_free_i32(f0);
270
tcg_temp_free_i32(f1);
271
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
272
f0 = tcg_temp_new_i32();
273
fd = tcg_temp_new_i32();
274
275
- neon_load_reg32(f0, vm);
276
+ vfp_load_reg32(f0, vm);
277
278
for (;;) {
279
fn(fd, f0);
280
- neon_store_reg32(fd, vd);
281
+ vfp_store_reg32(fd, vd);
282
283
if (veclen == 0) {
284
break;
285
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
286
/* single source one-many */
287
while (veclen--) {
288
vd = vfp_advance_sreg(vd, delta_d);
289
- neon_store_reg32(fd, vd);
290
+ vfp_store_reg32(fd, vd);
291
}
292
break;
293
}
294
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
295
veclen--;
296
vd = vfp_advance_sreg(vd, delta_d);
297
vm = vfp_advance_sreg(vm, delta_m);
298
- neon_load_reg32(f0, vm);
299
+ vfp_load_reg32(f0, vm);
300
}
301
302
tcg_temp_free_i32(f0);
303
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
304
}
305
306
f0 = tcg_temp_new_i32();
307
- neon_load_reg32(f0, vm);
308
+ vfp_load_reg32(f0, vm);
309
fn(f0, f0);
310
- neon_store_reg32(f0, vd);
311
+ vfp_store_reg32(f0, vd);
312
tcg_temp_free_i32(f0);
313
314
return true;
315
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
316
vm = tcg_temp_new_i32();
317
vd = tcg_temp_new_i32();
318
319
- neon_load_reg32(vn, a->vn);
320
- neon_load_reg32(vm, a->vm);
321
+ vfp_load_reg32(vn, a->vn);
322
+ vfp_load_reg32(vm, a->vm);
323
if (neg_n) {
324
/* VFNMS, VFMS */
325
gen_helper_vfp_negh(vn, vn);
326
}
327
- neon_load_reg32(vd, a->vd);
328
+ vfp_load_reg32(vd, a->vd);
329
if (neg_d) {
330
/* VFNMA, VFNMS */
331
gen_helper_vfp_negh(vd, vd);
332
}
333
fpst = fpstatus_ptr(FPST_FPCR_F16);
334
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
335
- neon_store_reg32(vd, a->vd);
336
+ vfp_store_reg32(vd, a->vd);
337
338
tcg_temp_free_ptr(fpst);
339
tcg_temp_free_i32(vn);
340
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
341
vm = tcg_temp_new_i32();
342
vd = tcg_temp_new_i32();
343
344
- neon_load_reg32(vn, a->vn);
345
- neon_load_reg32(vm, a->vm);
346
+ vfp_load_reg32(vn, a->vn);
347
+ vfp_load_reg32(vm, a->vm);
348
if (neg_n) {
349
/* VFNMS, VFMS */
350
gen_helper_vfp_negs(vn, vn);
351
}
352
- neon_load_reg32(vd, a->vd);
353
+ vfp_load_reg32(vd, a->vd);
354
if (neg_d) {
355
/* VFNMA, VFNMS */
356
gen_helper_vfp_negs(vd, vd);
357
}
358
fpst = fpstatus_ptr(FPST_FPCR);
359
gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
360
- neon_store_reg32(vd, a->vd);
361
+ vfp_store_reg32(vd, a->vd);
362
363
tcg_temp_free_ptr(fpst);
364
tcg_temp_free_i32(vn);
365
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
366
}
367
368
fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
369
- neon_store_reg32(fd, a->vd);
370
+ vfp_store_reg32(fd, a->vd);
371
tcg_temp_free_i32(fd);
372
return true;
373
}
374
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
375
fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
376
377
for (;;) {
378
- neon_store_reg32(fd, vd);
379
+ vfp_store_reg32(fd, vd);
380
381
if (veclen == 0) {
382
break;
383
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
384
vd = tcg_temp_new_i32();
385
vm = tcg_temp_new_i32();
386
387
- neon_load_reg32(vd, a->vd);
388
+ vfp_load_reg32(vd, a->vd);
389
if (a->z) {
390
tcg_gen_movi_i32(vm, 0);
391
} else {
392
- neon_load_reg32(vm, a->vm);
393
+ vfp_load_reg32(vm, a->vm);
394
}
395
396
if (a->e) {
397
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
398
vd = tcg_temp_new_i32();
399
vm = tcg_temp_new_i32();
400
401
- neon_load_reg32(vd, a->vd);
402
+ vfp_load_reg32(vd, a->vd);
403
if (a->z) {
404
tcg_gen_movi_i32(vm, 0);
405
} else {
406
- neon_load_reg32(vm, a->vm);
407
+ vfp_load_reg32(vm, a->vm);
408
}
409
410
if (a->e) {
411
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
412
/* The T bit tells us if we want the low or high 16 bits of Vm */
413
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
414
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
415
- neon_store_reg32(tmp, a->vd);
416
+ vfp_store_reg32(tmp, a->vd);
417
tcg_temp_free_i32(ahp_mode);
418
tcg_temp_free_ptr(fpst);
419
tcg_temp_free_i32(tmp);
420
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
421
ahp_mode = get_ahp_flag();
422
tmp = tcg_temp_new_i32();
423
424
- neon_load_reg32(tmp, a->vm);
425
+ vfp_load_reg32(tmp, a->vm);
426
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
427
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
428
tcg_temp_free_i32(ahp_mode);
429
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
430
}
431
432
tmp = tcg_temp_new_i32();
433
- neon_load_reg32(tmp, a->vm);
434
+ vfp_load_reg32(tmp, a->vm);
435
fpst = fpstatus_ptr(FPST_FPCR_F16);
436
gen_helper_rinth(tmp, tmp, fpst);
437
- neon_store_reg32(tmp, a->vd);
438
+ vfp_store_reg32(tmp, a->vd);
439
tcg_temp_free_ptr(fpst);
440
tcg_temp_free_i32(tmp);
441
return true;
442
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
443
}
444
445
tmp = tcg_temp_new_i32();
446
- neon_load_reg32(tmp, a->vm);
447
+ vfp_load_reg32(tmp, a->vm);
448
fpst = fpstatus_ptr(FPST_FPCR);
449
gen_helper_rints(tmp, tmp, fpst);
450
- neon_store_reg32(tmp, a->vd);
451
+ vfp_store_reg32(tmp, a->vd);
452
tcg_temp_free_ptr(fpst);
453
tcg_temp_free_i32(tmp);
454
return true;
455
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
456
}
457
458
tmp = tcg_temp_new_i32();
459
- neon_load_reg32(tmp, a->vm);
460
+ vfp_load_reg32(tmp, a->vm);
461
fpst = fpstatus_ptr(FPST_FPCR_F16);
462
tcg_rmode = tcg_const_i32(float_round_to_zero);
463
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
464
gen_helper_rinth(tmp, tmp, fpst);
465
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
466
- neon_store_reg32(tmp, a->vd);
467
+ vfp_store_reg32(tmp, a->vd);
468
tcg_temp_free_ptr(fpst);
469
tcg_temp_free_i32(tcg_rmode);
470
tcg_temp_free_i32(tmp);
471
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
472
}
473
474
tmp = tcg_temp_new_i32();
475
- neon_load_reg32(tmp, a->vm);
476
+ vfp_load_reg32(tmp, a->vm);
477
fpst = fpstatus_ptr(FPST_FPCR);
478
tcg_rmode = tcg_const_i32(float_round_to_zero);
479
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
480
gen_helper_rints(tmp, tmp, fpst);
481
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
482
- neon_store_reg32(tmp, a->vd);
483
+ vfp_store_reg32(tmp, a->vd);
484
tcg_temp_free_ptr(fpst);
485
tcg_temp_free_i32(tcg_rmode);
486
tcg_temp_free_i32(tmp);
487
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
488
}
489
490
tmp = tcg_temp_new_i32();
491
- neon_load_reg32(tmp, a->vm);
492
+ vfp_load_reg32(tmp, a->vm);
493
fpst = fpstatus_ptr(FPST_FPCR_F16);
494
gen_helper_rinth_exact(tmp, tmp, fpst);
495
- neon_store_reg32(tmp, a->vd);
496
+ vfp_store_reg32(tmp, a->vd);
497
tcg_temp_free_ptr(fpst);
498
tcg_temp_free_i32(tmp);
499
return true;
500
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
501
}
502
503
tmp = tcg_temp_new_i32();
504
- neon_load_reg32(tmp, a->vm);
505
+ vfp_load_reg32(tmp, a->vm);
506
fpst = fpstatus_ptr(FPST_FPCR);
507
gen_helper_rints_exact(tmp, tmp, fpst);
508
- neon_store_reg32(tmp, a->vd);
509
+ vfp_store_reg32(tmp, a->vd);
510
tcg_temp_free_ptr(fpst);
511
tcg_temp_free_i32(tmp);
512
return true;
513
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
514
515
vm = tcg_temp_new_i32();
516
vd = tcg_temp_new_i64();
517
- neon_load_reg32(vm, a->vm);
518
+ vfp_load_reg32(vm, a->vm);
519
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
520
neon_store_reg64(vd, a->vd);
521
tcg_temp_free_i32(vm);
522
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
523
vm = tcg_temp_new_i64();
524
neon_load_reg64(vm, a->vm);
525
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
526
- neon_store_reg32(vd, a->vd);
527
+ vfp_store_reg32(vd, a->vd);
528
tcg_temp_free_i32(vd);
529
tcg_temp_free_i64(vm);
530
return true;
531
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
532
}
533
534
vm = tcg_temp_new_i32();
535
- neon_load_reg32(vm, a->vm);
536
+ vfp_load_reg32(vm, a->vm);
537
fpst = fpstatus_ptr(FPST_FPCR_F16);
538
if (a->s) {
539
/* i32 -> f16 */
540
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
541
/* u32 -> f16 */
542
gen_helper_vfp_uitoh(vm, vm, fpst);
543
}
544
- neon_store_reg32(vm, a->vd);
545
+ vfp_store_reg32(vm, a->vd);
546
tcg_temp_free_i32(vm);
547
tcg_temp_free_ptr(fpst);
548
return true;
549
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
550
}
551
552
vm = tcg_temp_new_i32();
553
- neon_load_reg32(vm, a->vm);
554
+ vfp_load_reg32(vm, a->vm);
555
fpst = fpstatus_ptr(FPST_FPCR);
556
if (a->s) {
557
/* i32 -> f32 */
558
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
559
/* u32 -> f32 */
560
gen_helper_vfp_uitos(vm, vm, fpst);
561
}
562
- neon_store_reg32(vm, a->vd);
563
+ vfp_store_reg32(vm, a->vd);
564
tcg_temp_free_i32(vm);
565
tcg_temp_free_ptr(fpst);
566
return true;
567
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
568
569
vm = tcg_temp_new_i32();
570
vd = tcg_temp_new_i64();
571
- neon_load_reg32(vm, a->vm);
572
+ vfp_load_reg32(vm, a->vm);
573
fpst = fpstatus_ptr(FPST_FPCR);
574
if (a->s) {
575
/* i32 -> f64 */
576
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
577
vd = tcg_temp_new_i32();
578
neon_load_reg64(vm, a->vm);
579
gen_helper_vjcvt(vd, vm, cpu_env);
580
- neon_store_reg32(vd, a->vd);
581
+ vfp_store_reg32(vd, a->vd);
582
tcg_temp_free_i64(vm);
583
tcg_temp_free_i32(vd);
584
return true;
585
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
586
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
587
588
vd = tcg_temp_new_i32();
589
- neon_load_reg32(vd, a->vd);
590
+ vfp_load_reg32(vd, a->vd);
591
592
fpst = fpstatus_ptr(FPST_FPCR_F16);
593
shift = tcg_const_i32(frac_bits);
594
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
595
g_assert_not_reached();
596
}
597
598
- neon_store_reg32(vd, a->vd);
599
+ vfp_store_reg32(vd, a->vd);
600
tcg_temp_free_i32(vd);
601
tcg_temp_free_i32(shift);
602
tcg_temp_free_ptr(fpst);
603
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
604
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
605
606
vd = tcg_temp_new_i32();
607
- neon_load_reg32(vd, a->vd);
608
+ vfp_load_reg32(vd, a->vd);
609
610
fpst = fpstatus_ptr(FPST_FPCR);
611
shift = tcg_const_i32(frac_bits);
612
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
613
g_assert_not_reached();
614
}
615
616
- neon_store_reg32(vd, a->vd);
617
+ vfp_store_reg32(vd, a->vd);
618
tcg_temp_free_i32(vd);
619
tcg_temp_free_i32(shift);
620
tcg_temp_free_ptr(fpst);
621
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
622
623
fpst = fpstatus_ptr(FPST_FPCR_F16);
624
vm = tcg_temp_new_i32();
625
- neon_load_reg32(vm, a->vm);
626
+ vfp_load_reg32(vm, a->vm);
627
628
if (a->s) {
629
if (a->rz) {
630
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
631
gen_helper_vfp_touih(vm, vm, fpst);
632
}
633
}
634
- neon_store_reg32(vm, a->vd);
635
+ vfp_store_reg32(vm, a->vd);
636
tcg_temp_free_i32(vm);
637
tcg_temp_free_ptr(fpst);
638
return true;
639
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
640
641
fpst = fpstatus_ptr(FPST_FPCR);
642
vm = tcg_temp_new_i32();
643
- neon_load_reg32(vm, a->vm);
644
+ vfp_load_reg32(vm, a->vm);
645
646
if (a->s) {
647
if (a->rz) {
648
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
649
gen_helper_vfp_touis(vm, vm, fpst);
650
}
651
}
652
- neon_store_reg32(vm, a->vd);
653
+ vfp_store_reg32(vm, a->vd);
654
tcg_temp_free_i32(vm);
655
tcg_temp_free_ptr(fpst);
656
return true;
657
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
658
gen_helper_vfp_touid(vd, vm, fpst);
659
}
660
}
661
- neon_store_reg32(vd, a->vd);
662
+ vfp_store_reg32(vd, a->vd);
663
tcg_temp_free_i32(vd);
664
tcg_temp_free_i64(vm);
665
tcg_temp_free_ptr(fpst);
666
@@ -XXX,XX +XXX,XX @@ static bool trans_VINS(DisasContext *s, arg_VINS *a)
667
/* Insert low half of Vm into high half of Vd */
668
rm = tcg_temp_new_i32();
669
rd = tcg_temp_new_i32();
670
- neon_load_reg32(rm, a->vm);
671
- neon_load_reg32(rd, a->vd);
672
+ vfp_load_reg32(rm, a->vm);
673
+ vfp_load_reg32(rd, a->vd);
674
tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
675
- neon_store_reg32(rd, a->vd);
676
+ vfp_store_reg32(rd, a->vd);
677
tcg_temp_free_i32(rm);
678
tcg_temp_free_i32(rd);
679
return true;
680
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
681
682
/* Set Vd to high half of Vm */
683
rm = tcg_temp_new_i32();
684
- neon_load_reg32(rm, a->vm);
685
+ vfp_load_reg32(rm, a->vm);
686
tcg_gen_shri_i32(rm, rm, 16);
687
- neon_store_reg32(rm, a->vd);
688
+ vfp_store_reg32(rm, a->vd);
689
tcg_temp_free_i32(rm);
690
return true;
691
}
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Rotate is the more compact and obvious way to swap 16-bit
3
Replace all uses of neon_load/store_reg64 within translate-neon.c.inc.
4
elements of a 32-bit word.
5
4
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190808202616.13782-6-richard.henderson@linaro.org
6
Message-id: 20201030022618.785675-9-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
9
---
11
target/arm/translate.c | 6 +-----
10
target/arm/translate.c | 26 +++++++++
12
1 file changed, 1 insertion(+), 5 deletions(-)
11
target/arm/translate-neon.c.inc | 94 ++++++++++++++++-----------------
12
2 files changed, 73 insertions(+), 47 deletions(-)
13
13
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
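
The halfword swap described in the commit message above reduces to a single rotate; a minimal sketch, with a hypothetical helper name:

static void gen_swap_halfwords(TCGv_i32 var)
{
    /* (var << 16) | (var >> 16) is simply a rotate by 16 in either direction */
    tcg_gen_rotri_i32(var, var, 16);
}
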
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
16
--- a/target/arm/translate.c
17
+++ b/target/arm/translate.c
17
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
18
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
19
/* Swap low and high halfwords. */
19
}
20
static void gen_swap_half(TCGv_i32 var)
20
}
21
22
+static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
23
+{
24
+ long off = neon_element_offset(reg, ele, memop);
25
+
26
+ switch (memop) {
27
+ case MO_Q:
28
+ tcg_gen_ld_i64(dest, cpu_env, off);
29
+ break;
30
+ default:
31
+ g_assert_not_reached();
32
+ }
33
+}
34
+
35
static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
21
{
36
{
22
- TCGv_i32 tmp = tcg_temp_new_i32();
37
long off = neon_element_offset(reg, ele, memop);
23
- tcg_gen_shri_i32(tmp, var, 16);
38
@@ -XXX,XX +XXX,XX @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
24
- tcg_gen_shli_i32(var, var, 16);
39
}
25
- tcg_gen_or_i32(var, var, tmp);
26
- tcg_temp_free_i32(tmp);
27
+ tcg_gen_rotri_i32(var, var, 16);
28
}
40
}
29
41
30
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
42
+static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
43
+{
44
+ long off = neon_element_offset(reg, ele, memop);
45
+
46
+ switch (memop) {
47
+ case MO_64:
48
+ tcg_gen_st_i64(src, cpu_env, off);
49
+ break;
50
+ default:
51
+ g_assert_not_reached();
52
+ }
53
+}
54
+
55
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
56
{
57
TCGv_ptr ret = tcg_temp_new_ptr();
58
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/translate-neon.c.inc
61
+++ b/target/arm/translate-neon.c.inc
62
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
63
for (pass = 0; pass < a->q + 1; pass++) {
64
TCGv_i64 tmp = tcg_temp_new_i64();
65
66
- neon_load_reg64(tmp, a->vm + pass);
67
+ read_neon_element64(tmp, a->vm, pass, MO_64);
68
fn(tmp, cpu_env, tmp, constimm);
69
- neon_store_reg64(tmp, a->vd + pass);
70
+ write_neon_element64(tmp, a->vd, pass, MO_64);
71
tcg_temp_free_i64(tmp);
72
}
73
tcg_temp_free_i64(constimm);
74
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
75
rd = tcg_temp_new_i32();
76
77
/* Load both inputs first to avoid potential overwrite if rm == rd */
78
- neon_load_reg64(rm1, a->vm);
79
- neon_load_reg64(rm2, a->vm + 1);
80
+ read_neon_element64(rm1, a->vm, 0, MO_64);
81
+ read_neon_element64(rm2, a->vm, 1, MO_64);
82
83
shiftfn(rm1, rm1, constimm);
84
narrowfn(rd, cpu_env, rm1);
85
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
86
tcg_gen_shli_i64(tmp, tmp, a->shift);
87
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
88
}
89
- neon_store_reg64(tmp, a->vd);
90
+ write_neon_element64(tmp, a->vd, 0, MO_64);
91
92
widenfn(tmp, rm1);
93
tcg_temp_free_i32(rm1);
94
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
95
tcg_gen_shli_i64(tmp, tmp, a->shift);
96
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
97
}
98
- neon_store_reg64(tmp, a->vd + 1);
99
+ write_neon_element64(tmp, a->vd, 1, MO_64);
100
tcg_temp_free_i64(tmp);
101
return true;
102
}
103
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
104
rm_64 = tcg_temp_new_i64();
105
106
if (src1_wide) {
107
- neon_load_reg64(rn0_64, a->vn);
108
+ read_neon_element64(rn0_64, a->vn, 0, MO_64);
109
} else {
110
TCGv_i32 tmp = tcg_temp_new_i32();
111
read_neon_element32(tmp, a->vn, 0, MO_32);
112
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
113
* avoid incorrect results if a narrow input overlaps with the result.
114
*/
115
if (src1_wide) {
116
- neon_load_reg64(rn1_64, a->vn + 1);
117
+ read_neon_element64(rn1_64, a->vn, 1, MO_64);
118
} else {
119
TCGv_i32 tmp = tcg_temp_new_i32();
120
read_neon_element32(tmp, a->vn, 1, MO_32);
121
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
122
rm = tcg_temp_new_i32();
123
read_neon_element32(rm, a->vm, 1, MO_32);
124
125
- neon_store_reg64(rn0_64, a->vd);
126
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
127
128
widenfn(rm_64, rm);
129
tcg_temp_free_i32(rm);
130
opfn(rn1_64, rn1_64, rm_64);
131
- neon_store_reg64(rn1_64, a->vd + 1);
132
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
133
134
tcg_temp_free_i64(rn0_64);
135
tcg_temp_free_i64(rn1_64);
136
@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
137
rd0 = tcg_temp_new_i32();
138
rd1 = tcg_temp_new_i32();
139
140
- neon_load_reg64(rn_64, a->vn);
141
- neon_load_reg64(rm_64, a->vm);
142
+ read_neon_element64(rn_64, a->vn, 0, MO_64);
143
+ read_neon_element64(rm_64, a->vm, 0, MO_64);
144
145
opfn(rn_64, rn_64, rm_64);
146
147
narrowfn(rd0, rn_64);
148
149
- neon_load_reg64(rn_64, a->vn + 1);
150
- neon_load_reg64(rm_64, a->vm + 1);
151
+ read_neon_element64(rn_64, a->vn, 1, MO_64);
152
+ read_neon_element64(rm_64, a->vm, 1, MO_64);
153
154
opfn(rn_64, rn_64, rm_64);
155
156
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
157
/* Don't store results until after all loads: they might overlap */
158
if (accfn) {
159
tmp = tcg_temp_new_i64();
160
- neon_load_reg64(tmp, a->vd);
161
+ read_neon_element64(tmp, a->vd, 0, MO_64);
162
accfn(tmp, tmp, rd0);
163
- neon_store_reg64(tmp, a->vd);
164
- neon_load_reg64(tmp, a->vd + 1);
165
+ write_neon_element64(tmp, a->vd, 0, MO_64);
166
+ read_neon_element64(tmp, a->vd, 1, MO_64);
167
accfn(tmp, tmp, rd1);
168
- neon_store_reg64(tmp, a->vd + 1);
169
+ write_neon_element64(tmp, a->vd, 1, MO_64);
170
tcg_temp_free_i64(tmp);
171
} else {
172
- neon_store_reg64(rd0, a->vd);
173
- neon_store_reg64(rd1, a->vd + 1);
174
+ write_neon_element64(rd0, a->vd, 0, MO_64);
175
+ write_neon_element64(rd1, a->vd, 1, MO_64);
176
}
177
178
tcg_temp_free_i64(rd0);
179
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
180
181
if (accfn) {
182
TCGv_i64 t64 = tcg_temp_new_i64();
183
- neon_load_reg64(t64, a->vd);
184
+ read_neon_element64(t64, a->vd, 0, MO_64);
185
accfn(t64, t64, rn0_64);
186
- neon_store_reg64(t64, a->vd);
187
- neon_load_reg64(t64, a->vd + 1);
188
+ write_neon_element64(t64, a->vd, 0, MO_64);
189
+ read_neon_element64(t64, a->vd, 1, MO_64);
190
accfn(t64, t64, rn1_64);
191
- neon_store_reg64(t64, a->vd + 1);
192
+ write_neon_element64(t64, a->vd, 1, MO_64);
193
tcg_temp_free_i64(t64);
194
} else {
195
- neon_store_reg64(rn0_64, a->vd);
196
- neon_store_reg64(rn1_64, a->vd + 1);
197
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
198
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
199
}
200
tcg_temp_free_i64(rn0_64);
201
tcg_temp_free_i64(rn1_64);
202
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
203
right = tcg_temp_new_i64();
204
dest = tcg_temp_new_i64();
205
206
- neon_load_reg64(right, a->vn);
207
- neon_load_reg64(left, a->vm);
208
+ read_neon_element64(right, a->vn, 0, MO_64);
209
+ read_neon_element64(left, a->vm, 0, MO_64);
210
tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
211
- neon_store_reg64(dest, a->vd);
212
+ write_neon_element64(dest, a->vd, 0, MO_64);
213
214
tcg_temp_free_i64(left);
215
tcg_temp_free_i64(right);
216
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
217
destright = tcg_temp_new_i64();
218
219
if (a->imm < 8) {
220
- neon_load_reg64(right, a->vn);
221
- neon_load_reg64(middle, a->vn + 1);
222
+ read_neon_element64(right, a->vn, 0, MO_64);
223
+ read_neon_element64(middle, a->vn, 1, MO_64);
224
tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
225
- neon_load_reg64(left, a->vm);
226
+ read_neon_element64(left, a->vm, 0, MO_64);
227
tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
228
} else {
229
- neon_load_reg64(right, a->vn + 1);
230
- neon_load_reg64(middle, a->vm);
231
+ read_neon_element64(right, a->vn, 1, MO_64);
232
+ read_neon_element64(middle, a->vm, 0, MO_64);
233
tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
234
- neon_load_reg64(left, a->vm + 1);
235
+ read_neon_element64(left, a->vm, 1, MO_64);
236
tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
237
}
238
239
- neon_store_reg64(destright, a->vd);
240
- neon_store_reg64(destleft, a->vd + 1);
241
+ write_neon_element64(destright, a->vd, 0, MO_64);
242
+ write_neon_element64(destleft, a->vd, 1, MO_64);
243
244
tcg_temp_free_i64(destright);
245
tcg_temp_free_i64(destleft);
246
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
247
248
if (accfn) {
249
TCGv_i64 tmp64 = tcg_temp_new_i64();
250
- neon_load_reg64(tmp64, a->vd + pass);
251
+ read_neon_element64(tmp64, a->vd, pass, MO_64);
252
accfn(rd_64, tmp64, rd_64);
253
tcg_temp_free_i64(tmp64);
254
}
255
- neon_store_reg64(rd_64, a->vd + pass);
256
+ write_neon_element64(rd_64, a->vd, pass, MO_64);
257
tcg_temp_free_i64(rd_64);
258
}
259
return true;
260
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
261
rd0 = tcg_temp_new_i32();
262
rd1 = tcg_temp_new_i32();
263
264
- neon_load_reg64(rm, a->vm);
265
+ read_neon_element64(rm, a->vm, 0, MO_64);
266
narrowfn(rd0, cpu_env, rm);
267
- neon_load_reg64(rm, a->vm + 1);
268
+ read_neon_element64(rm, a->vm, 1, MO_64);
269
narrowfn(rd1, cpu_env, rm);
270
write_neon_element32(rd0, a->vd, 0, MO_32);
271
write_neon_element32(rd1, a->vd, 1, MO_32);
272
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
273
274
widenfn(rd, rm0);
275
tcg_gen_shli_i64(rd, rd, 8 << a->size);
276
- neon_store_reg64(rd, a->vd);
277
+ write_neon_element64(rd, a->vd, 0, MO_64);
278
widenfn(rd, rm1);
279
tcg_gen_shli_i64(rd, rd, 8 << a->size);
280
- neon_store_reg64(rd, a->vd + 1);
281
+ write_neon_element64(rd, a->vd, 1, MO_64);
282
283
tcg_temp_free_i64(rd);
284
tcg_temp_free_i32(rm0);
285
@@ -XXX,XX +XXX,XX @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a)
286
rm = tcg_temp_new_i64();
287
rd = tcg_temp_new_i64();
288
for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
289
- neon_load_reg64(rm, a->vm + pass);
290
- neon_load_reg64(rd, a->vd + pass);
291
- neon_store_reg64(rm, a->vd + pass);
292
- neon_store_reg64(rd, a->vm + pass);
293
+ read_neon_element64(rm, a->vm, pass, MO_64);
294
+ read_neon_element64(rd, a->vd, pass, MO_64);
295
+ write_neon_element64(rm, a->vd, pass, MO_64);
296
+ write_neon_element64(rd, a->vm, pass, MO_64);
297
}
298
tcg_temp_free_i64(rm);
299
tcg_temp_free_i64(rd);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Extract is a compact combination of shift + and.
3
The only uses of this function are for loading VFP
4
double-precision values, and have nothing to do with NEON.
4
5
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20190808202616.13782-2-richard.henderson@linaro.org
7
Message-id: 20201030022618.785675-10-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
---
10
target/arm/translate.c | 9 +--------
11
target/arm/translate.c | 8 ++--
11
1 file changed, 1 insertion(+), 8 deletions(-)
12
target/arm/translate-vfp.c.inc | 84 +++++++++++++++++-----------------
13
2 files changed, 46 insertions(+), 46 deletions(-)
12
14
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
19
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
18
20
}
19
static void shifter_out_im(TCGv_i32 var, int shift)
21
}
22
23
-static inline void neon_load_reg64(TCGv_i64 var, int reg)
24
+static inline void vfp_load_reg64(TCGv_i64 var, int reg)
20
{
25
{
21
- if (shift == 0) {
26
- tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
22
- tcg_gen_andi_i32(cpu_CF, var, 1);
27
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
23
- } else {
24
- tcg_gen_shri_i32(cpu_CF, var, shift);
25
- if (shift != 31) {
26
- tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
27
- }
28
- }
29
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
30
}
28
}
31
29
32
/* Shift by immediate. Includes special handling for shift == 0. */
30
-static inline void neon_store_reg64(TCGv_i64 var, int reg)
31
+static inline void vfp_store_reg64(TCGv_i64 var, int reg)
32
{
33
- tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
34
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
35
}
36
37
static inline void vfp_load_reg32(TCGv_i32 var, int reg)
38
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/translate-vfp.c.inc
41
+++ b/target/arm/translate-vfp.c.inc
42
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
43
tcg_gen_ext_i32_i64(nf, cpu_NF);
44
tcg_gen_ext_i32_i64(vf, cpu_VF);
45
46
- neon_load_reg64(frn, rn);
47
- neon_load_reg64(frm, rm);
48
+ vfp_load_reg64(frn, rn);
49
+ vfp_load_reg64(frm, rm);
50
switch (a->cc) {
51
case 0: /* eq: Z */
52
tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
53
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
54
tcg_temp_free_i64(tmp);
55
break;
56
}
57
- neon_store_reg64(dest, rd);
58
+ vfp_store_reg64(dest, rd);
59
tcg_temp_free_i64(frn);
60
tcg_temp_free_i64(frm);
61
tcg_temp_free_i64(dest);
62
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
63
TCGv_i64 tcg_res;
64
tcg_op = tcg_temp_new_i64();
65
tcg_res = tcg_temp_new_i64();
66
- neon_load_reg64(tcg_op, rm);
67
+ vfp_load_reg64(tcg_op, rm);
68
gen_helper_rintd(tcg_res, tcg_op, fpst);
69
- neon_store_reg64(tcg_res, rd);
70
+ vfp_store_reg64(tcg_res, rd);
71
tcg_temp_free_i64(tcg_op);
72
tcg_temp_free_i64(tcg_res);
73
} else {
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
75
tcg_double = tcg_temp_new_i64();
76
tcg_res = tcg_temp_new_i64();
77
tcg_tmp = tcg_temp_new_i32();
78
- neon_load_reg64(tcg_double, rm);
79
+ vfp_load_reg64(tcg_double, rm);
80
if (is_signed) {
81
gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
82
} else {
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
84
tmp = tcg_temp_new_i64();
85
if (a->l) {
86
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
87
- neon_store_reg64(tmp, a->vd);
88
+ vfp_store_reg64(tmp, a->vd);
89
} else {
90
- neon_load_reg64(tmp, a->vd);
91
+ vfp_load_reg64(tmp, a->vd);
92
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
93
}
94
tcg_temp_free_i64(tmp);
95
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
96
if (a->l) {
97
/* load */
98
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
99
- neon_store_reg64(tmp, a->vd + i);
100
+ vfp_store_reg64(tmp, a->vd + i);
101
} else {
102
/* store */
103
- neon_load_reg64(tmp, a->vd + i);
104
+ vfp_load_reg64(tmp, a->vd + i);
105
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
106
}
107
tcg_gen_addi_i32(addr, addr, offset);
108
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
109
fd = tcg_temp_new_i64();
110
fpst = fpstatus_ptr(FPST_FPCR);
111
112
- neon_load_reg64(f0, vn);
113
- neon_load_reg64(f1, vm);
114
+ vfp_load_reg64(f0, vn);
115
+ vfp_load_reg64(f1, vm);
116
117
for (;;) {
118
if (reads_vd) {
119
- neon_load_reg64(fd, vd);
120
+ vfp_load_reg64(fd, vd);
121
}
122
fn(fd, f0, f1, fpst);
123
- neon_store_reg64(fd, vd);
124
+ vfp_store_reg64(fd, vd);
125
126
if (veclen == 0) {
127
break;
128
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
129
veclen--;
130
vd = vfp_advance_dreg(vd, delta_d);
131
vn = vfp_advance_dreg(vn, delta_d);
132
- neon_load_reg64(f0, vn);
133
+ vfp_load_reg64(f0, vn);
134
if (delta_m) {
135
vm = vfp_advance_dreg(vm, delta_m);
136
- neon_load_reg64(f1, vm);
137
+ vfp_load_reg64(f1, vm);
138
}
139
}
140
141
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
142
f0 = tcg_temp_new_i64();
143
fd = tcg_temp_new_i64();
144
145
- neon_load_reg64(f0, vm);
146
+ vfp_load_reg64(f0, vm);
147
148
for (;;) {
149
fn(fd, f0);
150
- neon_store_reg64(fd, vd);
151
+ vfp_store_reg64(fd, vd);
152
153
if (veclen == 0) {
154
break;
155
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
156
/* single source one-many */
157
while (veclen--) {
158
vd = vfp_advance_dreg(vd, delta_d);
159
- neon_store_reg64(fd, vd);
160
+ vfp_store_reg64(fd, vd);
161
}
162
break;
163
}
164
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
165
veclen--;
166
vd = vfp_advance_dreg(vd, delta_d);
167
vd = vfp_advance_dreg(vm, delta_m);
168
- neon_load_reg64(f0, vm);
169
+ vfp_load_reg64(f0, vm);
170
}
171
172
tcg_temp_free_i64(f0);
173
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
174
vm = tcg_temp_new_i64();
175
vd = tcg_temp_new_i64();
176
177
- neon_load_reg64(vn, a->vn);
178
- neon_load_reg64(vm, a->vm);
179
+ vfp_load_reg64(vn, a->vn);
180
+ vfp_load_reg64(vm, a->vm);
181
if (neg_n) {
182
/* VFNMS, VFMS */
183
gen_helper_vfp_negd(vn, vn);
184
}
185
- neon_load_reg64(vd, a->vd);
186
+ vfp_load_reg64(vd, a->vd);
187
if (neg_d) {
188
/* VFNMA, VFNMS */
189
gen_helper_vfp_negd(vd, vd);
190
}
191
fpst = fpstatus_ptr(FPST_FPCR);
192
gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
193
- neon_store_reg64(vd, a->vd);
194
+ vfp_store_reg64(vd, a->vd);
195
196
tcg_temp_free_ptr(fpst);
197
tcg_temp_free_i64(vn);
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
199
fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
200
201
for (;;) {
202
- neon_store_reg64(fd, vd);
203
+ vfp_store_reg64(fd, vd);
204
205
if (veclen == 0) {
206
break;
207
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
208
vd = tcg_temp_new_i64();
209
vm = tcg_temp_new_i64();
210
211
- neon_load_reg64(vd, a->vd);
212
+ vfp_load_reg64(vd, a->vd);
213
if (a->z) {
214
tcg_gen_movi_i64(vm, 0);
215
} else {
216
- neon_load_reg64(vm, a->vm);
217
+ vfp_load_reg64(vm, a->vm);
218
}
219
220
if (a->e) {
221
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
222
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
223
vd = tcg_temp_new_i64();
224
gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
225
- neon_store_reg64(vd, a->vd);
226
+ vfp_store_reg64(vd, a->vd);
227
tcg_temp_free_i32(ahp_mode);
228
tcg_temp_free_ptr(fpst);
229
tcg_temp_free_i32(tmp);
230
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
231
tmp = tcg_temp_new_i32();
232
vm = tcg_temp_new_i64();
233
234
- neon_load_reg64(vm, a->vm);
235
+ vfp_load_reg64(vm, a->vm);
236
gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
237
tcg_temp_free_i64(vm);
238
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
239
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
240
}
241
242
tmp = tcg_temp_new_i64();
243
- neon_load_reg64(tmp, a->vm);
244
+ vfp_load_reg64(tmp, a->vm);
245
fpst = fpstatus_ptr(FPST_FPCR);
246
gen_helper_rintd(tmp, tmp, fpst);
247
- neon_store_reg64(tmp, a->vd);
248
+ vfp_store_reg64(tmp, a->vd);
249
tcg_temp_free_ptr(fpst);
250
tcg_temp_free_i64(tmp);
251
return true;
252
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
253
}
254
255
tmp = tcg_temp_new_i64();
256
- neon_load_reg64(tmp, a->vm);
257
+ vfp_load_reg64(tmp, a->vm);
258
fpst = fpstatus_ptr(FPST_FPCR);
259
tcg_rmode = tcg_const_i32(float_round_to_zero);
260
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
261
gen_helper_rintd(tmp, tmp, fpst);
262
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
263
- neon_store_reg64(tmp, a->vd);
264
+ vfp_store_reg64(tmp, a->vd);
265
tcg_temp_free_ptr(fpst);
266
tcg_temp_free_i64(tmp);
267
tcg_temp_free_i32(tcg_rmode);
268
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
269
}
270
271
tmp = tcg_temp_new_i64();
272
- neon_load_reg64(tmp, a->vm);
273
+ vfp_load_reg64(tmp, a->vm);
274
fpst = fpstatus_ptr(FPST_FPCR);
275
gen_helper_rintd_exact(tmp, tmp, fpst);
276
- neon_store_reg64(tmp, a->vd);
277
+ vfp_store_reg64(tmp, a->vd);
278
tcg_temp_free_ptr(fpst);
279
tcg_temp_free_i64(tmp);
280
return true;
281
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
282
vd = tcg_temp_new_i64();
283
vfp_load_reg32(vm, a->vm);
284
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
285
- neon_store_reg64(vd, a->vd);
286
+ vfp_store_reg64(vd, a->vd);
287
tcg_temp_free_i32(vm);
288
tcg_temp_free_i64(vd);
289
return true;
290
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
291
292
vd = tcg_temp_new_i32();
293
vm = tcg_temp_new_i64();
294
- neon_load_reg64(vm, a->vm);
295
+ vfp_load_reg64(vm, a->vm);
296
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
297
vfp_store_reg32(vd, a->vd);
298
tcg_temp_free_i32(vd);
299
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
300
/* u32 -> f64 */
301
gen_helper_vfp_uitod(vd, vm, fpst);
302
}
303
- neon_store_reg64(vd, a->vd);
304
+ vfp_store_reg64(vd, a->vd);
305
tcg_temp_free_i32(vm);
306
tcg_temp_free_i64(vd);
307
tcg_temp_free_ptr(fpst);
308
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
309
310
vm = tcg_temp_new_i64();
311
vd = tcg_temp_new_i32();
312
- neon_load_reg64(vm, a->vm);
313
+ vfp_load_reg64(vm, a->vm);
314
gen_helper_vjcvt(vd, vm, cpu_env);
315
vfp_store_reg32(vd, a->vd);
316
tcg_temp_free_i64(vm);
317
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
318
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
319
320
vd = tcg_temp_new_i64();
321
- neon_load_reg64(vd, a->vd);
322
+ vfp_load_reg64(vd, a->vd);
323
324
fpst = fpstatus_ptr(FPST_FPCR);
325
shift = tcg_const_i32(frac_bits);
326
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
327
g_assert_not_reached();
328
}
329
330
- neon_store_reg64(vd, a->vd);
331
+ vfp_store_reg64(vd, a->vd);
332
tcg_temp_free_i64(vd);
333
tcg_temp_free_i32(shift);
334
tcg_temp_free_ptr(fpst);
335
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
336
fpst = fpstatus_ptr(FPST_FPCR);
337
vm = tcg_temp_new_i64();
338
vd = tcg_temp_new_i32();
339
- neon_load_reg64(vm, a->vm);
340
+ vfp_load_reg64(vm, a->vm);
341
342
if (a->s) {
343
if (a->rz) {
33
--
344
--
34
2.20.1
345
2.20.1
35
346
36
347
From: Richard Henderson <richard.henderson@linaro.org>

In both cases, we can sink the write-back and perform
the accumulate into the normal destination temps.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-11-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-neon.c.inc | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
     if (accfn) {
         tmp = tcg_temp_new_i64();
         read_neon_element64(tmp, a->vd, 0, MO_64);
-        accfn(tmp, tmp, rd0);
-        write_neon_element64(tmp, a->vd, 0, MO_64);
+        accfn(rd0, tmp, rd0);
         read_neon_element64(tmp, a->vd, 1, MO_64);
-        accfn(tmp, tmp, rd1);
-        write_neon_element64(tmp, a->vd, 1, MO_64);
+        accfn(rd1, tmp, rd1);
         tcg_temp_free_i64(tmp);
-    } else {
-        write_neon_element64(rd0, a->vd, 0, MO_64);
-        write_neon_element64(rd1, a->vd, 1, MO_64);
     }
 
+    write_neon_element64(rd0, a->vd, 0, MO_64);
+    write_neon_element64(rd1, a->vd, 1, MO_64);
     tcg_temp_free_i64(rd0);
     tcg_temp_free_i64(rd1);
 
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
     if (accfn) {
         TCGv_i64 t64 = tcg_temp_new_i64();
         read_neon_element64(t64, a->vd, 0, MO_64);
-        accfn(t64, t64, rn0_64);
-        write_neon_element64(t64, a->vd, 0, MO_64);
+        accfn(rn0_64, t64, rn0_64);
         read_neon_element64(t64, a->vd, 1, MO_64);
-        accfn(t64, t64, rn1_64);
-        write_neon_element64(t64, a->vd, 1, MO_64);
+        accfn(rn1_64, t64, rn1_64);
         tcg_temp_free_i64(t64);
-    } else {
-        write_neon_element64(rn0_64, a->vd, 0, MO_64);
-        write_neon_element64(rn1_64, a->vd, 1, MO_64);
     }
+
+    write_neon_element64(rn0_64, a->vd, 0, MO_64);
+    write_neon_element64(rn1_64, a->vd, 1, MO_64);
     tcg_temp_free_i64(rn0_64);
     tcg_temp_free_i64(rn1_64);
     return true;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

We can use proper widening loads to extend 32-bit inputs,
and skip the "widenfn" step.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-12-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  6 +++
 target/arm/translate-neon.c.inc | 66 ++++++++++++++++++---------------
 2 files changed, 43 insertions(+), 29 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
     long off = neon_element_offset(reg, ele, memop);
 
     switch (memop) {
+    case MO_SL:
+        tcg_gen_ld32s_i64(dest, cpu_env, off);
+        break;
+    case MO_UL:
+        tcg_gen_ld32u_i64(dest, cpu_env, off);
+        break;
     case MO_Q:
         tcg_gen_ld_i64(dest, cpu_env, off);
         break;
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
 static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
                            NeonGenWidenFn *widenfn,
                            NeonGenTwo64OpFn *opfn,
-                           bool src1_wide)
+                           int src1_mop, int src2_mop)
 {
     /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VAADW/VSUBW) */
     TCGv_i64 rn0_64, rn1_64, rm_64;
-    TCGv_i32 rm;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
         return false;
     }
 
-    if (!widenfn || !opfn) {
+    if (!opfn) {
         /* size == 3 case, which is an entirely different insn group */
         return false;
     }
 
-    if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
+    if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
         return false;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     rn1_64 = tcg_temp_new_i64();
     rm_64 = tcg_temp_new_i64();
 
-    if (src1_wide) {
-        read_neon_element64(rn0_64, a->vn, 0, MO_64);
+    if (src1_mop >= 0) {
+        read_neon_element64(rn0_64, a->vn, 0, src1_mop);
     } else {
         TCGv_i32 tmp = tcg_temp_new_i32();
         read_neon_element32(tmp, a->vn, 0, MO_32);
         widenfn(rn0_64, tmp);
         tcg_temp_free_i32(tmp);
     }
-    rm = tcg_temp_new_i32();
-    read_neon_element32(rm, a->vm, 0, MO_32);
+    if (src2_mop >= 0) {
+        read_neon_element64(rm_64, a->vm, 0, src2_mop);
+    } else {
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vm, 0, MO_32);
+        widenfn(rm_64, tmp);
+        tcg_temp_free_i32(tmp);
+    }
 
-    widenfn(rm_64, rm);
-    tcg_temp_free_i32(rm);
     opfn(rn0_64, rn0_64, rm_64);
 
     /*
      * Load second pass inputs before storing the first pass result, to
      * avoid incorrect results if a narrow input overlaps with the result.
      */
-    if (src1_wide) {
-        read_neon_element64(rn1_64, a->vn, 1, MO_64);
+    if (src1_mop >= 0) {
+        read_neon_element64(rn1_64, a->vn, 1, src1_mop);
     } else {
         TCGv_i32 tmp = tcg_temp_new_i32();
         read_neon_element32(tmp, a->vn, 1, MO_32);
         widenfn(rn1_64, tmp);
         tcg_temp_free_i32(tmp);
     }
-    rm = tcg_temp_new_i32();
-    read_neon_element32(rm, a->vm, 1, MO_32);
+    if (src2_mop >= 0) {
+        read_neon_element64(rm_64, a->vm, 1, src2_mop);
+    } else {
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vm, 1, MO_32);
+        widenfn(rm_64, tmp);
+        tcg_temp_free_i32(tmp);
+    }
 
     write_neon_element64(rn0_64, a->vd, 0, MO_64);
 
-    widenfn(rm_64, rm);
-    tcg_temp_free_i32(rm);
     opfn(rn1_64, rn1_64, rm_64);
     write_neon_element64(rn1_64, a->vd, 1, MO_64);
 
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     return true;
 }
 
-#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
+#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \
     static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
     { \
         static NeonGenWidenFn * const widenfn[] = { \
             gen_helper_neon_widen_##S##8, \
             gen_helper_neon_widen_##S##16, \
-            tcg_gen_##EXT##_i32_i64, \
-            NULL, \
+            NULL, NULL, \
         }; \
         static NeonGenTwo64OpFn * const addfn[] = { \
             gen_helper_neon_##OP##l_u16, \
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
             tcg_gen_##OP##_i64, \
             NULL, \
         }; \
-        return do_prewiden_3d(s, a, widenfn[a->size], \
-                              addfn[a->size], SRC1WIDE); \
+        int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
+        return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
+                              SRC1WIDE ? MO_Q : narrow_mop, \
+                              narrow_mop); \
     }
 
-DO_PREWIDEN(VADDL_S, s, ext, add, false)
-DO_PREWIDEN(VADDL_U, u, extu, add, false)
-DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
-DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
-DO_PREWIDEN(VADDW_S, s, ext, add, true)
-DO_PREWIDEN(VADDW_U, u, extu, add, true)
-DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
-DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
+DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN)
+DO_PREWIDEN(VADDL_U, u, add, false, 0)
+DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN)
+DO_PREWIDEN(VSUBL_U, u, sub, false, 0)
+DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN)
+DO_PREWIDEN(VADDW_U, u, add, true, 0)
+DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN)
+DO_PREWIDEN(VSUBW_U, u, sub, true, 0)
 
 static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
                          NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
--
2.20.1

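The point of the new MemOps above is that a sign- or zero-extending element load replaces a 32-bit read plus a separate widening op. Roughly, using the names from the patch (a sketch of the generated-code shape, not the literal source):

    /* before: read 32 bits, then widen with an extra TCG op */
    read_neon_element32(tmp, a->vn, 0, MO_32);
    tcg_gen_ext_i32_i64(rn0_64, tmp);       /* tcg_gen_extu_i32_i64 for unsigned */

    /* after: one widening 64-bit element read */
    read_neon_element64(rn0_64, a->vn, 0, MO_SL);   /* MO_UL for the unsigned case */

The -1 sentinel passed as src1_mop/src2_mop then simply means "no widening load applies; fall back to the per-size widenfn helper".
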
In the neon_padd/pmax/pmin helpers for float16, a cut-and-paste error
meant we were using the H4() address swizzler macro rather than the
H2() which is required for 2-byte data. This had no effect on
little-endian hosts but meant we put the result data into the
destination Dreg in the wrong order on big-endian hosts.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201028191712.4910-2-peter.maydell@linaro.org
---
 target/arm/vec_helper.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ABA(gvec_uaba_d, uint64_t)
     r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
     r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
                                                  \
-    d[H4(0)] = r0;                               \
-    d[H4(1)] = r1;                               \
-    d[H4(2)] = r2;                               \
-    d[H4(3)] = r3;                               \
+    d[H2(0)] = r0;                               \
+    d[H2(1)] = r1;                               \
+    d[H2(2)] = r2;                               \
+    d[H2(3)] = r3;                               \
 }
 
 DO_NEON_PAIRWISE(neon_padd, add)
--
2.20.1

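For reference, the H*() swizzle macros exist because QEMU stores vector registers as arrays of host-order words; on a big-endian host the low index bits must be flipped so that element N of a given size lands at the byte address the target expects. A standalone sketch of the idea (these definitions match my reading of QEMU's ARM helpers, but treat them as an assumption rather than the authoritative source):

    /* Assumed shape of the swizzlers, within an 8-byte granule. */
    #ifdef HOST_WORDS_BIGENDIAN
    #define H2(x)  ((x) ^ 3)   /* index of a 16-bit element */
    #define H4(x)  ((x) ^ 1)   /* index of a 32-bit element */
    #else
    #define H2(x)  (x)
    #define H4(x)  (x)
    #endif

Indexing an array of 16-bit results with H4(), as the buggy code did, is a no-op on little-endian hosts but scatters the results into the wrong halfwords on big-endian ones -- exactly the symptom the commit message describes.
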
The helper functions for performing the udot/sdot operations against
a scalar were not using an address-swizzling macro when converting
the index of the scalar element into a pointer into the vm array.
This had no effect on little-endian hosts but meant we generated
incorrect results on big-endian hosts.

For these insns, the index is indexing over group of 4 8-bit values,
so 32 bits per indexed entity, and H4() is therefore what we want.
(For Neon the only possible input indexes are 0 and 1.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201028191712.4910-3-peter.maydell@linaro.org
---
 target/arm/vec_helper.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
     intptr_t index = simd_data(desc);
     uint32_t *d = vd;
     int8_t *n = vn;
-    int8_t *m_indexed = (int8_t *)vm + index * 4;
+    int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
 
     /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
      * Otherwise opr_sz is a multiple of 16.
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
     intptr_t index = simd_data(desc);
     uint32_t *d = vd;
     uint8_t *n = vn;
-    uint8_t *m_indexed = (uint8_t *)vm + index * 4;
+    uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
 
     /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
      * Otherwise opr_sz is a multiple of 16.
--
2.20.1

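The reasoning in that message maps directly onto the pointer arithmetic: the scalar index selects a four-byte group, so it must be swizzled as a 32-bit index before being scaled back into bytes:

    /* index selects a 4-byte group: swizzle as a 32-bit unit, then scale */
    uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;

Since Neon only ever passes index 0 or 1, on a big-endian host H4() just swaps which half of the 64-bit Dreg supplies the scalar.
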
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>

HCR should be applied when NS is set, not when it is cleared.

Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
 /*
  * Non-IS variants of TLB operations are upgraded to
- * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
+ * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
  * force broadcast of these operations.
  */
 static bool tlb_force_broadcast(CPUARMState *env)
 {
-    return (env->cp15.hcr_el2 & HCR_FB) &&
-        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
+    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
 }
 
 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
--
2.20.1

From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>

Secure mode is not exempted from checking SCR_EL3.TLOR, and in the
future HCR_EL2.TLOR when S-EL2 is enabled.

Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
 #endif
 
 /* Shared logic between LORID and the rest of the LOR* registers.
- * Secure state has already been delt with.
+ * Secure state exclusion has already been dealt with.
  */
-static CPAccessResult access_lor_ns(CPUARMState *env)
+static CPAccessResult access_lor_ns(CPUARMState *env,
+                                    const ARMCPRegInfo *ri, bool isread)
 {
     int el = arm_current_el(env);
 
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_ns(CPUARMState *env)
     return CP_ACCESS_OK;
 }
 
-static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
-                                   bool isread)
-{
-    if (arm_is_secure_below_el3(env)) {
-        /* Access ok in secure mode. */
-        return CP_ACCESS_OK;
-    }
-    return access_lor_ns(env);
-}
-
 static CPAccessResult access_lor_other(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
 {
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_other(CPUARMState *env,
         /* Access denied in secure mode. */
         return CP_ACCESS_TRAP;
     }
-    return access_lor_ns(env);
+    return access_lor_ns(env, ri, isread);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lor_reginfo[] = {
       .type = ARM_CP_CONST, .resetvalue = 0 },
     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
-      .access = PL1_R, .accessfn = access_lorid,
+      .access = PL1_R, .accessfn = access_lor_ns,
       .type = ARM_CP_CONST, .resetvalue = 0 },
     REGINFO_SENTINEL
 };
--
2.20.1

If we're using the capstone disassembler, disassembly of a run of
instructions more than 32 bytes long disassembles the wrong data for
instructions beyond the 32 byte mark:

(qemu) xp /16x 0x100
0000000000000100: 0x00000005 0x54410001 0x00000001 0x00001000
0000000000000110: 0x00000000 0x00000004 0x54410002 0x3c000000
0000000000000120: 0x00000000 0x00000004 0x54410009 0x74736574
0000000000000130: 0x00000000 0x00000000 0x00000000 0x00000000
(qemu) xp /16i 0x100
0x00000100: 00000005 andeq r0, r0, r5
0x00000104: 54410001 strbpl r0, [r1], #-1
0x00000108: 00000001 andeq r0, r0, r1
0x0000010c: 00001000 andeq r1, r0, r0
0x00000110: 00000000 andeq r0, r0, r0
0x00000114: 00000004 andeq r0, r0, r4
0x00000118: 54410002 strbpl r0, [r1], #-2
0x0000011c: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
0x00000120: 54410001 strbpl r0, [r1], #-1
0x00000124: 00000001 andeq r0, r0, r1
0x00000128: 00001000 andeq r1, r0, r0
0x0000012c: 00000000 andeq r0, r0, r0
0x00000130: 00000004 andeq r0, r0, r4
0x00000134: 54410002 strbpl r0, [r1], #-2
0x00000138: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
0x0000013c: 00000000 andeq r0, r0, r0

Here the disassembly of 0x120..0x13f is using the data that is in
0x104..0x123.

This is caused by passing the wrong value to the read_memory_func().
The intention is that at this point in the loop the 'cap_buf' buffer
already contains 'csize' bytes of data for the instruction at guest
addr 'pc', and we want to read in an extra 'tsize' bytes. Those
extra bytes are therefore at 'pc + csize', not 'pc'. On the first
time through the loop 'csize' happens to be zero, so the initial read
of 32 bytes into cap_buf is correct and as long as the disassembly
never needs to read more data we return the correct information.

Use the correct guest address in the call to read_memory_func().

Cc: qemu-stable@nongnu.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1900779
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201022132445.25039-1-peter.maydell@linaro.org
---
 disas/capstone.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/disas/capstone.c b/disas/capstone.c
index XXXXXXX..XXXXXXX 100644
--- a/disas/capstone.c
+++ b/disas/capstone.c
@@ -XXX,XX +XXX,XX @@ bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
 
         /* Make certain that we can make progress. */
         assert(tsize != 0);
-        info->read_memory_func(pc, cap_buf + csize, tsize, info);
+        info->read_memory_func(pc + csize, cap_buf + csize, tsize, info);
         csize += tsize;
 
         if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
--
2.20.1

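A condensed view of the loop invariant being repaired (a sketch under the assumption that the surrounding code in disas/capstone.c has roughly this shape; error handling and the capstone iteration details are omitted, and bytes_wanted is a hypothetical name):

    /* Invariant: cap_buf[0..csize) mirrors guest memory [pc, pc + csize). */
    size_t tsize = MIN(sizeof(cap_buf) - csize, bytes_wanted);
    assert(tsize != 0);                     /* we must make progress */
    /* We are appending to the buffer, so the new bytes live at pc + csize. */
    info->read_memory_func(pc + csize, cap_buf + csize, tsize, info);
    csize += tsize;

Reading from plain pc re-fetches the first 32 bytes and appends them at offset 32, which is why the stale data only shows up once a run of instructions crosses that boundary.
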
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Use the BIT_ULL() macro to ensure we use 64-bit arithmetic.
This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN):

  CID 1432363 (#1 of 1): Unintentional integer overflow:

  overflow_before_widen:
    Potentially overflowing expression 1 << scale with type int
    (32 bits, signed) is evaluated using 32-bit arithmetic, and
    then used in a context that expects an expression of type
    hwaddr (64 bits, unsigned).

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20201030144617.1535064-1-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/bitops.h"
 #include "hw/irq.h"
 #include "hw/sysbus.h"
 #include "migration/vmstate.h"
@@ -XXX,XX +XXX,XX @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     scale = CMD_SCALE(cmd);
     num = CMD_NUM(cmd);
     ttl = CMD_TTL(cmd);
-    num_pages = (num + 1) * (1 << (scale));
+    num_pages = (num + 1) * BIT_ULL(scale);
 
     if (type == SMMU_CMD_TLBI_NH_VA) {
--
2.20.1

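The bug class here is C's "overflow before widen": the expression 1 << scale is computed in 32-bit int arithmetic regardless of the 64-bit type it is later assigned to. A standalone illustration (BIT_ULL() defined as in qemu/bitops.h):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(nr) (1ULL << (nr))

    int main(void)
    {
        int scale = 32;
        /* 1 << 32 is a 32-bit shift: undefined behaviour, typically 0 or 1 */
        uint64_t bad = (uint64_t)(1 << scale);
        uint64_t good = BIT_ULL(scale);       /* 0x100000000 as intended */
        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
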
48
static void gen_exception_insn(DisasContext *s, int offset, int excp,
49
uint32_t syndrome, uint32_t target_el)
50
{
51
- gen_a64_set_pc_im(s->pc - offset);
52
+ gen_a64_set_pc_im(s->base.pc_next - offset);
53
gen_exception(excp, syndrome, target_el);
54
s->base.is_jmp = DISAS_NORETURN;
55
}
56
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset,
57
{
58
TCGv_i32 tcg_syn;
59
60
- gen_a64_set_pc_im(s->pc - offset);
61
+ gen_a64_set_pc_im(s->base.pc_next - offset);
62
tcg_syn = tcg_const_i32(syndrome);
63
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
64
tcg_temp_free_i32(tcg_syn);
65
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
66
67
if (insn & (1U << 31)) {
68
/* BL Branch with link */
69
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
70
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
71
}
41
}
72
42
73
/* B Branch / BL Branch with link */
43
if (type == SMMU_CMD_TLBI_NH_VA) {
74
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
75
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
76
tcg_cmp, 0, label_match);
77
78
- gen_goto_tb(s, 0, s->pc);
79
+ gen_goto_tb(s, 0, s->base.pc_next);
80
gen_set_label(label_match);
81
gen_goto_tb(s, 1, addr);
82
}
83
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
84
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
85
tcg_cmp, 0, label_match);
86
tcg_temp_free_i64(tcg_cmp);
87
- gen_goto_tb(s, 0, s->pc);
88
+ gen_goto_tb(s, 0, s->base.pc_next);
89
gen_set_label(label_match);
90
gen_goto_tb(s, 1, addr);
91
}
92
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
93
/* genuinely conditional branches */
94
TCGLabel *label_match = gen_new_label();
95
arm_gen_test_cc(cond, label_match);
96
- gen_goto_tb(s, 0, s->pc);
97
+ gen_goto_tb(s, 0, s->base.pc_next);
98
gen_set_label(label_match);
99
gen_goto_tb(s, 1, addr);
100
} else {
101
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
102
* any pending interrupts immediately.
103
*/
104
reset_btype(s);
105
- gen_goto_tb(s, 0, s->pc);
106
+ gen_goto_tb(s, 0, s->base.pc_next);
107
return;
108
109
case 7: /* SB */
110
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
111
* MB and end the TB instead.
112
*/
113
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
114
- gen_goto_tb(s, 0, s->pc);
115
+ gen_goto_tb(s, 0, s->base.pc_next);
116
return;
117
118
default:
119
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
120
gen_a64_set_pc(s, dst);
121
/* BLR also needs to load return address */
122
if (opc == 1) {
123
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
124
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
125
}
126
break;
127
128
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
129
gen_a64_set_pc(s, dst);
130
/* BLRAA also needs to load return address */
131
if (opc == 9) {
132
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
133
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
134
}
135
break;
136
137
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
138
{
139
uint32_t insn;
140
141
- s->pc_curr = s->pc;
142
- insn = arm_ldl_code(env, s->pc, s->sctlr_b);
143
+ s->pc_curr = s->base.pc_next;
144
+ insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
145
s->insn = insn;
146
- s->pc += 4;
147
+ s->base.pc_next += 4;
148
149
s->fp_access_checked = false;
150
151
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
152
int bound, core_mmu_idx;
153
154
dc->isar = &arm_cpu->isar;
155
- dc->pc = dc->base.pc_first;
156
dc->condjmp = 0;
157
158
dc->aarch64 = 1;
159
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
160
{
161
DisasContext *dc = container_of(dcbase, DisasContext, base);
162
163
- tcg_gen_insn_start(dc->pc, 0, 0);
164
+ tcg_gen_insn_start(dc->base.pc_next, 0, 0);
165
dc->insn_start = tcg_last_op();
166
}
167
168
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
169
DisasContext *dc = container_of(dcbase, DisasContext, base);
170
171
if (bp->flags & BP_CPU) {
172
- gen_a64_set_pc_im(dc->pc);
173
+ gen_a64_set_pc_im(dc->base.pc_next);
174
gen_helper_check_breakpoints(cpu_env);
175
/* End the TB early; it likely won't be executed */
176
dc->base.is_jmp = DISAS_TOO_MANY;
177
@@ -XXX,XX +XXX,XX @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
178
to for it to be properly cleared -- thus we
179
increment the PC here so that the logic setting
180
tb->size below does the right thing. */
181
- dc->pc += 4;
182
+ dc->base.pc_next += 4;
183
dc->base.is_jmp = DISAS_NORETURN;
184
}
185
186
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
187
disas_a64_insn(env, dc);
188
}
189
190
- dc->base.pc_next = dc->pc;
191
translator_loop_temp_check(&dc->base);
192
}
193
194
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
195
*/
196
switch (dc->base.is_jmp) {
197
default:
198
- gen_a64_set_pc_im(dc->pc);
199
+ gen_a64_set_pc_im(dc->base.pc_next);
200
/* fall through */
201
case DISAS_EXIT:
202
case DISAS_JUMP:
203
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
204
switch (dc->base.is_jmp) {
205
case DISAS_NEXT:
206
case DISAS_TOO_MANY:
207
- gen_goto_tb(dc, 1, dc->pc);
208
+ gen_goto_tb(dc, 1, dc->base.pc_next);
209
break;
210
default:
211
case DISAS_UPDATE:
212
- gen_a64_set_pc_im(dc->pc);
213
+ gen_a64_set_pc_im(dc->base.pc_next);
214
/* fall through */
215
case DISAS_EXIT:
216
tcg_gen_exit_tb(NULL, 0);
217
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
218
case DISAS_SWI:
219
break;
220
case DISAS_WFE:
221
- gen_a64_set_pc_im(dc->pc);
222
+ gen_a64_set_pc_im(dc->base.pc_next);
223
gen_helper_wfe(cpu_env);
224
break;
225
case DISAS_YIELD:
226
- gen_a64_set_pc_im(dc->pc);
227
+ gen_a64_set_pc_im(dc->base.pc_next);
228
gen_helper_yield(cpu_env);
229
break;
230
case DISAS_WFI:
231
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
232
*/
233
TCGv_i32 tmp = tcg_const_i32(4);
234
235
- gen_a64_set_pc_im(dc->pc);
236
+ gen_a64_set_pc_im(dc->base.pc_next);
237
gen_helper_wfi(cpu_env, tmp);
238
tcg_temp_free_i32(tmp);
239
/* The helper doesn't necessarily throw an exception, but we
240
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
241
}
242
}
243
}
244
-
245
- /* Functions above can change dc->pc, so re-align db->pc_next */
246
- dc->base.pc_next = dc->pc;
247
}
248
249
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
250
diff --git a/target/arm/translate.c b/target/arm/translate.c
251
index XXXXXXX..XXXXXXX 100644
252
--- a/target/arm/translate.c
253
+++ b/target/arm/translate.c
254
@@ -XXX,XX +XXX,XX @@ static inline void gen_blxns(DisasContext *s, int rm)
255
* We do however need to set the PC, because the blxns helper reads it.
256
* The blxns helper may throw an exception.
257
*/
258
- gen_set_pc_im(s, s->pc);
259
+ gen_set_pc_im(s, s->base.pc_next);
260
gen_helper_v7m_blxns(cpu_env, var);
261
tcg_temp_free_i32(var);
262
s->base.is_jmp = DISAS_EXIT;
263
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
264
* for single stepping.)
265
*/
266
s->svc_imm = imm16;
267
- gen_set_pc_im(s, s->pc);
268
+ gen_set_pc_im(s, s->base.pc_next);
269
s->base.is_jmp = DISAS_HVC;
270
}
271
272
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
273
tmp = tcg_const_i32(syn_aa32_smc());
274
gen_helper_pre_smc(cpu_env, tmp);
275
tcg_temp_free_i32(tmp);
276
- gen_set_pc_im(s, s->pc);
277
+ gen_set_pc_im(s, s->base.pc_next);
278
s->base.is_jmp = DISAS_SMC;
279
}
280
281
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
282
{
283
gen_set_condexec(s);
284
- gen_set_pc_im(s, s->pc - offset);
285
+ gen_set_pc_im(s, s->base.pc_next - offset);
286
gen_exception_internal(excp);
287
s->base.is_jmp = DISAS_NORETURN;
288
}
289
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
290
int syn, uint32_t target_el)
291
{
292
gen_set_condexec(s);
293
- gen_set_pc_im(s, s->pc - offset);
294
+ gen_set_pc_im(s, s->base.pc_next - offset);
295
gen_exception(excp, syn, target_el);
296
s->base.is_jmp = DISAS_NORETURN;
297
}
298
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
299
TCGv_i32 tcg_syn;
300
301
gen_set_condexec(s);
302
- gen_set_pc_im(s, s->pc - offset);
303
+ gen_set_pc_im(s, s->base.pc_next - offset);
304
tcg_syn = tcg_const_i32(syn);
305
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
306
tcg_temp_free_i32(tcg_syn);
307
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
308
/* Force a TB lookup after an instruction that changes the CPU state. */
309
static inline void gen_lookup_tb(DisasContext *s)
310
{
311
- tcg_gen_movi_i32(cpu_R[15], s->pc);
312
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
313
s->base.is_jmp = DISAS_EXIT;
314
}
315
316
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
317
{
318
#ifndef CONFIG_USER_ONLY
319
return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
320
- ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
321
+ ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
322
#else
323
return true;
324
#endif
325
@@ -XXX,XX +XXX,XX @@ static void gen_nop_hint(DisasContext *s, int val)
326
*/
327
case 1: /* yield */
328
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
329
- gen_set_pc_im(s, s->pc);
330
+ gen_set_pc_im(s, s->base.pc_next);
331
s->base.is_jmp = DISAS_YIELD;
332
}
333
break;
334
case 3: /* wfi */
335
- gen_set_pc_im(s, s->pc);
336
+ gen_set_pc_im(s, s->base.pc_next);
337
s->base.is_jmp = DISAS_WFI;
338
break;
339
case 2: /* wfe */
340
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
341
- gen_set_pc_im(s, s->pc);
342
+ gen_set_pc_im(s, s->base.pc_next);
343
s->base.is_jmp = DISAS_WFE;
344
}
345
break;
346
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
347
if (isread) {
348
return 1;
349
}
350
- gen_set_pc_im(s, s->pc);
351
+ gen_set_pc_im(s, s->base.pc_next);
352
s->base.is_jmp = DISAS_WFI;
353
return 0;
354
default:
355
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
356
* self-modifying code correctly and also to take
357
* any pending interrupts immediately.
358
*/
359
- gen_goto_tb(s, 0, s->pc);
360
+ gen_goto_tb(s, 0, s->base.pc_next);
361
return;
362
case 7: /* sb */
363
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
364
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
365
* for TCG; MB and end the TB instead.
366
*/
367
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
368
- gen_goto_tb(s, 0, s->pc);
369
+ gen_goto_tb(s, 0, s->base.pc_next);
370
return;
371
default:
372
goto illegal_op;
373
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
374
int32_t offset;
375
376
tmp = tcg_temp_new_i32();
377
- tcg_gen_movi_i32(tmp, s->pc);
378
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
379
store_reg(s, 14, tmp);
380
/* Sign-extend the 24-bit offset */
381
offset = (((int32_t)insn) << 8) >> 8;
382
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
383
/* branch link/exchange thumb (blx) */
384
tmp = load_reg(s, rm);
385
tmp2 = tcg_temp_new_i32();
386
- tcg_gen_movi_i32(tmp2, s->pc);
387
+ tcg_gen_movi_i32(tmp2, s->base.pc_next);
388
store_reg(s, 14, tmp2);
389
gen_bx(s, tmp);
390
break;
391
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
392
/* branch (and link) */
393
if (insn & (1 << 24)) {
394
tmp = tcg_temp_new_i32();
395
- tcg_gen_movi_i32(tmp, s->pc);
396
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
397
store_reg(s, 14, tmp);
398
}
399
offset = sextract32(insn << 2, 0, 26);
400
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
401
break;
402
case 0xf:
403
/* swi */
404
- gen_set_pc_im(s, s->pc);
405
+ gen_set_pc_im(s, s->base.pc_next);
406
s->svc_imm = extract32(insn, 0, 24);
407
s->base.is_jmp = DISAS_SWI;
408
break;
409
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
410
411
if (insn & (1 << 14)) {
412
/* Branch and link. */
413
- tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
414
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
415
}
416
417
offset += read_pc(s);
418
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
419
* and also to take any pending interrupts
420
* immediately.
421
*/
422
- gen_goto_tb(s, 0, s->pc);
423
+ gen_goto_tb(s, 0, s->base.pc_next);
424
break;
425
case 7: /* sb */
426
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
427
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
428
* for TCG; MB and end the TB instead.
429
*/
430
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
431
- gen_goto_tb(s, 0, s->pc);
432
+ gen_goto_tb(s, 0, s->base.pc_next);
433
break;
434
default:
435
goto illegal_op;
436
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
437
/* BLX/BX */
438
tmp = load_reg(s, rm);
439
if (link) {
440
- val = (uint32_t)s->pc | 1;
441
+ val = (uint32_t)s->base.pc_next | 1;
442
tmp2 = tcg_temp_new_i32();
443
tcg_gen_movi_i32(tmp2, val);
444
store_reg(s, 14, tmp2);
445
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
446
447
if (cond == 0xf) {
448
/* swi */
449
- gen_set_pc_im(s, s->pc);
450
+ gen_set_pc_im(s, s->base.pc_next);
451
s->svc_imm = extract32(insn, 0, 8);
452
s->base.is_jmp = DISAS_SWI;
453
break;
454
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
455
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
456
457
tmp2 = tcg_temp_new_i32();
458
- tcg_gen_movi_i32(tmp2, s->pc | 1);
459
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
460
store_reg(s, 14, tmp2);
461
gen_bx(s, tmp);
462
break;
463
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
464
tcg_gen_addi_i32(tmp, tmp, offset);
465
466
tmp2 = tcg_temp_new_i32();
467
- tcg_gen_movi_i32(tmp2, s->pc | 1);
468
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
469
store_reg(s, 14, tmp2);
470
gen_bx(s, tmp);
471
} else {
472
@@ -XXX,XX +XXX,XX @@ undef:
473
474
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
475
{
476
- /* Return true if the insn at dc->pc might cross a page boundary.
477
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
478
* (False positives are OK, false negatives are not.)
479
* We know this is a Thumb insn, and our caller ensures we are
480
- * only called if dc->pc is less than 4 bytes from the page
481
+ * only called if dc->base.pc_next is less than 4 bytes from the page
482
* boundary, so we cross the page if the first 16 bits indicate
483
* that this is a 32 bit insn.
484
*/
485
- uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
486
+ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
487
488
- return !thumb_insn_is_16bit(s, s->pc, insn);
489
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
490
}
491
492
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
493
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
494
uint32_t condexec, core_mmu_idx;
495
496
dc->isar = &cpu->isar;
497
- dc->pc = dc->base.pc_first;
498
dc->condjmp = 0;
499
500
dc->aarch64 = 0;
501
@@ -XXX,XX +XXX,XX @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
502
{
503
DisasContext *dc = container_of(dcbase, DisasContext, base);
504
505
- tcg_gen_insn_start(dc->pc,
506
+ tcg_gen_insn_start(dc->base.pc_next,
507
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
508
0);
509
dc->insn_start = tcg_last_op();
510
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
511
512
if (bp->flags & BP_CPU) {
513
gen_set_condexec(dc);
514
- gen_set_pc_im(dc, dc->pc);
515
+ gen_set_pc_im(dc, dc->base.pc_next);
516
gen_helper_check_breakpoints(cpu_env);
517
/* End the TB early; it's likely not going to be executed */
518
dc->base.is_jmp = DISAS_TOO_MANY;
519
@@ -XXX,XX +XXX,XX @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
520
tb->size below does the right thing. */
521
/* TODO: Advance PC by correct instruction length to
522
* avoid disassembler error messages */
523
- dc->pc += 2;
524
+ dc->base.pc_next += 2;
525
dc->base.is_jmp = DISAS_NORETURN;
526
}
527
528
@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
529
{
530
#ifdef CONFIG_USER_ONLY
531
/* Intercept jump to the magic kernel page. */
532
- if (dc->pc >= 0xffff0000) {
533
+ if (dc->base.pc_next >= 0xffff0000) {
534
/* We always get here via a jump, so know we are not in a
535
conditional execution block. */
536
gen_exception_internal(EXCP_KERNEL_TRAP);
537
@@ -XXX,XX +XXX,XX @@ static void arm_post_translate_insn(DisasContext *dc)
538
gen_set_label(dc->condlabel);
539
dc->condjmp = 0;
540
}
541
- dc->base.pc_next = dc->pc;
542
translator_loop_temp_check(&dc->base);
543
}
544
545
@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
546
return;
547
}
548
549
- dc->pc_curr = dc->pc;
550
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
551
+ dc->pc_curr = dc->base.pc_next;
552
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
553
dc->insn = insn;
554
- dc->pc += 4;
555
+ dc->base.pc_next += 4;
556
disas_arm_insn(dc, insn);
557
558
arm_post_translate_insn(dc);
559
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
560
return;
561
}
562
563
- dc->pc_curr = dc->pc;
564
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
565
- is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
566
- dc->pc += 2;
567
+ dc->pc_curr = dc->base.pc_next;
568
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
569
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
570
+ dc->base.pc_next += 2;
571
if (!is_16bit) {
572
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
573
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
574
575
insn = insn << 16 | insn2;
576
- dc->pc += 2;
577
+ dc->base.pc_next += 2;
578
}
579
dc->insn = insn;
580
581
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
582
* but isn't very efficient).
583
*/
584
if (dc->base.is_jmp == DISAS_NEXT
585
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
586
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
587
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
588
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
589
&& insn_crosses_page(env, dc)))) {
590
dc->base.is_jmp = DISAS_TOO_MANY;
591
}
592
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
593
case DISAS_NEXT:
594
case DISAS_TOO_MANY:
595
case DISAS_UPDATE:
596
- gen_set_pc_im(dc, dc->pc);
597
+ gen_set_pc_im(dc, dc->base.pc_next);
598
/* fall through */
599
default:
600
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
601
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
602
switch(dc->base.is_jmp) {
603
case DISAS_NEXT:
604
case DISAS_TOO_MANY:
605
- gen_goto_tb(dc, 1, dc->pc);
606
+ gen_goto_tb(dc, 1, dc->base.pc_next);
607
break;
608
case DISAS_JUMP:
609
gen_goto_ptr();
610
break;
611
case DISAS_UPDATE:
612
- gen_set_pc_im(dc, dc->pc);
613
+ gen_set_pc_im(dc, dc->base.pc_next);
614
/* fall through */
615
default:
616
/* indicate that the hash table must be used to find the next TB */
617
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
618
gen_set_label(dc->condlabel);
619
gen_set_condexec(dc);
620
if (unlikely(is_singlestepping(dc))) {
621
- gen_set_pc_im(dc, dc->pc);
622
+ gen_set_pc_im(dc, dc->base.pc_next);
623
gen_singlestep_exception(dc);
624
} else {
625
- gen_goto_tb(dc, 1, dc->pc);
626
+ gen_goto_tb(dc, 1, dc->base.pc_next);
627
}
628
}
629
-
630
- /* Functions above can change dc->pc, so re-align db->pc_next */
631
- dc->base.pc_next = dc->pc;
632
}
633
634
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
635
--
44
--
636
2.20.1
45
2.20.1
637
46
638
47
diff view generated by jsdifflib
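
(A standalone model of the bookkeeping the translator patch above relies on; the struct and function here are hypothetical, not QEMU code. base.pc_next is advanced past an instruction as soon as it is fetched, so it doubles as the fall-through branch target, which is what lets the per-target dc->pc field be dropped.)

    #include <inttypes.h>
    #include <stdio.h>

    struct disas_base { uint64_t pc_first, pc_next; };

    /* Decode one insn: record its address, then advance pc_next past it. */
    static uint64_t decode_one(struct disas_base *b, unsigned insn_len)
    {
        uint64_t pc_curr = b->pc_next;   /* address of the insn being decoded */
        b->pc_next += insn_len;          /* address of the following insn */
        return pc_curr;
    }

    int main(void)
    {
        struct disas_base b = { 0x1000, 0x1000 };
        uint64_t pc_curr = decode_one(&b, 4);  /* one 4-byte A64 insn */
        /* a "branch to the next instruction" would target b.pc_next here */
        printf("insn at 0x%" PRIx64 ", next at 0x%" PRIx64 "\n", pc_curr, b.pc_next);
        return 0;
    }
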
1
From: Andrew Jones <drjones@redhat.com>
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
2
3
If -cpu <cpu>,aarch64=off is used then KVM must also be used, and it
3
When booting a CPU with EL3 using the -kernel flag, set up CPTR_EL3 so
4
and the host must support running the vcpu in 32-bit mode. Also, if
4
that SVE will not trap to EL3.
5
-cpu <cpu>,aarch64=on is used, then it doesn't matter if KVM is
6
enabled or not.
7
5
8
Signed-off-by: Andrew Jones <drjones@redhat.com>
6
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
9
Reviewed-by: Eric Auger <eric.auger@redhat.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201030151541.11976-1-remi@remlab.net
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/kvm_arm.h | 14 ++++++++++++++
11
hw/arm/boot.c | 3 +++
13
target/arm/cpu64.c | 12 ++++++------
12
1 file changed, 3 insertions(+)
14
target/arm/kvm64.c | 9 +++++++++
15
3 files changed, 29 insertions(+), 6 deletions(-)
16
13
17
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
18
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/kvm_arm.h
16
--- a/hw/arm/boot.c
20
+++ b/target/arm/kvm_arm.h
17
+++ b/hw/arm/boot.c
21
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
18
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
22
*/
19
if (cpu_isar_feature(aa64_mte, cpu)) {
23
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
20
env->cp15.scr_el3 |= SCR_ATA;
24
21
}
25
+/**
22
+ if (cpu_isar_feature(aa64_sve, cpu)) {
26
+ * kvm_arm_aarch32_supported:
23
+ env->cp15.cptr_el[3] |= CPTR_EZ;
27
+ * @cs: CPUState
24
+ }
28
+ *
25
/* AArch64 kernels never boot in secure mode */
29
+ * Returns: true if the KVM VCPU can enable AArch32 mode
26
assert(!info->secure_boot);
30
+ * and false otherwise.
27
/* This hook is only supported for AArch32 currently:
31
+ */
32
+bool kvm_arm_aarch32_supported(CPUState *cs);
33
+
34
/**
35
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
36
* IPA address space supported by KVM
37
@@ -XXX,XX +XXX,XX @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
38
cpu->host_cpu_probe_failed = true;
39
}
40
41
+static inline bool kvm_arm_aarch32_supported(CPUState *cs)
42
+{
43
+ return false;
44
+}
45
+
46
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
47
{
48
return -ENOENT;
49
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/cpu64.c
52
+++ b/target/arm/cpu64.c
53
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
54
* restriction allows us to avoid fixing up functionality that assumes a
55
* uniform execution state like do_interrupt.
56
*/
57
- if (!kvm_enabled()) {
58
- error_setg(errp, "'aarch64' feature cannot be disabled "
59
- "unless KVM is enabled");
60
- return;
61
- }
62
-
63
if (value == false) {
64
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
65
+ error_setg(errp, "'aarch64' feature cannot be disabled "
66
+ "unless KVM is enabled and 32-bit EL1 "
67
+ "is supported");
68
+ return;
69
+ }
70
unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
71
} else {
72
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
73
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/kvm64.c
76
+++ b/target/arm/kvm64.c
77
@@ -XXX,XX +XXX,XX @@
78
#include "exec/gdbstub.h"
79
#include "sysemu/sysemu.h"
80
#include "sysemu/kvm.h"
81
+#include "sysemu/kvm_int.h"
82
#include "kvm_arm.h"
83
+#include "hw/boards.h"
84
#include "internals.h"
85
86
static bool have_guest_debug;
87
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
88
return true;
89
}
90
91
+bool kvm_arm_aarch32_supported(CPUState *cpu)
92
+{
93
+ KVMState *s = KVM_STATE(current_machine->accelerator);
94
+
95
+ return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
96
+}
97
+
98
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
99
100
int kvm_arch_init_vcpu(CPUState *cs)
101
--
28
--
102
2.20.1
29
2.20.1
103
30
104
31
diff view generated by jsdifflib
1
From: Damien Hedde <damien.hedde@greensocs.com>
1
From: AlexChen <alex.chen@huawei.com>
2
2
3
Replace the zynq_slcr registers enum and macros using the
3
In omap_lcd_interrupts(), the pointer omap_lcd is dereferenced before
4
hw/registerfields.h macros.
4
being checked for validity, which may lead to a NULL pointer dereference.
5
So move the assignment to surface after checking that omap_lcd is valid
6
and move surface_bits_per_pixel(surface) to after the surface assignment.
5
7
6
Signed-off-by: Damien Hedde <damien.hedde@greensocs.com>
8
Reported-by: Euler Robot <euler.robot@huawei.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Signed-off-by: AlexChen <alex.chen@huawei.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 5F9CDB8A.9000001@huawei.com
9
Message-id: 20190729145654.14644-30-damien.hedde@greensocs.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
13
---
12
hw/misc/zynq_slcr.c | 450 ++++++++++++++++++++++----------------------
14
hw/display/omap_lcdc.c | 10 +++++++---
13
1 file changed, 225 insertions(+), 225 deletions(-)
15
1 file changed, 7 insertions(+), 3 deletions(-)
14
16
15
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
17
diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/misc/zynq_slcr.c
19
--- a/hw/display/omap_lcdc.c
18
+++ b/hw/misc/zynq_slcr.c
20
+++ b/hw/display/omap_lcdc.c
19
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ static void omap_lcd_interrupts(struct omap_lcd_panel_s *s)
20
#include "sysemu/sysemu.h"
22
static void omap_update_display(void *opaque)
21
#include "qemu/log.h"
22
#include "qemu/module.h"
23
+#include "hw/registerfields.h"
24
25
#ifndef ZYNQ_SLCR_ERR_DEBUG
26
#define ZYNQ_SLCR_ERR_DEBUG 0
27
@@ -XXX,XX +XXX,XX @@
28
#define XILINX_LOCK_KEY 0x767b
29
#define XILINX_UNLOCK_KEY 0xdf0d
30
31
-#define R_PSS_RST_CTRL_SOFT_RST 0x1
32
+REG32(SCL, 0x000)
33
+REG32(LOCK, 0x004)
34
+REG32(UNLOCK, 0x008)
35
+REG32(LOCKSTA, 0x00c)
36
37
-enum {
38
- SCL = 0x000 / 4,
39
- LOCK,
40
- UNLOCK,
41
- LOCKSTA,
42
+REG32(ARM_PLL_CTRL, 0x100)
43
+REG32(DDR_PLL_CTRL, 0x104)
44
+REG32(IO_PLL_CTRL, 0x108)
45
+REG32(PLL_STATUS, 0x10c)
46
+REG32(ARM_PLL_CFG, 0x110)
47
+REG32(DDR_PLL_CFG, 0x114)
48
+REG32(IO_PLL_CFG, 0x118)
49
50
- ARM_PLL_CTRL = 0x100 / 4,
51
- DDR_PLL_CTRL,
52
- IO_PLL_CTRL,
53
- PLL_STATUS,
54
- ARM_PLL_CFG,
55
- DDR_PLL_CFG,
56
- IO_PLL_CFG,
57
-
58
- ARM_CLK_CTRL = 0x120 / 4,
59
- DDR_CLK_CTRL,
60
- DCI_CLK_CTRL,
61
- APER_CLK_CTRL,
62
- USB0_CLK_CTRL,
63
- USB1_CLK_CTRL,
64
- GEM0_RCLK_CTRL,
65
- GEM1_RCLK_CTRL,
66
- GEM0_CLK_CTRL,
67
- GEM1_CLK_CTRL,
68
- SMC_CLK_CTRL,
69
- LQSPI_CLK_CTRL,
70
- SDIO_CLK_CTRL,
71
- UART_CLK_CTRL,
72
- SPI_CLK_CTRL,
73
- CAN_CLK_CTRL,
74
- CAN_MIOCLK_CTRL,
75
- DBG_CLK_CTRL,
76
- PCAP_CLK_CTRL,
77
- TOPSW_CLK_CTRL,
78
+REG32(ARM_CLK_CTRL, 0x120)
79
+REG32(DDR_CLK_CTRL, 0x124)
80
+REG32(DCI_CLK_CTRL, 0x128)
81
+REG32(APER_CLK_CTRL, 0x12c)
82
+REG32(USB0_CLK_CTRL, 0x130)
83
+REG32(USB1_CLK_CTRL, 0x134)
84
+REG32(GEM0_RCLK_CTRL, 0x138)
85
+REG32(GEM1_RCLK_CTRL, 0x13c)
86
+REG32(GEM0_CLK_CTRL, 0x140)
87
+REG32(GEM1_CLK_CTRL, 0x144)
88
+REG32(SMC_CLK_CTRL, 0x148)
89
+REG32(LQSPI_CLK_CTRL, 0x14c)
90
+REG32(SDIO_CLK_CTRL, 0x150)
91
+REG32(UART_CLK_CTRL, 0x154)
92
+REG32(SPI_CLK_CTRL, 0x158)
93
+REG32(CAN_CLK_CTRL, 0x15c)
94
+REG32(CAN_MIOCLK_CTRL, 0x160)
95
+REG32(DBG_CLK_CTRL, 0x164)
96
+REG32(PCAP_CLK_CTRL, 0x168)
97
+REG32(TOPSW_CLK_CTRL, 0x16c)
98
99
#define FPGA_CTRL_REGS(n, start) \
100
- FPGA ## n ## _CLK_CTRL = (start) / 4, \
101
- FPGA ## n ## _THR_CTRL, \
102
- FPGA ## n ## _THR_CNT, \
103
- FPGA ## n ## _THR_STA,
104
- FPGA_CTRL_REGS(0, 0x170)
105
- FPGA_CTRL_REGS(1, 0x180)
106
- FPGA_CTRL_REGS(2, 0x190)
107
- FPGA_CTRL_REGS(3, 0x1a0)
108
+ REG32(FPGA ## n ## _CLK_CTRL, (start)) \
109
+ REG32(FPGA ## n ## _THR_CTRL, (start) + 0x4)\
110
+ REG32(FPGA ## n ## _THR_CNT, (start) + 0x8)\
111
+ REG32(FPGA ## n ## _THR_STA, (start) + 0xc)
112
+FPGA_CTRL_REGS(0, 0x170)
113
+FPGA_CTRL_REGS(1, 0x180)
114
+FPGA_CTRL_REGS(2, 0x190)
115
+FPGA_CTRL_REGS(3, 0x1a0)
116
117
- BANDGAP_TRIP = 0x1b8 / 4,
118
- PLL_PREDIVISOR = 0x1c0 / 4,
119
- CLK_621_TRUE,
120
+REG32(BANDGAP_TRIP, 0x1b8)
121
+REG32(PLL_PREDIVISOR, 0x1c0)
122
+REG32(CLK_621_TRUE, 0x1c4)
123
124
- PSS_RST_CTRL = 0x200 / 4,
125
- DDR_RST_CTRL,
126
- TOPSW_RESET_CTRL,
127
- DMAC_RST_CTRL,
128
- USB_RST_CTRL,
129
- GEM_RST_CTRL,
130
- SDIO_RST_CTRL,
131
- SPI_RST_CTRL,
132
- CAN_RST_CTRL,
133
- I2C_RST_CTRL,
134
- UART_RST_CTRL,
135
- GPIO_RST_CTRL,
136
- LQSPI_RST_CTRL,
137
- SMC_RST_CTRL,
138
- OCM_RST_CTRL,
139
- FPGA_RST_CTRL = 0x240 / 4,
140
- A9_CPU_RST_CTRL,
141
+REG32(PSS_RST_CTRL, 0x200)
142
+ FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1)
143
+REG32(DDR_RST_CTRL, 0x204)
144
+REG32(TOPSW_RESET_CTRL, 0x208)
145
+REG32(DMAC_RST_CTRL, 0x20c)
146
+REG32(USB_RST_CTRL, 0x210)
147
+REG32(GEM_RST_CTRL, 0x214)
148
+REG32(SDIO_RST_CTRL, 0x218)
149
+REG32(SPI_RST_CTRL, 0x21c)
150
+REG32(CAN_RST_CTRL, 0x220)
151
+REG32(I2C_RST_CTRL, 0x224)
152
+REG32(UART_RST_CTRL, 0x228)
153
+REG32(GPIO_RST_CTRL, 0x22c)
154
+REG32(LQSPI_RST_CTRL, 0x230)
155
+REG32(SMC_RST_CTRL, 0x234)
156
+REG32(OCM_RST_CTRL, 0x238)
157
+REG32(FPGA_RST_CTRL, 0x240)
158
+REG32(A9_CPU_RST_CTRL, 0x244)
159
160
- RS_AWDT_CTRL = 0x24c / 4,
161
- RST_REASON,
162
+REG32(RS_AWDT_CTRL, 0x24c)
163
+REG32(RST_REASON, 0x250)
164
165
- REBOOT_STATUS = 0x258 / 4,
166
- BOOT_MODE,
167
+REG32(REBOOT_STATUS, 0x258)
168
+REG32(BOOT_MODE, 0x25c)
169
170
- APU_CTRL = 0x300 / 4,
171
- WDT_CLK_SEL,
172
+REG32(APU_CTRL, 0x300)
173
+REG32(WDT_CLK_SEL, 0x304)
174
175
- TZ_DMA_NS = 0x440 / 4,
176
- TZ_DMA_IRQ_NS,
177
- TZ_DMA_PERIPH_NS,
178
+REG32(TZ_DMA_NS, 0x440)
179
+REG32(TZ_DMA_IRQ_NS, 0x444)
180
+REG32(TZ_DMA_PERIPH_NS, 0x448)
181
182
- PSS_IDCODE = 0x530 / 4,
183
+REG32(PSS_IDCODE, 0x530)
184
185
- DDR_URGENT = 0x600 / 4,
186
- DDR_CAL_START = 0x60c / 4,
187
- DDR_REF_START = 0x614 / 4,
188
- DDR_CMD_STA,
189
- DDR_URGENT_SEL,
190
- DDR_DFI_STATUS,
191
+REG32(DDR_URGENT, 0x600)
192
+REG32(DDR_CAL_START, 0x60c)
193
+REG32(DDR_REF_START, 0x614)
194
+REG32(DDR_CMD_STA, 0x618)
195
+REG32(DDR_URGENT_SEL, 0x61c)
196
+REG32(DDR_DFI_STATUS, 0x620)
197
198
- MIO = 0x700 / 4,
199
+REG32(MIO, 0x700)
200
#define MIO_LENGTH 54
201
202
- MIO_LOOPBACK = 0x804 / 4,
203
- MIO_MST_TRI0,
204
- MIO_MST_TRI1,
205
+REG32(MIO_LOOPBACK, 0x804)
206
+REG32(MIO_MST_TRI0, 0x808)
207
+REG32(MIO_MST_TRI1, 0x80c)
208
209
- SD0_WP_CD_SEL = 0x830 / 4,
210
- SD1_WP_CD_SEL,
211
+REG32(SD0_WP_CD_SEL, 0x830)
212
+REG32(SD1_WP_CD_SEL, 0x834)
213
214
- LVL_SHFTR_EN = 0x900 / 4,
215
- OCM_CFG = 0x910 / 4,
216
+REG32(LVL_SHFTR_EN, 0x900)
217
+REG32(OCM_CFG, 0x910)
218
219
- CPU_RAM = 0xa00 / 4,
220
+REG32(CPU_RAM, 0xa00)
221
222
- IOU = 0xa30 / 4,
223
+REG32(IOU, 0xa30)
224
225
- DMAC_RAM = 0xa50 / 4,
226
+REG32(DMAC_RAM, 0xa50)
227
228
- AFI0 = 0xa60 / 4,
229
- AFI1 = AFI0 + 3,
230
- AFI2 = AFI1 + 3,
231
- AFI3 = AFI2 + 3,
232
+REG32(AFI0, 0xa60)
233
+REG32(AFI1, 0xa6c)
234
+REG32(AFI2, 0xa78)
235
+REG32(AFI3, 0xa84)
236
#define AFI_LENGTH 3
237
238
- OCM = 0xa90 / 4,
239
+REG32(OCM, 0xa90)
240
241
- DEVCI_RAM = 0xaa0 / 4,
242
+REG32(DEVCI_RAM, 0xaa0)
243
244
- CSG_RAM = 0xab0 / 4,
245
+REG32(CSG_RAM, 0xab0)
246
247
- GPIOB_CTRL = 0xb00 / 4,
248
- GPIOB_CFG_CMOS18,
249
- GPIOB_CFG_CMOS25,
250
- GPIOB_CFG_CMOS33,
251
- GPIOB_CFG_HSTL = 0xb14 / 4,
252
- GPIOB_DRVR_BIAS_CTRL,
253
+REG32(GPIOB_CTRL, 0xb00)
254
+REG32(GPIOB_CFG_CMOS18, 0xb04)
255
+REG32(GPIOB_CFG_CMOS25, 0xb08)
256
+REG32(GPIOB_CFG_CMOS33, 0xb0c)
257
+REG32(GPIOB_CFG_HSTL, 0xb14)
258
+REG32(GPIOB_DRVR_BIAS_CTRL, 0xb18)
259
260
- DDRIOB = 0xb40 / 4,
261
+REG32(DDRIOB, 0xb40)
262
#define DDRIOB_LENGTH 14
263
-};
264
265
#define ZYNQ_SLCR_MMIO_SIZE 0x1000
266
#define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4)
267
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_reset(DeviceState *d)
268
269
DB_PRINT("RESET\n");
270
271
- s->regs[LOCKSTA] = 1;
272
+ s->regs[R_LOCKSTA] = 1;
273
/* 0x100 - 0x11C */
274
- s->regs[ARM_PLL_CTRL] = 0x0001A008;
275
- s->regs[DDR_PLL_CTRL] = 0x0001A008;
276
- s->regs[IO_PLL_CTRL] = 0x0001A008;
277
- s->regs[PLL_STATUS] = 0x0000003F;
278
- s->regs[ARM_PLL_CFG] = 0x00014000;
279
- s->regs[DDR_PLL_CFG] = 0x00014000;
280
- s->regs[IO_PLL_CFG] = 0x00014000;
281
+ s->regs[R_ARM_PLL_CTRL] = 0x0001A008;
282
+ s->regs[R_DDR_PLL_CTRL] = 0x0001A008;
283
+ s->regs[R_IO_PLL_CTRL] = 0x0001A008;
284
+ s->regs[R_PLL_STATUS] = 0x0000003F;
285
+ s->regs[R_ARM_PLL_CFG] = 0x00014000;
286
+ s->regs[R_DDR_PLL_CFG] = 0x00014000;
287
+ s->regs[R_IO_PLL_CFG] = 0x00014000;
288
289
/* 0x120 - 0x16C */
290
- s->regs[ARM_CLK_CTRL] = 0x1F000400;
291
- s->regs[DDR_CLK_CTRL] = 0x18400003;
292
- s->regs[DCI_CLK_CTRL] = 0x01E03201;
293
- s->regs[APER_CLK_CTRL] = 0x01FFCCCD;
294
- s->regs[USB0_CLK_CTRL] = s->regs[USB1_CLK_CTRL] = 0x00101941;
295
- s->regs[GEM0_RCLK_CTRL] = s->regs[GEM1_RCLK_CTRL] = 0x00000001;
296
- s->regs[GEM0_CLK_CTRL] = s->regs[GEM1_CLK_CTRL] = 0x00003C01;
297
- s->regs[SMC_CLK_CTRL] = 0x00003C01;
298
- s->regs[LQSPI_CLK_CTRL] = 0x00002821;
299
- s->regs[SDIO_CLK_CTRL] = 0x00001E03;
300
- s->regs[UART_CLK_CTRL] = 0x00003F03;
301
- s->regs[SPI_CLK_CTRL] = 0x00003F03;
302
- s->regs[CAN_CLK_CTRL] = 0x00501903;
303
- s->regs[DBG_CLK_CTRL] = 0x00000F03;
304
- s->regs[PCAP_CLK_CTRL] = 0x00000F01;
305
+ s->regs[R_ARM_CLK_CTRL] = 0x1F000400;
306
+ s->regs[R_DDR_CLK_CTRL] = 0x18400003;
307
+ s->regs[R_DCI_CLK_CTRL] = 0x01E03201;
308
+ s->regs[R_APER_CLK_CTRL] = 0x01FFCCCD;
309
+ s->regs[R_USB0_CLK_CTRL] = s->regs[R_USB1_CLK_CTRL] = 0x00101941;
310
+ s->regs[R_GEM0_RCLK_CTRL] = s->regs[R_GEM1_RCLK_CTRL] = 0x00000001;
311
+ s->regs[R_GEM0_CLK_CTRL] = s->regs[R_GEM1_CLK_CTRL] = 0x00003C01;
312
+ s->regs[R_SMC_CLK_CTRL] = 0x00003C01;
313
+ s->regs[R_LQSPI_CLK_CTRL] = 0x00002821;
314
+ s->regs[R_SDIO_CLK_CTRL] = 0x00001E03;
315
+ s->regs[R_UART_CLK_CTRL] = 0x00003F03;
316
+ s->regs[R_SPI_CLK_CTRL] = 0x00003F03;
317
+ s->regs[R_CAN_CLK_CTRL] = 0x00501903;
318
+ s->regs[R_DBG_CLK_CTRL] = 0x00000F03;
319
+ s->regs[R_PCAP_CLK_CTRL] = 0x00000F01;
320
321
/* 0x170 - 0x1AC */
322
- s->regs[FPGA0_CLK_CTRL] = s->regs[FPGA1_CLK_CTRL] = s->regs[FPGA2_CLK_CTRL]
323
- = s->regs[FPGA3_CLK_CTRL] = 0x00101800;
324
- s->regs[FPGA0_THR_STA] = s->regs[FPGA1_THR_STA] = s->regs[FPGA2_THR_STA]
325
- = s->regs[FPGA3_THR_STA] = 0x00010000;
326
+ s->regs[R_FPGA0_CLK_CTRL] = s->regs[R_FPGA1_CLK_CTRL]
327
+ = s->regs[R_FPGA2_CLK_CTRL]
328
+ = s->regs[R_FPGA3_CLK_CTRL] = 0x00101800;
329
+ s->regs[R_FPGA0_THR_STA] = s->regs[R_FPGA1_THR_STA]
330
+ = s->regs[R_FPGA2_THR_STA]
331
+ = s->regs[R_FPGA3_THR_STA] = 0x00010000;
332
333
/* 0x1B0 - 0x1D8 */
334
- s->regs[BANDGAP_TRIP] = 0x0000001F;
335
- s->regs[PLL_PREDIVISOR] = 0x00000001;
336
- s->regs[CLK_621_TRUE] = 0x00000001;
337
+ s->regs[R_BANDGAP_TRIP] = 0x0000001F;
338
+ s->regs[R_PLL_PREDIVISOR] = 0x00000001;
339
+ s->regs[R_CLK_621_TRUE] = 0x00000001;
340
341
/* 0x200 - 0x25C */
342
- s->regs[FPGA_RST_CTRL] = 0x01F33F0F;
343
- s->regs[RST_REASON] = 0x00000040;
344
+ s->regs[R_FPGA_RST_CTRL] = 0x01F33F0F;
345
+ s->regs[R_RST_REASON] = 0x00000040;
346
347
- s->regs[BOOT_MODE] = 0x00000001;
348
+ s->regs[R_BOOT_MODE] = 0x00000001;
349
350
/* 0x700 - 0x7D4 */
351
for (i = 0; i < 54; i++) {
352
- s->regs[MIO + i] = 0x00001601;
353
+ s->regs[R_MIO + i] = 0x00001601;
354
}
355
for (i = 2; i <= 8; i++) {
356
- s->regs[MIO + i] = 0x00000601;
357
+ s->regs[R_MIO + i] = 0x00000601;
358
}
359
360
- s->regs[MIO_MST_TRI0] = s->regs[MIO_MST_TRI1] = 0xFFFFFFFF;
361
+ s->regs[R_MIO_MST_TRI0] = s->regs[R_MIO_MST_TRI1] = 0xFFFFFFFF;
362
363
- s->regs[CPU_RAM + 0] = s->regs[CPU_RAM + 1] = s->regs[CPU_RAM + 3]
364
- = s->regs[CPU_RAM + 4] = s->regs[CPU_RAM + 7]
365
- = 0x00010101;
366
- s->regs[CPU_RAM + 2] = s->regs[CPU_RAM + 5] = 0x01010101;
367
- s->regs[CPU_RAM + 6] = 0x00000001;
368
+ s->regs[R_CPU_RAM + 0] = s->regs[R_CPU_RAM + 1] = s->regs[R_CPU_RAM + 3]
369
+ = s->regs[R_CPU_RAM + 4] = s->regs[R_CPU_RAM + 7]
370
+ = 0x00010101;
371
+ s->regs[R_CPU_RAM + 2] = s->regs[R_CPU_RAM + 5] = 0x01010101;
372
+ s->regs[R_CPU_RAM + 6] = 0x00000001;
373
374
- s->regs[IOU + 0] = s->regs[IOU + 1] = s->regs[IOU + 2] = s->regs[IOU + 3]
375
- = 0x09090909;
376
- s->regs[IOU + 4] = s->regs[IOU + 5] = 0x00090909;
377
- s->regs[IOU + 6] = 0x00000909;
378
+ s->regs[R_IOU + 0] = s->regs[R_IOU + 1] = s->regs[R_IOU + 2]
379
+ = s->regs[R_IOU + 3] = 0x09090909;
380
+ s->regs[R_IOU + 4] = s->regs[R_IOU + 5] = 0x00090909;
381
+ s->regs[R_IOU + 6] = 0x00000909;
382
383
- s->regs[DMAC_RAM] = 0x00000009;
384
+ s->regs[R_DMAC_RAM] = 0x00000009;
385
386
- s->regs[AFI0 + 0] = s->regs[AFI0 + 1] = 0x09090909;
387
- s->regs[AFI1 + 0] = s->regs[AFI1 + 1] = 0x09090909;
388
- s->regs[AFI2 + 0] = s->regs[AFI2 + 1] = 0x09090909;
389
- s->regs[AFI3 + 0] = s->regs[AFI3 + 1] = 0x09090909;
390
- s->regs[AFI0 + 2] = s->regs[AFI1 + 2] = s->regs[AFI2 + 2]
391
- = s->regs[AFI3 + 2] = 0x00000909;
392
+ s->regs[R_AFI0 + 0] = s->regs[R_AFI0 + 1] = 0x09090909;
393
+ s->regs[R_AFI1 + 0] = s->regs[R_AFI1 + 1] = 0x09090909;
394
+ s->regs[R_AFI2 + 0] = s->regs[R_AFI2 + 1] = 0x09090909;
395
+ s->regs[R_AFI3 + 0] = s->regs[R_AFI3 + 1] = 0x09090909;
396
+ s->regs[R_AFI0 + 2] = s->regs[R_AFI1 + 2] = s->regs[R_AFI2 + 2]
397
+ = s->regs[R_AFI3 + 2] = 0x00000909;
398
399
- s->regs[OCM + 0] = 0x01010101;
400
- s->regs[OCM + 1] = s->regs[OCM + 2] = 0x09090909;
401
+ s->regs[R_OCM + 0] = 0x01010101;
402
+ s->regs[R_OCM + 1] = s->regs[R_OCM + 2] = 0x09090909;
403
404
- s->regs[DEVCI_RAM] = 0x00000909;
405
- s->regs[CSG_RAM] = 0x00000001;
406
+ s->regs[R_DEVCI_RAM] = 0x00000909;
407
+ s->regs[R_CSG_RAM] = 0x00000001;
408
409
- s->regs[DDRIOB + 0] = s->regs[DDRIOB + 1] = s->regs[DDRIOB + 2]
410
- = s->regs[DDRIOB + 3] = 0x00000e00;
411
- s->regs[DDRIOB + 4] = s->regs[DDRIOB + 5] = s->regs[DDRIOB + 6]
412
- = 0x00000e00;
413
- s->regs[DDRIOB + 12] = 0x00000021;
414
+ s->regs[R_DDRIOB + 0] = s->regs[R_DDRIOB + 1] = s->regs[R_DDRIOB + 2]
415
+ = s->regs[R_DDRIOB + 3] = 0x00000e00;
416
+ s->regs[R_DDRIOB + 4] = s->regs[R_DDRIOB + 5] = s->regs[R_DDRIOB + 6]
417
+ = 0x00000e00;
418
+ s->regs[R_DDRIOB + 12] = 0x00000021;
419
}
420
421
422
static bool zynq_slcr_check_offset(hwaddr offset, bool rnw)
423
{
23
{
424
switch (offset) {
24
struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque;
425
- case LOCK:
25
- DisplaySurface *surface = qemu_console_surface(omap_lcd->con);
426
- case UNLOCK:
26
+ DisplaySurface *surface;
427
- case DDR_CAL_START:
27
draw_line_func draw_line;
428
- case DDR_REF_START:
28
int size, height, first, last;
429
+ case R_LOCK:
29
int width, linesize, step, bpp, frame_offset;
430
+ case R_UNLOCK:
30
hwaddr frame_base;
431
+ case R_DDR_CAL_START:
31
432
+ case R_DDR_REF_START:
32
- if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable ||
433
return !rnw; /* Write only */
33
- !surface_bits_per_pixel(surface)) {
434
- case LOCKSTA:
34
+ if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable) {
435
- case FPGA0_THR_STA:
35
+ return;
436
- case FPGA1_THR_STA:
36
+ }
437
- case FPGA2_THR_STA:
37
+
438
- case FPGA3_THR_STA:
38
+ surface = qemu_console_surface(omap_lcd->con);
439
- case BOOT_MODE:
39
+ if (!surface_bits_per_pixel(surface)) {
440
- case PSS_IDCODE:
441
- case DDR_CMD_STA:
442
- case DDR_DFI_STATUS:
443
- case PLL_STATUS:
444
+ case R_LOCKSTA:
445
+ case R_FPGA0_THR_STA:
446
+ case R_FPGA1_THR_STA:
447
+ case R_FPGA2_THR_STA:
448
+ case R_FPGA3_THR_STA:
449
+ case R_BOOT_MODE:
450
+ case R_PSS_IDCODE:
451
+ case R_DDR_CMD_STA:
452
+ case R_DDR_DFI_STATUS:
453
+ case R_PLL_STATUS:
454
return rnw;/* read only */
455
- case SCL:
456
- case ARM_PLL_CTRL ... IO_PLL_CTRL:
457
- case ARM_PLL_CFG ... IO_PLL_CFG:
458
- case ARM_CLK_CTRL ... TOPSW_CLK_CTRL:
459
- case FPGA0_CLK_CTRL ... FPGA0_THR_CNT:
460
- case FPGA1_CLK_CTRL ... FPGA1_THR_CNT:
461
- case FPGA2_CLK_CTRL ... FPGA2_THR_CNT:
462
- case FPGA3_CLK_CTRL ... FPGA3_THR_CNT:
463
- case BANDGAP_TRIP:
464
- case PLL_PREDIVISOR:
465
- case CLK_621_TRUE:
466
- case PSS_RST_CTRL ... A9_CPU_RST_CTRL:
467
- case RS_AWDT_CTRL:
468
- case RST_REASON:
469
- case REBOOT_STATUS:
470
- case APU_CTRL:
471
- case WDT_CLK_SEL:
472
- case TZ_DMA_NS ... TZ_DMA_PERIPH_NS:
473
- case DDR_URGENT:
474
- case DDR_URGENT_SEL:
475
- case MIO ... MIO + MIO_LENGTH - 1:
476
- case MIO_LOOPBACK ... MIO_MST_TRI1:
477
- case SD0_WP_CD_SEL:
478
- case SD1_WP_CD_SEL:
479
- case LVL_SHFTR_EN:
480
- case OCM_CFG:
481
- case CPU_RAM:
482
- case IOU:
483
- case DMAC_RAM:
484
- case AFI0 ... AFI3 + AFI_LENGTH - 1:
485
- case OCM:
486
- case DEVCI_RAM:
487
- case CSG_RAM:
488
- case GPIOB_CTRL ... GPIOB_CFG_CMOS33:
489
- case GPIOB_CFG_HSTL:
490
- case GPIOB_DRVR_BIAS_CTRL:
491
- case DDRIOB ... DDRIOB + DDRIOB_LENGTH - 1:
492
+ case R_SCL:
493
+ case R_ARM_PLL_CTRL ... R_IO_PLL_CTRL:
494
+ case R_ARM_PLL_CFG ... R_IO_PLL_CFG:
495
+ case R_ARM_CLK_CTRL ... R_TOPSW_CLK_CTRL:
496
+ case R_FPGA0_CLK_CTRL ... R_FPGA0_THR_CNT:
497
+ case R_FPGA1_CLK_CTRL ... R_FPGA1_THR_CNT:
498
+ case R_FPGA2_CLK_CTRL ... R_FPGA2_THR_CNT:
499
+ case R_FPGA3_CLK_CTRL ... R_FPGA3_THR_CNT:
500
+ case R_BANDGAP_TRIP:
501
+ case R_PLL_PREDIVISOR:
502
+ case R_CLK_621_TRUE:
503
+ case R_PSS_RST_CTRL ... R_A9_CPU_RST_CTRL:
504
+ case R_RS_AWDT_CTRL:
505
+ case R_RST_REASON:
506
+ case R_REBOOT_STATUS:
507
+ case R_APU_CTRL:
508
+ case R_WDT_CLK_SEL:
509
+ case R_TZ_DMA_NS ... R_TZ_DMA_PERIPH_NS:
510
+ case R_DDR_URGENT:
511
+ case R_DDR_URGENT_SEL:
512
+ case R_MIO ... R_MIO + MIO_LENGTH - 1:
513
+ case R_MIO_LOOPBACK ... R_MIO_MST_TRI1:
514
+ case R_SD0_WP_CD_SEL:
515
+ case R_SD1_WP_CD_SEL:
516
+ case R_LVL_SHFTR_EN:
517
+ case R_OCM_CFG:
518
+ case R_CPU_RAM:
519
+ case R_IOU:
520
+ case R_DMAC_RAM:
521
+ case R_AFI0 ... R_AFI3 + AFI_LENGTH - 1:
522
+ case R_OCM:
523
+ case R_DEVCI_RAM:
524
+ case R_CSG_RAM:
525
+ case R_GPIOB_CTRL ... R_GPIOB_CFG_CMOS33:
526
+ case R_GPIOB_CFG_HSTL:
527
+ case R_GPIOB_DRVR_BIAS_CTRL:
528
+ case R_DDRIOB ... R_DDRIOB + DDRIOB_LENGTH - 1:
529
return true;
530
default:
531
return false;
532
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
533
}
534
535
switch (offset) {
536
- case SCL:
537
- s->regs[SCL] = val & 0x1;
538
+ case R_SCL:
539
+ s->regs[R_SCL] = val & 0x1;
540
return;
541
- case LOCK:
542
+ case R_LOCK:
543
if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
544
DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
545
(unsigned)val & 0xFFFF);
546
- s->regs[LOCKSTA] = 1;
547
+ s->regs[R_LOCKSTA] = 1;
548
} else {
549
DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
550
(int)offset, (unsigned)val & 0xFFFF);
551
}
552
return;
553
- case UNLOCK:
554
+ case R_UNLOCK:
555
if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
556
DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
557
(unsigned)val & 0xFFFF);
558
- s->regs[LOCKSTA] = 0;
559
+ s->regs[R_LOCKSTA] = 0;
560
} else {
561
DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
562
(int)offset, (unsigned)val & 0xFFFF);
563
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
564
return;
40
return;
565
}
41
}
566
42
567
- if (s->regs[LOCKSTA]) {
568
+ if (s->regs[R_LOCKSTA]) {
569
qemu_log_mask(LOG_GUEST_ERROR,
570
"SCLR registers are locked. Unlock them first\n");
571
return;
572
@@ -XXX,XX +XXX,XX @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
573
s->regs[offset] = val;
574
575
switch (offset) {
576
- case PSS_RST_CTRL:
577
- if (val & R_PSS_RST_CTRL_SOFT_RST) {
578
+ case R_PSS_RST_CTRL:
579
+ if (FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST)) {
580
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
581
}
582
break;
583
--
43
--
584
2.20.1
44
2.20.1
585
45
586
46
diff view generated by jsdifflib
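
(The zynq_slcr conversion above leans on hw/registerfields.h. A rough, simplified sketch of what those macros provide -- not the real QEMU definitions, see include/hw/registerfields.h for those: REG32() turns a register name plus byte offset into an R_<name> word index for the regs[] array, and FIELD() names a bitfield that FIELD_EX32() can then extract, which is where identifiers such as R_PSS_RST_CTRL and FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST) in the diff come from.)

    /* Simplified model only -- the real macros also define an address
     * constant, a field mask and deposit/extract helpers. */
    #define REG32(reg, addr) \
        enum { A_ ## reg = (addr), R_ ## reg = (addr) / 4 };
    #define FIELD(reg, field, shift, len) \
        enum { R_ ## reg ## _ ## field ## _SHIFT = (shift), \
               R_ ## reg ## _ ## field ## _LENGTH = (len) };

    REG32(PSS_RST_CTRL, 0x200)          /* R_PSS_RST_CTRL == 0x200 / 4 */
    FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1) /* bit 0 of that register */
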
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: AlexChen <alex.chen@huawei.com>
2
2
3
The offset is variable depending on the instruction set, whereas
3
In exynos4210_fimd_update(), the pointer s is dereferenced before
4
we have stored values for the current pc and the next pc. Passing
4
being checked for validity, which may lead to a NULL pointer dereference.
5
in the actual value is clearer in intent.
5
So move the assignment to global_width after checking that s is valid.
6
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reported-by: Euler Robot <euler.robot@huawei.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Alex Chen <alex.chen@huawei.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Message-id: 20190807045335.1361-8-richard.henderson@linaro.org
10
Message-id: 5F9F8D88.9030102@huawei.com
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
12
---
13
target/arm/translate-a64.c | 25 ++++++++++++++-----------
13
hw/display/exynos4210_fimd.c | 4 +++-
14
target/arm/translate-vfp.inc.c | 6 +++---
14
1 file changed, 3 insertions(+), 1 deletion(-)
15
target/arm/translate.c | 31 ++++++++++++++++---------------
16
3 files changed, 33 insertions(+), 29 deletions(-)
17
15
18
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
16
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/translate-a64.c
18
--- a/hw/display/exynos4210_fimd.c
21
+++ b/target/arm/translate-a64.c
19
+++ b/hw/display/exynos4210_fimd.c
22
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
20
@@ -XXX,XX +XXX,XX @@ static void exynos4210_fimd_update(void *opaque)
23
s->base.is_jmp = DISAS_NORETURN;
21
bool blend = false;
24
}
22
uint8_t *host_fb_addr;
25
23
bool is_dirty = false;
26
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
24
- const int global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
27
+static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
25
+ int global_width;
28
uint32_t syndrome, uint32_t target_el)
26
29
{
27
if (!s || !s->console || !s->enabled ||
30
- gen_a64_set_pc_im(s->base.pc_next - offset);
28
surface_bits_per_pixel(qemu_console_surface(s->console)) == 0) {
31
+ gen_a64_set_pc_im(pc);
32
gen_exception(excp, syndrome, target_el);
33
s->base.is_jmp = DISAS_NORETURN;
34
}
35
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
36
void unallocated_encoding(DisasContext *s)
37
{
38
/* Unallocated and reserved encodings are uncategorized */
39
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
40
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
41
default_exception_el(s));
42
}
43
44
@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
45
return true;
46
}
47
48
- gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
49
- s->fp_excp_el);
50
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
51
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
52
return false;
53
}
54
55
@@ -XXX,XX +XXX,XX @@ static inline bool fp_access_check(DisasContext *s)
56
bool sve_access_check(DisasContext *s)
57
{
58
if (s->sve_excp_el) {
59
- gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
60
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
61
s->sve_excp_el);
62
return false;
63
}
64
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
65
switch (op2_ll) {
66
case 1: /* SVC */
67
gen_ss_advance(s);
68
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
69
- default_exception_el(s));
70
+ gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
71
+ syn_aa64_svc(imm16), default_exception_el(s));
72
break;
73
case 2: /* HVC */
74
if (s->current_el == 0) {
75
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
76
gen_a64_set_pc_im(s->pc_curr);
77
gen_helper_pre_hvc(cpu_env);
78
gen_ss_advance(s);
79
- gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
80
+ gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
81
+ syn_aa64_hvc(imm16), 2);
82
break;
83
case 3: /* SMC */
84
if (s->current_el == 0) {
85
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
86
gen_helper_pre_smc(cpu_env, tmp);
87
tcg_temp_free_i32(tmp);
88
gen_ss_advance(s);
89
- gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
90
+ gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
91
+ syn_aa64_smc(imm16), 3);
92
break;
93
default:
94
unallocated_encoding(s);
95
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
96
if (s->btype != 0
97
&& s->guarded_page
98
&& !btype_destination_ok(insn, s->bt, s->btype)) {
99
- gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
100
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
101
+ syn_btitrap(s->btype),
102
default_exception_el(s));
103
return;
104
}
105
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/translate-vfp.inc.c
108
+++ b/target/arm/translate-vfp.inc.c
109
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
110
{
111
if (s->fp_excp_el) {
112
if (arm_dc_feature(s, ARM_FEATURE_M)) {
113
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
114
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
115
s->fp_excp_el);
116
} else {
117
- gen_exception_insn(s, 4, EXCP_UDEF,
118
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
119
syn_fp_access_trap(1, 0xe, false),
120
s->fp_excp_el);
121
}
122
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
123
124
if (!s->vfp_enabled && !ignore_vfp_enabled) {
125
assert(!arm_dc_feature(s, ARM_FEATURE_M));
126
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
127
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
128
default_exception_el(s));
129
return false;
130
}
131
diff --git a/target/arm/translate.c b/target/arm/translate.c
132
index XXXXXXX..XXXXXXX 100644
133
--- a/target/arm/translate.c
134
+++ b/target/arm/translate.c
135
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
136
s->base.is_jmp = DISAS_NORETURN;
137
}
138
139
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
140
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
141
int syn, uint32_t target_el)
142
{
143
gen_set_condexec(s);
144
- gen_set_pc_im(s, s->base.pc_next - offset);
145
+ gen_set_pc_im(s, pc);
146
gen_exception(excp, syn, target_el);
147
s->base.is_jmp = DISAS_NORETURN;
148
}
149
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
150
return;
29
return;
151
}
30
}
152
31
+
153
- gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
32
+ global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
154
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
33
exynos4210_update_resolution(s);
155
default_exception_el(s));
34
surface = qemu_console_surface(s->console);
156
}
157
158
@@ -XXX,XX +XXX,XX @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
159
160
undef:
161
/* If we get here then some access check did not pass */
162
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
163
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
164
+ syn_uncategorized(), exc_target);
165
return false;
166
}
167
168
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
169
* for attempts to execute invalid vfp/neon encodings with FP disabled.
170
*/
171
if (s->fp_excp_el) {
172
- gen_exception_insn(s, 4, EXCP_UDEF,
173
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
174
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
175
return 0;
176
}
177
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
178
* for attempts to execute invalid vfp/neon encodings with FP disabled.
179
*/
180
if (s->fp_excp_el) {
181
- gen_exception_insn(s, 4, EXCP_UDEF,
182
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
183
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
184
return 0;
185
}
186
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
187
}
188
189
if (s->fp_excp_el) {
190
- gen_exception_insn(s, 4, EXCP_UDEF,
191
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
192
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
193
return 0;
194
}
195
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
196
off_rm = vfp_reg_offset(0, rm);
197
}
198
if (s->fp_excp_el) {
199
- gen_exception_insn(s, 4, EXCP_UDEF,
200
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
201
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
202
return 0;
203
}
204
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
205
* For the UNPREDICTABLE cases we choose to UNDEF.
206
*/
207
if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
208
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
209
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
210
return;
211
}
212
213
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
214
}
215
216
if (undef) {
217
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
218
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
219
default_exception_el(s));
220
return;
221
}
222
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
223
* UsageFault exception.
224
*/
225
if (arm_dc_feature(s, ARM_FEATURE_M)) {
226
- gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
227
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
228
default_exception_el(s));
229
return;
230
}
231
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
232
break;
233
default:
234
illegal_op:
235
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
236
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
237
default_exception_el(s));
238
break;
239
}
240
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
241
}
242
243
/* All other insns: NOCP */
244
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
245
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
246
default_exception_el(s));
247
break;
248
}
249
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
250
}
251
return;
252
illegal_op:
253
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
254
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
255
default_exception_el(s));
256
}
257
258
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
259
return;
260
illegal_op:
261
undef:
262
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
263
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
264
default_exception_el(s));
265
}
266
35
267
--
36
--
268
2.20.1
37
2.20.1
269
38
270
39
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
In arm_v7m_mmu_idx_for_secstate() we get the 'priv' level to pass to
2
armv7m_mmu_idx_for_secstate_and_priv() by calling arm_current_el().
3
This is incorrect when the security state being queried is not the
4
current one, because arm_current_el() uses the current security state
5
to determine which of the banked CONTROL.nPRIV bits to look at.
6
The effect was that if (for instance) Secure state was in privileged
7
mode but Non-Secure was not then we would return the wrong MMU index.
2
8
3
Replace x = double_saturate(y) with x = add_saturate(y, y).
9
The only places where we are using this function in a way that could
4
There is no need for a separate, more specialized helper.
10
trigger this bug are for the stack loads during a v8M function-return
11
and for the instruction fetch of a v8M SG insn.
5
12
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Fix the bug by expanding out the M-profile version of the
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
arm_current_el() logic inline so it can use the passed in secstate
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
rather than env->v7m.secure.
9
Message-id: 20190807045335.1361-12-richard.henderson@linaro.org
16
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 20201022164408.13214-1-peter.maydell@linaro.org
11
---
20
---
12
target/arm/helper.h | 1 -
21
target/arm/m_helper.c | 3 ++-
13
target/arm/op_helper.c | 15 ---------------
22
1 file changed, 2 insertions(+), 1 deletion(-)
14
target/arm/translate.c | 4 ++--
15
3 files changed, 2 insertions(+), 18 deletions(-)
16
23
17
diff --git a/target/arm/helper.h b/target/arm/helper.h
24
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
18
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper.h
26
--- a/target/arm/m_helper.c
20
+++ b/target/arm/helper.h
27
+++ b/target/arm/m_helper.c
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
28
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
22
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
29
/* Return the MMU index for a v7M CPU in the specified security state */
23
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
30
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
24
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
31
{
25
-DEF_HELPER_2(double_saturate, i32, env, s32)
32
- bool priv = arm_current_el(env) != 0;
26
DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
33
+ bool priv = arm_v7m_is_handler_mode(env) ||
27
DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
34
+ !(env->v7m.control[secstate] & 1);
28
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
35
29
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
36
return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/op_helper.c
32
+++ b/target/arm/op_helper.c
33
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
34
return res;
35
}
37
}
36
37
-uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
38
-{
39
- uint32_t res;
40
- if (val >= 0x40000000) {
41
- res = ~SIGNBIT;
42
- env->QF = 1;
43
- } else if (val <= (int32_t)0xc0000000) {
44
- res = SIGNBIT;
45
- env->QF = 1;
46
- } else {
47
- res = val << 1;
48
- }
49
- return res;
50
-}
51
-
52
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
53
{
54
uint32_t res = a + b;
55
diff --git a/target/arm/translate.c b/target/arm/translate.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate.c
58
+++ b/target/arm/translate.c
59
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
60
tmp = load_reg(s, rm);
61
tmp2 = load_reg(s, rn);
62
if (op1 & 2)
63
- gen_helper_double_saturate(tmp2, cpu_env, tmp2);
64
+ gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
65
if (op1 & 1)
66
gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
67
else
68
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
69
tmp = load_reg(s, rn);
70
tmp2 = load_reg(s, rm);
71
if (op & 1)
72
- gen_helper_double_saturate(tmp, cpu_env, tmp);
73
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
74
if (op & 2)
75
gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
76
else
77
--
38
--
78
2.20.1
39
2.20.1
79
40
80
41
diff view generated by jsdifflib
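
(The double_saturate removal above is just the identity that saturated doubling equals a saturated add of a value to itself. A small standalone illustration in plain C -- not the QEMU helpers, and ignoring the QF saturation flag the real helpers also set.)

    #include <stdint.h>
    #include <stdio.h>

    /* Signed 32-bit saturating add, the operation add_saturate provides. */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        int64_t r = (int64_t)a + b;
        if (r > INT32_MAX) {
            return INT32_MAX;
        } else if (r < INT32_MIN) {
            return INT32_MIN;
        }
        return (int32_t)r;
    }

    int main(void)
    {
        int32_t y = 0x50000000;                        /* doubling overflows */
        printf("0x%08x\n", (unsigned)sat_add32(y, y)); /* prints 0x7fffffff */
        return 0;
    }
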
1
From: Richard Henderson <richard.henderson@linaro.org>
1
On some hosts (eg Ubuntu Bionic) pkg-config returns a set of
2
libraries for gio-2.0 which don't actually work when compiling
3
statically. (Specifically, the returned library string includes
4
-lmount, but not -lblkid which -lmount depends upon, so linking
5
fails due to missing symbols.)
2
6
3
Unlike the other more generic gen_exception{,_internal}_insn
7
Check that the libraries work, and don't enable gio if they don't,
4
interfaces, breakpoints always refer to the current instruction.
8
in the same way we do for gnutls.
5
9
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20190807045335.1361-10-richard.henderson@linaro.org
13
Message-id: 20200928160402.7961-1-peter.maydell@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
14
---
12
target/arm/translate-a64.c | 7 +++----
15
configure | 10 +++++++++-
13
target/arm/translate.c | 8 ++++----
16
1 file changed, 9 insertions(+), 1 deletion(-)
14
2 files changed, 7 insertions(+), 8 deletions(-)
15
17
16
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
18
diff --git a/configure b/configure
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100755
18
--- a/target/arm/translate-a64.c
20
--- a/configure
19
+++ b/target/arm/translate-a64.c
21
+++ b/configure
20
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
22
@@ -XXX,XX +XXX,XX @@ if test "$static" = yes && test "$mingw32" = yes; then
21
s->base.is_jmp = DISAS_NORETURN;
23
fi
22
}
24
23
25
if $pkg_config --atleast-version=$glib_req_ver gio-2.0; then
24
-static void gen_exception_bkpt_insn(DisasContext *s, int offset,
26
- gio=yes
25
- uint32_t syndrome)
27
gio_cflags=$($pkg_config --cflags gio-2.0)
26
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
28
gio_libs=$($pkg_config --libs gio-2.0)
27
{
29
gdbus_codegen=$($pkg_config --variable=gdbus_codegen gio-2.0)
28
TCGv_i32 tcg_syn;
30
if [ ! -x "$gdbus_codegen" ]; then
29
31
gdbus_codegen=
30
- gen_a64_set_pc_im(s->base.pc_next - offset);
32
fi
31
+ gen_a64_set_pc_im(s->pc_curr);
33
+ # Check that the libraries actually work -- Ubuntu 18.04 ships
32
tcg_syn = tcg_const_i32(syndrome);
34
+ # with pkg-config --static --libs data for gio-2.0 that is missing
33
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
35
+ # -lblkid and will give a link error.
34
tcg_temp_free_i32(tcg_syn);
36
+ write_c_skeleton
35
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
37
+ if compile_prog "" "gio_libs" ; then
36
break;
38
+ gio=yes
37
}
39
+ else
38
/* BRK */
40
+ gio=no
39
- gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
41
+ fi
40
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
42
else
41
break;
43
gio=no
42
case 2:
44
fi
43
if (op2_ll != 0) {
44
diff --git a/target/arm/translate.c b/target/arm/translate.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/translate.c
47
+++ b/target/arm/translate.c
48
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
49
s->base.is_jmp = DISAS_NORETURN;
50
}
51
52
-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
53
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
54
{
55
TCGv_i32 tcg_syn;
56
57
gen_set_condexec(s);
58
- gen_set_pc_im(s, s->base.pc_next - offset);
59
+ gen_set_pc_im(s, s->pc_curr);
60
tcg_syn = tcg_const_i32(syn);
61
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
62
tcg_temp_free_i32(tcg_syn);
63
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
64
case 1:
65
/* bkpt */
66
ARCH(5);
67
- gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
68
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
69
break;
70
case 2:
71
/* Hypervisor call (v7) */
72
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
73
{
74
int imm8 = extract32(insn, 0, 8);
75
ARCH(5);
76
- gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
77
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
78
break;
79
}
80
81
--
45
--
82
2.20.1
46
2.20.1
83
47
84
48
diff view generated by jsdifflib
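The configure hunk above does not trust the pkg-config output alone: it compiles and links a trivial program against $gio_libs and only keeps gio=yes if that link succeeds, the same approach the script already uses for gnutls. A link test like this needs nothing more than an empty main(); the sketch below is illustrative, and the exact skeleton QEMU's write_c_skeleton helper emits may differ:

    /*
     * If "cc skeleton.c $gio_libs" fails (for example because -lmount is
     * listed without the -lblkid it depends on), configure falls back to
     * gio=no instead of producing a build that breaks at final link time.
     */
    int main(void)
    {
        return 0;
    }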
1
From: Richard Henderson <richard.henderson@linaro.org>
1
In gicv3_init_cpuif() we copy the ARMCPU gicv3_maintenance_interrupt
2
into the GICv3CPUState struct's maintenance_irq field. This will
3
only work if the board happens to have already wired up the CPU
4
maintenance IRQ before the GIC was realized. Unfortunately this is
5
not the case for the 'virt' board, and so the value that gets copied
6
is NULL (since a qemu_irq is really a pointer to an IRQState struct
7
under the hood). The effect is that the CPU interface code never
8
actually raises the maintenance interrupt line.
2
9
3
The thumb bit has already been removed from s->pc, which is therefore always even.
10
Instead, since the GICv3CPUState has a pointer to the CPUState, make
11
the dereference at the point where we want to raise the interrupt, to
12
avoid an implicit requirement on board code to wire things up in a
13
particular order.
4
14
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Reported-by: Jose Martins <josemartins90@gmail.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190807045335.1361-6-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Message-id: 20201009153904.28529-1-peter.maydell@linaro.org
18
Reviewed-by: Luc Michel <luc@lmichel.fr>
10
---
19
---
11
target/arm/translate.c | 10 +++++-----
20
include/hw/intc/arm_gicv3_common.h | 1 -
12
1 file changed, 5 insertions(+), 5 deletions(-)
21
hw/intc/arm_gicv3_cpuif.c | 5 ++---
22
2 files changed, 2 insertions(+), 4 deletions(-)
13
23
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
24
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
15
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
26
--- a/include/hw/intc/arm_gicv3_common.h
17
+++ b/target/arm/translate.c
27
+++ b/include/hw/intc/arm_gicv3_common.h
18
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
28
@@ -XXX,XX +XXX,XX @@ struct GICv3CPUState {
19
/* Force a TB lookup after an instruction that changes the CPU state. */
29
qemu_irq parent_fiq;
20
static inline void gen_lookup_tb(DisasContext *s)
30
qemu_irq parent_virq;
21
{
31
qemu_irq parent_vfiq;
22
- tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
32
- qemu_irq maintenance_irq;
23
+ tcg_gen_movi_i32(cpu_R[15], s->pc);
33
24
s->base.is_jmp = DISAS_EXIT;
34
/* Redistributor */
35
uint32_t level; /* Current IRQ level */
36
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/hw/intc/arm_gicv3_cpuif.c
39
+++ b/hw/intc/arm_gicv3_cpuif.c
40
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
41
int irqlevel = 0;
42
int fiqlevel = 0;
43
int maintlevel = 0;
44
+ ARMCPU *cpu = ARM_CPU(cs->cpu);
45
46
idx = hppvi_index(cs);
47
trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
48
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
49
50
qemu_set_irq(cs->parent_vfiq, fiqlevel);
51
qemu_set_irq(cs->parent_virq, irqlevel);
52
- qemu_set_irq(cs->maintenance_irq, maintlevel);
53
+ qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
25
}
54
}
26
55
27
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
56
static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
28
* self-modifying code correctly and also to take
57
@@ -XXX,XX +XXX,XX @@ void gicv3_init_cpuif(GICv3State *s)
29
* any pending interrupts immediately.
58
&& cpu->gic_num_lrs) {
30
*/
59
int j;
31
- gen_goto_tb(s, 0, s->pc & ~1);
60
32
+ gen_goto_tb(s, 0, s->pc);
61
- cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
33
return;
62
-
34
case 7: /* sb */
63
cs->num_list_regs = cpu->gic_num_lrs;
35
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
64
cs->vpribits = cpu->gic_vpribits;
36
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
65
cs->vprebits = cpu->gic_vprebits;
37
* for TCG; MB and end the TB instead.
38
*/
39
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
40
- gen_goto_tb(s, 0, s->pc & ~1);
41
+ gen_goto_tb(s, 0, s->pc);
42
return;
43
default:
44
goto illegal_op;
45
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
46
* and also to take any pending interrupts
47
* immediately.
48
*/
49
- gen_goto_tb(s, 0, s->pc & ~1);
50
+ gen_goto_tb(s, 0, s->pc);
51
break;
52
case 7: /* sb */
53
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
54
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
55
* for TCG; MB and end the TB instead.
56
*/
57
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
58
- gen_goto_tb(s, 0, s->pc & ~1);
59
+ gen_goto_tb(s, 0, s->pc);
60
break;
61
default:
62
goto illegal_op;
63
--
66
--
64
2.20.1
67
2.20.1
65
68
66
69
diff view generated by jsdifflib
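The arm_gicv3_cpuif change above is an initialization-order fix: a qemu_irq is a pointer under the hood, so copying the CPU's maintenance interrupt into GICv3CPUState at realize time captures NULL on boards (such as 'virt') that only wire that IRQ up after the GIC has been realized. Resolving it through cs->cpu at the point where the interrupt is raised removes the ordering requirement. A compressed sketch of the hazard, with placeholder structure names rather than the real QEMU types:

    /* qemu_irq is a pointer-like handle; an early snapshot copies
     * whatever the board has (or has not) wired up yet. */
    typedef struct IRQState *qemu_irq;

    struct cpu_s       { qemu_irq gicv3_maintenance_interrupt; };
    struct gic_cpuif_s { struct cpu_s *cpu; qemu_irq cached_maint_irq; };

    static void init_cpuif(struct gic_cpuif_s *cs)
    {
        /* old scheme: may legitimately capture NULL here, depending on
         * the order in which the board wires things up */
        cs->cached_maint_irq = cs->cpu->gicv3_maintenance_interrupt;
    }

    static qemu_irq maint_irq_at_raise_time(struct gic_cpuif_s *cs)
    {
        /* new scheme: resolve the IRQ when it is actually raised, so
         * wiring order no longer matters */
        return cs->cpu->gicv3_maintenance_interrupt;
    }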
1
From: Richard Henderson <richard.henderson@linaro.org>
1
The kerneldoc script currently emits Sphinx markup for a macro with
2
arguments that uses the c:function directive. This is correct for
3
Sphinx versions earlier than Sphinx 3, where c:macro doesn't allow
4
documentation of macros with arguments and c:function is not picky
5
about the syntax of what it is passed. However, in Sphinx 3 the
6
c:macro directive was enhanced to support macros with arguments,
7
and c:function was made more picky about what syntax it accepted.
2
8
3
We currently have 3 different ways of computing the architectural
9
When kerneldoc is told that it needs to produce output for Sphinx
4
value of "PC" as seen in the ARM ARM.
10
3 or later, make it emit c:function only for functions and c:macro
11
for macros with arguments. We assume that anything with a return
12
type is a function and anything without is a macro.
5
13
6
The value of s->pc has been incremented past the current insn,
14
This fixes the Sphinx error:
7
but that is all. Thus for a32, PC = s->pc + 4; for t32, PC = s->pc;
8
for t16, PC = s->pc + 2. These differing computations make it
9
impossible at present to unify the various code paths.
10
15
11
With the newly introduced s->pc_curr, we can compute the correct
16
/home/petmay01/linaro/qemu-from-laptop/qemu/docs/../include/qom/object.h:155:Error in declarator
12
value for all cases, using the formula given in the ARM ARM.
17
If declarator-id with parameters (e.g., 'void f(int arg)'):
18
Invalid C declaration: Expected identifier in nested name. [error at 25]
19
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
20
-------------------------^
21
If parenthesis in noptr-declarator (e.g., 'void (*f(int arg))(double)'):
22
Error in declarator or parameters
23
Invalid C declaration: Expecting "(" in parameters. [error at 39]
24
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
25
---------------------------------------^
13
26
14
This changes the behaviour for load_reg() and load_reg_var()
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
when called with reg==15 from a 32-bit Thumb instruction:
28
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
16
previously they would have returned the incorrect value
29
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
17
of pc_curr + 6, and now they will return the architecturally
30
Message-id: 20201030174700.7204-2-peter.maydell@linaro.org
18
correct value of PC, which is pc_curr + 4. This will not
31
---
19
affect well-behaved guest software, because all of the places
32
scripts/kernel-doc | 18 +++++++++++++++++-
20
we call these functions from T32 code are instructions where
33
1 file changed, 17 insertions(+), 1 deletion(-)
21
using r15 is UNPREDICTABLE. Using the architectural PC value
22
here is more consistent with the T16 and A32 behaviour.
23
34
24
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
35
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
25
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
36
index XXXXXXX..XXXXXXX 100755
26
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
37
--- a/scripts/kernel-doc
27
Message-id: 20190807045335.1361-4-richard.henderson@linaro.org
38
+++ b/scripts/kernel-doc
28
[PMM: added commit message note about UNPREDICTABLE T32 cases]
39
@@ -XXX,XX +XXX,XX @@ sub output_function_rst(%) {
29
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
40
    output_highlight_rst($args{'purpose'});
30
---
41
    $start = "\n\n**Syntax**\n\n ``";
31
target/arm/translate.c | 59 ++++++++++++++++--------------------------
32
1 file changed, 23 insertions(+), 36 deletions(-)
33
34
diff --git a/target/arm/translate.c b/target/arm/translate.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate.c
37
+++ b/target/arm/translate.c
38
@@ -XXX,XX +XXX,XX @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
39
#define store_cpu_field(var, name) \
40
store_cpu_offset(var, offsetof(CPUARMState, name))
41
42
+/* The architectural value of PC. */
43
+static uint32_t read_pc(DisasContext *s)
44
+{
45
+ return s->pc_curr + (s->thumb ? 4 : 8);
46
+}
47
+
48
/* Set a variable to the value of a CPU register. */
49
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
50
{
51
if (reg == 15) {
52
- uint32_t addr;
53
- /* normally, since we updated PC, we need only to add one insn */
54
- if (s->thumb)
55
- addr = (long)s->pc + 2;
56
- else
57
- addr = (long)s->pc + 4;
58
- tcg_gen_movi_i32(var, addr);
59
+ tcg_gen_movi_i32(var, read_pc(s));
60
} else {
42
} else {
61
tcg_gen_mov_i32(var, cpu_R[reg]);
43
-    print ".. c:function:: ";
44
+ if ((split(/\./, $sphinx_version))[0] >= 3) {
45
+ # Sphinx 3 and later distinguish macros and functions and
46
+ # complain if you use c:function with something that's not
47
+ # syntactically valid as a function declaration.
48
+ # We assume that anything with a return type is a function
49
+ # and anything without is a macro.
50
+ if ($args{'functiontype'} ne "") {
51
+ print ".. c:function:: ";
52
+ } else {
53
+ print ".. c:macro:: ";
54
+ }
55
+ } else {
56
+ # Older Sphinx don't support documenting macros that take
57
+ # arguments with c:macro, and don't complain about the use
58
+ # of c:function for this.
59
+ print ".. c:function:: ";
60
+ }
62
}
61
}
63
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
62
if ($args{'functiontype'} ne "") {
64
/* branch link and change to thumb (blx <offset>) */
63
    $start .= $args{'functiontype'} . " " . $args{'function'} . " (";
65
int32_t offset;
66
67
- val = (uint32_t)s->pc;
68
tmp = tcg_temp_new_i32();
69
- tcg_gen_movi_i32(tmp, val);
70
+ tcg_gen_movi_i32(tmp, s->pc);
71
store_reg(s, 14, tmp);
72
/* Sign-extend the 24-bit offset */
73
offset = (((int32_t)insn) << 8) >> 8;
74
+ val = read_pc(s);
75
/* offset * 4 + bit24 * 2 + (thumb bit) */
76
val += (offset << 2) | ((insn >> 23) & 2) | 1;
77
- /* pipeline offset */
78
- val += 4;
79
/* protected by ARCH(5); above, near the start of uncond block */
80
gen_bx_im(s, val);
81
return;
82
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
83
} else {
84
/* store */
85
if (i == 15) {
86
- /* special case: r15 = PC + 8 */
87
- val = (long)s->pc + 4;
88
tmp = tcg_temp_new_i32();
89
- tcg_gen_movi_i32(tmp, val);
90
+ tcg_gen_movi_i32(tmp, read_pc(s));
91
} else if (user) {
92
tmp = tcg_temp_new_i32();
93
tmp2 = tcg_const_i32(i);
94
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
95
int32_t offset;
96
97
/* branch (and link) */
98
- val = (int32_t)s->pc;
99
if (insn & (1 << 24)) {
100
tmp = tcg_temp_new_i32();
101
- tcg_gen_movi_i32(tmp, val);
102
+ tcg_gen_movi_i32(tmp, s->pc);
103
store_reg(s, 14, tmp);
104
}
105
offset = sextract32(insn << 2, 0, 26);
106
- val += offset + 4;
107
- gen_jmp(s, val);
108
+ gen_jmp(s, read_pc(s) + offset);
109
}
110
break;
111
case 0xc:
112
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
113
tcg_temp_free_i32(addr);
114
} else if ((insn & (7 << 5)) == 0) {
115
/* Table Branch. */
116
- if (rn == 15) {
117
- addr = tcg_temp_new_i32();
118
- tcg_gen_movi_i32(addr, s->pc);
119
- } else {
120
- addr = load_reg(s, rn);
121
- }
122
+ addr = load_reg(s, rn);
123
tmp = load_reg(s, rm);
124
tcg_gen_add_i32(addr, addr, tmp);
125
if (insn & (1 << 4)) {
126
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
127
}
128
tcg_temp_free_i32(addr);
129
tcg_gen_shli_i32(tmp, tmp, 1);
130
- tcg_gen_addi_i32(tmp, tmp, s->pc);
131
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
132
store_reg(s, 15, tmp);
133
} else {
134
bool is_lasr = false;
135
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
136
tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
137
}
138
139
- offset += s->pc;
140
+ offset += read_pc(s);
141
if (insn & (1 << 12)) {
142
/* b/bl */
143
gen_jmp(s, offset);
144
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
145
offset |= (insn & (1 << 11)) << 8;
146
147
/* jump to the offset */
148
- gen_jmp(s, s->pc + offset);
149
+ gen_jmp(s, read_pc(s) + offset);
150
}
151
} else {
152
/*
153
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
154
if (insn & (1 << 11)) {
155
rd = (insn >> 8) & 7;
156
/* load pc-relative. Bit 1 of PC is ignored. */
157
- val = s->pc + 2 + ((insn & 0xff) * 4);
158
+ val = read_pc(s) + ((insn & 0xff) * 4);
159
val &= ~(uint32_t)2;
160
addr = tcg_temp_new_i32();
161
tcg_gen_movi_i32(addr, val);
162
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
163
} else {
164
/* PC. bit 1 is ignored. */
165
tmp = tcg_temp_new_i32();
166
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
167
+ tcg_gen_movi_i32(tmp, read_pc(s) & ~(uint32_t)2);
168
}
169
val = (insn & 0xff) * 4;
170
tcg_gen_addi_i32(tmp, tmp, val);
171
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
172
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
173
tcg_temp_free_i32(tmp);
174
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
175
- val = (uint32_t)s->pc + 2;
176
- val += offset;
177
- gen_jmp(s, val);
178
+ gen_jmp(s, read_pc(s) + offset);
179
break;
180
181
case 15: /* IT, nop-hint. */
182
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
183
arm_skip_unless(s, cond);
184
185
/* jump to the offset */
186
- val = (uint32_t)s->pc + 2;
187
+ val = read_pc(s);
188
offset = ((int32_t)insn << 24) >> 24;
189
val += offset << 1;
190
gen_jmp(s, val);
191
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
192
break;
193
}
194
/* unconditional branch */
195
- val = (uint32_t)s->pc;
196
+ val = read_pc(s);
197
offset = ((int32_t)insn << 21) >> 21;
198
- val += (offset << 1) + 2;
199
+ val += offset << 1;
200
gen_jmp(s, val);
201
break;
202
203
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
204
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
205
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
206
207
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
208
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
209
}
210
break;
211
}
212
--
64
--
213
2.20.1
65
2.20.1
214
66
215
67
diff view generated by jsdifflib
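The read_pc() patch above collapses three per-decoder formulas into one expression based on the newly added pc_curr. Because s->pc has already been advanced past the instruction, pc_curr + (thumb ? 4 : 8) reproduces the old "+4", "+0" and "+2" adjustments for A32, T32 and T16 respectively. A small self-contained check of that equivalence; struct ctx is a stand-in for DisasContext and the addresses are arbitrary examples:

    #include <assert.h>
    #include <stdint.h>

    struct ctx { uint32_t pc_curr; uint32_t pc; int thumb; };

    static uint32_t read_pc(const struct ctx *s)
    {
        return s->pc_curr + (s->thumb ? 4 : 8);
    }

    int main(void)
    {
        /* A32: 4-byte insn, PC = insn address + 8, i.e. old "s->pc + 4" */
        struct ctx a32 = { 0x1000, 0x1004, 0 };
        assert(read_pc(&a32) == a32.pc + 4);

        /* T32: 4-byte insn, PC = insn address + 4, i.e. old "s->pc" */
        struct ctx t32 = { 0x2000, 0x2004, 1 };
        assert(read_pc(&t32) == t32.pc);

        /* T16: 2-byte insn, PC = insn address + 4, i.e. old "s->pc + 2" */
        struct ctx t16 = { 0x3000, 0x3002, 1 };
        assert(read_pc(&t16) == t16.pc + 2);
        return 0;
    }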
1
From: Aaron Hill <aa1ronham@gmail.com>
1
Sphinx 3.2 is pickier than earlier versions about the option:: markup,
2
and complains about our usage in qemu-option-trace.rst:
2
3
3
This commit properly sets the ENET_BD_BDU flag once the emulated FEC controller
4
../../docs/qemu-option-trace.rst.inc:4:Malformed option description
4
has finished processing the last descriptor. This is done for both transmit
5
'[enable=]PATTERN', should look like "opt", "-opt args", "--opt args",
5
and receive descriptors.
6
"/opt args" or "+opt args"
6
7
7
This allows the QNX 7.0.0 BSP for the Sabrelite board (which can be
8
In this file, we're really trying to document the different parts of
8
found at http://blackberry.qnx.com/en/developers/bsp) to properly
9
the top-level --trace option, which qemu-nbd.rst and qemu-img.rst
9
control the FEC. Without this patch, the BSP ethernet driver will never
10
have already introduced with an option:: markup. So it's not right
10
re-use FEC descriptors, as the unset ENET_BD_BDU flag will cause
11
to use option:: here anyway. Switch to a different markup
11
it to believe that the descriptors are still in use by the NIC.
12
(definition lists) which gives about the same formatted output.
12
13
13
Note that Linux does not appear to use this field at all, and is
14
(Unlike option::, this markup doesn't produce index entries; but
14
unaffected by this patch.
15
at the moment we don't do anything much with indexes anyway, and
16
in any case I think it doesn't make much sense to have individual
17
index entries for the sub-parts of the --trace option.)
15
18
16
Without this patch, QNX will think that the NIC is still processing its
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
transaction descriptors, and won't send any more data over the network.
20
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
21
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
22
Message-id: 20201030174700.7204-3-peter.maydell@linaro.org
23
---
24
docs/qemu-option-trace.rst.inc | 6 +++---
25
1 file changed, 3 insertions(+), 3 deletions(-)
18
26
19
For reference:
27
diff --git a/docs/qemu-option-trace.rst.inc b/docs/qemu-option-trace.rst.inc
20
21
On page 1192 of the I.MX 6DQ reference manual revision (Rev. 5, 06/2018),
22
which can be found at https://www.nxp.com/products/processors-and-microcontrollers/arm-based-processors-and-mcus/i.mx-applications-processors/i.mx-6-processors/i.mx-6quad-processors-high-performance-3d-graphics-hd-video-arm-cortex-a9-core:i.MX6Q?&tab=Documentation_Tab&linkline=Application-Note
23
24
the 'BDU' field is described as follows for the 'Enhanced transmit
25
buffer descriptor':
26
27
'Last buffer descriptor update done. Indicates that the last BD data has been updated by
28
uDMA. This field is written by the user (=0) and uDMA (=1).'
29
30
The same description is used for the receive buffer descriptor.
31
32
Signed-off-by: Aaron Hill <aa1ronham@gmail.com>
33
Message-id: 20190805142417.10433-1-aaron.hill@alertinnovation.com
34
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
35
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
36
---
37
hw/net/imx_fec.c | 4 ++++
38
1 file changed, 4 insertions(+)
39
40
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
41
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/net/imx_fec.c
29
--- a/docs/qemu-option-trace.rst.inc
43
+++ b/hw/net/imx_fec.c
30
+++ b/docs/qemu-option-trace.rst.inc
44
@@ -XXX,XX +XXX,XX @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
31
@@ -XXX,XX +XXX,XX @@
45
if (bd.option & ENET_BD_TX_INT) {
32
46
s->regs[ENET_EIR] |= int_txf;
33
Specify tracing options.
47
}
34
48
+ /* Indicate that we've updated the last buffer descriptor. */
35
-.. option:: [enable=]PATTERN
49
+ bd.last_buffer = ENET_BD_BDU;
36
+``[enable=]PATTERN``
50
}
37
51
if (bd.option & ENET_BD_TX_INT) {
38
Immediately enable events matching *PATTERN*
52
s->regs[ENET_EIR] |= int_txb;
39
(either event name or a globbing pattern). This option is only
53
@@ -XXX,XX +XXX,XX @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
40
@@ -XXX,XX +XXX,XX @@ Specify tracing options.
54
/* Last buffer in frame. */
41
55
bd.flags |= flags | ENET_BD_L;
42
Use :option:`-trace help` to print a list of names of trace points.
56
FEC_PRINTF("rx frame flags %04x\n", bd.flags);
43
57
+ /* Indicate that we've updated the last buffer descriptor. */
44
-.. option:: events=FILE
58
+ bd.last_buffer = ENET_BD_BDU;
45
+``events=FILE``
59
if (bd.option & ENET_BD_RX_INT) {
46
60
s->regs[ENET_EIR] |= ENET_INT_RXF;
47
Immediately enable events listed in *FILE*.
61
}
48
The file must contain one event name (as listed in the ``trace-events-all``
49
@@ -XXX,XX +XXX,XX @@ Specify tracing options.
50
available if QEMU has been compiled with the ``simple``, ``log`` or
51
``ftrace`` tracing backend.
52
53
-.. option:: file=FILE
54
+``file=FILE``
55
56
Log output traces to *FILE*.
57
This option is only available if QEMU has been compiled with
62
--
58
--
63
2.20.1
59
2.20.1
64
60
65
61
diff view generated by jsdifflib
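The imx_fec change above completes a producer/consumer handshake on the buffer descriptors: the driver hands a descriptor to the device with BDU clear, and may only recycle it once the device (the uDMA engine, emulated here) has written BDU back as 1. A sketch of the driver-side test this enables; the bit position and abbreviated struct layout are assumptions for illustration, not the QNX BSP driver nor QEMU's imx_fec.h:

    #include <stdint.h>

    #define ENET_BD_BDU (1u << 31)   /* assumed bit position, illustration only */

    struct enet_bd { uint32_t flags; uint32_t option; uint32_t last_buffer; };

    static int descriptor_done(const struct enet_bd *bd)
    {
        /* The device sets BDU when it has finished updating the
         * descriptor; until then the driver must leave it alone.
         * Without the patch the emulated FEC never set it, so a
         * driver polling this bit would wait forever. */
        return (bd->last_buffer & ENET_BD_BDU) != 0;
    }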
1
Factor out code to 'generate a singlestep exception', which is
1
The randomness tests in the NPCM7xx RNG test fail intermittently
2
currently repeated in four places.
2
but fairly frequently. On my machine running the test in a loop:
3
while QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test; do true; done
3
4
4
To do this we need to also pull the identical copies of the
5
will fail in less than a minute with an error like:
5
gen_exception() function out of translate-a64.c and translate.c
6
ERROR:../../tests/qtest/npcm7xx_rng-test.c:256:test_first_byte_runs:
6
into translate.h.
7
assertion failed (calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE) > 0.01): (0.00286205989 > 0.01)
7
8
8
(There is a bug in the code: we're taking the exception to the wrong
9
(Failures have been observed on all 4 of the randomness tests,
9
target EL. This will be simpler to fix if there's only one place to
10
not just first_byte_runs.)
10
do it.)
11
12
It's not clear why these tests are failing like this, but intermittent
13
failures make CI and merge testing awkward, so disable running them
14
unless a developer specifically sets QEMU_TEST_FLAKY_RNG_TESTS when
15
running the test suite, until we work out the cause.
11
16
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
18
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
19
Message-id: 20201102152454.8287-1-peter.maydell@linaro.org
15
Message-id: 20190805130952.4415-2-peter.maydell@linaro.org
20
Reviewed-by: Havard Skinnemoen <hskinnemoen@google.com>
16
---
21
---
17
target/arm/translate.h | 23 +++++++++++++++++++++++
22
tests/qtest/npcm7xx_rng-test.c | 14 ++++++++++----
18
target/arm/translate-a64.c | 19 ++-----------------
23
1 file changed, 10 insertions(+), 4 deletions(-)
19
target/arm/translate.c | 20 ++------------------
20
3 files changed, 27 insertions(+), 35 deletions(-)
21
24
22
diff --git a/target/arm/translate.h b/target/arm/translate.h
25
diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c
23
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/translate.h
27
--- a/tests/qtest/npcm7xx_rng-test.c
25
+++ b/target/arm/translate.h
28
+++ b/tests/qtest/npcm7xx_rng-test.c
26
@@ -XXX,XX +XXX,XX @@
29
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
27
#define TARGET_ARM_TRANSLATE_H
30
28
31
qtest_add_func("npcm7xx_rng/enable_disable", test_enable_disable);
29
#include "exec/translator.h"
32
qtest_add_func("npcm7xx_rng/rosel", test_rosel);
30
+#include "internals.h"
33
- qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
31
34
- qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
32
35
- qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
33
/* internal defines */
36
- qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
34
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
37
+ /*
35
}
38
+ * These tests fail intermittently; only run them on explicit
36
}
39
+ * request until we figure out why.
37
40
+ */
38
+static inline void gen_exception(int excp, uint32_t syndrome,
41
+ if (getenv("QEMU_TEST_FLAKY_RNG_TESTS")) {
39
+ uint32_t target_el)
42
+ qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
40
+{
43
+ qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
41
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
44
+ qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
42
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
45
+ qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
43
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
46
+ }
44
+
47
45
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
48
qtest_start("-machine npcm750-evb");
46
+ tcg_syn, tcg_el);
49
ret = g_test_run();
47
+
48
+ tcg_temp_free_i32(tcg_el);
49
+ tcg_temp_free_i32(tcg_syn);
50
+ tcg_temp_free_i32(tcg_excp);
51
+}
52
+
53
+/* Generate an architectural singlestep exception */
54
+static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
55
+{
56
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
57
+ default_exception_el(s));
58
+}
59
+
60
/*
61
* Given a VFP floating point constant encoded into an 8 bit immediate in an
62
* instruction, expand it to the actual constant value of the specified
63
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/translate-a64.c
66
+++ b/target/arm/translate-a64.c
67
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
68
tcg_temp_free_i32(tcg_excp);
69
}
70
71
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
72
-{
73
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
74
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
75
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
76
-
77
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
78
- tcg_syn, tcg_el);
79
- tcg_temp_free_i32(tcg_el);
80
- tcg_temp_free_i32(tcg_syn);
81
- tcg_temp_free_i32(tcg_excp);
82
-}
83
-
84
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
85
{
86
gen_a64_set_pc_im(s->pc - offset);
87
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
88
* of the exception, and our syndrome information is always correct.
89
*/
90
gen_ss_advance(s);
91
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
92
- default_exception_el(s));
93
+ gen_swstep_exception(s, 1, s->is_ldex);
94
s->base.is_jmp = DISAS_NORETURN;
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
98
* bits should be zero.
99
*/
100
assert(dc->base.num_insns == 1);
101
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
102
- default_exception_el(dc));
103
+ gen_swstep_exception(dc, 0, 0);
104
dc->base.is_jmp = DISAS_NORETURN;
105
} else {
106
disas_a64_insn(env, dc);
107
diff --git a/target/arm/translate.c b/target/arm/translate.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/target/arm/translate.c
110
+++ b/target/arm/translate.c
111
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
112
tcg_temp_free_i32(tcg_excp);
113
}
114
115
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
116
-{
117
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
118
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
119
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
120
-
121
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
122
- tcg_syn, tcg_el);
123
-
124
- tcg_temp_free_i32(tcg_el);
125
- tcg_temp_free_i32(tcg_syn);
126
- tcg_temp_free_i32(tcg_excp);
127
-}
128
-
129
static void gen_step_complete_exception(DisasContext *s)
130
{
131
/* We just completed step of an insn. Move from Active-not-pending
132
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
133
* of the exception, and our syndrome information is always correct.
134
*/
135
gen_ss_advance(s);
136
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
137
- default_exception_el(s));
138
+ gen_swstep_exception(s, 1, s->is_ldex);
139
s->base.is_jmp = DISAS_NORETURN;
140
}
141
142
@@ -XXX,XX +XXX,XX @@ static bool arm_pre_translate_insn(DisasContext *dc)
143
* bits should be zero.
144
*/
145
assert(dc->base.num_insns == 1);
146
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
147
- default_exception_el(dc));
148
+ gen_swstep_exception(dc, 0, 0);
149
dc->base.is_jmp = DISAS_NORETURN;
150
return true;
151
}
152
--
50
--
153
2.20.1
51
2.20.1
154
52
155
53
diff view generated by jsdifflib
Deleted patch
1
When generating an architectural single-step exception we were
2
routing it to the "default exception level", which is to say
3
the same exception level we execute at except that EL0 exceptions
4
go to EL1. This is incorrect because the debug exception level
5
can be configured by the guest for situations such as single
6
stepping of EL0 and EL1 code by EL2.
7
1
8
We have to track the target debug exception level in the TB
9
flags, because it is dependent on CPU state like HCR_EL2.TGE
10
and MDCR_EL2.TDE. (That we were previously calling the
11
arm_debug_target_el() function to determine dc->ss_same_el
12
is itself a bug, though one that would only have manifested
13
as incorrect syndrome information.) Since we are out of TB
14
flag bits unless we want to expand into the cs_base field,
15
we share some bits with the M-profile only HANDLER and
16
STACKCHECK bits, since only A-profile has this singlestep.
17
18
Fixes: https://bugs.launchpad.net/qemu/+bug/1838913
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
21
Tested-by: Alex Bennée <alex.bennee@linaro.org>
22
Message-id: 20190805130952.4415-3-peter.maydell@linaro.org
23
---
24
target/arm/cpu.h | 5 +++++
25
target/arm/translate.h | 15 +++++++++++----
26
target/arm/helper.c | 6 ++++++
27
target/arm/translate-a64.c | 2 +-
28
target/arm/translate.c | 4 +++-
29
5 files changed, 26 insertions(+), 6 deletions(-)
30
31
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/cpu.h
34
+++ b/target/arm/cpu.h
35
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
36
/* Target EL if we take a floating-point-disabled exception */
37
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
38
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
39
+/*
40
+ * For A-profile only, target EL for debug exceptions.
41
+ * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
42
+ */
43
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
44
45
/* Bit usage when in AArch32 state: */
46
FIELD(TBFLAG_A32, THUMB, 0, 1)
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate.h
50
+++ b/target/arm/translate.h
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
52
uint32_t svc_imm;
53
int aarch64;
54
int current_el;
55
+ /* Debug target exception level for single-step exceptions */
56
+ int debug_target_el;
57
GHashTable *cp_regs;
58
uint64_t features; /* CPU features bits */
59
/* Because unallocated encodings generate different exception syndrome
60
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
61
* ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
62
*/
63
bool is_ldex;
64
- /* True if a single-step exception will be taken to the current EL */
65
- bool ss_same_el;
66
/* True if v8.3-PAuth is active. */
67
bool pauth_active;
68
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
69
@@ -XXX,XX +XXX,XX @@ static inline void gen_exception(int excp, uint32_t syndrome,
70
/* Generate an architectural singlestep exception */
71
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
72
{
73
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, isv, ex),
74
- default_exception_el(s));
75
+ bool same_el = (s->debug_target_el == s->current_el);
76
+
77
+ /*
78
+ * If singlestep is targeting a lower EL than the current one,
79
+ * then s->ss_active must be false and we can never get here.
80
+ */
81
+ assert(s->debug_target_el >= s->current_el);
82
+
83
+ gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
84
}
85
86
/*
87
diff --git a/target/arm/helper.c b/target/arm/helper.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/helper.c
90
+++ b/target/arm/helper.c
91
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
92
}
93
}
94
95
+ if (!arm_feature(env, ARM_FEATURE_M)) {
96
+ int target_el = arm_debug_target_el(env);
97
+
98
+ flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
99
+ }
100
+
101
*pflags = flags;
102
*cs_base = 0;
103
}
104
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/target/arm/translate-a64.c
107
+++ b/target/arm/translate-a64.c
108
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
109
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
110
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
111
dc->is_ldex = false;
112
- dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
113
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
114
115
/* Bound the number of insns to execute to those left on the page. */
116
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
117
diff --git a/target/arm/translate.c b/target/arm/translate.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/target/arm/translate.c
120
+++ b/target/arm/translate.c
121
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
122
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
123
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
124
dc->is_ldex = false;
125
- dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
126
+ if (!arm_feature(env, ARM_FEATURE_M)) {
127
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
128
+ }
129
130
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
131
132
--
133
2.20.1
134
135
diff view generated by jsdifflib
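The deleted single-step patch above moves the routing decision for architectural single-step exceptions out of the "default exception level" logic and into a DEBUG_TARGET_EL TB flag, because the target depends on hypervisor configuration. Roughly, and only as a sketch consistent with the commit message rather than a copy of QEMU's arm_debug_target_el(): debug exceptions from EL0/EL1 go to EL2 when EL2 exists in Non-secure state and HCR_EL2.TGE or MDCR_EL2.TDE is set; otherwise they are taken at the current EL, with EL0 promoted to EL1:

    #include <stdbool.h>

    static int debug_target_el(int current_el, bool have_el2, bool secure,
                               bool hcr_tge, bool mdcr_tde)
    {
        if (have_el2 && !secure && current_el < 2 && (hcr_tge || mdcr_tde)) {
            return 2;                       /* EL2 has claimed debug exceptions */
        }
        return current_el < 2 ? 1 : current_el;   /* never EL0 */
    }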
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Add a new field to retain the address of the instruction currently
4
being translated. The 32-bit uses are all within subroutines used
5
by a32 and t32. This will become less obvious when t16 support is
6
merged with a32+t32, and having a clear definition will help.
7
8
Convert aarch64 as well for consistency. Note that there is one
9
instance of a pre-assert fprintf that used the wrong value for the
10
address of the current instruction.
11
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Message-id: 20190807045335.1361-3-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
18
target/arm/translate-a64.h | 2 +-
19
target/arm/translate.h | 2 ++
20
target/arm/translate-a64.c | 21 +++++++++++----------
21
target/arm/translate.c | 14 ++++++++------
22
4 files changed, 22 insertions(+), 17 deletions(-)
23
24
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-a64.h
27
+++ b/target/arm/translate-a64.h
28
@@ -XXX,XX +XXX,XX @@ void unallocated_encoding(DisasContext *s);
29
qemu_log_mask(LOG_UNIMP, \
30
"%s:%d: unsupported instruction encoding 0x%08x " \
31
"at pc=%016" PRIx64 "\n", \
32
- __FILE__, __LINE__, insn, s->pc - 4); \
33
+ __FILE__, __LINE__, insn, s->pc_curr); \
34
unallocated_encoding(s); \
35
} while (0)
36
37
diff --git a/target/arm/translate.h b/target/arm/translate.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/translate.h
40
+++ b/target/arm/translate.h
41
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
42
const ARMISARegisters *isar;
43
44
target_ulong pc;
45
+ /* The address of the current instruction being translated. */
46
+ target_ulong pc_curr;
47
target_ulong page_start;
48
uint32_t insn;
49
/* Nonzero if this instruction has been conditionally skipped. */
50
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/translate-a64.c
53
+++ b/target/arm/translate-a64.c
54
@@ -XXX,XX +XXX,XX @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
55
*/
56
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
57
{
58
- uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
59
+ uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
60
61
if (insn & (1U << 31)) {
62
/* BL Branch with link */
63
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
64
sf = extract32(insn, 31, 1);
65
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
66
rt = extract32(insn, 0, 5);
67
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
68
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
69
70
tcg_cmp = read_cpu_reg(s, rt, sf);
71
label_match = gen_new_label();
72
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
73
74
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
75
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
76
- addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
77
+ addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
78
rt = extract32(insn, 0, 5);
79
80
tcg_cmp = tcg_temp_new_i64();
81
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
82
unallocated_encoding(s);
83
return;
84
}
85
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
86
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
87
cond = extract32(insn, 0, 4);
88
89
reset_btype(s);
90
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
91
TCGv_i32 tcg_syn, tcg_isread;
92
uint32_t syndrome;
93
94
- gen_a64_set_pc_im(s->pc - 4);
95
+ gen_a64_set_pc_im(s->pc_curr);
96
tmpptr = tcg_const_ptr(ri);
97
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
98
tcg_syn = tcg_const_i32(syndrome);
99
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
100
/* The pre HVC helper handles cases when HVC gets trapped
101
* as an undefined insn by runtime configuration.
102
*/
103
- gen_a64_set_pc_im(s->pc - 4);
104
+ gen_a64_set_pc_im(s->pc_curr);
105
gen_helper_pre_hvc(cpu_env);
106
gen_ss_advance(s);
107
gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
108
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
109
unallocated_encoding(s);
110
break;
111
}
112
- gen_a64_set_pc_im(s->pc - 4);
113
+ gen_a64_set_pc_im(s->pc_curr);
114
tmp = tcg_const_i32(syn_aa64_smc(imm16));
115
gen_helper_pre_smc(cpu_env, tmp);
116
tcg_temp_free_i32(tmp);
117
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
118
119
tcg_rt = cpu_reg(s, rt);
120
121
- clean_addr = tcg_const_i64((s->pc - 4) + imm);
122
+ clean_addr = tcg_const_i64(s->pc_curr + imm);
123
if (is_vector) {
124
do_fp_ld(s, rt, clean_addr, size);
125
} else {
126
@@ -XXX,XX +XXX,XX @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
127
offset = sextract64(insn, 5, 19);
128
offset = offset << 2 | extract32(insn, 29, 2);
129
rd = extract32(insn, 0, 5);
130
- base = s->pc - 4;
131
+ base = s->pc_curr;
132
133
if (page) {
134
/* ADRP (page based) */
135
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
136
break;
137
default:
138
fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
139
- __func__, insn, fpopcode, s->pc);
140
+ __func__, insn, fpopcode, s->pc_curr);
141
g_assert_not_reached();
142
}
143
144
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
145
{
146
uint32_t insn;
147
148
+ s->pc_curr = s->pc;
149
insn = arm_ldl_code(env, s->pc, s->sctlr_b);
150
s->insn = insn;
151
s->pc += 4;
152
diff --git a/target/arm/translate.c b/target/arm/translate.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/target/arm/translate.c
155
+++ b/target/arm/translate.c
156
@@ -XXX,XX +XXX,XX @@ static inline void gen_hvc(DisasContext *s, int imm16)
157
* as an undefined insn by runtime configuration (ie before
158
* the insn really executes).
159
*/
160
- gen_set_pc_im(s, s->pc - 4);
161
+ gen_set_pc_im(s, s->pc_curr);
162
gen_helper_pre_hvc(cpu_env);
163
/* Otherwise we will treat this as a real exception which
164
* happens after execution of the insn. (The distinction matters
165
@@ -XXX,XX +XXX,XX @@ static inline void gen_smc(DisasContext *s)
166
*/
167
TCGv_i32 tmp;
168
169
- gen_set_pc_im(s, s->pc - 4);
170
+ gen_set_pc_im(s, s->pc_curr);
171
tmp = tcg_const_i32(syn_aa32_smc());
172
gen_helper_pre_smc(cpu_env, tmp);
173
tcg_temp_free_i32(tmp);
174
@@ -XXX,XX +XXX,XX @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
175
176
/* Sync state because msr_banked() can raise exceptions */
177
gen_set_condexec(s);
178
- gen_set_pc_im(s, s->pc - 4);
179
+ gen_set_pc_im(s, s->pc_curr);
180
tcg_reg = load_reg(s, rn);
181
tcg_tgtmode = tcg_const_i32(tgtmode);
182
tcg_regno = tcg_const_i32(regno);
183
@@ -XXX,XX +XXX,XX @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
184
185
/* Sync state because mrs_banked() can raise exceptions */
186
gen_set_condexec(s);
187
- gen_set_pc_im(s, s->pc - 4);
188
+ gen_set_pc_im(s, s->pc_curr);
189
tcg_reg = tcg_temp_new_i32();
190
tcg_tgtmode = tcg_const_i32(tgtmode);
191
tcg_regno = tcg_const_i32(regno);
192
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
193
}
194
195
gen_set_condexec(s);
196
- gen_set_pc_im(s, s->pc - 4);
197
+ gen_set_pc_im(s, s->pc_curr);
198
tmpptr = tcg_const_ptr(ri);
199
tcg_syn = tcg_const_i32(syndrome);
200
tcg_isread = tcg_const_i32(isread);
201
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
202
tmp = tcg_const_i32(mode);
203
/* get_r13_banked() will raise an exception if called from System mode */
204
gen_set_condexec(s);
205
- gen_set_pc_im(s, s->pc - 4);
206
+ gen_set_pc_im(s, s->pc_curr);
207
gen_helper_get_r13_banked(addr, cpu_env, tmp);
208
tcg_temp_free_i32(tmp);
209
switch (amode) {
210
@@ -XXX,XX +XXX,XX @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
211
return;
212
}
213
214
+ dc->pc_curr = dc->pc;
215
insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
216
dc->insn = insn;
217
dc->pc += 4;
218
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
219
return;
220
}
221
222
+ dc->pc_curr = dc->pc;
223
insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
224
is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
225
dc->pc += 2;
226
--
227
2.20.1
228
229
diff view generated by jsdifflib
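The deleted pc_curr patch above records the address of the instruction being translated at fetch time, instead of reconstructing it later as s->pc minus a fixed offset, which is only correct while every encoding is 4 bytes long. A sketch of the capture pattern; the pc and pc_curr names mirror the DisasContext fields in the diff, the rest is illustrative:

    #include <stdint.h>

    struct dc { uint32_t pc; uint32_t pc_curr; };

    static uint32_t fetch_insn(struct dc *s, int insn_len)
    {
        s->pc_curr = s->pc;    /* remember where this insn starts */
        uint32_t insn = 0;     /* arm_ldl_code()/arm_lduw_code() in the real code */
        s->pc += insn_len;     /* 2 for Thumb-16, 4 for A32/T32/A64 */
        return insn;
    }

    /*
     * After fetch_insn(), "s->pc - 4" only equals the instruction address
     * for 4-byte encodings; s->pc_curr is correct for every length, which
     * is what lets helpers shared between decoders use a single expression.
     */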