Some arm patches; my to-review queue is by no means empty, but
this is a big enough set of patches to be getting on with...

thanks
-- PMM

The following changes since commit cb9c6a8e5ad6a1f0ce164d352e3102df46986e22:

  .gitlab-ci.d/windows: Work-around timeout and OpenGL problems of the MSYS2 jobs (2023-01-04 18:58:33 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230105

for you to fetch changes up to 93c9678de9dc7d2e68f9e8477da072bac30ef132:

  hw/net: Fix read of uninitialized memory in imx_fec. (2023-01-05 15:33:00 +0000)

----------------------------------------------------------------
target-arm queue:
 * Implement AArch32 ARMv8-R support
 * Add Cortex-R52 CPU
 * fix handling of HLT semihosting in system mode
 * hw/timer/imx_epit: cleanup and fix bug in compare handling
 * target/arm: Coding style fixes
 * target/arm: Clean up includes
 * nseries: minor code cleanups
 * target/arm: align exposed ID registers with Linux
 * hw/arm/smmu-common: remove unnecessary inlines
 * i.MX7D: Handle GPT timers
 * i.MX7D: Connect IRQs to GPIO devices
 * i.MX6UL: Add a specific GPT timer instance
 * hw/net: Fix read of uninitialized memory in imx_fec

----------------------------------------------------------------
Alex Bennée (1):
      target/arm: fix handling of HLT semihosting in system mode

Axel Heider (8):
      hw/timer/imx_epit: improve comments
      hw/timer/imx_epit: cleanup CR defines
      hw/timer/imx_epit: define SR_OCIF
      hw/timer/imx_epit: update interrupt state on CR write access
      hw/timer/imx_epit: hard reset initializes CR with 0
      hw/timer/imx_epit: factor out register write handlers
      hw/timer/imx_epit: remove explicit fields cnt and freq
      hw/timer/imx_epit: fix compare timer handling

Claudio Fontana (1):
      target/arm: cleanup cpu includes

Fabiano Rosas (5):
      target/arm: Fix checkpatch comment style warnings in helper.c
      target/arm: Fix checkpatch space errors in helper.c
      target/arm: Fix checkpatch brace errors in helper.c
      target/arm: Remove unused includes from m_helper.c
      target/arm: Remove unused includes from helper.c

Jean-Christophe Dubois (4):
      i.MX7D: Connect GPT timers to IRQ
      i.MX7D: Compute clock frequency for the fixed frequency clocks.
      i.MX6UL: Add a specific GPT timer instance for the i.MX6UL
      i.MX7D: Connect IRQs to GPIO devices.

Peter Maydell (1):
      target/arm: Set lg_page_size to 0 if either S1 or S2 asks for it

Philippe Mathieu-Daudé (5):
      hw/input/tsc2xxx: Constify set_transform()'s MouseTransformInfo arg
      hw/arm/nseries: Constify various read-only arrays
      hw/arm/nseries: Silent -Wmissing-field-initializers warning
      hw/arm/smmu-common: Reduce smmu_inv_notifiers_mr() scope
      hw/arm/smmu-common: Avoid using inlined functions with external linkage

Stephen Longfield (1):
      hw/net: Fix read of uninitialized memory in imx_fec.

Tobias Röhmel (7):
      target/arm: Don't add all MIDR aliases for cores that implement PMSA
      target/arm: Make RVBAR available for all ARMv8 CPUs
      target/arm: Make stage_2_format for cache attributes optional
      target/arm: Enable TTBCR_EAE for ARMv8-R AArch32
      target/arm: Add PMSAv8r registers
      target/arm: Add PMSAv8r functionality
      target/arm: Add ARM Cortex-R52 CPU

Zhuojia Shen (1):
      target/arm: align exposed ID registers with Linux

 include/hw/arm/fsl-imx7.h         |  20 +
 include/hw/arm/smmu-common.h      |   3 -
 include/hw/input/tsc2xxx.h        |   4 +-
 include/hw/timer/imx_epit.h       |   8 +-
 include/hw/timer/imx_gpt.h        |   1 +
 target/arm/cpu.h                  |   6 +
 target/arm/internals.h            |   4 +
 hw/arm/fsl-imx6ul.c               |   2 +-
 hw/arm/fsl-imx7.c                 |  41 +-
 hw/arm/nseries.c                  |  28 +-
 hw/arm/smmu-common.c              |  15 +-
 hw/input/tsc2005.c                |   2 +-
 hw/input/tsc210x.c                |   3 +-
 hw/misc/imx6ul_ccm.c              |   6 -
 hw/misc/imx7_ccm.c                |  49 ++-
 hw/net/imx_fec.c                  |   8 +-
 hw/timer/imx_epit.c               | 376 +++++++++-------
 hw/timer/imx_gpt.c                |  25 ++
 target/arm/cpu.c                  |  35 +-
 target/arm/cpu64.c                |   6 -
 target/arm/cpu_tcg.c              |  42 ++
 target/arm/debug_helper.c         |   3 +
 target/arm/helper.c               | 871 +++++++++++++++++++++++++++++---------
 target/arm/m_helper.c             |  16 -
 target/arm/machine.c              |  28 ++
 target/arm/ptw.c                  | 152 +++++--
 target/arm/tlb_helper.c           |   4 +
 target/arm/translate.c            |   2 +-
 tests/tcg/aarch64/sysregs.c       |  24 +-
 tests/tcg/aarch64/Makefile.target |   7 +-
 30 files changed, 1330 insertions(+), 461 deletions(-)
In get_phys_addr_twostage() we set the lg_page_size of the result to
the maximum of the stage 1 and stage 2 page sizes. This works for
the case where we do want to create a TLB entry, because we know the
common TLB code only creates entries of the TARGET_PAGE_SIZE and
asking for a size larger than that only means that invalidations
invalidate the whole larger area. However, if lg_page_size is
smaller than TARGET_PAGE_SIZE this effectively means "don't create a
TLB entry"; in this case if either S1 or S2 said "this covers less
than a page and can't go in a TLB" then the final result also should
be marked that way. Set the resulting page size to 0 if either
stage asked for a less-than-a-page entry, and expand the comment
to explain what's going on.

This has no effect for VMSA because currently the VMSA lookup always
returns results that cover at least TARGET_PAGE_SIZE; however when we
add v8R support it will reuse this code path, and for v8R the S1 and
S2 results can be smaller than TARGET_PAGE_SIZE.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221212142708.610090-1-peter.maydell@linaro.org
---
 target/arm/ptw.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     }
 
     /*
-     * Use the maximum of the S1 & S2 page size, so that invalidation
-     * of pages > TARGET_PAGE_SIZE works correctly.
+     * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
+     * this means "don't put this in the TLB"; in this case, return a
+     * result with lg_page_size == 0 to achieve that. Otherwise,
+     * use the maximum of the S1 & S2 page size, so that invalidation
+     * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
+     * we know the combined result permissions etc only cover the minimum
+     * of the S1 and S2 page size, because we know that the common TLB code
+     * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
+     * and passing a larger page size value only affects invalidations.)
      */
-    if (result->f.lg_page_size < s1_lgpgsz) {
+    if (result->f.lg_page_size < TARGET_PAGE_BITS ||
+        s1_lgpgsz < TARGET_PAGE_BITS) {
+        result->f.lg_page_size = 0;
+    } else if (result->f.lg_page_size < s1_lgpgsz) {
         result->f.lg_page_size = s1_lgpgsz;
     }
 
-- 
2.25.1
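The combination rule above is small but subtle, so here is a standalone
sketch of it in plain C. The helper name, the use of bare ints instead of
QEMU's GetPhysAddrResult, and the 4K TARGET_PAGE_BITS value are assumptions
for illustration only, not part of the patch:

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12   /* assumed: 4K target pages */

    /* Illustrative stand-in for the logic in get_phys_addr_twostage(). */
    static int combine_lg_page_size(int s1_lgpgsz, int s2_lgpgsz)
    {
        if (s1_lgpgsz < TARGET_PAGE_BITS || s2_lgpgsz < TARGET_PAGE_BITS) {
            /* Either stage covered less than a page: make no TLB entry. */
            return 0;
        }
        /* Otherwise the maximum, so large-page invalidation still works. */
        return s1_lgpgsz > s2_lgpgsz ? s1_lgpgsz : s2_lgpgsz;
    }

    int main(void)
    {
        printf("4K s1, 2M s2 -> %d\n", combine_lg_page_size(12, 21)); /* 21 */
        printf("1K s1, 4K s2 -> %d\n", combine_lg_page_size(10, 12)); /* 0 */
        return 0;
    }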
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>

Cores with PMSA have the MPUIR register which has the
same encoding as the MIDR alias with opc2=4. So we only
add that alias if we are not realizing a core that
implements PMSA.

Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221206102504.165775-2-tobias.roehmel@rwth-aachen.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
           .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
           .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
           .readfn = midr_read },
-        /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
-        { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
-          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
-          .access = PL1_R, .resetvalue = cpu->midr },
+        /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
         { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
           .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
           .access = PL1_R, .resetvalue = cpu->midr },
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
           .accessfn = access_aa64_tid1,
           .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
     };
+    ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
+        .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+        .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+        .access = PL1_R, .resetvalue = cpu->midr
+    };
     ARMCPRegInfo id_cp_reginfo[] = {
         /* These are common to v8 and pre-v8 */
         { .name = "CTR",
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     }
     if (arm_feature(env, ARM_FEATURE_V8)) {
         define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
+        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
+            define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
+        }
     } else {
         define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
     }
-- 
2.25.1
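To see why the opc2=4 alias must be skipped on PMSA cores, it may help to
think of coprocessor registers as keyed by their (coproc, crn, crm, opc1,
opc2) tuple: registering both the MIDR alias and MPUIR would claim the same
slot twice. The key-packing function below is a made-up illustration, not
QEMU's actual register lookup:

    #include <stdio.h>

    /* Hypothetical key: pack the coprocessor register coordinates. */
    static unsigned int cp_reg_key(int cp, int crn, int crm,
                                   int opc1, int opc2)
    {
        return (cp << 16) | (crn << 12) | (crm << 8) | (opc1 << 4) | opc2;
    }

    int main(void)
    {
        unsigned int midr_alias = cp_reg_key(15, 0, 0, 0, 4); /* MIDR alias */
        unsigned int mpuir      = cp_reg_key(15, 0, 0, 0, 4); /* MPUIR      */
        printf("0x%08x vs 0x%08x: %s\n", midr_alias, mpuir,
               midr_alias == mpuir ? "same slot, would clash" : "distinct");
        return 0;
    }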
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>

RVBAR shadows RVBAR_ELx where x is the highest exception
level if the highest EL is not EL3. This patch also allows
ARMv8 CPUs to change the reset address with
the rvbar property.

Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221206102504.165775-3-tobias.roehmel@rwth-aachen.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c    |  6 +++++-
 target/arm/helper.c | 21 ++++++++++++++-------
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset_hold(Object *obj)
         env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                          CPACR, CP11, 3);
 #endif
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            env->cp15.rvbar = cpu->rvbar_prop;
+            env->regs[15] = cpu->rvbar_prop;
+        }
     }
 
 #if defined(CONFIG_USER_ONLY)
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
     }
 
-    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+    if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
         object_property_add_uint64_ptr(obj, "rvbar",
                                        &cpu->rvbar_prop,
                                        OBJ_PROP_FLAG_READWRITE);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
         if (!arm_feature(env, ARM_FEATURE_EL3) &&
             !arm_feature(env, ARM_FEATURE_EL2)) {
             ARMCPRegInfo rvbar = {
-                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+                .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                 .access = PL1_R,
                 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
         }
         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
         if (!arm_feature(env, ARM_FEATURE_EL3)) {
-            ARMCPRegInfo rvbar = {
-                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
-                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
-                .access = PL2_R,
-                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
+            ARMCPRegInfo rvbar[] = {
+                {
+                    .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
+                    .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
+                    .access = PL2_R,
+                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
+                },
+                {   .name = "RVBAR", .type = ARM_CP_ALIAS,
+                    .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+                    .access = PL2_R,
+                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
+                },
             };
-            define_one_arm_cp_reg(cpu, &rvbar);
+            define_arm_cp_regs(cpu, rvbar);
         }
     }
 
-- 
2.25.1
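The shadowing rule in the commit message reduces to "RVBAR follows the
highest implemented EL". A minimal sketch, with the feature bits simplified
to booleans rather than QEMU's ARM_FEATURE_* flags:

    #include <stdio.h>
    #include <stdbool.h>

    /* Which RVBAR_ELx does the RVBAR alias read? */
    static int rvbar_shadowed_el(bool has_el2, bool has_el3)
    {
        return has_el3 ? 3 : has_el2 ? 2 : 1;
    }

    int main(void)
    {
        printf("EL3 present    -> RVBAR_EL%d\n", rvbar_shadowed_el(true, true));
        printf("EL2 is highest -> RVBAR_EL%d\n", rvbar_shadowed_el(true, false));
        printf("EL1 is highest -> RVBAR_EL%d\n", rvbar_shadowed_el(false, false));
        return 0;
    }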
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>

The v8R PMSAv8 has a two-stage MPU translation process, but, unlike
VMSAv8, the stage 2 attributes are in the same format as the stage 1
attributes (8-bit MAIR format). Rather than converting the MAIR
format to the format used for VMSA stage 2 (bits [5:2] of a VMSA
stage 2 descriptor) and then converting back to do the attribute
combination, allow combined_attrs_nofwb() to accept s2 attributes
that are already in the MAIR format.

We move the assert() to combined_attrs_fwb(), because that function
really does require a VMSA stage 2 attribute format. (We will never
get there for v8R, because PMSAv8 does not implement FEAT_S2FWB.)

Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221206102504.165775-4-tobias.roehmel@rwth-aachen.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static uint8_t combined_attrs_nofwb(uint64_t hcr,
 {
     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
 
-    s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
+    if (s2.is_s2_format) {
+        s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
+    } else {
+        s2_mair_attrs = s2.attrs;
+    }
 
     s1lo = extract32(s1.attrs, 0, 4);
     s2lo = extract32(s2_mair_attrs, 0, 4);
@@ -XXX,XX +XXX,XX @@ static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
  */
 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
 {
+    assert(s2.is_s2_format && !s1.is_s2_format);
+
     switch (s2.attrs) {
     case 7:
         /* Use stage 1 attributes */
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
     ARMCacheAttrs ret;
     bool tagged = false;
 
-    assert(s2.is_s2_format && !s1.is_s2_format);
+    assert(!s1.is_s2_format);
     ret.is_s2_format = false;
 
     if (s1.attrs == 0xf0) {
-- 
2.25.1
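The core of the change is a format check before conversion: VMSA stage 2
results need their 4-bit descriptor attributes expanded to MAIR form, while
v8R stage 2 results are already MAIR bytes. A self-contained sketch follows;
the struct and the toy conversion function are stand-ins for QEMU's
ARMCacheAttrs and convert_stage2_attrs(), which is really table-driven:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef struct {
        bool is_s2_format;   /* true: VMSA stage 2 descriptor format */
        uint8_t attrs;
    } CacheAttrs;

    /* Toy conversion only: e.g. maps 0xf (WB cacheable) to MAIR 0xff. */
    static uint8_t convert_stage2_attrs(uint8_t s2attrs)
    {
        return (uint8_t)(s2attrs * 0x11);
    }

    /* Convert only when the result is in VMSA stage 2 format. */
    static uint8_t stage2_mair_attrs(CacheAttrs s2)
    {
        return s2.is_s2_format ? convert_stage2_attrs(s2.attrs) : s2.attrs;
    }

    int main(void)
    {
        CacheAttrs vmsa = { .is_s2_format = true,  .attrs = 0xf  };
        CacheAttrs pmsa = { .is_s2_format = false, .attrs = 0xff };
        printf("VMSA: 0x%02x  PMSA: 0x%02x\n",
               (unsigned)stage2_mair_attrs(vmsa),
               (unsigned)stage2_mair_attrs(pmsa));
        return 0;
    }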
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
2
2
3
After recent changes, mte_checkN does not use ESIZE,
3
ARMv8-R AArch32 CPUs behave as if TTBCR.EAE is always 1 even
4
and mte_check1 never used TSIZE. We can combine the
4
tough they don't have the TTBCR register.
5
two into a single field: SIZEM1.
5
See ARM Architecture Reference Manual Supplement - ARMv8, for the ARMv8-R
6
AArch32 architecture profile Version:A.c section C1.2.
6
7
7
Choose to pass size - 1 because size == 0 is never used,
8
Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
8
our immediate need in mte_probe_int is for the address
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
of the last byte (ptr + size - 1), and since almost all
10
Message-id: 20221206102504.165775-5-tobias.roehmel@rwth-aachen.de
10
operations are powers of 2, this makes the immediate
11
constant one bit smaller.
12
13
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20210416183106.1516563-6-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
12
---
18
target/arm/internals.h | 4 ++--
13
target/arm/internals.h | 4 ++++
19
target/arm/mte_helper.c | 18 ++++++++----------
14
target/arm/debug_helper.c | 3 +++
20
target/arm/translate-a64.c | 5 ++---
15
target/arm/tlb_helper.c | 4 ++++
21
target/arm/translate-sve.c | 5 ++---
16
3 files changed, 11 insertions(+)
22
4 files changed, 14 insertions(+), 18 deletions(-)
23
17
24
diff --git a/target/arm/internals.h b/target/arm/internals.h
18
diff --git a/target/arm/internals.h b/target/arm/internals.h
25
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/internals.h
20
--- a/target/arm/internals.h
27
+++ b/target/arm/internals.h
21
+++ b/target/arm/internals.h
28
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@ unsigned int arm_pamax(ARMCPU *cpu);
29
#define TARGET_ARM_INTERNALS_H
23
static inline bool extended_addresses_enabled(CPUARMState *env)
30
24
{
31
#include "hw/registerfields.h"
25
uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
32
+#include "tcg/tcg-gvec-desc.h"
26
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
33
#include "syndrome.h"
27
+ arm_feature(env, ARM_FEATURE_V8)) {
34
28
+ return true;
35
/* register banks for CPU modes */
29
+ }
36
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4)
30
return arm_el_is_aa64(env, 1) ||
37
FIELD(MTEDESC, TBI, 4, 2)
31
(arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
38
FIELD(MTEDESC, TCMA, 6, 2)
32
}
39
FIELD(MTEDESC, WRITE, 8, 1)
33
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
40
-FIELD(MTEDESC, ESIZE, 9, 5)
41
-FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */
42
+FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
43
44
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
45
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
46
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
47
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/mte_helper.c
35
--- a/target/arm/debug_helper.c
49
+++ b/target/arm/mte_helper.c
36
+++ b/target/arm/debug_helper.c
50
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
37
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env)
51
* Return positive on success with tbi enabled.
38
52
*/
39
if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
53
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
40
using_lpae = true;
54
- uintptr_t ra, uint32_t total, uint64_t *fault)
41
+ } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
55
+ uintptr_t ra, uint64_t *fault)
42
+ arm_feature(env, ARM_FEATURE_V8)) {
56
{
43
+ using_lpae = true;
57
int mmu_idx, ptr_tag, bit55;
44
} else {
58
uint64_t ptr_last, prev_page, next_page;
45
if (arm_feature(env, ARM_FEATURE_LPAE) &&
59
uint64_t tag_first, tag_last;
46
(env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
60
uint64_t tag_byte_first, tag_byte_last;
47
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
61
- uint32_t tag_count, tag_size, n, c;
62
+ uint32_t sizem1, tag_count, tag_size, n, c;
63
uint8_t *mem1, *mem2;
64
MMUAccessType type;
65
66
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
67
68
mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
69
type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
70
+ sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);
71
72
/* Find the addr of the end of the access */
73
- ptr_last = ptr + total - 1;
74
+ ptr_last = ptr + sizem1;
75
76
/* Round the bounds to the tag granule, and compute the number of tags. */
77
tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
78
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
79
if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
80
/* Memory access stays on one page. */
81
tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
82
- mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
83
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
84
MMU_DATA_LOAD, tag_size, ra);
85
if (!mem1) {
86
return 1;
87
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
88
uint64_t ptr, uintptr_t ra)
89
{
90
uint64_t fault;
91
- uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
92
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
93
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);
94
95
if (unlikely(ret == 0)) {
96
mte_check_fail(env, desc, fault, ra);
97
@@ -XXX,XX +XXX,XX @@ uint64_t mte_check1(CPUARMState *env, uint32_t desc,
98
uint64_t ptr, uintptr_t ra)
99
{
100
uint64_t fault;
101
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
102
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
103
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);
104
105
if (unlikely(ret == 0)) {
106
mte_check_fail(env, desc, fault, ra);
107
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
108
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
109
{
110
uint64_t fault;
111
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
112
- int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
113
+ int ret = mte_probe_int(env, desc, ptr, 0, &fault);
114
115
return ret != 0;
116
}
117
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
118
index XXXXXXX..XXXXXXX 100644
48
index XXXXXXX..XXXXXXX 100644
119
--- a/target/arm/translate-a64.c
49
--- a/target/arm/tlb_helper.c
120
+++ b/target/arm/translate-a64.c
50
+++ b/target/arm/tlb_helper.c
121
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
51
@@ -XXX,XX +XXX,XX @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
122
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
52
if (el == 2 || arm_el_is_aa64(env, el)) {
123
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
53
return true;
124
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
125
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
126
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
127
tcg_desc = tcg_const_i32(desc);
128
129
ret = new_tmp_a64(s);
130
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
131
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
132
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
133
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
134
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
135
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
136
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
137
tcg_desc = tcg_const_i32(desc);
138
139
ret = new_tmp_a64(s);
140
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/target/arm/translate-sve.c
143
+++ b/target/arm/translate-sve.c
144
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
145
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
146
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
147
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
148
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
149
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz);
150
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
151
desc <<= SVE_MTEDESC_SHIFT;
152
} else {
153
addr = clean_data_tbi(s, addr);
154
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
155
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
156
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
157
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
158
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
159
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
160
desc <<= SVE_MTEDESC_SHIFT;
161
}
54
}
162
desc = simd_desc(vsz, vsz, desc | scale);
55
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
56
+ arm_feature(env, ARM_FEATURE_V8)) {
57
+ return true;
58
+ }
59
if (arm_feature(env, ARM_FEATURE_LPAE)
60
&& (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
61
return true;
163
--
62
--
164
2.20.1
63
2.25.1
165
64
166
65
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
2
2
3
We're about to rearrange the macro expansion surrounding tbflags,
3
Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
4
and this field name will be expanded using the bit definition of
4
Message-id: 20221206102504.165775-6-tobias.roehmel@rwth-aachen.de
5
the same name, resulting in a token pasting error.
6
7
So PSTATE_SS -> PSTATE__SS in the uses, and document it.
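For readers unfamiliar with the hazard, a toy illustration (hypothetical
macros, not the real FIELD()/tbflags machinery):

    #define PSTATE_SS (1U << 21)   /* object-like macro, as in cpu.h */
    #define FIELD_SHIFT(reg, fld)  R_##reg##_##fld##_SHIFT
    #define DP_FLAG(reg, fld)      FIELD_SHIFT(reg, fld)

    /*
     * In DP_FLAG(TBFLAG_ANY, PSTATE_SS) the argument is macro-expanded
     * to (1U << 21) before it reaches the ## operator, so the paste
     * tries to join "R_TBFLAG_ANY_" with "(" -- not a valid
     * preprocessing token, hence a compile error.  With the field
     * spelled PSTATE__SS no macro intervenes and the paste yields
     * R_TBFLAG_ANY_PSTATE__SS_SHIFT as intended.
     */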
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210419202257.161730-4-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
6
---
14
target/arm/cpu.h | 2 +-
7
target/arm/cpu.h | 6 +
15
target/arm/helper.c | 4 ++--
8
target/arm/cpu.c | 28 +++-
16
target/arm/translate-a64.c | 2 +-
9
target/arm/helper.c | 302 +++++++++++++++++++++++++++++++++++++++++++
17
target/arm/translate.c | 2 +-
10
target/arm/machine.c | 28 ++++
18
4 files changed, 5 insertions(+), 5 deletions(-)
11
4 files changed, 360 insertions(+), 4 deletions(-)
19
12
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
13
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
15
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
16
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
17
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
25
*/
18
};
26
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
19
uint64_t sctlr_el[4];
27
FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
20
};
28
-FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */
21
+ uint64_t vsctlr; /* Virtualization System control register. */
29
+FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */
22
uint64_t cpacr_el1; /* Architectural feature access control register */
30
FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
23
uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
31
FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
24
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
32
/* Target EL if we take a floating-point-disabled exception */
25
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
26
*/
27
uint32_t *rbar[M_REG_NUM_BANKS];
28
uint32_t *rlar[M_REG_NUM_BANKS];
29
+ uint32_t *hprbar;
30
+ uint32_t *hprlar;
31
uint32_t mair0[M_REG_NUM_BANKS];
32
uint32_t mair1[M_REG_NUM_BANKS];
33
+ uint32_t hprselr;
34
} pmsav8;
35
36
/* v8M SAU */
37
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
38
bool has_mpu;
39
/* PMSAv7 MPU number of supported regions */
40
uint32_t pmsav7_dregion;
41
+ /* PMSAv8 MPU number of supported hyp regions */
42
+ uint32_t pmsav8r_hdregion;
43
/* v8M SAU number of supported regions */
44
uint32_t sau_sregion;
45
46
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/cpu.c
49
+++ b/target/arm/cpu.c
50
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset_hold(Object *obj)
51
sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
52
}
53
}
54
+
55
+ if (cpu->pmsav8r_hdregion > 0) {
56
+ memset(env->pmsav8.hprbar, 0,
57
+ sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion);
58
+ memset(env->pmsav8.hprlar, 0,
59
+ sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion);
60
+ }
61
+
62
env->pmsav7.rnr[M_REG_NS] = 0;
63
env->pmsav7.rnr[M_REG_S] = 0;
64
env->pmsav8.mair0[M_REG_NS] = 0;
65
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
66
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
67
* to false or by setting pmsav7-dregion to 0.
68
*/
69
- if (!cpu->has_mpu) {
70
- cpu->pmsav7_dregion = 0;
71
- }
72
- if (cpu->pmsav7_dregion == 0) {
73
+ if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) {
74
cpu->has_mpu = false;
75
+ cpu->pmsav7_dregion = 0;
76
+ cpu->pmsav8r_hdregion = 0;
77
}
78
79
if (arm_feature(env, ARM_FEATURE_PMSA) &&
80
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
81
env->pmsav7.dracr = g_new0(uint32_t, nr);
82
}
83
}
84
+
85
+ if (cpu->pmsav8r_hdregion > 0xff) {
86
+ error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32,
87
+ cpu->pmsav8r_hdregion);
88
+ return;
89
+ }
90
+
91
+ if (cpu->pmsav8r_hdregion) {
92
+ env->pmsav8.hprbar = g_new0(uint32_t,
93
+ cpu->pmsav8r_hdregion);
94
+ env->pmsav8.hprlar = g_new0(uint32_t,
95
+ cpu->pmsav8r_hdregion);
96
+ }
97
}
98
99
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
33
diff --git a/target/arm/helper.c b/target/arm/helper.c
100
diff --git a/target/arm/helper.c b/target/arm/helper.c
34
index XXXXXXX..XXXXXXX 100644
101
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/helper.c
102
--- a/target/arm/helper.c
36
+++ b/target/arm/helper.c
103
+++ b/target/arm/helper.c
37
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
104
@@ -XXX,XX +XXX,XX @@ static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
38
* 0 x Inactive (the TB flag for SS is always 0)
105
raw_write(env, ri, value);
39
* 1 0 Active-pending
106
}
40
* 1 1 Active-not-pending
107
41
- * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
108
+static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
42
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
109
+ uint64_t value)
43
*/
110
+{
44
if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
111
+ ARMCPU *cpu = env_archcpu(env);
45
(env->pstate & PSTATE_SS)) {
112
+
46
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
113
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
47
+ flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
114
+ env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
115
+}
116
+
117
+static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
118
+{
119
+ return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
120
+}
121
+
122
+static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
123
+ uint64_t value)
124
+{
125
+ ARMCPU *cpu = env_archcpu(env);
126
+
127
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
128
+ env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
129
+}
130
+
131
+static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
132
+{
133
+ return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
134
+}
135
+
136
+static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
137
+ uint64_t value)
138
+{
139
+ ARMCPU *cpu = env_archcpu(env);
140
+
141
+ /*
142
+ * Ignore writes that would select an unimplemented region.
143
+ * This is architecturally UNPREDICTABLE.
144
+ */
145
+ if (value >= cpu->pmsav7_dregion) {
146
+ return;
147
+ }
148
+
149
+ env->pmsav7.rnr[M_REG_NS] = value;
150
+}
151
+
152
+static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
153
+ uint64_t value)
154
+{
155
+ ARMCPU *cpu = env_archcpu(env);
156
+
157
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
158
+ env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
159
+}
160
+
161
+static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
162
+{
163
+ return env->pmsav8.hprbar[env->pmsav8.hprselr];
164
+}
165
+
166
+static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
167
+ uint64_t value)
168
+{
169
+ ARMCPU *cpu = env_archcpu(env);
170
+
171
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
172
+ env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
173
+}
174
+
175
+static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
176
+{
177
+ return env->pmsav8.hprlar[env->pmsav8.hprselr];
178
+}
179
+
180
+static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
181
+ uint64_t value)
182
+{
183
+ uint32_t n;
184
+ uint32_t bit;
185
+ ARMCPU *cpu = env_archcpu(env);
186
+
187
+ /* Ignore writes to unimplemented regions */
188
+ int rmax = MIN(cpu->pmsav8r_hdregion, 32);
189
+ value &= MAKE_64BIT_MASK(0, rmax);
190
+
191
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
192
+
193
+ /* Register alias is only valid for first 32 indexes */
194
+ for (n = 0; n < rmax; ++n) {
195
+ bit = extract32(value, n, 1);
196
+ env->pmsav8.hprlar[n] = deposit32(
197
+ env->pmsav8.hprlar[n], 0, 1, bit);
198
+ }
199
+}
200
+
201
+static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
202
+{
203
+ uint32_t n;
204
+ uint32_t result = 0x0;
205
+ ARMCPU *cpu = env_archcpu(env);
206
+
207
+ /* Register alias is only valid for first 32 indexes */
208
+ for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
209
+ if (env->pmsav8.hprlar[n] & 0x1) {
210
+ result |= (0x1 << n);
211
+ }
212
+ }
213
+ return result;
214
+}
215
+
216
+static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
217
+ uint64_t value)
218
+{
219
+ ARMCPU *cpu = env_archcpu(env);
220
+
221
+ /*
222
+ * Ignore writes that would select an unimplemented region.
223
+ * This is architecturally UNPREDICTABLE.
224
+ */
225
+ if (value >= cpu->pmsav8r_hdregion) {
226
+ return;
227
+ }
228
+
229
+ env->pmsav8.hprselr = value;
230
+}
231
+
232
+static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
233
+ uint64_t value)
234
+{
235
+ ARMCPU *cpu = env_archcpu(env);
236
+ uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
237
+ (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
238
+
239
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
240
+
241
+ if (ri->opc1 & 4) {
242
+ if (index >= cpu->pmsav8r_hdregion) {
243
+ return;
244
+ }
245
+ if (ri->opc2 & 0x1) {
246
+ env->pmsav8.hprlar[index] = value;
247
+ } else {
248
+ env->pmsav8.hprbar[index] = value;
249
+ }
250
+ } else {
251
+ if (index >= cpu->pmsav7_dregion) {
252
+ return;
253
+ }
254
+ if (ri->opc2 & 0x1) {
255
+ env->pmsav8.rlar[M_REG_NS][index] = value;
256
+ } else {
257
+ env->pmsav8.rbar[M_REG_NS][index] = value;
258
+ }
259
+ }
260
+}
261
+
262
+static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
263
+{
264
+ ARMCPU *cpu = env_archcpu(env);
265
+ uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
266
+ (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
267
+
268
+ if (ri->opc1 & 4) {
269
+ if (index >= cpu->pmsav8r_hdregion) {
270
+ return 0x0;
271
+ }
272
+ if (ri->opc2 & 0x1) {
273
+ return env->pmsav8.hprlar[index];
274
+ } else {
275
+ return env->pmsav8.hprbar[index];
276
+ }
277
+ } else {
278
+ if (index >= cpu->pmsav7_dregion) {
279
+ return 0x0;
280
+ }
281
+ if (ri->opc2 & 0x1) {
282
+ return env->pmsav8.rlar[M_REG_NS][index];
283
+ } else {
284
+ return env->pmsav8.rbar[M_REG_NS][index];
285
+ }
286
+ }
287
+}
288
+
289
+static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
290
+ { .name = "PRBAR",
291
+ .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
292
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
293
+ .accessfn = access_tvm_trvm,
294
+ .readfn = prbar_read, .writefn = prbar_write },
295
+ { .name = "PRLAR",
296
+ .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
297
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
298
+ .accessfn = access_tvm_trvm,
299
+ .readfn = prlar_read, .writefn = prlar_write },
300
+ { .name = "PRSELR", .resetvalue = 0,
301
+ .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
302
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
303
+ .writefn = prselr_write,
304
+ .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
305
+ { .name = "HPRBAR", .resetvalue = 0,
306
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
307
+ .access = PL2_RW, .type = ARM_CP_NO_RAW,
308
+ .readfn = hprbar_read, .writefn = hprbar_write },
309
+ { .name = "HPRLAR",
310
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
311
+ .access = PL2_RW, .type = ARM_CP_NO_RAW,
312
+ .readfn = hprlar_read, .writefn = hprlar_write },
313
+ { .name = "HPRSELR", .resetvalue = 0,
314
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
315
+ .access = PL2_RW,
316
+ .writefn = hprselr_write,
317
+ .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
318
+ { .name = "HPRENR",
319
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
320
+ .access = PL2_RW, .type = ARM_CP_NO_RAW,
321
+ .readfn = hprenr_read, .writefn = hprenr_write },
322
+};
323
+
324
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
325
/* Reset for all these registers is handled in arm_cpu_reset(),
326
* because the PMSAv7 is also used by M-profile CPUs, which do
327
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
328
.access = PL1_R, .type = ARM_CP_CONST,
329
.resetvalue = cpu->pmsav7_dregion << 8
330
};
331
+ /* HMPUIR is specific to PMSA V8 */
332
+ ARMCPRegInfo id_hmpuir_reginfo = {
333
+ .name = "HMPUIR",
334
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
335
+ .access = PL2_R, .type = ARM_CP_CONST,
336
+ .resetvalue = cpu->pmsav8r_hdregion
337
+ };
338
static const ARMCPRegInfo crn0_wi_reginfo = {
339
.name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
340
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
341
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
342
define_arm_cp_regs(cpu, id_cp_reginfo);
343
if (!arm_feature(env, ARM_FEATURE_PMSA)) {
344
define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
345
+ } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
346
+ arm_feature(env, ARM_FEATURE_V8)) {
347
+ uint32_t i = 0;
348
+ char *tmp_string;
349
+
350
+ define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
351
+ define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
352
+ define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
353
+
354
+ /* Register alias is only valid for first 32 indexes */
355
+ for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
356
+ uint8_t crm = 0b1000 | extract32(i, 1, 3);
357
+ uint8_t opc1 = extract32(i, 4, 1);
358
+ uint8_t opc2 = extract32(i, 0, 1) << 2;
359
+
360
+ tmp_string = g_strdup_printf("PRBAR%u", i);
361
+ ARMCPRegInfo tmp_prbarn_reginfo = {
362
+ .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
363
+ .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
364
+ .access = PL1_RW, .resetvalue = 0,
365
+ .accessfn = access_tvm_trvm,
366
+ .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
367
+ };
368
+ define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
369
+ g_free(tmp_string);
370
+
371
+ opc2 = extract32(i, 0, 1) << 2 | 0x1;
372
+ tmp_string = g_strdup_printf("PRLAR%u", i);
373
+ ARMCPRegInfo tmp_prlarn_reginfo = {
374
+ .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
375
+ .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
376
+ .access = PL1_RW, .resetvalue = 0,
377
+ .accessfn = access_tvm_trvm,
378
+ .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
379
+ };
380
+ define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
381
+ g_free(tmp_string);
382
+ }
383
+
384
+ /* Register alias is only valid for first 32 indexes */
385
+ for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
386
+ uint8_t crm = 0b1000 | extract32(i, 1, 3);
387
+ uint8_t opc1 = 0b100 | extract32(i, 4, 1);
388
+ uint8_t opc2 = extract32(i, 0, 1) << 2;
389
+
390
+ tmp_string = g_strdup_printf("HPRBAR%u", i);
391
+ ARMCPRegInfo tmp_hprbarn_reginfo = {
392
+ .name = tmp_string,
393
+ .type = ARM_CP_NO_RAW,
394
+ .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
395
+ .access = PL2_RW, .resetvalue = 0,
396
+ .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
397
+ };
398
+ define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
399
+ g_free(tmp_string);
400
+
401
+ opc2 = extract32(i, 0, 1) << 2 | 0x1;
402
+ tmp_string = g_strdup_printf("HPRLAR%u", i);
403
+ ARMCPRegInfo tmp_hprlarn_reginfo = {
404
+ .name = tmp_string,
405
+ .type = ARM_CP_NO_RAW,
406
+ .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
407
+ .access = PL2_RW, .resetvalue = 0,
408
+ .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
409
+ };
410
+ define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
411
+ g_free(tmp_string);
412
+ }
413
} else if (arm_feature(env, ARM_FEATURE_V7)) {
414
define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
415
}
416
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
417
sctlr.type |= ARM_CP_SUPPRESS_TB_END;
418
}
419
define_one_arm_cp_reg(cpu, &sctlr);
420
+
421
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
422
+ arm_feature(env, ARM_FEATURE_V8)) {
423
+ ARMCPRegInfo vsctlr = {
424
+ .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
425
+ .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
426
+ .access = PL2_RW, .resetvalue = 0x0,
427
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
428
+ };
429
+ define_one_arm_cp_reg(cpu, &vsctlr);
430
+ }
48
}
431
}
49
432
50
*pflags = flags;
433
if (cpu_isar_feature(aa64_lor, cpu)) {
51
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
434
diff --git a/target/arm/machine.c b/target/arm/machine.c
52
index XXXXXXX..XXXXXXX 100644
435
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/translate-a64.c
436
--- a/target/arm/machine.c
54
+++ b/target/arm/translate-a64.c
437
+++ b/target/arm/machine.c
55
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
438
@@ -XXX,XX +XXX,XX @@ static bool pmsav8_needed(void *opaque)
56
* end the TB
439
arm_feature(env, ARM_FEATURE_V8);
57
*/
440
}
58
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
441
59
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
442
+static bool pmsav8r_needed(void *opaque)
60
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
443
+{
61
dc->is_ldex = false;
444
+ ARMCPU *cpu = opaque;
62
dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
445
+ CPUARMState *env = &cpu->env;
63
446
+
64
diff --git a/target/arm/translate.c b/target/arm/translate.c
447
+ return arm_feature(env, ARM_FEATURE_PMSA) &&
65
index XXXXXXX..XXXXXXX 100644
448
+ arm_feature(env, ARM_FEATURE_V8) &&
66
--- a/target/arm/translate.c
449
+ !arm_feature(env, ARM_FEATURE_M);
67
+++ b/target/arm/translate.c
450
+}
68
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
451
+
69
* end the TB
452
+static const VMStateDescription vmstate_pmsav8r = {
70
*/
453
+ .name = "cpu/pmsav8/pmsav8r",
71
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
454
+ .version_id = 1,
72
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
455
+ .minimum_version_id = 1,
73
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
456
+ .needed = pmsav8r_needed,
74
dc->is_ldex = false;
457
+ .fields = (VMStateField[]) {
75
458
+ VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
76
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
459
+ pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
460
+ VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
461
+ pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
462
+ VMSTATE_END_OF_LIST()
463
+ },
464
+};
465
+
466
static const VMStateDescription vmstate_pmsav8 = {
467
.name = "cpu/pmsav8",
468
.version_id = 1,
469
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_pmsav8 = {
470
VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
471
VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
472
VMSTATE_END_OF_LIST()
473
+ },
474
+ .subsections = (const VMStateDescription * []) {
475
+ &vmstate_pmsav8r,
476
+ NULL
477
}
478
};
479
77
--
480
--
78
2.20.1
481
2.25.1
79
482
80
483
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
2
2
3
The mte_check1 and mte_checkN functions are now identical.
3
Add PMSAv8r translation.
4
Drop mte_check1 and rename mte_checkN to mte_check.
4
5
5
Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20221206102504.165775-7-tobias.roehmel@rwth-aachen.de
8
Message-id: 20210416183106.1516563-7-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
9
---
11
target/arm/helper-a64.h | 3 +--
10
target/arm/ptw.c | 126 ++++++++++++++++++++++++++++++++++++++---------
12
target/arm/internals.h | 5 +----
11
1 file changed, 104 insertions(+), 22 deletions(-)
13
target/arm/mte_helper.c | 26 +++-----------------------
12
14
target/arm/sve_helper.c | 14 +++++++-------
13
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
target/arm/translate-a64.c | 4 ++--
16
5 files changed, 14 insertions(+), 38 deletions(-)
17
18
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
19
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-a64.h
15
--- a/target/arm/ptw.c
21
+++ b/target/arm/helper-a64.h
16
+++ b/target/arm/ptw.c
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
17
@@ -XXX,XX +XXX,XX @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
23
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
18
24
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
19
if (arm_feature(env, ARM_FEATURE_M)) {
25
20
return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
26
-DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
21
- } else {
27
-DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
22
- return regime_sctlr(env, mmu_idx) & SCTLR_BR;
28
+DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64)
23
}
29
DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
24
+
30
DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
25
+ if (mmu_idx == ARMMMUIdx_Stage2) {
31
DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
26
+ return false;
32
diff --git a/target/arm/internals.h b/target/arm/internals.h
27
+ }
33
index XXXXXXX..XXXXXXX 100644
28
+
34
--- a/target/arm/internals.h
29
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
35
+++ b/target/arm/internals.h
36
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, WRITE, 8, 1)
37
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
38
39
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
40
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
41
- uint64_t ptr, uintptr_t ra);
42
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
43
- uint64_t ptr, uintptr_t ra);
44
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
45
46
static inline int allocation_tag_from_addr(uint64_t ptr)
47
{
48
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/mte_helper.c
51
+++ b/target/arm/mte_helper.c
52
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
53
return 0;
54
}
30
}
55
31
56
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
32
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
57
- uint64_t ptr, uintptr_t ra)
33
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
58
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
34
return !(result->f.prot & (1 << access_type));
59
{
60
uint64_t fault;
61
int ret = mte_probe_int(env, desc, ptr, ra, &fault);
62
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
63
return useronly_clean_ptr(ptr);
64
}
35
}
65
36
66
-uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
37
+static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
67
+uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
38
+ uint32_t secure)
68
{
39
+{
69
- return mte_checkN(env, desc, ptr, GETPC());
40
+ if (regime_el(env, mmu_idx) == 2) {
70
-}
41
+ return env->pmsav8.hprbar;
71
-
42
+ } else {
72
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
43
+ return env->pmsav8.rbar[secure];
73
- uint64_t ptr, uintptr_t ra)
44
+ }
74
-{
45
+}
75
- uint64_t fault;
46
+
76
- int ret = mte_probe_int(env, desc, ptr, ra, &fault);
47
+static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
77
-
48
+ uint32_t secure)
78
- if (unlikely(ret == 0)) {
49
+{
79
- mte_check_fail(env, desc, fault, ra);
50
+ if (regime_el(env, mmu_idx) == 2) {
80
- } else if (ret < 0) {
51
+ return env->pmsav8.hprlar;
81
- return ptr;
52
+ } else {
82
- }
53
+ return env->pmsav8.rlar[secure];
83
- return useronly_clean_ptr(ptr);
54
+ }
84
-}
55
+}
85
-
56
+
86
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
57
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
87
-{
58
MMUAccessType access_type, ARMMMUIdx mmu_idx,
88
- return mte_check1(env, desc, ptr, GETPC());
59
bool secure, GetPhysAddrResult *result,
89
+ return mte_check(env, desc, ptr, GETPC());
60
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
61
bool hit = false;
62
uint32_t addr_page_base = address & TARGET_PAGE_MASK;
63
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
64
+ int region_counter;
65
+
66
+ if (regime_el(env, mmu_idx) == 2) {
67
+ region_counter = cpu->pmsav8r_hdregion;
68
+ } else {
69
+ region_counter = cpu->pmsav7_dregion;
70
+ }
71
72
result->f.lg_page_size = TARGET_PAGE_BITS;
73
result->f.phys_addr = address;
74
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
75
*mregion = -1;
76
}
77
78
+ if (mmu_idx == ARMMMUIdx_Stage2) {
79
+ fi->stage2 = true;
80
+ }
81
+
82
/*
83
* Unlike the ARM ARM pseudocode, we don't need to check whether this
84
* was an exception vector read from the vector table (which is always
85
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
86
hit = true;
87
}
88
89
- for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
90
+ uint32_t bitmask;
91
+ if (arm_feature(env, ARM_FEATURE_M)) {
92
+ bitmask = 0x1f;
93
+ } else {
94
+ bitmask = 0x3f;
95
+ fi->level = 0;
96
+ }
97
+
98
+ for (n = region_counter - 1; n >= 0; n--) {
99
/* region search */
100
/*
101
- * Note that the base address is bits [31:5] from the register
102
- * with bits [4:0] all zeroes, but the limit address is bits
103
- * [31:5] from the register with bits [4:0] all ones.
104
+ * Note that the base address is bits [31:x] from the register
105
+ * with bits [x-1:0] all zeroes, but the limit address is bits
106
+ * [31:x] from the register with bits [x-1:0] all ones, where x is
107
+ * 5 for Cortex-M and 6 for Cortex-R.
108
*/
109
- uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
110
- uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
111
+ uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
112
+ uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;
113
114
- if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
115
+ if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
116
/* Region disabled */
117
continue;
118
}
119
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
120
* PMSAv7 where highest-numbered-region wins)
121
*/
122
fi->type = ARMFault_Permission;
123
- fi->level = 1;
124
+ if (arm_feature(env, ARM_FEATURE_M)) {
125
+ fi->level = 1;
126
+ }
127
return true;
128
}
129
130
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
131
}
132
133
if (!hit) {
134
- /* background fault */
135
- fi->type = ARMFault_Background;
136
+ if (arm_feature(env, ARM_FEATURE_M)) {
137
+ fi->type = ARMFault_Background;
138
+ } else {
139
+ fi->type = ARMFault_Permission;
140
+ }
141
return true;
142
}
143
144
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
145
/* hit using the background region */
146
get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
147
} else {
148
- uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
149
- uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
150
+ uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
151
+ uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
152
+ uint32_t ap = extract32(matched_rbar, 1, 2);
153
+ uint32_t xn = extract32(matched_rbar, 0, 1);
154
bool pxn = false;
155
156
if (arm_feature(env, ARM_FEATURE_V8_1M)) {
157
- pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
158
+ pxn = extract32(matched_rlar, 4, 1);
159
}
160
161
if (m_is_system_region(env, address)) {
162
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
163
xn = 1;
164
}
165
166
- result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
167
+ if (regime_el(env, mmu_idx) == 2) {
168
+ result->f.prot = simple_ap_to_rw_prot_is_user(ap,
169
+ mmu_idx != ARMMMUIdx_E2);
170
+ } else {
171
+ result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
172
+ }
173
+
174
+ if (!arm_feature(env, ARM_FEATURE_M)) {
175
+ uint8_t attrindx = extract32(matched_rlar, 1, 3);
176
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
177
+ uint8_t sh = extract32(matched_rlar, 3, 2);
178
+
179
+ if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
180
+ result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
181
+ xn = 0x1;
182
+ }
183
+
184
+ if ((regime_el(env, mmu_idx) == 1) &&
185
+ regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
186
+ pxn = 0x1;
187
+ }
188
+
189
+ result->cacheattrs.is_s2_format = false;
190
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
191
+ result->cacheattrs.shareability = sh;
192
+ }
193
+
194
if (result->f.prot && !xn && !(pxn && !is_user)) {
195
result->f.prot |= PAGE_EXEC;
196
}
197
- /*
198
- * We don't need to look the attribute up in the MAIR0/MAIR1
199
- * registers because that only tells us about cacheability.
200
- */
201
+
202
if (mregion) {
203
*mregion = matchregion;
204
}
205
}
206
207
fi->type = ARMFault_Permission;
208
- fi->level = 1;
209
+ if (arm_feature(env, ARM_FEATURE_M)) {
210
+ fi->level = 1;
211
+ }
212
return !(result->f.prot & (1 << access_type));
90
}
213
}
91
214
92
/*
215
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
93
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
216
cacheattrs1 = result->cacheattrs;
94
index XXXXXXX..XXXXXXX 100644
217
memset(result, 0, sizeof(*result));
95
--- a/target/arm/sve_helper.c
218
96
+++ b/target/arm/sve_helper.c
219
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
97
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
220
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
98
uintptr_t ra)
221
+ ret = get_phys_addr_pmsav8(env, ipa, access_type,
99
{
222
+ ptw->in_mmu_idx, is_secure, result, fi);
100
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
223
+ } else {
101
- mtedesc, ra, mte_check1);
224
+ ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
102
+ mtedesc, ra, mte_check);
225
+ is_el0, result, fi);
103
}
226
+ }
104
227
fi->s2addr = ipa;
105
static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
228
106
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
229
/* Combine the S1 and S2 perms. */
107
uintptr_t ra)
108
{
109
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
110
- mtedesc, ra, mte_checkN);
111
+ mtedesc, ra, mte_check);
112
}
113
114
115
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
116
if (fault == FAULT_FIRST) {
117
/* Trapping mte check for the first-fault element. */
118
if (mtedesc) {
119
- mte_check1(env, mtedesc, addr + mem_off, retaddr);
120
+ mte_check(env, mtedesc, addr + mem_off, retaddr);
121
}
122
123
/*
124
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
125
info.attrs, BP_MEM_READ, retaddr);
126
}
127
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
128
- mte_check1(env, mtedesc, addr, retaddr);
129
+ mte_check(env, mtedesc, addr, retaddr);
130
}
131
host_fn(&scratch, reg_off, info.host);
132
} else {
133
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
134
BP_MEM_READ, retaddr);
135
}
136
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
137
- mte_check1(env, mtedesc, addr, retaddr);
138
+ mte_check(env, mtedesc, addr, retaddr);
139
}
140
tlb_fn(env, &scratch, reg_off, addr, retaddr);
141
}
142
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
143
*/
144
addr = base + (off_fn(vm, reg_off) << scale);
145
if (mtedesc) {
146
- mte_check1(env, mtedesc, addr, retaddr);
147
+ mte_check(env, mtedesc, addr, retaddr);
148
}
149
tlb_fn(env, vd, reg_off, addr, retaddr);
150
151
@@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
152
}
153
154
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
155
- mte_check1(env, mtedesc, addr, retaddr);
156
+ mte_check(env, mtedesc, addr, retaddr);
157
}
158
}
159
i += 1;
160
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/arm/translate-a64.c
163
+++ b/target/arm/translate-a64.c
164
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
165
tcg_desc = tcg_const_i32(desc);
166
167
ret = new_tmp_a64(s);
168
- gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
169
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
170
tcg_temp_free_i32(tcg_desc);
171
172
return ret;
173
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
174
tcg_desc = tcg_const_i32(desc);
175
176
ret = new_tmp_a64(s);
177
- gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
178
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
179
tcg_temp_free_i32(tcg_desc);
180
181
return ret;
182
--
230
--
183
2.20.1
231
2.25.1
184
232
185
233
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
2
2
3
Create a finalize_memop function that computes alignment and
3
All constants are taken from the ARM Cortex-R52 Processor TRM Revision: r1p3
4
endianness and returns the final MemOp for the operation.
5
4
6
Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
5
Signed-off-by: Tobias Röhmel <tobias.roehmel@rwth-aachen.de>
7
handling of endianness or alignment. Adjust gen_aa32_{ld,st}_i32
8
so that s->be_data is not added by the callers.
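A rough sketch of the intended semantics (illustrative values only;
assumes s->align_mem is set and s->be_data == MO_BE):

    /* finalize_memop(s, MO_UL)            -> MO_UL | MO_ALIGN | MO_BE */
    /* finalize_memop(s, MO_UW | MO_ALIGN) -> MO_UW | MO_ALIGN | MO_BE */
    /* An opc that already carries an MO_AMASK alignment is left as    */
    /* given; only the "no alignment specified" case gains MO_ALIGN.   */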
9
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20221206102504.165775-8-tobias.roehmel@rwth-aachen.de
12
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
9
---
15
target/arm/translate.h | 24 ++++++++
10
target/arm/cpu_tcg.c | 42 ++++++++++++++++++++++++++++++++++++++++++
16
target/arm/translate.c | 100 +++++++++++++++++---------------
11
1 file changed, 42 insertions(+)
17
target/arm/translate-neon.c.inc | 9 +--
18
3 files changed, 79 insertions(+), 54 deletions(-)
19
12
20
diff --git a/target/arm/translate.h b/target/arm/translate.h
13
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
21
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/translate.h
15
--- a/target/arm/cpu_tcg.c
23
+++ b/target/arm/translate.h
16
+++ b/target/arm/cpu_tcg.c
24
@@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
17
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
25
return statusptr;
18
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
26
}
19
}
27
20
28
+/**
21
+static void cortex_r52_initfn(Object *obj)
29
+ * finalize_memop:
30
+ * @s: DisasContext
31
+ * @opc: size+sign+align of the memory operation
32
+ *
33
+ * Build the complete MemOp for a memory operation, including alignment
34
+ * and endianness.
35
+ *
36
+ * If (op & MO_AMASK) then the operation already contains the required
37
+ * alignment, e.g. for AccType_ATOMIC. Otherwise, this is an optionally
38
+ * unaligned operation, e.g. for AccType_NORMAL.
39
+ *
40
+ * In the latter case, there are configuration bits that require alignment,
41
+ * and this is applied here. Note that there is no way to indicate that
42
+ * no alignment should ever be enforced; this must be handled manually.
43
+ */
44
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
45
+{
22
+{
46
+ if (s->align_mem && !(opc & MO_AMASK)) {
23
+ ARMCPU *cpu = ARM_CPU(obj);
47
+ opc |= MO_ALIGN;
24
+
48
+ }
25
+ set_feature(&cpu->env, ARM_FEATURE_V8);
49
+ return opc | s->be_data;
26
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
27
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
28
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
29
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
30
+ cpu->midr = 0x411fd133; /* r1p3 */
31
+ cpu->revidr = 0x00000000;
32
+ cpu->reset_fpsid = 0x41034023;
33
+ cpu->isar.mvfr0 = 0x10110222;
34
+ cpu->isar.mvfr1 = 0x12111111;
35
+ cpu->isar.mvfr2 = 0x00000043;
36
+ cpu->ctr = 0x8144c004;
37
+ cpu->reset_sctlr = 0x30c50838;
38
+ cpu->isar.id_pfr0 = 0x00000131;
39
+ cpu->isar.id_pfr1 = 0x10111001;
40
+ cpu->isar.id_dfr0 = 0x03010006;
41
+ cpu->id_afr0 = 0x00000000;
42
+ cpu->isar.id_mmfr0 = 0x00211040;
43
+ cpu->isar.id_mmfr1 = 0x40000000;
44
+ cpu->isar.id_mmfr2 = 0x01200000;
45
+ cpu->isar.id_mmfr3 = 0xf0102211;
46
+ cpu->isar.id_mmfr4 = 0x00000010;
47
+ cpu->isar.id_isar0 = 0x02101110;
48
+ cpu->isar.id_isar1 = 0x13112111;
49
+ cpu->isar.id_isar2 = 0x21232142;
50
+ cpu->isar.id_isar3 = 0x01112131;
51
+ cpu->isar.id_isar4 = 0x00010142;
52
+ cpu->isar.id_isar5 = 0x00010001;
53
+ cpu->isar.dbgdidr = 0x77168000;
54
+ cpu->clidr = (1 << 27) | (1 << 24) | 0x3;
55
+ cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
56
+ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
57
+
58
+ cpu->pmsav7_dregion = 16;
59
+ cpu->pmsav8r_hdregion = 16;
50
+}
60
+}
51
+
61
+
52
#endif /* TARGET_ARM_TRANSLATE_H */
62
static void cortex_r5f_initfn(Object *obj)
53
diff --git a/target/arm/translate.c b/target/arm/translate.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/translate.c
56
+++ b/target/arm/translate.c
57
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
58
#define IS_USER_ONLY 0
59
#endif
60
61
-/* Abstractions of "generate code to do a guest load/store for
62
+/*
63
+ * Abstractions of "generate code to do a guest load/store for
64
* AArch32", where a vaddr is always 32 bits (and is zero
65
* extended if we're a 64 bit core) and data is also
66
* 32 bits unless specifically doing a 64 bit access.
67
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
68
* that the address argument is TCGv_i32 rather than TCGv.
69
*/
70
71
-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
72
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
73
{
63
{
74
TCGv addr = tcg_temp_new();
64
ARMCPU *cpu = ARM_CPU(obj);
75
tcg_gen_extu_i32_tl(addr, a32);
65
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo arm_tcg_cpus[] = {
76
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
66
.class_init = arm_v7m_class_init },
77
return addr;
67
{ .name = "cortex-r5", .initfn = cortex_r5_initfn },
78
}
68
{ .name = "cortex-r5f", .initfn = cortex_r5f_initfn },
79
69
+ { .name = "cortex-r52", .initfn = cortex_r52_initfn },
80
+/*
70
{ .name = "ti925t", .initfn = ti925t_initfn },
81
+ * Internal routines are used for NEON cases where the endianness
71
{ .name = "sa1100", .initfn = sa1100_initfn },
82
+ * and/or alignment has already been taken into account and manipulated.
72
{ .name = "sa1110", .initfn = sa1110_initfn },
83
+ */
84
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
85
+ TCGv_i32 a32, int index, MemOp opc)
86
+{
87
+ TCGv addr = gen_aa32_addr(s, a32, opc);
88
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
89
+ tcg_temp_free(addr);
90
+}
91
+
92
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
93
+ TCGv_i32 a32, int index, MemOp opc)
94
+{
95
+ TCGv addr = gen_aa32_addr(s, a32, opc);
96
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
97
+ tcg_temp_free(addr);
98
+}
99
+
100
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
101
int index, MemOp opc)
102
{
103
- TCGv addr;
104
-
105
- if (s->align_mem) {
106
- opc |= MO_ALIGN;
107
- }
108
-
109
- addr = gen_aa32_addr(s, a32, opc);
110
- tcg_gen_qemu_ld_i32(val, addr, index, opc);
111
- tcg_temp_free(addr);
112
+ gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
113
}
114
115
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
116
int index, MemOp opc)
117
{
118
- TCGv addr;
119
+ gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
120
+}
121
122
- if (s->align_mem) {
123
- opc |= MO_ALIGN;
124
+#define DO_GEN_LD(SUFF, OPC) \
125
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
126
+ TCGv_i32 a32, int index) \
127
+ { \
128
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
129
}
130
131
- addr = gen_aa32_addr(s, a32, opc);
132
- tcg_gen_qemu_st_i32(val, addr, index, opc);
133
- tcg_temp_free(addr);
134
-}
135
-
136
-#define DO_GEN_LD(SUFF, OPC) \
137
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
138
- TCGv_i32 a32, int index) \
139
-{ \
140
- gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
141
-}
142
-
143
-#define DO_GEN_ST(SUFF, OPC) \
144
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
145
- TCGv_i32 a32, int index) \
146
-{ \
147
- gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
148
-}
149
+#define DO_GEN_ST(SUFF, OPC) \
150
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
151
+ TCGv_i32 a32, int index) \
152
+ { \
153
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
154
+ }
155
156
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
157
{
158
@@ -XXX,XX +XXX,XX @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
159
addr = op_addr_rr_pre(s, a);
160
161
tmp = tcg_temp_new_i32();
162
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
163
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
164
disas_set_da_iss(s, mop, issinfo);
165
166
/*
167
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
168
addr = op_addr_rr_pre(s, a);
169
170
tmp = load_reg(s, a->rt);
171
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
172
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
173
disas_set_da_iss(s, mop, issinfo);
174
tcg_temp_free_i32(tmp);
175
176
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
177
addr = op_addr_rr_pre(s, a);
178
179
tmp = tcg_temp_new_i32();
180
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
181
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
182
store_reg(s, a->rt, tmp);
183
184
tcg_gen_addi_i32(addr, addr, 4);
185
186
tmp = tcg_temp_new_i32();
187
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
188
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
189
store_reg(s, a->rt + 1, tmp);
190
191
/* LDRD w/ base writeback is undefined if the registers overlap. */
192
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
193
addr = op_addr_rr_pre(s, a);
194
195
tmp = load_reg(s, a->rt);
196
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
197
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
198
tcg_temp_free_i32(tmp);
199
200
tcg_gen_addi_i32(addr, addr, 4);
201
202
tmp = load_reg(s, a->rt + 1);
203
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
204
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
205
tcg_temp_free_i32(tmp);
206
207
op_addr_rr_post(s, a, addr, -4);
208
@@ -XXX,XX +XXX,XX @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
209
addr = op_addr_ri_pre(s, a);
210
211
tmp = tcg_temp_new_i32();
212
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
213
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
214
disas_set_da_iss(s, mop, issinfo);
215
216
/*
217
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
218
addr = op_addr_ri_pre(s, a);
219
220
tmp = load_reg(s, a->rt);
221
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
222
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
223
disas_set_da_iss(s, mop, issinfo);
224
tcg_temp_free_i32(tmp);
225
226
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
227
addr = op_addr_ri_pre(s, a);
228
229
tmp = tcg_temp_new_i32();
230
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
231
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
232
store_reg(s, a->rt, tmp);
233
234
tcg_gen_addi_i32(addr, addr, 4);
235
236
tmp = tcg_temp_new_i32();
237
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
238
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
239
store_reg(s, rt2, tmp);
240
241
/* LDRD w/ base writeback is undefined if the registers overlap. */
242
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
243
addr = op_addr_ri_pre(s, a);
244
245
tmp = load_reg(s, a->rt);
246
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
247
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
248
tcg_temp_free_i32(tmp);
249
250
tcg_gen_addi_i32(addr, addr, 4);
251
252
tmp = load_reg(s, rt2);
253
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
254
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
255
tcg_temp_free_i32(tmp);
256
257
op_addr_ri_post(s, a, addr, -4);
258
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
259
addr = load_reg(s, a->rn);
260
tmp = load_reg(s, a->rt);
261
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
262
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
263
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
264
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
265
266
tcg_temp_free_i32(tmp);
267
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
268
269
addr = load_reg(s, a->rn);
270
tmp = tcg_temp_new_i32();
271
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
272
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
273
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
274
tcg_temp_free_i32(addr);
275
276
@@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
277
addr = load_reg(s, a->rn);
278
tcg_gen_add_i32(addr, addr, tmp);
279
280
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
281
- half ? MO_UW | s->be_data : MO_UB);
282
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
283
tcg_temp_free_i32(addr);
284
285
tcg_gen_add_i32(tmp, tmp, tmp);
286
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
287
index XXXXXXX..XXXXXXX 100644
288
--- a/target/arm/translate-neon.c.inc
289
+++ b/target/arm/translate-neon.c.inc
290
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
291
addr = tcg_temp_new_i32();
292
load_reg_var(s, addr, a->rn);
293
for (reg = 0; reg < nregs; reg++) {
294
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
295
- s->be_data | size);
296
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
297
if ((vd & 1) && vec_size == 16) {
298
/*
299
* We cannot write 16 bytes at once because the
300
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
301
*/
302
for (reg = 0; reg < nregs; reg++) {
303
if (a->l) {
304
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
305
- s->be_data | a->size);
306
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
307
neon_store_element(vd, a->reg_idx, a->size, tmp);
308
} else { /* Store */
309
neon_load_element(tmp, vd, a->reg_idx, a->size);
310
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
311
- s->be_data | a->size);
312
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
313
}
314
vd += a->stride;
315
tcg_gen_addi_i32(addr, addr, 1 << a->size);
316
--
73
--
317
2.20.1
74
2.25.1
318
75
319
76
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Alex Bennée <alex.bennee@linaro.org>
2
2
3
The check semihosting_enabled() wants to know if the guest is
4
currently in user mode. Unlike the other cases, the test was inverted,
5
causing us to block semihosting calls in non-EL0 modes.
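Illustrative only -- the predicate's argument means "is the guest
currently in userspace?":

    if (semihosting_enabled(s->current_el == 0) && ...   /* fixed    */
    if (semihosting_enabled(s->current_el != 0) && ...   /* inverted */

With the inverted form, privileged (non-EL0) code was reported as
userspace and so had its semihosting calls refused.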
6
7
Cc: qemu-stable@nongnu.org
8
Fixes: 19b26317e9 (target/arm: Honour -semihosting-config userspace=on)
9
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-20-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
12
---
8
target/arm/translate.c | 4 ++--
13
target/arm/translate.c | 2 +-
9
1 file changed, 2 insertions(+), 2 deletions(-)
14
1 file changed, 1 insertion(+), 1 deletion(-)
10
15
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
18
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
19
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
20
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
16
}
21
* semihosting, to provide some semblance of security
17
tcg_gen_addi_i32(addr, addr, offset);
22
* (and for consistency with our 32-bit semihosting).
18
tmp = load_reg(s, 14);
23
*/
19
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
24
- if (semihosting_enabled(s->current_el != 0) &&
20
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
25
+ if (semihosting_enabled(s->current_el == 0) &&
21
tcg_temp_free_i32(tmp);
26
(imm == (s->thumb ? 0x3c : 0xf000))) {
22
tmp = load_cpu_field(spsr);
27
gen_exception_internal_insn(s, EXCP_SEMIHOST);
23
tcg_gen_addi_i32(addr, addr, 4);
28
return;
24
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
25
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
26
tcg_temp_free_i32(tmp);
27
if (writeback) {
28
switch (amode) {
29
--
29
--
30
2.20.1
30
2.25.1
31
31
32
32
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Axel Heider <axel.heider@hensoldt.net>
2
2
3
We were incorrectly assuming that only the first byte of an MTE access
3
Fix typos, add background information
4
is checked against the tags. But per the ARM, unaligned accesses are
5
pre-decomposed into single-byte accesses. So by the time we reach the
6
actual MTE check in the ARM pseudocode, all accesses are aligned.
7
4
8
We cannot tell a priori whether or not a given scalar access is aligned,
5
Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
9
therefore we must at least check. Use mte_probe_int, which is already
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
set up for checking multiple granules.
11
12
Buglink: https://bugs.launchpad.net/bugs/1921948
13
Tested-by: Alex Bennée <alex.bennee@linaro.org>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20210416183106.1516563-4-richard.henderson@linaro.org
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
8
---
19
target/arm/mte_helper.c | 109 +++++++++++++---------------------------
9
hw/timer/imx_epit.c | 20 ++++++++++++++++----
20
1 file changed, 35 insertions(+), 74 deletions(-)
10
1 file changed, 16 insertions(+), 4 deletions(-)
21
11
22
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
12
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
23
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/mte_helper.c
14
--- a/hw/timer/imx_epit.c
25
+++ b/target/arm/mte_helper.c
15
+++ b/hw/timer/imx_epit.c
26
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
16
@@ -XXX,XX +XXX,XX @@ static void imx_epit_set_freq(IMXEPITState *s)
27
}
17
}
28
}
18
}
29
19
30
-/*
20
+/*
31
- * Perform an MTE checked access for a single logical or atomic access.
21
+ * This is called both on hardware (device) reset and software reset.
32
- */
22
+ */
33
-static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
23
static void imx_epit_reset(DeviceState *dev)
34
- uintptr_t ra, int bit55)
24
{
35
-{
25
IMXEPITState *s = IMX_EPIT(dev);
36
- int mem_tag, mmu_idx, ptr_tag, size;
26
37
- MMUAccessType type;
27
- /*
38
- uint8_t *mem;
28
- * Soft reset doesn't touch some bits; hard reset clears them
39
-
29
- */
40
- ptr_tag = allocation_tag_from_addr(ptr);
30
+ /* Soft reset doesn't touch some bits; hard reset clears them */
41
-
31
s->cr &= (CR_EN|CR_ENMOD|CR_STOPEN|CR_DOZEN|CR_WAITEN|CR_DBGEN);
42
- if (tcma_check(desc, bit55, ptr_tag)) {
32
s->sr = 0;
43
- return true;
33
s->lr = EPIT_TIMER_MAX;
44
- }
34
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
45
-
35
ptimer_transaction_begin(s->timer_cmp);
46
- mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
36
ptimer_transaction_begin(s->timer_reload);
47
- type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
37
48
- size = FIELD_EX32(desc, MTEDESC, ESIZE);
38
+ /* Update the frequency. In case of a reset this has already been done. */
49
-
39
if (!(s->cr & CR_SWR)) {
50
- mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
40
imx_epit_set_freq(s);
51
- MMU_DATA_LOAD, 1, ra);
41
}
52
- if (!mem) {
42
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
53
- return true;
43
break;
54
- }
44
55
-
45
case 1: /* SR - ACK*/
56
- mem_tag = load_tag1(ptr, mem);
46
- /* writing 1 to OCIF clear the OCIF bit */
57
- return ptr_tag == mem_tag;
47
+ /* writing 1 to OCIF clears the OCIF bit */
58
-}
48
if (value & 0x01) {
59
-
49
s->sr = 0;
60
-/*
50
imx_epit_update_int(s);
61
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
51
@@ -XXX,XX +XXX,XX @@ static void imx_epit_realize(DeviceState *dev, Error **errp)
62
- * Returns false if the access is Checked and the check failed. This
52
0x00001000);
63
- * is only intended to probe the tag -- the validity of the page must
53
sysbus_init_mmio(sbd, &s->iomem);
64
- * be checked beforehand.
54
65
- */
55
+ /*
66
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
56
+ * The reload timer keeps running when the peripheral is enabled. It is a
67
-{
57
+ * kind of wall clock that does not generate any interrupts. The callback
68
- int bit55 = extract64(ptr, 55, 1);
58
+ * needs to be provided, but it does nothing as the ptimer already supports
69
-
59
+ * all necessary reloading functionality.
70
- /* If TBI is disabled, the access is unchecked. */
60
+ */
71
- if (unlikely(!tbi_check(desc, bit55))) {
61
s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_LEGACY);
72
- return true;
62
73
- }
63
+ /*
74
-
64
+ * The compare timer is running only when the peripheral configuration is
75
- return mte_probe1_int(env, desc, ptr, 0, bit55);
65
+ * in a state that will generate compare interrupts.
76
-}
66
+ */
77
-
67
s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_LEGACY);
78
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
79
- uint64_t ptr, uintptr_t ra)
80
-{
81
- int bit55 = extract64(ptr, 55, 1);
82
-
83
- /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
84
- if (unlikely(!tbi_check(desc, bit55))) {
85
- return ptr;
86
- }
87
-
88
- if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
89
- mte_check_fail(env, desc, ptr, ra);
90
- }
91
-
92
- return useronly_clean_ptr(ptr);
93
-}
94
-
95
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
96
-{
97
- return mte_check1(env, desc, ptr, GETPC());
98
-}
99
-
100
-/*
101
- * Perform an MTE checked access for multiple logical accesses.
102
- */
103
-
104
/**
105
* checkN:
106
* @tag: tag memory to test
107
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
108
return mte_checkN(env, desc, ptr, GETPC());
109
}
68
}
110
69
111
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
112
+ uint64_t ptr, uintptr_t ra)
113
+{
114
+ uint64_t fault;
115
+ uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
116
+ int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
117
+
118
+ if (unlikely(ret == 0)) {
119
+ mte_check_fail(env, desc, fault, ra);
120
+ } else if (ret < 0) {
121
+ return ptr;
122
+ }
123
+ return useronly_clean_ptr(ptr);
124
+}
125
+
126
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
127
+{
128
+ return mte_check1(env, desc, ptr, GETPC());
129
+}
130
+
131
+/*
132
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
133
+ * Returns false if the access is Checked and the check failed. This
134
+ * is only intended to probe the tag -- the validity of the page must
135
+ * be checked beforehand.
136
+ */
137
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
138
+{
139
+ uint64_t fault;
140
+ uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
141
+ int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
142
+
143
+ return ret != 0;
144
+}
145
+
146
/*
147
* Perform an MTE checked access for DC_ZVA.
148
*/
149
--
70
--
150
2.20.1
71
2.25.1
151
152
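The comments added above describe the device in prose; the following standalone C sketch (not QEMU code, all names illustrative) shows the same two-timer idea: a free-running down-counter that reloads from LR provides CNT, and the compare event is the moment the counter passes CMP.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t lr;    /* load/reload value */
        uint32_t cmp;   /* compare value */
        uint64_t ticks; /* input clock ticks since enable */
    } EpitModel;

    /* CNT counts down from LR and wraps on reload, like a wall clock. */
    static uint32_t epit_cnt(const EpitModel *m)
    {
        uint64_t period = (uint64_t)m->lr + 1;
        return (uint32_t)(m->lr - (m->ticks % period));
    }

    /* The compare event is the moment CNT == CMP. */
    static int epit_compare_hit(const EpitModel *m)
    {
        return epit_cnt(m) == m->cmp;
    }

    int main(void)
    {
        EpitModel m = { .lr = 9, .cmp = 5, .ticks = 4 };
        printf("cnt=%u hit=%d\n", (unsigned)epit_cnt(&m), epit_compare_hit(&m));
        return 0;
    }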
From: Axel Heider <axel.heider@hensoldt.net>

remove unused defines, add needed defines

Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/timer/imx_epit.h | 4 ++--
hw/timer/imx_epit.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/hw/timer/imx_epit.h b/include/hw/timer/imx_epit.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/timer/imx_epit.h
+++ b/include/hw/timer/imx_epit.h
@@ -XXX,XX +XXX,XX @@
#define CR_OCIEN (1 << 2)
#define CR_RLD (1 << 3)
#define CR_PRESCALE_SHIFT (4)
-#define CR_PRESCALE_MASK (0xfff)
+#define CR_PRESCALE_BITS (12)
#define CR_SWR (1 << 16)
#define CR_IOVW (1 << 17)
#define CR_DBGEN (1 << 18)
@@ -XXX,XX +XXX,XX @@
#define CR_DOZEN (1 << 20)
#define CR_STOPEN (1 << 21)
#define CR_CLKSRC_SHIFT (24)
-#define CR_CLKSRC_MASK (0x3 << CR_CLKSRC_SHIFT)
+#define CR_CLKSRC_BITS (2)

#define EPIT_TIMER_MAX 0XFFFFFFFFUL

diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static void imx_epit_set_freq(IMXEPITState *s)
uint32_t clksrc;
uint32_t prescaler;

- clksrc = extract32(s->cr, CR_CLKSRC_SHIFT, 2);
- prescaler = 1 + extract32(s->cr, CR_PRESCALE_SHIFT, 12);
+ clksrc = extract32(s->cr, CR_CLKSRC_SHIFT, CR_CLKSRC_BITS);
+ prescaler = 1 + extract32(s->cr, CR_PRESCALE_SHIFT, CR_PRESCALE_BITS);

s->freq = imx_ccm_get_clock_frequency(s->ccm,
imx_epit_clocks[clksrc]) / prescaler;
--
2.25.1
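A quick self-contained check of the define change: extracting a field by (shift, width) with extract32() is equivalent to the old shift-and-mask form. extract32() is modelled locally here so the sketch stands alone; in QEMU it comes from include/qemu/bitops.h.

    #include <assert.h>
    #include <stdint.h>

    #define CR_PRESCALE_SHIFT (4)
    #define CR_PRESCALE_BITS  (12)
    #define CR_PRESCALE_MASK  (0xfff)   /* the define being removed */

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    int main(void)
    {
        uint32_t cr = 0xdeadbeef;
        assert(extract32(cr, CR_PRESCALE_SHIFT, CR_PRESCALE_BITS)
               == ((cr >> CR_PRESCALE_SHIFT) & CR_PRESCALE_MASK));
        return 0;
    }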
From: Axel Heider <axel.heider@hensoldt.net>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/timer/imx_epit.h | 2 ++
hw/timer/imx_epit.c | 12 ++++++------
2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/hw/timer/imx_epit.h b/include/hw/timer/imx_epit.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/timer/imx_epit.h
+++ b/include/hw/timer/imx_epit.h
@@ -XXX,XX +XXX,XX @@
#define CR_CLKSRC_SHIFT (24)
#define CR_CLKSRC_BITS (2)

+#define SR_OCIF (1 << 0)
+
#define EPIT_TIMER_MAX 0XFFFFFFFFUL

#define TYPE_IMX_EPIT "imx.epit"
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static const IMXClk imx_epit_clocks[] = {
*/
static void imx_epit_update_int(IMXEPITState *s)
{
- if (s->sr && (s->cr & CR_OCIEN) && (s->cr & CR_EN)) {
+ if ((s->sr & SR_OCIF) && (s->cr & CR_OCIEN) && (s->cr & CR_EN)) {
qemu_irq_raise(s->irq);
} else {
qemu_irq_lower(s->irq);
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
break;

case 1: /* SR - ACK*/
- /* writing 1 to OCIF clears the OCIF bit */
- if (value & 0x01) {
- s->sr = 0;
+ /* writing 1 to SR.OCIF clears this bit and turns the interrupt off */
+ if (value & SR_OCIF) {
+ s->sr = 0; /* SR.OCIF is the only bit in this register anyway */
imx_epit_update_int(s);
}
break;
@@ -XXX,XX +XXX,XX @@ static void imx_epit_cmp(void *opaque)
IMXEPITState *s = IMX_EPIT(opaque);

DPRINTF("sr was %d\n", s->sr);
-
- s->sr = 1;
+ /* Set interrupt status bit SR.OCIF and update the interrupt state */
+ s->sr |= SR_OCIF;
imx_epit_update_int(s);
}

--
2.25.1
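The semantics this patch makes explicit, as a standalone sketch (plain C, not QEMU code; the CR_EN bit position is an assumption taken from the EPIT header, CR_OCIEN appears in the diff above): SR.OCIF is write-one-to-clear, and the IRQ line is the AND of status, interrupt enable and peripheral enable.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CR_EN    (1 << 0)
    #define CR_OCIEN (1 << 2)
    #define SR_OCIF  (1 << 0)

    static bool irq_level(uint32_t sr, uint32_t cr)
    {
        return (sr & SR_OCIF) && (cr & CR_OCIEN) && (cr & CR_EN);
    }

    /* Writing 1 to OCIF clears it; writing 0 leaves it untouched. */
    static uint32_t sr_write(uint32_t sr, uint32_t value)
    {
        return (value & SR_OCIF) ? (sr & ~SR_OCIF) : sr;
    }

    int main(void)
    {
        uint32_t cr = CR_EN | CR_OCIEN, sr = SR_OCIF;
        assert(irq_level(sr, cr));
        sr = sr_write(sr, SR_OCIF);  /* guest acks the interrupt */
        assert(!irq_level(sr, cr));
        return 0;
    }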
From: Axel Heider <axel.heider@hensoldt.net>

The interrupt state can change due to:
- reset clears both SR.OCIF and CR.OCIE
- write to CR.EN or CR.OCIE

Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/timer/imx_epit.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
if (s->cr & CR_SWR) {
/* handle the reset */
imx_epit_reset(DEVICE(s));
- /*
- * TODO: could we 'break' here? following operations appear
- * to duplicate the work imx_epit_reset() already did.
- */
}

+ /*
+ * The interrupt state can change due to:
+ * - reset clears both SR.OCIF and CR.OCIE
+ * - write to CR.EN or CR.OCIE
+ */
+ imx_epit_update_int(s);
+
+ /*
+ * TODO: could we 'break' here for reset? following operations appear
+ * to duplicate the work imx_epit_reset() already did.
+ */
+
ptimer_transaction_begin(s->timer_cmp);
ptimer_transaction_begin(s->timer_reload);

--
2.25.1
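The ordering this patch establishes for a CR write, as a minimal standalone sketch (all types and names illustrative; the soft reset is reduced to the two registers that matter for the interrupt line): apply the value, reset if CR.SWR is set, then recompute the IRQ line once, since reset and OCIE/EN writes all feed into it.

    #include <stdbool.h>
    #include <stdint.h>

    #define CR_EN    (1 << 0)
    #define CR_OCIEN (1 << 2)
    #define CR_SWR   (1 << 16)
    #define SR_OCIF  (1 << 0)

    typedef struct { uint32_t cr, sr; bool irq; } Epit;

    static void update_int(Epit *s)
    {
        /* level = status AND interrupt enable AND peripheral enable */
        s->irq = (s->sr & SR_OCIF) && (s->cr & CR_OCIEN) && (s->cr & CR_EN);
    }

    static void soft_reset(Epit *s)
    {
        s->cr &= ~CR_SWR; /* SWR is self-clearing */
        s->sr = 0;        /* pending OCIF status is dropped */
    }

    static void cr_write(Epit *s, uint32_t value)
    {
        s->cr = value & 0x03ffffff;
        if (s->cr & CR_SWR) {
            soft_reset(s);
        }
        /* a single place recomputes the line for all three causes */
        update_int(s);
    }

    int main(void)
    {
        Epit s = { .sr = SR_OCIF };
        cr_write(&s, CR_EN | CR_OCIEN);
        return s.irq ? 0 : 1; /* irq asserted: OCIF pending, now enabled */
    }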
From: Axel Heider <axel.heider@hensoldt.net>

Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/timer/imx_epit.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static void imx_epit_set_freq(IMXEPITState *s)
/*
* This is called both on hardware (device) reset and software reset.
*/
-static void imx_epit_reset(DeviceState *dev)
+static void imx_epit_reset(IMXEPITState *s, bool is_hard_reset)
{
- IMXEPITState *s = IMX_EPIT(dev);
-
/* Soft reset doesn't touch some bits; hard reset clears them */
- s->cr &= (CR_EN|CR_ENMOD|CR_STOPEN|CR_DOZEN|CR_WAITEN|CR_DBGEN);
+ if (is_hard_reset) {
+ s->cr = 0;
+ } else {
+ s->cr &= (CR_EN|CR_ENMOD|CR_STOPEN|CR_DOZEN|CR_WAITEN|CR_DBGEN);
+ }
s->sr = 0;
s->lr = EPIT_TIMER_MAX;
s->cmp = 0;
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
s->cr = value & 0x03ffffff;
if (s->cr & CR_SWR) {
/* handle the reset */
- imx_epit_reset(DEVICE(s));
+ imx_epit_reset(s, false);
}
}

/*
@@ -XXX,XX +XXX,XX @@ static void imx_epit_realize(DeviceState *dev, Error **errp)
s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_LEGACY);
}

+static void imx_epit_dev_reset(DeviceState *dev)
+{
+ IMXEPITState *s = IMX_EPIT(dev);
+ imx_epit_reset(s, true);
+}
+
static void imx_epit_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);

dc->realize = imx_epit_realize;
- dc->reset = imx_epit_reset;
+ dc->reset = imx_epit_dev_reset;
dc->vmsd = &vmstate_imx_timer_epit;
dc->desc = "i.MX periodic timer";
}
--
2.25.1
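A minimal sketch of the two reset flavours now distinguished (standalone C; the CR bit positions follow include/hw/timer/imx_epit.h, with the CR_ENMOD and CR_WAITEN positions assumed from that header): hard reset clears CR entirely, soft reset keeps the enable and clock-gating configuration bits.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR_EN     (1 << 0)
    #define CR_ENMOD  (1 << 1)
    #define CR_DBGEN  (1 << 18)
    #define CR_WAITEN (1 << 19)
    #define CR_DOZEN  (1 << 20)
    #define CR_STOPEN (1 << 21)

    static uint32_t cr_after_reset(uint32_t cr, bool is_hard_reset)
    {
        /* Hard (power-on) reset clears everything; soft reset keeps the
         * enable/clock-gating bits, as in imx_epit_reset() above. */
        return is_hard_reset
            ? 0
            : cr & (CR_EN | CR_ENMOD | CR_STOPEN | CR_DOZEN |
                    CR_WAITEN | CR_DBGEN);
    }

    int main(void)
    {
        uint32_t cr = CR_EN | CR_STOPEN | (1 << 2) /* e.g. OCIEN */;
        printf("soft=0x%x hard=0x%x\n",
               (unsigned)cr_after_reset(cr, false),
               (unsigned)cr_after_reset(cr, true));
        return 0;
    }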
From: Axel Heider <axel.heider@hensoldt.net>

Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/timer/imx_epit.c | 215 ++++++++++++++++++++++++--------------------
1 file changed, 117 insertions(+), 98 deletions(-)

diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static void imx_epit_reload_compare_timer(IMXEPITState *s)
}
}

+static void imx_epit_write_cr(IMXEPITState *s, uint32_t value)
+{
+ uint32_t oldcr = s->cr;
+
+ s->cr = value & 0x03ffffff;
+
+ if (s->cr & CR_SWR) {
+ /* handle the reset */
+ imx_epit_reset(s, false);
+ }
+
+ /*
+ * The interrupt state can change due to:
+ * - reset clears both SR.OCIF and CR.OCIE
+ * - write to CR.EN or CR.OCIE
+ */
+ imx_epit_update_int(s);
+
+ /*
+ * TODO: could we 'break' here for reset? following operations appear
+ * to duplicate the work imx_epit_reset() already did.
+ */
+
+ ptimer_transaction_begin(s->timer_cmp);
+ ptimer_transaction_begin(s->timer_reload);
+
+ /* Update the frequency. Has been done already in case of a reset. */
+ if (!(s->cr & CR_SWR)) {
+ imx_epit_set_freq(s);
+ }
+
+ if (s->freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) {
+ if (s->cr & CR_ENMOD) {
+ if (s->cr & CR_RLD) {
+ ptimer_set_limit(s->timer_reload, s->lr, 1);
+ ptimer_set_limit(s->timer_cmp, s->lr, 1);
+ } else {
+ ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1);
+ ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1);
+ }
+ }
+
+ imx_epit_reload_compare_timer(s);
+ ptimer_run(s->timer_reload, 0);
+ if (s->cr & CR_OCIEN) {
+ ptimer_run(s->timer_cmp, 0);
+ } else {
+ ptimer_stop(s->timer_cmp);
+ }
+ } else if (!(s->cr & CR_EN)) {
+ /* stop both timers */
+ ptimer_stop(s->timer_reload);
+ ptimer_stop(s->timer_cmp);
+ } else if (s->cr & CR_OCIEN) {
+ if (!(oldcr & CR_OCIEN)) {
+ imx_epit_reload_compare_timer(s);
+ ptimer_run(s->timer_cmp, 0);
+ }
+ } else {
+ ptimer_stop(s->timer_cmp);
+ }
+
+ ptimer_transaction_commit(s->timer_cmp);
+ ptimer_transaction_commit(s->timer_reload);
+}
+
+static void imx_epit_write_sr(IMXEPITState *s, uint32_t value)
+{
+ /* writing 1 to SR.OCIF clears this bit and turns the interrupt off */
+ if (value & SR_OCIF) {
+ s->sr = 0; /* SR.OCIF is the only bit in this register anyway */
+ imx_epit_update_int(s);
+ }
+}
+
+static void imx_epit_write_lr(IMXEPITState *s, uint32_t value)
+{
+ s->lr = value;
+
+ ptimer_transaction_begin(s->timer_cmp);
+ ptimer_transaction_begin(s->timer_reload);
+ if (s->cr & CR_RLD) {
+ /* Also set the limit if the LRD bit is set */
+ /* If IOVW bit is set then set the timer value */
+ ptimer_set_limit(s->timer_reload, s->lr, s->cr & CR_IOVW);
+ ptimer_set_limit(s->timer_cmp, s->lr, 0);
+ } else if (s->cr & CR_IOVW) {
+ /* If IOVW bit is set then set the timer value */
+ ptimer_set_count(s->timer_reload, s->lr);
+ }
+ /*
+ * Commit the change to s->timer_reload, so it can propagate. Otherwise
+ * the timer interrupt may not fire properly. The commit must happen
+ * before calling imx_epit_reload_compare_timer(), which reads
+ * s->timer_reload internally again.
+ */
+ ptimer_transaction_commit(s->timer_reload);
+ imx_epit_reload_compare_timer(s);
+ ptimer_transaction_commit(s->timer_cmp);
+}
+
+static void imx_epit_write_cmp(IMXEPITState *s, uint32_t value)
+{
+ s->cmp = value;
+
+ ptimer_transaction_begin(s->timer_cmp);
+ imx_epit_reload_compare_timer(s);
+ ptimer_transaction_commit(s->timer_cmp);
+}
+
static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value,
unsigned size)
{
IMXEPITState *s = IMX_EPIT(opaque);
- uint64_t oldcr;

DPRINTF("(%s, value = 0x%08x)\n", imx_epit_reg_name(offset >> 2),
(uint32_t)value);

switch (offset >> 2) {
case 0: /* CR */
-
- oldcr = s->cr;
- s->cr = value & 0x03ffffff;
- if (s->cr & CR_SWR) {
- /* handle the reset */
- imx_epit_reset(s, false);
- }
-
- /*
- * The interrupt state can change due to:
- * - reset clears both SR.OCIF and CR.OCIE
- * - write to CR.EN or CR.OCIE
- */
- imx_epit_update_int(s);
-
- /*
- * TODO: could we 'break' here for reset? following operations appear
- * to duplicate the work imx_epit_reset() already did.
- */
-
- ptimer_transaction_begin(s->timer_cmp);
- ptimer_transaction_begin(s->timer_reload);
-
- /* Update the frequency. Has been done already in case of a reset. */
- if (!(s->cr & CR_SWR)) {
- imx_epit_set_freq(s);
- }
-
- if (s->freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) {
- if (s->cr & CR_ENMOD) {
- if (s->cr & CR_RLD) {
- ptimer_set_limit(s->timer_reload, s->lr, 1);
- ptimer_set_limit(s->timer_cmp, s->lr, 1);
- } else {
- ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1);
- ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1);
- }
- }
-
- imx_epit_reload_compare_timer(s);
- ptimer_run(s->timer_reload, 0);
- if (s->cr & CR_OCIEN) {
- ptimer_run(s->timer_cmp, 0);
- } else {
- ptimer_stop(s->timer_cmp);
- }
- } else if (!(s->cr & CR_EN)) {
- /* stop both timers */
- ptimer_stop(s->timer_reload);
- ptimer_stop(s->timer_cmp);
- } else if (s->cr & CR_OCIEN) {
- if (!(oldcr & CR_OCIEN)) {
- imx_epit_reload_compare_timer(s);
- ptimer_run(s->timer_cmp, 0);
- }
- } else {
- ptimer_stop(s->timer_cmp);
- }
-
- ptimer_transaction_commit(s->timer_cmp);
- ptimer_transaction_commit(s->timer_reload);
+ imx_epit_write_cr(s, (uint32_t)value);
break;

- case 1: /* SR - ACK*/
- /* writing 1 to SR.OCIF clears this bit and turns the interrupt off */
- if (value & SR_OCIF) {
- s->sr = 0; /* SR.OCIF is the only bit in this register anyway */
- imx_epit_update_int(s);
- }
+ case 1: /* SR */
+ imx_epit_write_sr(s, (uint32_t)value);
break;

- case 2: /* LR - set ticks */
- s->lr = value;
-
- ptimer_transaction_begin(s->timer_cmp);
- ptimer_transaction_begin(s->timer_reload);
- if (s->cr & CR_RLD) {
- /* Also set the limit if the LRD bit is set */
- /* If IOVW bit is set then set the timer value */
- ptimer_set_limit(s->timer_reload, s->lr, s->cr & CR_IOVW);
- ptimer_set_limit(s->timer_cmp, s->lr, 0);
- } else if (s->cr & CR_IOVW) {
- /* If IOVW bit is set then set the timer value */
- ptimer_set_count(s->timer_reload, s->lr);
- }
- /*
- * Commit the change to s->timer_reload, so it can propagate. Otherwise
- * the timer interrupt may not fire properly. The commit must happen
- * before calling imx_epit_reload_compare_timer(), which reads
- * s->timer_reload internally again.
- */
- ptimer_transaction_commit(s->timer_reload);
- imx_epit_reload_compare_timer(s);
- ptimer_transaction_commit(s->timer_cmp);
+ case 2: /* LR */
+ imx_epit_write_lr(s, (uint32_t)value);
break;

case 3: /* CMP */
- s->cmp = value;
-
- ptimer_transaction_begin(s->timer_cmp);
- imx_epit_reload_compare_timer(s);
- ptimer_transaction_commit(s->timer_cmp);
-
+ imx_epit_write_cmp(s, (uint32_t)value);
break;

default:
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
HWADDR_PRIx "\n", TYPE_IMX_EPIT, __func__, offset);
-
break;
}
}
+
static void imx_epit_cmp(void *opaque)
{
IMXEPITState *s = IMX_EPIT(opaque);
--
2.25.1
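The shape this refactoring gives the MMIO write path, as a standalone sketch (plain C; handler bodies are reduced to plain register stores, and the offsets follow the model's 32-bit register file: CR at 0x0, SR at 0x4, LR at 0x8, CMP at 0xC):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t cr, sr, lr, cmp; } Epit;

    static void write_cr(Epit *s, uint32_t v)  { s->cr = v & 0x03ffffff; }
    static void write_sr(Epit *s, uint32_t v)  { s->sr &= ~(v & 1); /* W1C */ }
    static void write_lr(Epit *s, uint32_t v)  { s->lr = v; }
    static void write_cmp(Epit *s, uint32_t v) { s->cmp = v; }

    /* Thin dispatcher: one handler per register, as in the patch. */
    static void epit_write(Epit *s, uint32_t offset, uint64_t value)
    {
        switch (offset >> 2) {
        case 0: write_cr(s, (uint32_t)value);  break;
        case 1: write_sr(s, (uint32_t)value);  break;
        case 2: write_lr(s, (uint32_t)value);  break;
        case 3: write_cmp(s, (uint32_t)value); break;
        default:
            fprintf(stderr, "bad register 0x%x\n", (unsigned)offset);
            break;
        }
    }

    int main(void)
    {
        Epit s = {0};
        epit_write(&s, 0x8, 1000); /* LR */
        epit_write(&s, 0xc, 500);  /* CMP */
        printf("lr=%u cmp=%u\n", (unsigned)s.lr, (unsigned)s.cmp);
        return 0;
    }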
From: Axel Heider <axel.heider@hensoldt.net>

The CNT register is a read-only register. There is no need to
store its value; it can be calculated on demand.
The calculated frequency is needed temporarily only.

Note that this is a migration compatibility break for all board
types that use the EPIT peripheral.

Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/timer/imx_epit.h | 2 -
hw/timer/imx_epit.c | 73 ++++++++++++++-----------------------
2 files changed, 28 insertions(+), 47 deletions(-)

diff --git a/include/hw/timer/imx_epit.h b/include/hw/timer/imx_epit.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/timer/imx_epit.h
+++ b/include/hw/timer/imx_epit.h
@@ -XXX,XX +XXX,XX @@ struct IMXEPITState {
uint32_t sr;
uint32_t lr;
uint32_t cmp;
- uint32_t cnt;

- uint32_t freq;
qemu_irq irq;
};

diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -XXX,XX +XXX,XX @@ static void imx_epit_update_int(IMXEPITState *s)
}
}

-/*
- * Must be called from within a ptimer_transaction_begin/commit block
- * for both s->timer_cmp and s->timer_reload.
- */
-static void imx_epit_set_freq(IMXEPITState *s)
+static uint32_t imx_epit_get_freq(IMXEPITState *s)
{
- uint32_t clksrc;
- uint32_t prescaler;
-
- clksrc = extract32(s->cr, CR_CLKSRC_SHIFT, CR_CLKSRC_BITS);
- prescaler = 1 + extract32(s->cr, CR_PRESCALE_SHIFT, CR_PRESCALE_BITS);
-
- s->freq = imx_ccm_get_clock_frequency(s->ccm,
- imx_epit_clocks[clksrc]) / prescaler;
-
- DPRINTF("Setting ptimer frequency to %u\n", s->freq);
-
- if (s->freq) {
- ptimer_set_freq(s->timer_reload, s->freq);
- ptimer_set_freq(s->timer_cmp, s->freq);
- }
+ uint32_t clksrc = extract32(s->cr, CR_CLKSRC_SHIFT, CR_CLKSRC_BITS);
+ uint32_t prescaler = 1 + extract32(s->cr, CR_PRESCALE_SHIFT, CR_PRESCALE_BITS);
+ uint32_t f_in = imx_ccm_get_clock_frequency(s->ccm, imx_epit_clocks[clksrc]);
+ uint32_t freq = f_in / prescaler;
+ DPRINTF("ptimer frequency is %u\n", freq);
+ return freq;
}

/*
@@ -XXX,XX +XXX,XX @@ static void imx_epit_reset(IMXEPITState *s, bool is_hard_reset)
s->sr = 0;
s->lr = EPIT_TIMER_MAX;
s->cmp = 0;
- s->cnt = 0;
ptimer_transaction_begin(s->timer_cmp);
ptimer_transaction_begin(s->timer_reload);
- /* stop both timers */
+
+ /*
+ * The reset switches off the input clock, so even if the CR.EN is still
+ * set, the timers are no longer running.
+ */
+ assert(imx_epit_get_freq(s) == 0);
ptimer_stop(s->timer_cmp);
ptimer_stop(s->timer_reload);
- /* compute new frequency */
- imx_epit_set_freq(s);
/* init both timers to EPIT_TIMER_MAX */
ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1);
ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1);
- if (s->freq && (s->cr & CR_EN)) {
- /* if the timer is still enabled, restart it */
- ptimer_run(s->timer_reload, 0);
- }
ptimer_transaction_commit(s->timer_cmp);
ptimer_transaction_commit(s->timer_reload);
}

-static uint32_t imx_epit_update_count(IMXEPITState *s)
-{
- s->cnt = ptimer_get_count(s->timer_reload);
-
- return s->cnt;
-}
-
static uint64_t imx_epit_read(void *opaque, hwaddr offset, unsigned size)
{
IMXEPITState *s = IMX_EPIT(opaque);
@@ -XXX,XX +XXX,XX @@ static uint64_t imx_epit_read(void *opaque, hwaddr offset, unsigned size)
break;

case 4: /* CNT */
- imx_epit_update_count(s);
- reg_value = s->cnt;
+ reg_value = ptimer_get_count(s->timer_reload);
break;

default:
@@ -XXX,XX +XXX,XX @@ static void imx_epit_reload_compare_timer(IMXEPITState *s)
{
if ((s->cr & (CR_EN | CR_OCIEN)) == (CR_EN | CR_OCIEN)) {
/* if the compare feature is on and timers are running */
- uint32_t tmp = imx_epit_update_count(s);
+ uint32_t tmp = ptimer_get_count(s->timer_reload);
uint64_t next;
if (tmp > s->cmp) {
/* It'll fire in this round of the timer */
@@ -XXX,XX +XXX,XX @@ static void imx_epit_reload_compare_timer(IMXEPITState *s)

static void imx_epit_write_cr(IMXEPITState *s, uint32_t value)
{
+ uint32_t freq = 0;
uint32_t oldcr = s->cr;

s->cr = value & 0x03ffffff;
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write_cr(IMXEPITState *s, uint32_t value)
ptimer_transaction_begin(s->timer_cmp);
ptimer_transaction_begin(s->timer_reload);

- /* Update the frequency. Has been done already in case of a reset. */
+ /*
+ * Update the frequency. In case of a reset the input clock was
+ * switched off, so this can be skipped.
+ */
if (!(s->cr & CR_SWR)) {
- imx_epit_set_freq(s);
+ freq = imx_epit_get_freq(s);
+ if (freq) {
+ ptimer_set_freq(s->timer_reload, freq);
+ ptimer_set_freq(s->timer_cmp, freq);
+ }
}

- if (s->freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) {
+ if (freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) {
if (s->cr & CR_ENMOD) {
if (s->cr & CR_RLD) {
ptimer_set_limit(s->timer_reload, s->lr, 1);
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps imx_epit_ops = {

static const VMStateDescription vmstate_imx_timer_epit = {
.name = TYPE_IMX_EPIT,
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_UINT32(cr, IMXEPITState),
VMSTATE_UINT32(sr, IMXEPITState),
VMSTATE_UINT32(lr, IMXEPITState),
VMSTATE_UINT32(cmp, IMXEPITState),
- VMSTATE_UINT32(cnt, IMXEPITState),
- VMSTATE_UINT32(freq, IMXEPITState),
VMSTATE_PTIMER(timer_reload, IMXEPITState),
VMSTATE_PTIMER(timer_cmp, IMXEPITState),
VMSTATE_END_OF_LIST()
--
2.25.1
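The "derive, don't cache" idea of this patch in a standalone sketch (plain C; f_in stands in for the CCM clock, and extract32() is modelled locally): the frequency is recomputed from CR whenever it is needed, so no freq field has to exist or be migrated.

    #include <stdint.h>
    #include <stdio.h>

    #define CR_PRESCALE_SHIFT (4)
    #define CR_PRESCALE_BITS  (12)

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    /* A prescaler field of N divides the input clock by N + 1. */
    static uint32_t epit_freq(uint32_t cr, uint32_t f_in)
    {
        return f_in / (1 + extract32(cr, CR_PRESCALE_SHIFT, CR_PRESCALE_BITS));
    }

    int main(void)
    {
        /* 66 MHz input, divide-by-66 -> 1 MHz timer clock */
        printf("%u Hz\n", (unsigned)epit_freq(65u << CR_PRESCALE_SHIFT, 66000000u));
        return 0;
    }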
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Axel Heider <axel.heider@hensoldt.net>
2
2
3
The log2_esize parameter is not used except trivially.
3
- fix #1263 for CR writes
4
Drop the parameter and the deferral to gen_mte_check1.
4
- rework compare time handling
5
- The compare timer has to run even if CR.OCIEN is not set,
6
as SR.OCIF must be updated.
7
- The compare timer fires exactly once when the
8
compare value is less than the current value, but the
9
reload values is less than the compare value.
10
- The compare timer will never fire if the reload value is
11
less than the compare value. Disable it in this case.
5
12
6
This fixes a bug in that the parameters as documented
13
Signed-off-by: Axel Heider <axel.heider@hensoldt.net>
7
in the header file were the reverse from those in the
14
[PMM: fixed minor style nits]
8
implementation. Which meant that translate-sve.c was
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
passing the parameters in the wrong order.
10
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20210416183106.1516563-10-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
17
---
16
target/arm/translate-a64.h | 2 +-
18
hw/timer/imx_epit.c | 192 ++++++++++++++++++++++++++------------------
17
target/arm/translate-a64.c | 15 +++++++--------
19
1 file changed, 116 insertions(+), 76 deletions(-)
18
target/arm/translate-sve.c | 4 ++--
19
3 files changed, 10 insertions(+), 11 deletions(-)
20
20
21
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
21
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
22
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/translate-a64.h
23
--- a/hw/timer/imx_epit.c
24
+++ b/target/arm/translate-a64.h
24
+++ b/hw/timer/imx_epit.c
25
@@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
25
@@ -XXX,XX +XXX,XX @@
26
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
26
* Originally written by Hans Jiang
27
bool tag_checked, int log2_size);
27
* Updated by Peter Chubb
28
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
28
* Updated by Jean-Christophe Dubois <jcd@tribudubois.net>
29
- bool tag_checked, int count, int log2_esize);
29
+ * Updated by Axel Heider
30
+ bool tag_checked, int size);
30
*
31
31
* This code is licensed under GPL version 2 or later. See
32
/* We should have at some point before trying to access an FP register
32
* the COPYING file in the top-level directory.
33
* done the necessary access check, so assert that
33
@@ -XXX,XX +XXX,XX @@ static uint64_t imx_epit_read(void *opaque, hwaddr offset, unsigned size)
34
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
34
return reg_value;
35
index XXXXXXX..XXXXXXX 100644
35
}
36
--- a/target/arm/translate-a64.c
36
37
+++ b/target/arm/translate-a64.c
37
-/* Must be called from ptimer_transaction_begin/commit block for s->timer_cmp */
38
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
38
-static void imx_epit_reload_compare_timer(IMXEPITState *s)
39
* For MTE, check multiple logical sequential accesses.
39
+/*
40
*/
40
+ * Must be called from a ptimer_transaction_begin/commit block for
41
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
41
+ * s->timer_cmp, but outside of a transaction block of s->timer_reload,
42
- bool tag_checked, int log2_esize, int total_size)
42
+ * so the proper counter value is read.
43
+ bool tag_checked, int size)
43
+ */
44
{
44
+static void imx_epit_update_compare_timer(IMXEPITState *s)
45
- if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
45
{
46
+ if (tag_checked && s->mte_active[0]) {
46
- if ((s->cr & (CR_EN | CR_OCIEN)) == (CR_EN | CR_OCIEN)) {
47
TCGv_i32 tcg_desc;
47
- /* if the compare feature is on and timers are running */
48
TCGv_i64 ret;
48
- uint32_t tmp = ptimer_get_count(s->timer_reload);
49
int desc = 0;
49
- uint64_t next;
50
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
50
- if (tmp > s->cmp) {
51
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
51
- /* It'll fire in this round of the timer */
52
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
52
- next = tmp - s->cmp;
53
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
53
- } else { /* catch it next time around */
54
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
54
- next = tmp - s->cmp + ((s->cr & CR_RLD) ? EPIT_TIMER_MAX : s->lr);
55
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
55
+ uint64_t counter = 0;
56
tcg_desc = tcg_const_i32(desc);
56
+ bool is_oneshot = false;
57
57
+ /*
58
ret = new_tmp_a64(s);
58
+ * The compare timer only has to run if the timer peripheral is active
59
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
59
+ * and there is an input clock. Otherwise it can be switched off.
60
60
+ */
61
return ret;
61
+ bool is_active = (s->cr & CR_EN) && imx_epit_get_freq(s);
62
+ if (is_active) {
63
+ /*
64
+ * Calculate next timeout for compare timer. Reading the reload
65
+ * counter returns proper results only if pending transactions
66
+ * on it are committed here. Otherwise stale values would be read.
67
+ */
68
+ counter = ptimer_get_count(s->timer_reload);
69
+ uint64_t limit = ptimer_get_limit(s->timer_cmp);
70
+ /*
71
+ * The compare timer is a periodic timer if the limit is at least
72
+ * the compare value. Otherwise it may fire at most once in the
73
+ * current round.
74
+ */
75
+ is_oneshot = (limit < s->cmp);
76
+ if (counter >= s->cmp) {
77
+ /* The compare timer fires in the current round. */
78
+ counter -= s->cmp;
79
+ } else if (!is_oneshot) {
80
+ /*
81
+ * The compare timer fires after a reload, as it is below the
82
+ * compare value already in this round. Note that the counter
83
+ * value calculated below can be above the 32-bit limit, which
84
+ * is legal here because the compare timer is an internal
85
+ * helper ptimer only.
86
+ */
87
+ counter += limit - s->cmp;
88
+ } else {
89
+ /*
90
+ * The compare timer won't fire in this round, and the limit is
91
+ * set to a value below the compare value. This practically means
92
+ * it will never fire, so it can be switched off.
93
+ */
94
+ is_active = false;
95
}
96
- ptimer_set_count(s->timer_cmp, next);
62
}
97
}
63
- return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
98
+
64
+ return clean_data_tbi(s, addr);
99
+ /*
65
}
100
+ * Set the compare timer and let it run, or stop it. This is agnostic
66
101
+ * of the CR.OCIEN bit, as this bit affects interrupt generation only. The
67
typedef struct DisasCompare64 {
102
+ * compare timer needs to run even if no interrupts are to be generated,
68
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
103
+ * because the SR.OCIF bit must still be updated.
104
+ * Note that the timer might already be stopped or be running with
105
+ * the correct counter values. However, finding out when an update is
106
+ * needed and when it is not is not trivial. It's much easier to apply
107
+ * the settings again, as this does no harm and the overhead is negligible.
108
+ */
109
+ if (is_active) {
110
+ ptimer_set_count(s->timer_cmp, counter);
111
+ ptimer_run(s->timer_cmp, is_oneshot ? 1 : 0);
112
+ } else {
113
+ ptimer_stop(s->timer_cmp);
114
+ }
115
+
116
}
117
118
static void imx_epit_write_cr(IMXEPITState *s, uint32_t value)
119
{
120
- uint32_t freq = 0;
121
uint32_t oldcr = s->cr;
122
123
s->cr = value & 0x03ffffff;
124
125
if (s->cr & CR_SWR) {
126
- /* handle the reset */
127
+ /*
128
+ * Reset clears CR.SWR again. It does not touch CR.EN, but the timers
129
+ * are still stopped because the input clock is disabled.
130
+ */
131
imx_epit_reset(s, false);
132
+ } else {
133
+ uint32_t freq;
134
+ uint32_t toggled_cr_bits = oldcr ^ s->cr;
135
+ /* re-initialize the limits if CR.RLD has changed */
136
+ bool set_limit = toggled_cr_bits & CR_RLD;
137
+ /* set the counter if the timer was just enabled and CR.ENMOD is set */
138
+ bool is_switched_on = (toggled_cr_bits & s->cr) & CR_EN;
139
+ bool set_counter = is_switched_on && (s->cr & CR_ENMOD);
140
+
141
+ ptimer_transaction_begin(s->timer_cmp);
142
+ ptimer_transaction_begin(s->timer_reload);
143
+ freq = imx_epit_get_freq(s);
144
+ if (freq) {
145
+ ptimer_set_freq(s->timer_reload, freq);
146
+ ptimer_set_freq(s->timer_cmp, freq);
147
+ }
148
+
149
+ if (set_limit || set_counter) {
150
+ uint64_t limit = (s->cr & CR_RLD) ? s->lr : EPIT_TIMER_MAX;
151
+ ptimer_set_limit(s->timer_reload, limit, set_counter ? 1 : 0);
152
+ if (set_limit) {
153
+ ptimer_set_limit(s->timer_cmp, limit, 0);
154
+ }
155
+ }
156
+ /*
157
+ * If there is an input clock and the peripheral is enabled, then
158
+ * ensure the wall clock timer is ticking. Otherwise stop the timers.
159
+ * The compare timer will be updated later.
160
+ */
161
+ if (freq && (s->cr & CR_EN)) {
162
+ ptimer_run(s->timer_reload, 0);
163
+ } else {
164
+ ptimer_stop(s->timer_reload);
165
+ }
166
+ /* Commit changes to reload timer, so they can propagate. */
167
+ ptimer_transaction_commit(s->timer_reload);
168
+ /* Update compare timer based on the committed reload timer value. */
169
+ imx_epit_update_compare_timer(s);
170
+ ptimer_transaction_commit(s->timer_cmp);
69
}
171
}
70
172
71
clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
173
/*
72
- (wback || rn != 31) && !set_tag,
174
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write_cr(IMXEPITState *s, uint32_t value)
73
- size, 2 << size);
175
* - write to CR.EN or CR.OCIE
74
+ (wback || rn != 31) && !set_tag, 2 << size);
75
76
if (is_vector) {
77
if (is_load) {
78
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
79
* promote consecutive little-endian elements below.
80
*/
176
*/
81
clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
177
imx_epit_update_int(s);
82
- size, total);
178
-
83
+ total);
179
- /*
84
180
- * TODO: could we 'break' here for reset? following operations appear
85
/*
181
- * to duplicate the work imx_epit_reset() already did.
86
* Consecutive little-endian elements from a single register
182
- */
87
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
183
-
88
tcg_rn = cpu_reg_sp(s, rn);
184
- ptimer_transaction_begin(s->timer_cmp);
89
185
- ptimer_transaction_begin(s->timer_reload);
90
clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
186
-
91
- scale, total);
187
- /*
92
+ total);
188
- * Update the frequency. In case of a reset the input clock was
93
189
- * switched off, so this can be skipped.
94
tcg_ebytes = tcg_const_i64(1 << scale);
190
- */
95
for (xs = 0; xs < selem; xs++) {
191
- if (!(s->cr & CR_SWR)) {
96
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
192
- freq = imx_epit_get_freq(s);
97
index XXXXXXX..XXXXXXX 100644
193
- if (freq) {
98
--- a/target/arm/translate-sve.c
194
- ptimer_set_freq(s->timer_reload, freq);
99
+++ b/target/arm/translate-sve.c
195
- ptimer_set_freq(s->timer_cmp, freq);
100
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
196
- }
101
197
- }
102
dirty_addr = tcg_temp_new_i64();
198
-
103
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
199
- if (freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) {
104
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
200
- if (s->cr & CR_ENMOD) {
105
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
201
- if (s->cr & CR_RLD) {
106
tcg_temp_free_i64(dirty_addr);
202
- ptimer_set_limit(s->timer_reload, s->lr, 1);
107
203
- ptimer_set_limit(s->timer_cmp, s->lr, 1);
108
/*
204
- } else {
109
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
205
- ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1);
110
206
- ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1);
111
dirty_addr = tcg_temp_new_i64();
207
- }
112
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
208
- }
113
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
209
-
114
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
210
- imx_epit_reload_compare_timer(s);
115
tcg_temp_free_i64(dirty_addr);
211
- ptimer_run(s->timer_reload, 0);
116
212
- if (s->cr & CR_OCIEN) {
117
/* Note that unpredicated load/store of vector/predicate registers
213
- ptimer_run(s->timer_cmp, 0);
214
- } else {
215
- ptimer_stop(s->timer_cmp);
216
- }
217
- } else if (!(s->cr & CR_EN)) {
218
- /* stop both timers */
219
- ptimer_stop(s->timer_reload);
220
- ptimer_stop(s->timer_cmp);
221
- } else if (s->cr & CR_OCIEN) {
222
- if (!(oldcr & CR_OCIEN)) {
223
- imx_epit_reload_compare_timer(s);
224
- ptimer_run(s->timer_cmp, 0);
225
- }
226
- } else {
227
- ptimer_stop(s->timer_cmp);
228
- }
229
-
230
- ptimer_transaction_commit(s->timer_cmp);
231
- ptimer_transaction_commit(s->timer_reload);
232
}
233
234
static void imx_epit_write_sr(IMXEPITState *s, uint32_t value)
235
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write_lr(IMXEPITState *s, uint32_t value)
236
/* If IOVW bit is set then set the timer value */
237
ptimer_set_count(s->timer_reload, s->lr);
238
}
239
- /*
240
- * Commit the change to s->timer_reload, so it can propagate. Otherwise
241
- * the timer interrupt may not fire properly. The commit must happen
242
- * before calling imx_epit_reload_compare_timer(), which reads
243
- * s->timer_reload internally again.
244
- */
245
+ /* Commit the changes to s->timer_reload, so they can propagate. */
246
ptimer_transaction_commit(s->timer_reload);
247
- imx_epit_reload_compare_timer(s);
248
+ /* Update the compare timer based on the committed reload timer value. */
249
+ imx_epit_update_compare_timer(s);
250
ptimer_transaction_commit(s->timer_cmp);
251
}
252
253
@@ -XXX,XX +XXX,XX @@ static void imx_epit_write_cmp(IMXEPITState *s, uint32_t value)
254
{
255
s->cmp = value;
256
257
+ /* Update the compare timer based on the committed reload timer value. */
258
ptimer_transaction_begin(s->timer_cmp);
259
- imx_epit_reload_compare_timer(s);
260
+ imx_epit_update_compare_timer(s);
261
ptimer_transaction_commit(s->timer_cmp);
262
}
263
264
@@ -XXX,XX +XXX,XX @@ static void imx_epit_cmp(void *opaque)
265
{
266
IMXEPITState *s = IMX_EPIT(opaque);
267
268
+ /* The cmp ptimer can't be running when the peripheral is disabled */
269
+ assert(s->cr & CR_EN);
270
+
271
DPRINTF("sr was %d\n", s->sr);
272
/* Set interrupt status bit SR.OCIF and update the interrupt state */
273
s->sr |= SR_OCIF;
118
--
274
--
119
2.20.1
275
2.25.1
120
121
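To make the reworked compare-timer arithmetic in imx_epit_update_compare_timer() above easier to follow, here is a minimal stand-alone model (the epit_next_compare() helper is invented for illustration; the real code drives ptimers rather than returning a tick count):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone sketch of the timeout arithmetic above. The EPIT counts
 * down from 'limit'; the compare event fires when the counter reaches
 * 'cmp'. Returns true and stores the ticks until the next event, or
 * returns false if the compare event can never fire again.
 */
static bool epit_next_compare(uint64_t counter, uint64_t limit,
                              uint32_t cmp, uint64_t *ticks)
{
    bool is_oneshot = (limit < cmp); /* reload value below the compare point */

    if (counter >= cmp) {
        /* Fires in the current round. */
        *ticks = counter - cmp;
        return true;
    }
    if (!is_oneshot) {
        /* Fires after the next reload; may exceed 32 bits, as noted above. */
        *ticks = counter + (limit - cmp);
        return true;
    }
    /* Already below the compare point and the reload won't reach it. */
    return false;
}

int main(void)
{
    uint64_t ticks;
    /* counter = 100, limit = 500, cmp = 300: 100 + (500 - 300) = 300 ticks */
    if (epit_next_compare(100, 500, 300, &ticks)) {
        printf("next compare event in %llu ticks\n",
               (unsigned long long)ticks);
    }
    return 0;
}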
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Fabiano Rosas <farosas@suse.de>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Fix these:
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20210419202257.161730-28-richard.henderson@linaro.org
5
WARNING: Block comments use a leading /* on a separate line
6
WARNING: Block comments use * on subsequent lines
7
WARNING: Block comments use a trailing */ on a separate line
8
9
Signed-off-by: Fabiano Rosas <farosas@suse.de>
10
Reviewed-by: Claudio Fontana <cfontana@suse.de>
11
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
12
Message-id: 20221213190537.511-2-farosas@suse.de
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
14
---
8
target/arm/translate-a64.c | 23 ++++++++++++++---------
15
target/arm/helper.c | 323 +++++++++++++++++++++++++++++---------------
9
1 file changed, 14 insertions(+), 9 deletions(-)
16
1 file changed, 215 insertions(+), 108 deletions(-)
10
17
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
18
diff --git a/target/arm/helper.c b/target/arm/helper.c
12
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
20
--- a/target/arm/helper.c
14
+++ b/target/arm/translate-a64.c
21
+++ b/target/arm/helper.c
15
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
22
@@ -XXX,XX +XXX,XX @@ uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
16
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
23
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
17
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
24
uint64_t v)
18
true, rn != 31, size);
25
{
19
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
26
- /* Raw write of a coprocessor register (as needed for migration, etc).
20
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
27
+ /*
21
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
28
+ * Raw write of a coprocessor register (as needed for migration, etc).
22
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
29
* Note that constant registers are treated as write-ignored; the
30
* caller should check for success by whether a readback gives the
31
* value written.
32
@@ -XXX,XX +XXX,XX @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
33
34
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
35
{
36
- /* Return true if the regdef would cause an assertion if you called
37
+ /*
38
+ * Return true if the regdef would cause an assertion if you called
39
* read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
40
* program bug for it not to have the NO_RAW flag).
41
* NB that returning false here doesn't necessarily mean that calling
42
@@ -XXX,XX +XXX,XX @@ bool write_list_to_cpustate(ARMCPU *cpu)
43
if (ri->type & ARM_CP_NO_RAW) {
44
continue;
45
}
46
- /* Write value and confirm it reads back as written
47
+ /*
48
+ * Write value and confirm it reads back as written
49
* (to catch read-only registers and partially read-only
50
* registers where the incoming migration value doesn't match)
51
*/
52
@@ -XXX,XX +XXX,XX @@ static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
53
54
void init_cpreg_list(ARMCPU *cpu)
55
{
56
- /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
57
+ /*
58
+ * Initialise the cpreg_tuples[] array based on the cp_regs hash.
59
* Note that we require cpreg_tuples[] to be sorted by key ID.
60
*/
61
GList *keys;
62
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_el3_aa32ns(CPUARMState *env,
63
return CP_ACCESS_OK;
64
}
65
66
-/* Some secure-only AArch32 registers trap to EL3 if used from
67
+/*
68
+ * Some secure-only AArch32 registers trap to EL3 if used from
69
* Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
70
* Note that an access from Secure EL1 can only happen if EL3 is AArch64.
71
* We assume that the .access field is set to PL1_RW.
72
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
73
return CP_ACCESS_TRAP_UNCATEGORIZED;
74
}
75
76
-/* Check for traps to performance monitor registers, which are controlled
77
+/*
78
+ * Check for traps to performance monitor registers, which are controlled
79
* by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
80
*/
81
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
82
@@ -XXX,XX +XXX,XX @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
83
ARMCPU *cpu = env_archcpu(env);
84
85
if (raw_read(env, ri) != value) {
86
- /* Unlike real hardware the qemu TLB uses virtual addresses,
87
+ /*
88
+ * Unlike real hardware the qemu TLB uses virtual addresses,
89
* not modified virtual addresses, so this causes a TLB flush.
90
*/
91
tlb_flush(CPU(cpu));
92
@@ -XXX,XX +XXX,XX @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
93
94
if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
95
&& !extended_addresses_enabled(env)) {
96
- /* For VMSA (when not using the LPAE long descriptor page table
97
+ /*
98
+ * For VMSA (when not using the LPAE long descriptor page table
99
* format) this register includes the ASID, so do a TLB flush.
100
* For PMSA it is purely a process ID and no action is needed.
101
*/
102
@@ -XXX,XX +XXX,XX @@ static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
103
}
104
105
static const ARMCPRegInfo cp_reginfo[] = {
106
- /* Define the secure and non-secure FCSE identifier CP registers
107
+ /*
108
+ * Define the secure and non-secure FCSE identifier CP registers
109
* separately because there is no secure bank in V8 (no _EL3). This allows
110
* the secure register to be properly reset and migrated. There is also no
111
* v8 EL1 version of the register so the non-secure instance stands alone.
112
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cp_reginfo[] = {
113
.access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
114
.fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
115
.resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
116
- /* Define the secure and non-secure context identifier CP registers
117
+ /*
118
+ * Define the secure and non-secure context identifier CP registers
119
* separately because there is no secure bank in V8 (no _EL3). This allows
120
* the secure register to be properly reset and migrated. In the
121
* non-secure case, the 32-bit register will have reset and migration
122
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cp_reginfo[] = {
123
};
124
125
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
126
- /* NB: Some of these registers exist in v8 but with more precise
127
+ /*
128
+ * NB: Some of these registers exist in v8 but with more precise
129
* definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
130
*/
131
/* MMU Domain access control / MPU write buffer control */
132
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
133
.writefn = dacr_write, .raw_writefn = raw_write,
134
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
135
offsetoflow32(CPUARMState, cp15.dacr_ns) } },
136
- /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
137
+ /*
138
+ * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
139
* For v6 and v5, these mappings are overly broad.
140
*/
141
{ .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
142
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
143
};
144
145
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
146
- /* Not all pre-v6 cores implemented this WFI, so this is slightly
147
+ /*
148
+ * Not all pre-v6 cores implemented this WFI, so this is slightly
149
* over-broad.
150
*/
151
{ .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
152
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v6_cp_reginfo[] = {
153
};
154
155
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
156
- /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
157
+ /*
158
+ * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
159
* is UNPREDICTABLE; we choose to NOP as most implementations do).
160
*/
161
{ .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
162
.access = PL1_W, .type = ARM_CP_WFI },
163
- /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
164
+ /*
165
+ * L1 cache lockdown. Not architectural in v6 and earlier but in practice
166
* implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
167
* OMAPCP will override this space.
168
*/
169
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
170
{ .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
171
.access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
172
.resetvalue = 0 },
173
- /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
174
+ /*
175
+ * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
176
* implementing it as RAZ means the "debug architecture version" bits
177
* will read as a reserved value, which should cause Linux to not try
178
* to use the debug hardware.
179
*/
180
{ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
181
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
182
- /* MMU TLB control. Note that the wildcarding means we cover not just
183
+ /*
184
+ * MMU TLB control. Note that the wildcarding means we cover not just
185
* the unified TLB ops but also the dside/iside/inner-shareable variants.
186
*/
187
{ .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
188
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
189
190
/* In ARMv8 most bits of CPACR_EL1 are RES0. */
191
if (!arm_feature(env, ARM_FEATURE_V8)) {
192
- /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
193
+ /*
194
+ * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
195
* ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
196
* TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
197
*/
198
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
199
value |= R_CPACR_ASEDIS_MASK;
200
}
201
202
- /* VFPv3 and upwards with NEON implement 32 double precision
203
+ /*
204
+ * VFPv3 and upwards with NEON implement 32 double precision
205
* registers (D0-D31).
206
*/
207
if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
208
@@ -XXX,XX +XXX,XX @@ static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
209
210
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
211
{
212
- /* Call cpacr_write() so that we reset with the correct RAO bits set
213
+ /*
214
+ * Call cpacr_write() so that we reset with the correct RAO bits set
215
* for our CPU features.
216
*/
217
cpacr_write(env, ri, 0);
218
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
219
{ .name = "MVA_prefetch",
220
.cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
221
.access = PL1_W, .type = ARM_CP_NOP },
222
- /* We need to break the TB after ISB to execute self-modifying code
223
+ /*
224
+ * We need to break the TB after ISB to execute self-modifying code
225
* correctly and also to take any pending interrupts immediately.
226
* So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
227
*/
228
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
229
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
230
offsetof(CPUARMState, cp15.ifar_ns) },
231
.resetvalue = 0, },
232
- /* Watchpoint Fault Address Register : should actually only be present
233
+ /*
234
+ * Watchpoint Fault Address Register : should actually only be present
235
* for 1136, 1176, 11MPCore.
236
*/
237
{ .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
238
@@ -XXX,XX +XXX,XX @@ static bool event_supported(uint16_t number)
239
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
240
bool isread)
241
{
242
- /* Performance monitor registers user accessibility is controlled
243
+ /*
244
+ * Performance monitor registers user accessibility is controlled
245
* by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
246
* trapping to EL2 or EL3 for other accesses.
247
*/
248
@@ -XXX,XX +XXX,XX @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
249
(MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
250
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
251
252
-/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
253
+/*
254
+ * Returns true if the counter (pass 31 for PMCCNTR) should count events using
255
* the current EL, security state, and register configuration.
256
*/
257
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
258
@@ -XXX,XX +XXX,XX @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
259
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
260
uint64_t value)
261
{
262
- /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
263
+ /*
264
+ * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
265
* PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
266
* meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
267
* accessed.
268
@@ -XXX,XX +XXX,XX @@ static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
269
env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
270
pmevcntr_op_finish(env, counter);
271
}
272
- /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
273
+ /*
274
+ * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
275
* PMSELR value is equal to or greater than the number of implemented
276
* counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
277
*/
278
@@ -XXX,XX +XXX,XX @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
279
}
280
return ret;
281
} else {
282
- /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
283
- * are CONSTRAINED UNPREDICTABLE. */
284
+ /*
285
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
286
+ * are CONSTRAINED UNPREDICTABLE.
287
+ */
288
return 0;
289
}
290
}
291
@@ -XXX,XX +XXX,XX @@ static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
292
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
293
uint64_t value)
294
{
295
- /* Note that even though the AArch64 view of this register has bits
296
+ /*
297
+ * Note that even though the AArch64 view of this register has bits
298
* [10:0] all RES0 we can only mask the bottom 5, to comply with the
299
* architectural requirements for bits which are RES0 only in some
300
* contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
301
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
302
if (!arm_feature(env, ARM_FEATURE_EL2)) {
303
valid_mask &= ~SCR_HCE;
304
305
- /* On ARMv7, SMD (or SCD as it is called in v7) is only
306
+ /*
307
+ * On ARMv7, SMD (or SCD as it is called in v7) is only
308
* supported if EL2 exists. The bit is UNK/SBZP when
309
* EL2 is unavailable. In QEMU ARMv7, we force it to always zero
310
* when EL2 is unavailable.
311
@@ -XXX,XX +XXX,XX @@ static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
312
{
313
ARMCPU *cpu = env_archcpu(env);
314
315
- /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
316
+ /*
317
+ * Acquire the CSSELR index from the bank corresponding to the CCSIDR
318
* bank
319
*/
320
uint32_t index = A32_BANKED_REG_GET(env, csselr,
321
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
322
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
323
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
324
.access = PL1_W, .type = ARM_CP_NOP },
325
- /* Performance monitors are implementation defined in v7,
326
+ /*
327
+ * Performance monitors are implementation defined in v7,
328
* but with an ARM recommended set of registers, which we
329
* follow.
330
*
331
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
332
.writefn = csselr_write, .resetvalue = 0,
333
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
334
offsetof(CPUARMState, cp15.csselr_ns) } },
335
- /* Auxiliary ID register: this actually has an IMPDEF value but for now
336
+ /*
337
+ * Auxiliary ID register: this actually has an IMPDEF value but for now
338
* just RAZ for all cores:
339
*/
340
{ .name = "AIDR", .state = ARM_CP_STATE_BOTH,
341
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
342
.access = PL1_R, .type = ARM_CP_CONST,
343
.accessfn = access_aa64_tid1,
344
.resetvalue = 0 },
345
- /* Auxiliary fault status registers: these also are IMPDEF, and we
346
+ /*
347
+ * Auxiliary fault status registers: these also are IMPDEF, and we
348
* choose to RAZ/WI for all cores.
349
*/
350
{ .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
351
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
352
.opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
353
.access = PL1_RW, .accessfn = access_tvm_trvm,
354
.type = ARM_CP_CONST, .resetvalue = 0 },
355
- /* MAIR can just read-as-written because we don't implement caches
356
+ /*
357
+ * MAIR can just read-as-written because we don't implement caches
358
* and so don't need to care about memory attributes.
359
*/
360
{ .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
361
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
362
.opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
363
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
364
.resetvalue = 0 },
365
- /* For non-long-descriptor page tables these are PRRR and NMRR;
366
+ /*
367
+ * For non-long-descriptor page tables these are PRRR and NMRR;
368
* regardless they still act as reads-as-written for QEMU.
369
*/
370
- /* MAIR0/1 are defined separately from their 64-bit counterpart which
371
+ /*
372
+ * MAIR0/1 are defined separately from their 64-bit counterpart which
373
* allows them to assign the correct fieldoffset based on the endianness
374
* handled in the field definitions.
375
*/
376
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
377
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
378
bool isread)
379
{
380
- /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
381
+ /*
382
+ * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
383
* Writable only at the highest implemented exception level.
384
*/
385
int el = arm_current_el(env);
386
@@ -XXX,XX +XXX,XX @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
387
const ARMCPRegInfo *ri,
388
bool isread)
389
{
390
- /* The AArch64 register view of the secure physical timer is
391
+ /*
392
+ * The AArch64 register view of the secure physical timer is
393
* always accessible from EL3, and configurably accessible from
394
* Secure EL1.
395
*/
396
@@ -XXX,XX +XXX,XX @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
397
ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
398
399
if (gt->ctl & 1) {
400
- /* Timer enabled: calculate and set current ISTATUS, irq, and
401
+ /*
402
+ * Timer enabled: calculate and set current ISTATUS, irq, and
403
* reset timer to when ISTATUS next has to change
404
*/
405
uint64_t offset = timeridx == GTIMER_VIRT ?
406
@@ -XXX,XX +XXX,XX @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
407
/* Next transition is when we hit cval */
408
nexttick = gt->cval + offset;
409
}
410
- /* Note that the desired next expiry time might be beyond the
411
+ /*
412
+ * Note that the desired next expiry time might be beyond the
413
* signed-64-bit range of a QEMUTimer -- in this case we just
414
* set the timer for as far in the future as possible. When the
415
* timer expires we will reset the timer for any remaining period.
416
@@ -XXX,XX +XXX,XX @@ static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
417
/* Enable toggled */
418
gt_recalc_timer(cpu, timeridx);
419
} else if ((oldval ^ value) & 2) {
420
- /* IMASK toggled: don't need to recalculate,
421
+ /*
422
+ * IMASK toggled: don't need to recalculate,
423
* just set the interrupt line based on ISTATUS
424
*/
425
int irqstate = (oldval & 4) && !(value & 2);
426
@@ -XXX,XX +XXX,XX @@ static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
427
}
428
429
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
430
- /* Note that CNTFRQ is purely reads-as-written for the benefit
431
+ /*
432
+ * Note that CNTFRQ is purely reads-as-written for the benefit
433
* of software; writing it doesn't actually change the timer frequency.
434
* Our reset value matches the fixed frequency we implement the timer at.
435
*/
436
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
437
.readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
438
.writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
439
},
440
- /* Secure timer -- this is actually restricted to only EL3
441
+ /*
442
+ * Secure timer -- this is actually restricted to only EL3
443
* and configurably Secure-EL1 via the accessfn.
444
*/
445
{ .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
446
@@ -XXX,XX +XXX,XX @@ static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
447
448
#else
449
450
-/* In user-mode most of the generic timer registers are inaccessible
451
+/*
452
+ * In user-mode most of the generic timer registers are inaccessible
453
* however modern kernels (4.12+) allow access to cntvct_el0
454
*/
455
456
@@ -XXX,XX +XXX,XX @@ static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
457
{
458
ARMCPU *cpu = env_archcpu(env);
459
460
- /* Currently we have no support for QEMUTimer in linux-user so we
461
+ /*
462
+ * Currently we have no support for QEMUTimer in linux-user so we
463
* can't call gt_get_countervalue(env), instead we directly
464
* call the lower level functions.
465
*/
466
@@ -XXX,XX +XXX,XX @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
467
bool isread)
468
{
469
if (ri->opc2 & 4) {
470
- /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
471
+ /*
472
+ * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
473
* Secure EL1 (which can only happen if EL3 is AArch64).
474
* They are simply UNDEF if executed from NS EL1.
475
* They function normally from EL2 or EL3.
476
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
477
}
478
}
479
} else {
480
- /* fsr is a DFSR/IFSR value for the short descriptor
481
+ /*
482
+ * fsr is a DFSR/IFSR value for the short descriptor
483
* translation table format (with WnR always clear).
484
* Convert it to a 32-bit PAR.
485
*/
486
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
487
};
488
489
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
490
- /* Reset for all these registers is handled in arm_cpu_reset(),
491
+ /*
492
+ * Reset for all these registers is handled in arm_cpu_reset(),
493
* because the PMSAv7 is also used by M-profile CPUs, which do
494
* not register cpregs but still need the state to be reset.
495
*/
496
@@ -XXX,XX +XXX,XX @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
497
}
498
499
if (arm_feature(env, ARM_FEATURE_LPAE)) {
500
- /* With LPAE the TTBCR could result in a change of ASID
501
+ /*
502
+ * With LPAE the TTBCR could result in a change of ASID
503
* via the TTBCR.A1 bit, so do a TLB flush.
504
*/
505
tlb_flush(CPU(cpu));
506
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
507
offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
508
};
509
510
-/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
511
+/*
512
+ * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
513
* qemu tlbs nor adjusting cached masks.
514
*/
515
static const ARMCPRegInfo ttbcr2_reginfo = {
516
@@ -XXX,XX +XXX,XX @@ static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
517
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
518
uint64_t value)
519
{
520
- /* On OMAP there are registers indicating the max/min index of dcache lines
521
+ /*
522
+ * On OMAP there are registers indicating the max/min index of dcache lines
523
* containing a dirty line; cache flush operations have to reset these.
524
*/
525
env->cp15.c15_i_max = 0x000;
526
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
527
.crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
528
.type = ARM_CP_NO_RAW,
529
.readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
530
- /* TODO: Peripheral port remap register:
531
+ /*
532
+ * TODO: Peripheral port remap register:
533
* On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
534
* base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
535
* when MMU is off.
536
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo xscale_cp_reginfo[] = {
537
.cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
538
.fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
539
.resetvalue = 0, },
540
- /* XScale specific cache-lockdown: since we have no cache we NOP these
541
+ /*
542
+ * XScale specific cache-lockdown: since we have no cache we NOP these
543
* and hope the guest does not really rely on cache behaviour.
544
*/
545
{ .name = "XSCALE_LOCK_ICACHE_LINE",
546
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo xscale_cp_reginfo[] = {
547
};
548
549
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
550
- /* RAZ/WI the whole crn=15 space, when we don't have a more specific
551
+ /*
552
+ * RAZ/WI the whole crn=15 space, when we don't have a more specific
553
* implementation of this implementation-defined space.
554
* Ideally this should eventually disappear in favour of actually
555
* implementing the correct behaviour for all cores.
556
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
557
};
558
559
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
560
- /* The cache test-and-clean instructions always return (1 << 30)
561
+ /*
562
+ * The cache test-and-clean instructions always return (1 << 30)
563
* to indicate that there are no dirty cache lines.
564
*/
565
{ .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
566
@@ -XXX,XX +XXX,XX @@ static uint64_t mpidr_read_val(CPUARMState *env)
567
568
if (arm_feature(env, ARM_FEATURE_V7MP)) {
569
mpidr |= (1U << 31);
570
- /* Cores which are uniprocessor (non-coherent)
571
+ /*
572
+ * Cores which are uniprocessor (non-coherent)
573
* but still implement the MP extensions set
574
* bit 30. (For instance, Cortex-R5).
575
*/
576
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
577
return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
578
}
579
580
-/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
581
+/*
582
+ * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
583
* Page D4-1736 (DDI0487A.b)
584
*/
585
586
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
587
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
588
uint64_t value)
589
{
590
- /* Invalidate by VA, EL2
591
+ /*
592
+ * Invalidate by VA, EL2
593
* Currently handles both VAE2 and VALE2, since we don't support
594
* flush-last-level-only.
595
*/
596
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
597
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
598
uint64_t value)
599
{
600
- /* Invalidate by VA, EL3
601
+ /*
602
+ * Invalidate by VA, EL3
603
* Currently handles both VAE3 and VALE3, since we don't support
604
* flush-last-level-only.
605
*/
606
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
607
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
608
uint64_t value)
609
{
610
- /* Invalidate by VA, EL1&0 (AArch64 version).
611
+ /*
612
+ * Invalidate by VA, EL1&0 (AArch64 version).
613
* Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
614
* since we don't support flush-for-specific-ASID-only or
615
* flush-last-level-only.
616
@@ -XXX,XX +XXX,XX @@ static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
617
bool isread)
618
{
619
if (!(env->pstate & PSTATE_SP)) {
620
- /* Access to SP_EL0 is undefined if it's being used as
621
+ /*
622
+ * Access to SP_EL0 is undefined if it's being used as
623
* the stack pointer.
624
*/
625
return CP_ACCESS_TRAP_UNCATEGORIZED;
626
@@ -XXX,XX +XXX,XX @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
627
}
628
629
if (raw_read(env, ri) == value) {
630
- /* Skip the TLB flush if nothing actually changed; Linux likes
631
+ /*
632
+ * Skip the TLB flush if nothing actually changed; Linux likes
633
* to do a lot of pointless SCTLR writes.
634
*/
23
return;
635
return;
24
636
@@ -XXX,XX +XXX,XX @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
25
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
637
}
26
}
638
27
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
639
static const ARMCPRegInfo v8_cp_reginfo[] = {
28
false, rn != 31, size);
640
- /* Minimal set of EL0-visible registers. This will need to be expanded
29
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
641
+ /*
30
- disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
642
+ * Minimal set of EL0-visible registers. This will need to be expanded
31
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
643
* significantly for system emulation of AArch64 CPUs.
32
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
644
*/
33
+ rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
645
{ .name = "NZCV", .state = ARM_CP_STATE_AA64,
34
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
646
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
35
return;
647
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
36
648
.access = PL1_RW,
37
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
649
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
38
int size = extract32(insn, 30, 2);
650
- /* We rely on the access checks not allowing the guest to write to the
39
TCGv_i64 clean_addr, dirty_addr;
651
+ /*
40
bool is_store = false;
652
+ * We rely on the access checks not allowing the guest to write to the
41
- bool is_signed = false;
653
* state field when SPSel indicates that it's being used as the stack
42
bool extend = false;
654
* pointer.
43
bool iss_sf;
655
*/
44
+ MemOp mop;
656
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
45
657
if (arm_feature(env, ARM_FEATURE_EL3)) {
46
if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
658
valid_mask &= ~HCR_HCD;
47
unallocated_encoding(s);
659
} else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
48
return;
660
- /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
49
}
661
+ /*
50
662
+ * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
51
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
663
* However, if we're using the SMC PSCI conduit then QEMU is
52
+ mop = size | MO_ALIGN;
664
* effectively acting like EL3 firmware and so the guest at
53
+
665
* EL2 should retain the ability to prevent EL1 from being
54
switch (opc) {
666
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
55
case 0: /* STLURB */
667
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
56
is_store = true;
668
.writefn = tlbi_aa64_vae2is_write },
57
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
669
#ifndef CONFIG_USER_ONLY
58
unallocated_encoding(s);
670
- /* Unlike the other EL2-related AT operations, these must
59
return;
671
+ /*
60
}
672
+ * Unlike the other EL2-related AT operations, these must
61
- is_signed = true;
673
* UNDEF from EL3 if EL2 is not implemented, which is why we
62
+ mop |= MO_SIGN;
674
* define them here rather than with the rest of the AT ops.
63
break;
675
*/
64
case 3: /* LDAPURS* 32-bit variant */
676
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
65
if (size > 1) {
677
.access = PL2_W, .accessfn = at_s1e2_access,
66
unallocated_encoding(s);
678
.type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
67
return;
679
.writefn = ats_write64 },
68
}
680
- /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
69
- is_signed = true;
681
+ /*
70
+ mop |= MO_SIGN;
682
+ * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
71
extend = true; /* zero-extend 32->64 after signed load */
683
* if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
72
break;
684
* with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
685
* to behave as if SCR.NS was 1.
686
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
687
.writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
688
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
689
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
690
- /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
691
+ /*
692
+ * ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
693
* reset values as IMPDEF. We choose to reset to 3 to comply with
694
* both ARMv7 and ARMv8.
695
*/
696
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
697
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
698
bool isread)
699
{
700
- /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
701
+ /*
702
+ * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
703
* At Secure EL1 it traps to EL3 or EL2.
704
*/
705
if (arm_current_el(env) == 3) {
706
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
707
}
708
}
709
710
-/* We don't know until after realize whether there's a GICv3
711
+/*
712
+ * We don't know until after realize whether there's a GICv3
713
* attached, and that is what registers the gicv3 sysregs.
714
* So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
715
* at runtime.
716
@@ -XXX,XX +XXX,XX @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
717
}
718
#endif
719
720
-/* Shared logic between LORID and the rest of the LOR* registers.
721
+/*
722
+ * Shared logic between LORID and the rest of the LOR* registers.
723
* Secure state exclusion has already been dealt with.
724
*/
725
static CPAccessResult access_lor_ns(CPUARMState *env,
726
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
727
728
define_arm_cp_regs(cpu, cp_reginfo);
729
if (!arm_feature(env, ARM_FEATURE_V8)) {
730
- /* Must go early as it is full of wildcards that may be
731
+ /*
732
+ * Must go early as it is full of wildcards that may be
733
* overridden by later definitions.
734
*/
735
define_arm_cp_regs(cpu, not_v8_cp_reginfo);
736
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
737
.access = PL1_R, .type = ARM_CP_CONST,
738
.accessfn = access_aa32_tid3,
739
.resetvalue = cpu->isar.id_pfr0 },
740
- /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
741
+ /*
742
+ * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
743
* the value of the GIC field until after we define these regs.
744
*/
745
{ .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
746
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
747
748
define_arm_cp_regs(cpu, el3_regs);
749
}
750
- /* The behaviour of NSACR is sufficiently various that we don't
751
+ /*
752
+ * The behaviour of NSACR is sufficiently various that we don't
753
* try to describe it in a single reginfo:
754
* if EL3 is 64 bit, then trap to EL3 from S EL1,
755
* reads as constant 0xc00 from NS EL1 and NS EL2
756
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
757
if (cpu_isar_feature(aa32_jazelle, cpu)) {
758
define_arm_cp_regs(cpu, jazelle_regs);
759
}
760
- /* Slightly awkwardly, the OMAP and StrongARM cores need all of
761
+ /*
762
+ * Slightly awkwardly, the OMAP and StrongARM cores need all of
763
* cp15 crn=0 to be writes-ignored, whereas for other cores they should
764
* be read-only (ie write causes UNDEF exception).
765
*/
766
{
767
ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
768
- /* Pre-v8 MIDR space.
769
+ /*
770
+ * Pre-v8 MIDR space.
771
* Note that the MIDR isn't a simple constant register because
772
* of the TI925 behaviour where writes to another register can
773
* cause the MIDR value to change.
774
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
775
if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
776
arm_feature(env, ARM_FEATURE_STRONGARM)) {
777
size_t i;
778
- /* Register the blanket "writes ignored" value first to cover the
779
+ /*
780
+ * Register the blanket "writes ignored" value first to cover the
781
* whole space. Then update the specific ID registers to allow write
782
* access, so that they ignore writes rather than causing them to
783
* UNDEF.
784
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
785
.raw_writefn = raw_write,
786
};
787
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
788
- /* Normally we would always end the TB on an SCTLR write, but Linux
789
+ /*
790
+ * Normally we would always end the TB on an SCTLR write, but Linux
791
* arch/arm/mach-pxa/sleep.S expects two instructions following
792
* an MMU enable to execute from cache. Imitate this behaviour.
793
*/
794
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
795
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
796
const ARMCPRegInfo *r, void *opaque)
797
{
798
- /* Define implementations of coprocessor registers.
799
+ /*
800
+ * Define implementations of coprocessor registers.
801
* We store these in a hashtable because typically
802
* there are less than 150 registers in a space which
803
* is 16*16*16*8*8 = 262144 in size.
804
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
73
default:
805
default:
74
g_assert_not_reached();
806
g_assert_not_reached();
75
}
807
}
76
808
- /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
77
- iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
809
+ /*
78
+ iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
810
+ * The AArch64 pseudocode CheckSystemAccess() specifies that op1
79
811
* encodes a minimum access level for the register. We roll this
80
if (rn == 31) {
812
* runtime check into our general permission check code, so check
81
gen_check_sp_alignment(s);
813
* here that the reginfo's specified permissions are strict enough
82
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
814
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
83
if (is_store) {
815
assert((r->access & ~mask) == 0);
84
/* Store-Release semantics */
816
}
85
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
817
86
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
818
- /* Check that the register definition has enough info to handle
87
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
819
+ /*
820
+ * Check that the register definition has enough info to handle
821
* reads and writes if they are permitted.
822
*/
823
if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
824
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
825
continue;
826
}
827
if (state == ARM_CP_STATE_AA32) {
828
- /* Under AArch32 CP registers can be common
829
+ /*
830
+ * Under AArch32 CP registers can be common
831
* (same for secure and non-secure world) or banked.
832
*/
833
char *name;
834
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
835
g_assert_not_reached();
836
}
837
} else {
838
- /* AArch64 registers get mapped to non-secure instance
839
- * of AArch32 */
840
+ /*
841
+ * AArch64 registers get mapped to non-secure instance
842
+ * of AArch32
843
+ */
844
add_cpreg_to_hashtable(cpu, r, opaque, state,
845
ARM_CP_SECSTATE_NS,
846
crm, opc1, opc2, r->name);
847
@@ -XXX,XX +XXX,XX @@ void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
848
849
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
850
{
851
- /* Return true if it is not valid for us to switch to
852
+ /*
853
+ * Return true if it is not valid for us to switch to
854
* this CPU mode (ie all the UNPREDICTABLE cases in
855
* the ARM ARM CPSRWriteByInstr pseudocode).
856
*/
857
@@ -XXX,XX +XXX,XX @@ static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
858
case ARM_CPU_MODE_UND:
859
case ARM_CPU_MODE_IRQ:
860
case ARM_CPU_MODE_FIQ:
861
- /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
862
+ /*
863
+ * Note that we don't implement the IMPDEF NSACR.RFR which in v7
864
* allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
865
*/
866
- /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
867
+ /*
868
+ * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
869
* and CPS are treated as illegal mode changes.
870
*/
871
if (write_type == CPSRWriteByInstr &&
872
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
873
env->GE = (val >> 16) & 0xf;
874
}
875
876
- /* In a V7 implementation that includes the security extensions but does
877
+ /*
878
+ * In a V7 implementation that includes the security extensions but does
879
* not include Virtualization Extensions the SCR.FW and SCR.AW bits control
880
* whether non-secure software is allowed to change the CPSR_F and CPSR_A
881
* bits respectively.
882
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
883
changed_daif = (env->daif ^ val) & mask;
884
885
if (changed_daif & CPSR_A) {
886
- /* Check to see if we are allowed to change the masking of async
887
+ /*
888
+ * Check to see if we are allowed to change the masking of async
889
* abort exceptions from a non-secure state.
890
*/
891
if (!(env->cp15.scr_el3 & SCR_AW)) {
892
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
893
}
894
895
if (changed_daif & CPSR_F) {
896
-    /* Check to see if we are allowed to change the masking of FIQ
+    /*
+     * Check to see if we are allowed to change the masking of FIQ
      * exceptions from a non-secure state.
      */
     if (!(env->cp15.scr_el3 & SCR_FW)) {
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
             mask &= ~CPSR_F;
         }

-    /* Check whether non-maskable FIQ (NMFI) support is enabled.
+    /*
+     * Check whether non-maskable FIQ (NMFI) support is enabled.
      * If this bit is set software is not allowed to mask
      * FIQs, but is allowed to set CPSR_F to 0.
      */
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
     if (write_type != CPSRWriteRaw &&
         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
-            /* Note that we can only get here in USR mode if this is a
+            /*
+             * Note that we can only get here in USR mode if this is a
              * gdb stub write; for this case we follow the architectural
              * behaviour for guest writes in USR mode of ignoring an attempt
              * to switch mode. (Those are caught by translate.c for writes
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
              */
             mask &= ~CPSR_M;
         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
-            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
+            /*
+             * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
              * v7, and has defined behaviour in v8:
              *  + leave CPSR.M untouched
              *  + allow changes to the other CPSR fields
@@ -XXX,XX +XXX,XX @@ static void switch_mode(CPUARMState *env, int mode)
     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
 }

-/* Physical Interrupt Target EL Lookup Table
+/*
+ * Physical Interrupt Target EL Lookup Table
  *
  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
  *
@@ -XXX,XX +XXX,XX @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
     if (arm_feature(env, ARM_FEATURE_EL3)) {
         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
     } else {
-        /* Either EL2 is the highest EL (and so the EL2 register width
+        /*
+         * Either EL2 is the highest EL (and so the EL2 register width
          * is given by is64); or there is no EL2 or EL3, in which case
          * the value of 'rw' does not affect the table lookup anyway.
          */
@@ -XXX,XX +XXX,XX @@ void aarch64_sync_64_to_32(CPUARMState *env)
         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
     }

-    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
+    /*
+     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
      * FIQ bank for r8-r14.
      */
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         /* High vectors. When enabled, base address cannot be remapped. */
         addr += 0xffff0000;
     } else {
-        /* ARM v7 architectures provide a vector base address register to remap
+        /*
+         * ARM v7 architectures provide a vector base address register to remap
          * the interrupt vector table.
          * This register is only followed in non-monitor mode, and is banked.
          * Note: only bits 31:5 are valid.
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

     if (cur_el < new_el) {
-        /* Entry vector offset depends on whether the implemented EL
+        /*
+         * Entry vector offset depends on whether the implemented EL
          * immediately lower than the target level is using AArch32 or AArch64
          */
         bool is_aa64;
@@ -XXX,XX +XXX,XX @@ static void handle_semihosting(CPUState *cs)
 }
 #endif

-/* Handle a CPU exception for A and R profile CPUs.
+/*
+ * Handle a CPU exception for A and R profile CPUs.
  * Do any appropriate logging, handle PSCI calls, and then hand off
  * to the AArch64-entry or AArch32-entry function depending on the
  * target exception level's register width.
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
     }
 #endif

-    /* Hooks may change global state so BQL should be held, also the
+    /*
+     * Hooks may change global state so BQL should be held, also the
      * BQL needs to be held for any modification of
      * cs->interrupt_request.
      */
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
     };
 }

-/* Note that signed overflow is undefined in C.  The following routines are
-   careful to use unsigned types where modulo arithmetic is required.
-   Failure to do so _will_ break on newer gcc.  */
+/*
+ * Note that signed overflow is undefined in C.  The following routines are
+ * careful to use unsigned types where modulo arithmetic is required.
+ * Failure to do so _will_ break on newer gcc.
+ */

 /* Signed saturating arithmetic.  */

@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
     return (a & mask) | (b & ~mask);
 }

-/* CRC helpers.
+/*
+ * CRC helpers.
  * The upper bytes of val (above the number specified by 'bytes') must have
  * been zeroed out by the caller.
  */
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
     return crc32c(acc, buf, bytes) ^ 0xffffffff;
 }

-/* Return the exception level to which FP-disabled exceptions should
+/*
+ * Return the exception level to which FP-disabled exceptions should
  * be taken, or 0 if FP is enabled.
  */
 int fp_exception_el(CPUARMState *env, int cur_el)
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
 #ifndef CONFIG_USER_ONLY
     uint64_t hcr_el2;

-    /* CPACR and the CPTR registers don't exist before v6, so FP is
+    /*
+     * CPACR and the CPTR registers don't exist before v6, so FP is
      * always accessible
      */
     if (!arm_feature(env, ARM_FEATURE_V6)) {
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)

     hcr_el2 = arm_hcr_el2_eff(env);

-    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
+    /*
+     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
      * 0, 2 : trap EL0 and EL1/PL1 accesses
      * 1 : trap only EL0 accesses
      * 3 : trap no accesses
--
2.25.1
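
For readers unfamiliar with this checkpatch rule, a minimal standalone sketch
of the two comment forms (the identifiers are invented for illustration, not
taken from the patch):

    /* old style: the text begins on the opening line and the
       continuation lines carry no leading asterisk */
    static int old_style;

    /*
     * new style: the opening delimiter stands on its own line and
     * each continuation line begins with " * ", which is the form
     * scripts/checkpatch.pl expects for multi-line comments.
     */
    static int new_style;

Single-line comments are unaffected by the rule, which is why the patch
leaves them alone.
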
From: Fabiano Rosas <farosas@suse.de>

Fix the following:

ERROR: spaces required around that '|' (ctx:VxV)
ERROR: space required before the open parenthesis '('
ERROR: spaces required around that '+' (ctx:VxB)
ERROR: space prohibited between function name and open parenthesis '('

(the last two still have some occurrences in macros which I left
behind because it might impact readability)

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-id: 20221213190537.511-3-farosas@suse.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
     uint32_t regidx = (uintptr_t)key;
     const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

-    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
         /* The value array need not be initialized at this point */
         cpu->cpreg_array_len++;
@@ -XXX,XX +XXX,XX @@ static void count_cpreg(gpointer key, gpointer opaque)

     ri = g_hash_table_lookup(cpu->cp_regs, key);

-    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
         cpu->cpreg_array_len++;
     }
 }
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
       .resetfn = arm_cp_reset_ignore },
     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
-      .access = PL0_R|PL1_W,
+      .access = PL0_R | PL1_W,
       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
       .resetvalue = 0},
     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
-      .access = PL0_R|PL1_W,
+      .access = PL0_R | PL1_W,
       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
       .resetfn = arm_cp_reset_ignore },
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
       .resetvalue = 0 },
     /* The cache ops themselves: these all NOP for QEMU */
     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
-      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
-      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
-      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
-      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
-      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
-      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
 };

 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             ARMCPRegInfo cbar = {
                 .name = "CBAR",
                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
-                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
+                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                 .fieldoffset = offsetof(CPUARMState,
                                         cp15.c15_config_base_address)
             };
@@ -XXX,XX +XXX,XX @@ static void switch_mode(CPUARMState *env, int mode)
         return;

     if (old_mode == ARM_CPU_MODE_FIQ) {
-        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
-        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
+        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
+        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
     } else if (mode == ARM_CPU_MODE_FIQ) {
-        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
-        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
+        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
+        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
     }

     i = bank_number(old_mode);
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 16); \
         if (sum >= 0) \
             ge |= 3 << (n * 2); \
-    } while(0)
+    } while (0)

 #define SARITH8(a, b, n, op) do { \
         int32_t sum; \
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 8); \
         if (sum >= 0) \
             ge |= 1 << n; \
-    } while(0)
+    } while (0)


 #define ADD16(a, b, n) SARITH16(a, b, n, +)
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 16); \
         if ((sum >> 16) == 1) \
             ge |= 3 << (n * 2); \
-    } while(0)
+    } while (0)

 #define ADD8(a, b, n) do { \
         uint32_t sum; \
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 8); \
         if ((sum >> 8) == 1) \
             ge |= 1 << n; \
-    } while(0)
+    } while (0)

 #define SUB16(a, b, n) do { \
         uint32_t sum; \
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 16); \
         if ((sum >> 16) == 0) \
             ge |= 3 << (n * 2); \
-    } while(0)
+    } while (0)

 #define SUB8(a, b, n) do { \
         uint32_t sum; \
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
         RESULT(sum, n, 8); \
         if ((sum >> 8) == 0) \
             ge |= 1 << n; \
-    } while(0)
+    } while (0)

 #define PFX u
 #define ARITH_GE
--
2.25.1
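
The four errors above reduce to two spacing rules; a small self-contained
sketch (the flag values are invented for illustration only, QEMU defines the
real ARM_CP_* flags elsewhere):

    #include <string.h>

    #define ARM_CP_NO_RAW (1 << 0)  /* illustrative value only */
    #define ARM_CP_ALIAS  (1 << 1)  /* illustrative value only */

    static int spacing_demo(int type, char *dst, const char *src, size_t n)
    {
        /* binary operators take spaces on both sides: A | B, not A|B */
        if (!(type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
            /* and no space goes between a function name and its '(' */
            memcpy(dst, src, n);
            return 1;
        }
        return 0;
    }
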
From: Fabiano Rosas <farosas@suse.de>

Fix this:
ERROR: braces {} are necessary for all arms of this statement

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-id: 20221213190537.511-4-farosas@suse.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 67 ++++++++++++++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 25 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
         env->CF = (val >> 29) & 1;
         env->VF = (val << 3) & 0x80000000;
     }
-    if (mask & CPSR_Q)
+    if (mask & CPSR_Q) {
         env->QF = ((val & CPSR_Q) != 0);
-    if (mask & CPSR_T)
+    }
+    if (mask & CPSR_T) {
         env->thumb = ((val & CPSR_T) != 0);
+    }
     if (mask & CPSR_IT_0_1) {
         env->condexec_bits &= ~3;
         env->condexec_bits |= (val >> 25) & 3;
@@ -XXX,XX +XXX,XX @@ static void switch_mode(CPUARMState *env, int mode)
     int i;

     old_mode = env->uncached_cpsr & CPSR_M;
-    if (mode == old_mode)
+    if (mode == old_mode) {
         return;
+    }

     if (old_mode == ARM_CPU_MODE_FIQ) {
         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         new_mode = ARM_CPU_MODE_UND;
         addr = 0x04;
         mask = CPSR_I;
-        if (env->thumb)
+        if (env->thumb) {
             offset = 2;
-        else
+        } else {
             offset = 4;
+        }
         break;
     case EXCP_SWI:
         new_mode = ARM_CPU_MODE_SVC;
@@ -XXX,XX +XXX,XX @@ static inline uint16_t add16_sat(uint16_t a, uint16_t b)

     res = a + b;
     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
-        if (a & 0x8000)
+        if (a & 0x8000) {
             res = 0x8000;
-        else
+        } else {
             res = 0x7fff;
+        }
     }
     return res;
 }
@@ -XXX,XX +XXX,XX @@ static inline uint8_t add8_sat(uint8_t a, uint8_t b)

     res = a + b;
     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
-        if (a & 0x80)
+        if (a & 0x80) {
             res = 0x80;
-        else
+        } else {
             res = 0x7f;
+        }
     }
     return res;
 }
@@ -XXX,XX +XXX,XX @@ static inline uint16_t sub16_sat(uint16_t a, uint16_t b)

     res = a - b;
     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
-        if (a & 0x8000)
+        if (a & 0x8000) {
             res = 0x8000;
-        else
+        } else {
             res = 0x7fff;
+        }
     }
     return res;
 }
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_sat(uint8_t a, uint8_t b)

     res = a - b;
     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
-        if (a & 0x80)
+        if (a & 0x80) {
             res = 0x80;
-        else
+        } else {
             res = 0x7f;
+        }
     }
     return res;
 }
@@ -XXX,XX +XXX,XX @@ static inline uint16_t add16_usat(uint16_t a, uint16_t b)
 {
     uint16_t res;
     res = a + b;
-    if (res < a)
+    if (res < a) {
         res = 0xffff;
+    }
     return res;
 }

 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
 {
-    if (a > b)
+    if (a > b) {
         return a - b;
-    else
+    } else {
         return 0;
+    }
 }

 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
 {
     uint8_t res;
     res = a + b;
-    if (res < a)
+    if (res < a) {
         res = 0xff;
+    }
     return res;
 }

 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
 {
-    if (a > b)
+    if (a > b) {
         return a - b;
-    else
+    } else {
         return 0;
+    }
 }

 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
@@ -XXX,XX +XXX,XX @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)

 static inline uint8_t do_usad(uint8_t a, uint8_t b)
 {
-    if (a > b)
+    if (a > b) {
         return a - b;
-    else
+    } else {
         return b - a;
+    }
 }

 /* Unsigned sum of absolute byte differences.  */
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
     uint32_t mask;

     mask = 0;
-    if (flags & 1)
+    if (flags & 1) {
         mask |= 0xff;
-    if (flags & 2)
+    }
+    if (flags & 2) {
         mask |= 0xff00;
-    if (flags & 4)
+    }
+    if (flags & 4) {
         mask |= 0xff0000;
-    if (flags & 8)
+    }
+    if (flags & 8) {
         mask |= 0xff000000;
+    }
     return (a & mask) | (b & ~mask);
 }
--
2.25.1
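
A standalone sketch of the brace rule, reusing the same add8_usat logic the
patch touches (renamed here so the example compiles on its own):

    #include <stdint.h>

    static inline uint8_t add8_usat_demo(uint8_t a, uint8_t b)
    {
        uint8_t res = a + b;

        /* previously written as the unbraced "if (res < a) res = 0xff;" */
        if (res < a) {
            res = 0xff;
        }
        return res;
    }
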
From: Fabiano Rosas <farosas@suse.de>

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-id: 20221213190537.511-5-farosas@suse.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/m_helper.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "target/arm/idau.h"
-#include "trace.h"
 #include "cpu.h"
 #include "internals.h"
-#include "exec/gdbstub.h"
 #include "exec/helper-proto.h"
-#include "qemu/host-utils.h"
 #include "qemu/main-loop.h"
 #include "qemu/bitops.h"
-#include "qemu/crc32c.h"
-#include "qemu/qemu-print.h"
 #include "qemu/log.h"
 #include "exec/exec-all.h"
-#include <zlib.h> /* For crc32 */
-#include "semihosting/semihost.h"
-#include "sysemu/cpus.h"
-#include "sysemu/kvm.h"
-#include "qemu/range.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/error.h"
-#include "qemu/guest-random.h"
 #ifdef CONFIG_TCG
-#include "arm_ldst.h"
 #include "exec/cpu_ldst.h"
 #include "semihosting/common-semi.h"
 #endif
--
2.25.1
From: Fabiano Rosas <farosas@suse.de>

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-id: 20221213190537.511-6-farosas@suse.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
-#include "qemu/units.h"
 #include "qemu/log.h"
 #include "trace.h"
 #include "cpu.h"
 #include "internals.h"
 #include "exec/helper-proto.h"
-#include "qemu/host-utils.h"
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"
 #include "qemu/bitops.h"
@@ -XXX,XX +XXX,XX @@
 #include "exec/exec-all.h"
 #include <zlib.h> /* For crc32 */
 #include "hw/irq.h"
-#include "semihosting/semihost.h"
-#include "sysemu/cpus.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/kvm.h"
-#include "qemu/range.h"
 #include "qapi/qapi-commands-machine-target.h"
 #include "qapi/error.h"
 #include "qemu/guest-random.h"
 #ifdef CONFIG_TCG
-#include "arm_ldst.h"
-#include "exec/cpu_ldst.h"
 #include "semihosting/common-semi.h"
 #endif
 #include "cpregs.h"
--
2.25.1
From: Claudio Fontana <cfontana@suse.de>

Remove some unused headers.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Message-id: 20221213190537.511-7-farosas@suse.de
[added back some includes that are still needed at this point]
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 1 -
 target/arm/cpu64.c | 6 ------
 2 files changed, 7 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@
 #include "target/arm/idau.h"
 #include "qemu/module.h"
 #include "qapi/error.h"
-#include "qapi/visitor.h"
 #include "cpu.h"
 #ifdef CONFIG_TCG
 #include "hw/core/tcg-cpu-ops.h"
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "cpu.h"
-#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
-#endif /* CONFIG_TCG */
 #include "qemu/module.h"
-#if !defined(CONFIG_USER_ONLY)
-#include "hw/loader.h"
-#endif
 #include "sysemu/kvm.h"
 #include "sysemu/hvf.h"
 #include "kvm_arm.h"
--
2.25.1
From: Philippe Mathieu-Daudé <philmd@linaro.org>

The pointed MouseTransformInfo structure is accessed read-only.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221220142520.24094-2-philmd@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/input/tsc2xxx.h | 4 ++--
 hw/input/tsc2005.c | 2 +-
 hw/input/tsc210x.c | 3 +--
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/include/hw/input/tsc2xxx.h b/include/hw/input/tsc2xxx.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/input/tsc2xxx.h
+++ b/include/hw/input/tsc2xxx.h
@@ -XXX,XX +XXX,XX @@ uWireSlave *tsc2102_init(qemu_irq pint);
 uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav);
 I2SCodec *tsc210x_codec(uWireSlave *chip);
 uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len);
-void tsc210x_set_transform(uWireSlave *chip, MouseTransformInfo *info);
+void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info);
 void tsc210x_key_event(uWireSlave *chip, int key, int down);

 /* tsc2005.c */
 void *tsc2005_init(qemu_irq pintdav);
 uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len);
-void tsc2005_set_transform(void *opaque, MouseTransformInfo *info);
+void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info);

 #endif
diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/input/tsc2005.c
+++ b/hw/input/tsc2005.c
@@ -XXX,XX +XXX,XX @@ void *tsc2005_init(qemu_irq pintdav)
  * from the touchscreen. Assuming 12-bit precision was used during
  * tslib calibration.
  */
-void tsc2005_set_transform(void *opaque, MouseTransformInfo *info)
+void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info)
 {
     TSC2005State *s = (TSC2005State *) opaque;

diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -XXX,XX +XXX,XX @@ I2SCodec *tsc210x_codec(uWireSlave *chip)
  * from the touchscreen. Assuming 12-bit precision was used during
  * tslib calibration.
  */
-void tsc210x_set_transform(uWireSlave *chip,
-                           MouseTransformInfo *info)
+void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info)
 {
     TSC210xState *s = (TSC210xState *) chip->opaque;
 #if 0
--
2.25.1
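
A minimal sketch of this const-ification pattern; MouseTransformInfo is
re-declared here (with the layout it has in QEMU's ui/console.h) so the
example stands alone:

    #include <stdio.h>

    typedef struct MouseTransformInfo {
        /* Tablet resolution */
        int x;
        int y;
        /* Calibration values as used/generated by tslib */
        int a[7];
    } MouseTransformInfo;

    /* A const-qualified pointer documents, and lets the compiler
     * enforce, that the callee only reads through it. */
    static void print_transform(const MouseTransformInfo *info)
    {
        printf("%dx%d\n", info->x, info->y);
    }

    int main(void)
    {
        static const MouseTransformInfo cal = { .x = 800, .y = 480 };
        print_transform(&cal); /* const objects can now be passed directly */
        return 0;
    }

This is also what allows the next patch in the series to mark the nseries
calibration tables themselves const.
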
From: Philippe Mathieu-Daudé <philmd@linaro.org>

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221220142520.24094-3-philmd@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/nseries.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -XXX,XX +XXX,XX @@ static void n8x0_i2c_setup(struct n800_s *s)
 }

 /* Touchscreen and keypad controller */
-static MouseTransformInfo n800_pointercal = {
+static const MouseTransformInfo n800_pointercal = {
     .x = 800,
     .y = 480,
     .a = { 14560, -68, -3455208, -39, -9621, 35152972, 65536 },
 };

-static MouseTransformInfo n810_pointercal = {
+static const MouseTransformInfo n810_pointercal = {
     .x = 800,
     .y = 480,
     .a = { 15041, 148, -4731056, 171, -10238, 35933380, 65536 },
@@ -XXX,XX +XXX,XX @@ static void n810_key_event(void *opaque, int keycode)

 #define M	0

-static int n810_keys[0x80] = {
+static const int n810_keys[0x80] = {
     [0x01] = 16,	/* Q */
     [0x02] = 37,	/* K */
     [0x03] = 24,	/* O */
@@ -XXX,XX +XXX,XX @@ static void n8x0_usb_setup(struct n800_s *s)
 /* Setup done before the main bootloader starts by some early setup code
  * - used when we want to run the main bootloader in emulation. This
  * isn't documented. */
-static uint32_t n800_pinout[104] = {
+static const uint32_t n800_pinout[104] = {
     0x080f00d8, 0x00d40808, 0x03080808, 0x080800d0,
     0x00dc0808, 0x0b0f0f00, 0x080800b4, 0x00c00808,
     0x08080808, 0x180800c4, 0x00b80000, 0x08080808,
@@ -XXX,XX +XXX,XX @@ static void n8x0_boot_init(void *opaque)
 #define OMAP_TAG_CBUS		0x4e03
 #define OMAP_TAG_EM_ASIC_BB5	0x4e04

-static struct omap_gpiosw_info_s {
+static const struct omap_gpiosw_info_s {
     const char *name;
     int line;
     int type;
@@ -XXX,XX +XXX,XX @@ static struct omap_gpiosw_info_s {
     { NULL }
 };

-static struct omap_partition_info_s {
+static const struct omap_partition_info_s {
     uint32_t offset;
     uint32_t size;
     int mask;
@@ -XXX,XX +XXX,XX @@ static struct omap_partition_info_s {
     { 0, 0, 0, NULL }
 };

-static uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR };
+static const uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR };

 static int n8x0_atag_setup(void *p, int model)
 {
     uint8_t *b;
     uint16_t *w;
     uint32_t *l;
-    struct omap_gpiosw_info_s *gpiosw;
-    struct omap_partition_info_s *partition;
+    const struct omap_gpiosw_info_s *gpiosw;
+    const struct omap_partition_info_s *partition;
     const char *tag;

     w = p;
--
2.25.1
From: Cornelia Huck <cohuck@redhat.com>

Add 6.1 machine types for arm/i440fx/q35/s390x/spapr.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Acked-by: Greg Kurz <groug@kaod.org>
Message-id: 20210331111900.118274-1-cohuck@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/boards.h | 3 +++
 include/hw/i386/pc.h | 3 +++
 hw/arm/virt.c | 7 ++++++-
 hw/core/machine.c | 3 +++
 hw/i386/pc.c | 3 +++
 hw/i386/pc_piix.c | 14 +++++++++++++-
 hw/i386/pc_q35.c | 13 ++++++++++++-
 hw/ppc/spapr.c | 17 ++++++++++++++---
 hw/s390x/s390-virtio-ccw.c | 14 +++++++++++++-
 9 files changed, 70 insertions(+), 7 deletions(-)

diff --git a/include/hw/boards.h b/include/hw/boards.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -XXX,XX +XXX,XX @@ struct MachineState {
 } \
 type_init(machine_initfn##_register_types)

+extern GlobalProperty hw_compat_6_0[];
+extern const size_t hw_compat_6_0_len;
+
 extern GlobalProperty hw_compat_5_2[];
 extern const size_t hw_compat_5_2_len;

diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -XXX,XX +XXX,XX @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
 void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
 const CPUArchIdList *apic_ids, GArray *entry);

+extern GlobalProperty pc_compat_6_0[];
+extern const size_t pc_compat_6_0_len;
+
 extern GlobalProperty pc_compat_5_2[];
 extern const size_t pc_compat_5_2_len;

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void)
 }
 type_init(machvirt_machine_init);

+static void virt_machine_6_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(6, 1)
+
 static void virt_machine_6_0_options(MachineClass *mc)
 {
 }
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 0)
+DEFINE_VIRT_MACHINE(6, 0)

 static void virt_machine_5_2_options(MachineClass *mc)
 {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"

+GlobalProperty hw_compat_6_0[] = {};
+const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
+
 GlobalProperty hw_compat_5_2[] = {
 { "ICH9-LPC", "smm-compat", "on"},
 { "PIIX4_PM", "smm-compat", "on"},
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -XXX,XX +XXX,XX @@
 #include "trace.h"
 #include CONFIG_DEVICES

+GlobalProperty pc_compat_6_0[] = {};
+const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0);
+
 GlobalProperty pc_compat_5_2[] = {
 { "ICH9-LPC", "x-smi-cpu-hotunplug", "off" },
 };
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_machine_options(MachineClass *m)
 machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
 }

-static void pc_i440fx_6_0_machine_options(MachineClass *m)
+static void pc_i440fx_6_1_machine_options(MachineClass *m)
 {
 PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
 pc_i440fx_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_6_0_machine_options(MachineClass *m)
 pcmc->default_cpu_version = 1;
 }

+DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL,
+ pc_i440fx_6_1_machine_options);
+
+static void pc_i440fx_6_0_machine_options(MachineClass *m)
+{
+ pc_i440fx_6_1_machine_options(m);
+ m->alias = NULL;
+ m->is_default = false;
+ compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+ compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL,
 pc_i440fx_6_0_machine_options);

diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -XXX,XX +XXX,XX @@ static void pc_q35_machine_options(MachineClass *m)
 m->max_cpus = 288;
 }

-static void pc_q35_6_0_machine_options(MachineClass *m)
+static void pc_q35_6_1_machine_options(MachineClass *m)
 {
 PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
 pc_q35_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_q35_6_0_machine_options(MachineClass *m)
 pcmc->default_cpu_version = 1;
 }

+DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL,
+ pc_q35_6_1_machine_options);
+
+static void pc_q35_6_0_machine_options(MachineClass *m)
+{
+ pc_q35_6_1_machine_options(m);
+ m->alias = NULL;
+ compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+ compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL,
 pc_q35_6_0_machine_options);

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -XXX,XX +XXX,XX @@ static void spapr_machine_latest_class_options(MachineClass *mc)
 type_init(spapr_machine_register_##suffix)

 /*
- * pseries-6.0
+ * pseries-6.1
 */
-static void spapr_machine_6_0_class_options(MachineClass *mc)
+static void spapr_machine_6_1_class_options(MachineClass *mc)
 {
 /* Defaults for the latest behaviour inherited from the base class */
 }

-DEFINE_SPAPR_MACHINE(6_0, "6.0", true);
+DEFINE_SPAPR_MACHINE(6_1, "6.1", true);
+
+/*
+ * pseries-6.0
+ */
+static void spapr_machine_6_0_class_options(MachineClass *mc)
+{
+ spapr_machine_6_1_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+}
+
+DEFINE_SPAPR_MACHINE(6_0, "6.0", false);

 /*
 * pseries-5.2
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -XXX,XX +XXX,XX @@ bool css_migration_enabled(void)
 } \
 type_init(ccw_machine_register_##suffix)

+static void ccw_machine_6_1_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_6_1_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(6_1, "6.1", true);
+
 static void ccw_machine_6_0_instance_options(MachineState *machine)
 {
+ ccw_machine_6_1_instance_options(machine);
 }

 static void ccw_machine_6_0_class_options(MachineClass *mc)
 {
+ ccw_machine_6_1_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
 }
-DEFINE_CCW_MACHINE(6_0, "6.0", true);
+DEFINE_CCW_MACHINE(6_0, "6.0", false);

 static void ccw_machine_5_2_instance_options(MachineState *machine)
 {
--
2.20.1

From: Philippe Mathieu-Daudé <philmd@linaro.org>

Silence the following warning when compiling with -Wextra:

 ../hw/arm/nseries.c:1081:12: warning: missing field 'line' initializer [-Wmissing-field-initializers]
 { NULL }
 ^

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20221220142520.24094-4-philmd@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/nseries.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -XXX,XX +XXX,XX @@ static const struct omap_gpiosw_info_s {
 "headphone", N8X0_HEADPHONE_GPIO,
 OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED,
 },
- { NULL }
+ { /* end of list */ }
 }, n810_gpiosw_info[] = {
 {
 "gps_reset", N810_GPS_RESET_GPIO,
@@ -XXX,XX +XXX,XX @@ static const struct omap_gpiosw_info_s {
 "slide", N810_SLIDE_GPIO,
 OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED,
 },
- { NULL }
+ { /* end of list */ }
 };

 static const struct omap_partition_info_s {
@@ -XXX,XX +XXX,XX @@ static const struct omap_partition_info_s {
 { 0x00080000, 0x00200000, 0x0, "kernel" },
 { 0x00280000, 0x00200000, 0x3, "initfs" },
 { 0x00480000, 0x0fb80000, 0x3, "rootfs" },
-
- { 0, 0, 0, NULL }
+ { /* end of list */ }
 }, n810_part_info[] = {
 { 0x00000000, 0x00020000, 0x3, "bootloader" },
 { 0x00020000, 0x00060000, 0x0, "config" },
 { 0x00080000, 0x00220000, 0x0, "kernel" },
 { 0x002a0000, 0x00400000, 0x0, "initfs" },
 { 0x006a0000, 0x0f960000, 0x0, "rootfs" },
-
- { 0, 0, 0, NULL }
+ { /* end of list */ }
 };

 static const uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR };
--
2.25.1
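For background on how the (currently empty) compat tables above get used: later in the cycle, a 6.1 behaviour change that must stay hidden from 6.0 machine types gets an entry added, roughly like this hypothetical example (the device and property names are illustrative only, not from this series):

    GlobalProperty hw_compat_6_0[] = {
        /* force the pre-6.1 default on older machine types */
        { "virtio-net-pci", "x-new-feature", "off" },
    };
    const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);

Each entry forces a property value on every device of the named type whenever an older machine type is selected, so guests started with a 6.0 machine keep the 6.0 behaviour.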
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/bugs/1921948
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/mte-5.c | 44 +++++++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target | 2 +-
 2 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

diff --git a/tests/tcg/aarch64/mte-5.c b/tests/tcg/aarch64/mte-5.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/mte-5.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Memory tagging, faulting unaligned access.
+ *
+ * Copyright (c) 2021 Linaro Ltd
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "mte.h"
+
+void pass(int sig, siginfo_t *info, void *uc)
+{
+ assert(info->si_code == SEGV_MTESERR);
+ exit(0);
+}
+
+int main(int ac, char **av)
+{
+ struct sigaction sa;
+ void *p0, *p1, *p2;
+ long excl = 1;
+
+ enable_mte(PR_MTE_TCF_SYNC);
+ p0 = alloc_mte_mem(sizeof(*p0));
+
+ /* Create two differently tagged pointers. */
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
+ asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
+ assert(excl != 1);
+ asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
+ assert(p1 != p2);
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = pass;
+ sa.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &sa, NULL);
+
+ /* Store two different tags in sequential granules. */
+ asm("stg %0, [%0]" : : "r"(p1));
+ asm("stg %0, [%0]" : : "r"(p2 + 16));
+
+ /* Perform an unaligned load crossing the granules. */
+ asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12));
+ abort();
+}
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += bti-2

 # MTE Tests
 ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
-AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-6
+AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6
 mte-%: CFLAGS += -march=armv8.5-a+memtag
 endif

--
2.20.1

From: Zhuojia Shen <chaosdefinition@hotmail.com>

In CPUID registers exposed to userspace, some registers were missing
and some fields were not exposed. This patch aligns exposed ID
registers and their fields with what the upstream kernel currently
exposes.

Specifically, the following new ID registers/fields are exposed to
userspace:

ID_AA64PFR1_EL1.BT: bits 3-0
ID_AA64PFR1_EL1.MTE: bits 11-8
ID_AA64PFR1_EL1.SME: bits 27-24

ID_AA64ZFR0_EL1.SVEver: bits 3-0
ID_AA64ZFR0_EL1.AES: bits 7-4
ID_AA64ZFR0_EL1.BitPerm: bits 19-16
ID_AA64ZFR0_EL1.BF16: bits 23-20
ID_AA64ZFR0_EL1.SHA3: bits 35-32
ID_AA64ZFR0_EL1.SM4: bits 43-40
ID_AA64ZFR0_EL1.I8MM: bits 47-44
ID_AA64ZFR0_EL1.F32MM: bits 55-52
ID_AA64ZFR0_EL1.F64MM: bits 59-56

ID_AA64SMFR0_EL1.F32F32: bit 32
ID_AA64SMFR0_EL1.B16F32: bit 34
ID_AA64SMFR0_EL1.F16F32: bit 35
ID_AA64SMFR0_EL1.I8I32: bits 39-36
ID_AA64SMFR0_EL1.F64F64: bit 48
ID_AA64SMFR0_EL1.I16I64: bits 55-52
ID_AA64SMFR0_EL1.FA64: bit 63

ID_AA64MMFR0_EL1.ECV: bits 63-60

ID_AA64MMFR1_EL1.AFP: bits 47-44

ID_AA64MMFR2_EL1.AT: bits 35-32

ID_AA64ISAR0_EL1.RNDR: bits 63-60

ID_AA64ISAR1_EL1.FRINTTS: bits 35-32
ID_AA64ISAR1_EL1.BF16: bits 47-44
ID_AA64ISAR1_EL1.DGH: bits 51-48
ID_AA64ISAR1_EL1.I8MM: bits 55-52

ID_AA64ISAR2_EL1.WFxT: bits 3-0
ID_AA64ISAR2_EL1.RPRES: bits 7-4
ID_AA64ISAR2_EL1.GPA3: bits 11-8
ID_AA64ISAR2_EL1.APA3: bits 15-12

The code is also refactored to use symbolic names for ID register fields
for better readability and maintainability.

The test case in tests/tcg/aarch64/sysregs.c is also updated to match
the intended behavior.

Signed-off-by: Zhuojia Shen <chaosdefinition@hotmail.com>
Message-id: DS7PR12MB6309FB585E10772928F14271ACE79@DS7PR12MB6309.namprd12.prod.outlook.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: use Sn_n_Cn_Cn_n syntax to work with older assemblers
 that don't recognize id_aa64isar2_el1 and id_aa64mmfr2_el1]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 96 +++++++++++++++++++++++++------
 tests/tcg/aarch64/sysregs.c | 24 ++++++--
 tests/tcg/aarch64/Makefile.target | 7 ++-
 3 files changed, 103 insertions(+), 24 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
 #ifdef CONFIG_USER_ONLY
 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
 { .name = "ID_AA64PFR0_EL1",
- .exported_bits = 0x000f000f00ff0000,
- .fixed_bits = 0x0000000000000011 },
+ .exported_bits = R_ID_AA64PFR0_FP_MASK |
+ R_ID_AA64PFR0_ADVSIMD_MASK |
+ R_ID_AA64PFR0_SVE_MASK |
+ R_ID_AA64PFR0_DIT_MASK,
+ .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
+ (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
 { .name = "ID_AA64PFR1_EL1",
- .exported_bits = 0x00000000000000f0 },
+ .exported_bits = R_ID_AA64PFR1_BT_MASK |
+ R_ID_AA64PFR1_SSBS_MASK |
+ R_ID_AA64PFR1_MTE_MASK |
+ R_ID_AA64PFR1_SME_MASK },
 { .name = "ID_AA64PFR*_EL1_RESERVED",
- .is_glob = true },
- { .name = "ID_AA64ZFR0_EL1" },
+ .is_glob = true },
+ { .name = "ID_AA64ZFR0_EL1",
+ .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
+ R_ID_AA64ZFR0_AES_MASK |
+ R_ID_AA64ZFR0_BITPERM_MASK |
+ R_ID_AA64ZFR0_BFLOAT16_MASK |
+ R_ID_AA64ZFR0_SHA3_MASK |
+ R_ID_AA64ZFR0_SM4_MASK |
+ R_ID_AA64ZFR0_I8MM_MASK |
+ R_ID_AA64ZFR0_F32MM_MASK |
+ R_ID_AA64ZFR0_F64MM_MASK },
+ { .name = "ID_AA64SMFR0_EL1",
+ .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
+ R_ID_AA64SMFR0_B16F32_MASK |
+ R_ID_AA64SMFR0_F16F32_MASK |
+ R_ID_AA64SMFR0_I8I32_MASK |
+ R_ID_AA64SMFR0_F64F64_MASK |
+ R_ID_AA64SMFR0_I16I64_MASK |
+ R_ID_AA64SMFR0_FA64_MASK },
 { .name = "ID_AA64MMFR0_EL1",
- .fixed_bits = 0x00000000ff000000 },
- { .name = "ID_AA64MMFR1_EL1" },
+ .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
+ .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
+ (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
+ { .name = "ID_AA64MMFR1_EL1",
+ .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
+ { .name = "ID_AA64MMFR2_EL1",
+ .exported_bits = R_ID_AA64MMFR2_AT_MASK },
 { .name = "ID_AA64MMFR*_EL1_RESERVED",
- .is_glob = true },
+ .is_glob = true },
 { .name = "ID_AA64DFR0_EL1",
- .fixed_bits = 0x0000000000000006 },
- { .name = "ID_AA64DFR1_EL1" },
+ .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
+ { .name = "ID_AA64DFR1_EL1" },
 { .name = "ID_AA64DFR*_EL1_RESERVED",
- .is_glob = true },
+ .is_glob = true },
 { .name = "ID_AA64AFR*",
- .is_glob = true },
+ .is_glob = true },
 { .name = "ID_AA64ISAR0_EL1",
- .exported_bits = 0x00fffffff0fffff0 },
+ .exported_bits = R_ID_AA64ISAR0_AES_MASK |
+ R_ID_AA64ISAR0_SHA1_MASK |
+ R_ID_AA64ISAR0_SHA2_MASK |
+ R_ID_AA64ISAR0_CRC32_MASK |
+ R_ID_AA64ISAR0_ATOMIC_MASK |
+ R_ID_AA64ISAR0_RDM_MASK |
+ R_ID_AA64ISAR0_SHA3_MASK |
+ R_ID_AA64ISAR0_SM3_MASK |
+ R_ID_AA64ISAR0_SM4_MASK |
+ R_ID_AA64ISAR0_DP_MASK |
+ R_ID_AA64ISAR0_FHM_MASK |
+ R_ID_AA64ISAR0_TS_MASK |
+ R_ID_AA64ISAR0_RNDR_MASK },
 { .name = "ID_AA64ISAR1_EL1",
- .exported_bits = 0x000000f0ffffffff },
+ .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
+ R_ID_AA64ISAR1_APA_MASK |
+ R_ID_AA64ISAR1_API_MASK |
+ R_ID_AA64ISAR1_JSCVT_MASK |
+ R_ID_AA64ISAR1_FCMA_MASK |
+ R_ID_AA64ISAR1_LRCPC_MASK |
+ R_ID_AA64ISAR1_GPA_MASK |
+ R_ID_AA64ISAR1_GPI_MASK |
+ R_ID_AA64ISAR1_FRINTTS_MASK |
+ R_ID_AA64ISAR1_SB_MASK |
+ R_ID_AA64ISAR1_BF16_MASK |
+ R_ID_AA64ISAR1_DGH_MASK |
+ R_ID_AA64ISAR1_I8MM_MASK },
+ { .name = "ID_AA64ISAR2_EL1",
+ .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
+ R_ID_AA64ISAR2_RPRES_MASK |
+ R_ID_AA64ISAR2_GPA3_MASK |
+ R_ID_AA64ISAR2_APA3_MASK },
 { .name = "ID_AA64ISAR*_EL1_RESERVED",
- .is_glob = true },
+ .is_glob = true },
 };
 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
 #endif
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
 #ifdef CONFIG_USER_ONLY
 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
 { .name = "MIDR_EL1",
- .exported_bits = 0x00000000ffffffff },
- { .name = "REVIDR_EL1" },
+ .exported_bits = R_MIDR_EL1_REVISION_MASK |
+ R_MIDR_EL1_PARTNUM_MASK |
+ R_MIDR_EL1_ARCHITECTURE_MASK |
+ R_MIDR_EL1_VARIANT_MASK |
+ R_MIDR_EL1_IMPLEMENTER_MASK },
+ { .name = "REVIDR_EL1" },
 };
 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
 #endif
diff --git a/tests/tcg/aarch64/sysregs.c b/tests/tcg/aarch64/sysregs.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/sysregs.c
+++ b/tests/tcg/aarch64/sysregs.c
@@ -XXX,XX +XXX,XX @@
 #define HWCAP_CPUID (1 << 11)
 #endif

+/*
+ * Older assemblers don't recognize newer system register names,
+ * but we can still access them by the Sn_n_Cn_Cn_n syntax.
+ */
+#define SYS_ID_AA64ISAR2_EL1 S3_0_C0_C6_2
+#define SYS_ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+
 int failed_bit_count;

 /* Read and print system register `id' value */
@@ -XXX,XX +XXX,XX @@ int main(void)
 * minimum valid fields - for the purposes of this check allowed
 * to have non-zero values.
 */
- get_cpu_reg_check_mask(id_aa64isar0_el1, _m(00ff,ffff,f0ff,fff0));
- get_cpu_reg_check_mask(id_aa64isar1_el1, _m(0000,00f0,ffff,ffff));
+ get_cpu_reg_check_mask(id_aa64isar0_el1, _m(f0ff,ffff,f0ff,fff0));
+ get_cpu_reg_check_mask(id_aa64isar1_el1, _m(00ff,f0ff,ffff,ffff));
+ get_cpu_reg_check_mask(SYS_ID_AA64ISAR2_EL1, _m(0000,0000,0000,ffff));
 /* TGran4 & TGran64 as pegged to -1 */
- get_cpu_reg_check_mask(id_aa64mmfr0_el1, _m(0000,0000,ff00,0000));
- get_cpu_reg_check_zero(id_aa64mmfr1_el1);
+ get_cpu_reg_check_mask(id_aa64mmfr0_el1, _m(f000,0000,ff00,0000));
+ get_cpu_reg_check_mask(id_aa64mmfr1_el1, _m(0000,f000,0000,0000));
+ get_cpu_reg_check_mask(SYS_ID_AA64MMFR2_EL1, _m(0000,000f,0000,0000));
 /* EL1/EL0 reported as AA64 only */
 get_cpu_reg_check_mask(id_aa64pfr0_el1, _m(000f,000f,00ff,0011));
- get_cpu_reg_check_mask(id_aa64pfr1_el1, _m(0000,0000,0000,00f0));
+ get_cpu_reg_check_mask(id_aa64pfr1_el1, _m(0000,0000,0f00,0fff));
 /* all hidden, DebugVer fixed to 0x6 (ARMv8 debug architecture) */
 get_cpu_reg_check_mask(id_aa64dfr0_el1, _m(0000,0000,0000,0006));
 get_cpu_reg_check_zero(id_aa64dfr1_el1);
- get_cpu_reg_check_zero(id_aa64zfr0_el1);
+ get_cpu_reg_check_mask(id_aa64zfr0_el1, _m(0ff0,ff0f,00ff,00ff));
+#ifdef HAS_ARMV9_SME
+ get_cpu_reg_check_mask(id_aa64smfr0_el1, _m(80f1,00fd,0000,0000));
+#endif

 get_cpu_reg_check_zero(id_aa64afr0_el1);
 get_cpu_reg_check_zero(id_aa64afr1_el1);
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ config-cc.mak: Makefile
 $(call cc-option,-march=armv8.1-a+sve2, CROSS_CC_HAS_SVE2); \
 $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \
 $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \
- $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE)) 3> config-cc.mak
+ $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE); \
+ $(call cc-option,-march=armv9-a+sme, CROSS_CC_HAS_ARMV9_SME)) 3> config-cc.mak
-include config-cc.mak

 # Pauth Tests
@@ -XXX,XX +XXX,XX @@ endif
 ifneq ($(CROSS_CC_HAS_SVE),)
 # System Registers Tests
 AARCH64_TESTS += sysregs
+ifneq ($(CROSS_CC_HAS_ARMV9_SME),)
+sysregs: CFLAGS+=-march=armv9-a+sme -DHAS_ARMV9_SME
+else
 sysregs: CFLAGS+=-march=armv8.1-a+sve
+endif

 # SVE ioctl test
 AARCH64_TESTS += sve-ioctls
--
2.25.1
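The exported-bits masks above matter because Linux lets userspace read these ID registers directly (trapping and emulating MRS) when HWCAP_CPUID is advertised. A minimal probe, as a sketch assuming a Linux/aarch64 target (the HWCAP_CPUID value matches the test's own fallback define):

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long isar0;

        /* bit 11 is HWCAP_CPUID, per the define in sysregs.c above */
        if (!(getauxval(AT_HWCAP) & (1 << 11))) {
            return 1;
        }
        asm("mrs %0, id_aa64isar0_el1" : "=r"(isar0));
        printf("ID_AA64ISAR0_EL1 = %#lx\n", isar0);
        return 0;
    }

Fields the kernel (and, after this patch, QEMU's user-mode emulation) does not export read as zero.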
From: Richard Henderson <richard.henderson@linaro.org>

For consistency with the mte_check1 + mte_checkN merge
to mte_check, rename the probe function as well.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 2 +-
 target/arm/mte_helper.c | 6 +++---
 target/arm/sve_helper.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, TCMA, 6, 2)
 FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

 static inline int allocation_tag_from_addr(uint64_t ptr)
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
 * exception for inaccessible pages, and resolves the virtual address
 * into the softmmu tlb.
 *
- * When RA == 0, this is for mte_probe1. The page is expected to be
+ * When RA == 0, this is for mte_probe. The page is expected to be
 * valid. Indicate to probe_access_flags no-fault, then assert that
 * we received a valid page.
 */
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 }

 /*
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed. This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
 uint64_t fault;
 int ret = mte_probe_int(env, desc, ptr, 0, &fault);
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
 /* Watchpoint hit, see below. */
 goto do_fault;
 }
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
 goto do_fault;
 }
 /*
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
 & BP_MEM_READ)) {
 goto do_fault;
 }
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
 goto do_fault;
 }
 host_fn(vd, reg_off, host + mem_off);
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
 }
 if (mtedesc &&
 arm_tlb_mte_tagged(&info.attrs) &&
- !mte_probe1(env, mtedesc, addr)) {
+ !mte_probe(env, mtedesc, addr)) {
 goto fault;
 }

--
2.20.1

From: Philippe Mathieu-Daudé <philmd@linaro.org>

This function is not used anywhere outside this file,
so we can make it "static".

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20221216214924.4711-2-philmd@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/smmu-common.h | 3 ---
 hw/arm/smmu-common.c | 2 +-
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -XXX,XX +XXX,XX @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
 /* Unmap the range of all the notifiers registered to any IOMMU mr */
 void smmu_inv_notifiers_all(SMMUState *s);

-/* Unmap the range of all the notifiers registered to @mr */
-void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr);
-
 #endif /* HW_ARM_SMMU_COMMON_H */
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -XXX,XX +XXX,XX @@ static void smmu_unmap_notifier_range(IOMMUNotifier *n)
 }

 /* Unmap all notifiers attached to @mr */
-inline void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
+static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
 {
 IOMMUNotifier *n;

--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Adjust the interface to match what has been done to the
TCGv_i32 load/store functions.

This is less obvious, because at present the only user of
these functions, trans_VLDST_multiple, also wants to manipulate
the endianness to speed up loading multiple bytes. Thus we
retain an "internal" interface which is identical to the
current gen_aa32_{ld,st}_i64 interface.

The "new" interface will gain users as we remove the legacy
interfaces, gen_aa32_ld64 and gen_aa32_st64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 78 +++++++++++++++++++--------------
 target/arm/translate-neon.c.inc | 6 ++-
 2 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
 tcg_temp_free(addr);
 }

+static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
+ }
+ tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 int index, MemOp opc)
 {
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }

+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
 #define DO_GEN_LD(SUFF, OPC) \
 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
 TCGv_i32 a32, int index) \
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 gen_aa32_st_i32(s, val, a32, index, OPC); \
 }

-static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_ld_i64(val, addr, index, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- tcg_gen_rotri_i64(val, val, 32);
- }
-
- tcg_temp_free(addr);
-}
-
 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
 TCGv_i32 a32, int index)
 {
- gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
-}
-
-static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_rotri_i64(tmp, val, 32);
- tcg_gen_qemu_st_i64(tmp, addr, index, opc);
- tcg_temp_free_i64(tmp);
- } else {
- tcg_gen_qemu_st_i64(val, addr, index, opc);
- }
- tcg_temp_free(addr);
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q);
 }

 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
 TCGv_i32 a32, int index)
 {
- gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+ gen_aa32_st_i64(s, val, a32, index, MO_Q);
 }

 DO_GEN_LD(8u, MO_UB)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 int tt = a->vd + reg + spacing * xs;

 if (a->l) {
- gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
 neon_store_element64(tt, n, size, tmp64);
 } else {
 neon_load_element64(tmp64, tt, n, size);
- gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
 }
 tcg_gen_add_i32(addr, addr, tmp);
 }
--
2.20.1

From: Philippe Mathieu-Daudé <philmd@linaro.org>

When using Clang ("Apple clang version 14.0.0 (clang-1400.0.29.202)")
and building with -Wall we get:

 hw/arm/smmu-common.c:173:33: warning: static function 'smmu_hash_remove_by_asid_iova' is used in an inline function with external linkage [-Wstatic-in-inline]
 hw/arm/smmu-common.h:170:1: note: use 'static' to give inline function 'smmu_iotlb_inv_iova' internal linkage
 void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
 ^
 static

Nothing in our code base requires or uses inline functions with
external linkage. Some places use internal inlining in the hot path.
These two functions are certainly not in any hot path and don't
justify any inlining, so these are likely oversights rather than
intentional.

Reported-by: Stefan Weil <sw@weilnetz.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20221216214924.4711-3-philmd@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmu-common.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -XXX,XX +XXX,XX @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
 g_hash_table_insert(bs->iotlb, key, new);
 }

-inline void smmu_iotlb_inv_all(SMMUState *s)
+void smmu_iotlb_inv_all(SMMUState *s)
 {
 trace_smmu_iotlb_inv_all();
 g_hash_table_remove_all(s->iotlb);
@@ -XXX,XX +XXX,XX @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
 ((entry->iova & ~info->mask) == info->iova);
 }

-inline void
-smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
- uint8_t tg, uint64_t num_pages, uint8_t ttl)
+void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages, uint8_t ttl)
 {
 /* if tg is not set we use 4KB range invalidation */
 uint8_t granule = tg ? tg * 2 + 10 : 12;
@@ -XXX,XX +XXX,XX @@ smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
 &info);
 }

-inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
+void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
 {
 trace_smmu_iotlb_inv_asid(asid);
 g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
@@ -XXX,XX +XXX,XX @@ error:
 *
 * return 0 on success
 */
-inline int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
- SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
+int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
 {
 if (!cfg->aa64) {
 /*
--
2.25.1
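For readers unfamiliar with the warning class fixed above: in C99, a plain "inline" function (no "static"/"extern") has external linkage, and such a function may not reference an identifier with internal linkage, because another translation unit could end up with the out-of-line definition. An illustrative snippet (not from QEMU) that triggers the same diagnostic:

    static int helper(void) { return 0; }

    /* inline with external linkage; referencing the static 'helper'
     * here is what clang's -Wstatic-in-inline objects to */
    inline int api(void) { return helper(); }

Dropping "inline" (or making the function static), as the patch does, resolves it.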
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-neon.c.inc | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 {
 /* Neon load/store multiple structures */
 int nregs, interleave, spacing, reg, n;
- MemOp endian = s->be_data;
+ MemOp mop, align, endian;
 int mmu_idx = get_mem_index(s);
 int size = a->size;
 TCGv_i64 tmp64;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 }

 /* For our purposes, bytes are always little-endian. */
+ endian = s->be_data;
 if (size == 0) {
 endian = MO_LE;
 }
+
+ /* Enforce alignment requested by the instruction */
+ if (a->align) {
+ align = pow2_align(a->align + 2); /* 4 ** a->align */
+ } else {
+ align = s->align_mem ? MO_ALIGN : 0;
+ }
+
 /*
 * Consecutive little-endian elements from a single register
 * can be promoted to a larger little-endian operation.
 */
 if (interleave == 1 && endian == MO_LE) {
+ /* Retain any natural alignment. */
+ if (align == MO_ALIGN) {
+ align = pow2_align(size);
+ }
 size = 3;
 }
+
 tmp64 = tcg_temp_new_i64();
 addr = tcg_temp_new_i32();
 tmp = tcg_const_i32(1 << size);
 load_reg_var(s, addr, a->rn);
+
+ mop = endian | size | align;
 for (reg = 0; reg < nregs; reg++) {
 for (n = 0; n < 8 >> size; n++) {
 int xs;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 int tt = a->vd + reg + spacing * xs;

 if (a->l) {
- gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
- endian | size);
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
 neon_store_element64(tt, n, size, tmp64);
 } else {
 neon_load_element64(tmp64, tt, n, size);
- gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
- endian | size);
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
 }
 tcg_gen_add_i32(addr, addr, tmp);
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
 }
 }
 }
--
2.20.1

From: Jean-Christophe Dubois <jcd@tribudubois.net>

So far the GPT timers were unable to raise IRQs to the processor.

Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/fsl-imx7.h | 5 +++++
 hw/arm/fsl-imx7.c | 10 ++++++++++
 2 files changed, 15 insertions(+)

diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/fsl-imx7.h
+++ b/include/hw/arm/fsl-imx7.h
@@ -XXX,XX +XXX,XX @@ enum FslIMX7IRQs {
 FSL_IMX7_USB2_IRQ = 42,
 FSL_IMX7_USB3_IRQ = 40,

+ FSL_IMX7_GPT1_IRQ = 55,
+ FSL_IMX7_GPT2_IRQ = 54,
+ FSL_IMX7_GPT3_IRQ = 53,
+ FSL_IMX7_GPT4_IRQ = 52,
+
 FSL_IMX7_WDOG1_IRQ = 78,
 FSL_IMX7_WDOG2_IRQ = 79,
 FSL_IMX7_WDOG3_IRQ = 10,
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/fsl-imx7.c
+++ b/hw/arm/fsl-imx7.c
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
 FSL_IMX7_GPT4_ADDR,
 };

+ static const int FSL_IMX7_GPTn_IRQ[FSL_IMX7_NUM_GPTS] = {
+ FSL_IMX7_GPT1_IRQ,
+ FSL_IMX7_GPT2_IRQ,
+ FSL_IMX7_GPT3_IRQ,
+ FSL_IMX7_GPT4_IRQ,
+ };
+
 s->gpt[i].ccm = IMX_CCM(&s->ccm);
 sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), &error_abort);
 sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, FSL_IMX7_GPTn_ADDR[i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ FSL_IMX7_GPTn_IRQ[i]));
 }

 for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) {
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

gen_aa32_ld_i64() is the only caller of gen_aa32_frob64(), so inline
the latter there. Adjust some commentary to talk about SCTLR_B
instead of the vanishing function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 gen_aa32_st_i32(s, val, a32, index, OPC); \
 }

-static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
-{
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
- tcg_gen_rotri_i64(val, val, 32);
- }
-}
-
 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
 int index, MemOp opc)
 {
 TCGv addr = gen_aa32_addr(s, a32, opc);
 tcg_gen_qemu_ld_i64(val, addr, index, opc);
- gen_aa32_frob64(s, val);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+
 tcg_temp_free(addr);
 }

@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
 TCGv_i32 tmp2 = tcg_temp_new_i32();
 TCGv_i64 t64 = tcg_temp_new_i64();

- /* For AArch32, architecturally the 32-bit word at the lowest
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
 * address is always Rt and the one at addr+4 is Rt2, even if
 * the CPU is big-endian. That means we don't want to do a
- * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
- * for an architecturally 64-bit access, but instead do a
- * 64-bit access using MO_BE if appropriate and then split
- * the two halves.
- * This only makes a difference for BE32 user-mode, where
- * frob64() must not flip the two halves of the 64-bit data
- * but this code must treat BE32 user-mode like BE32 system.
+ * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+ * architecturally 64-bit access, but instead do a 64-bit access
+ * using MO_BE if appropriate and then split the two halves.
 */
 TCGv taddr = gen_aa32_addr(s, addr, opc);

@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
 TCGv_i64 n64 = tcg_temp_new_i64();

 t2 = load_reg(s, rt2);
- /* For AArch32, architecturally the 32-bit word at the lowest
+
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
 * address is always Rt and the one at addr+4 is Rt2, even if
 * the CPU is big-endian. Since we're going to treat this as a
 * single 64-bit BE store, we need to put the two halves in the
 * opposite order for BE to LE, so that they end up in the right
- * places.
- * We don't want gen_aa32_frob64() because that does the wrong
- * thing for BE32 usermode.
+ * places. We don't want gen_aa32_st_i64, because that checks
+ * SCTLR_B as if for an architectural 64-bit access.
 */
 if (s->be_data == MO_BE) {
 tcg_gen_concat_i32_i64(n64, t2, t1);
--
2.20.1

From: Jean-Christophe Dubois <jcd@tribudubois.net>

CCM derived clocks will have to be added later.

Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/imx7_ccm.c | 49 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)

diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx7_ccm.c
+++ b/hw/misc/imx7_ccm.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/misc/imx7_ccm.h"
 #include "migration/vmstate.h"

+#include "trace.h"
+
+#define CKIH_FREQ 24000000 /* 24MHz crystal input */
+
 static void imx7_analog_reset(DeviceState *dev)
 {
 IMX7AnalogState *s = IMX7_ANALOG(dev);
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_imx7_ccm = {
 static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
 {
 /*
- * This function is "consumed" by GPT emulation code, however on
- * i.MX7 each GPT block can have their own clock root. This means
- * that this functions needs somehow to know requester's identity
- * and the way to pass it: be it via additional IMXClk constants
- * or by adding another argument to this method needs to be
- * figured out
+ * This function is "consumed" by GPT emulation code. Some clocks
+ * have fixed frequencies and we can provide requested frequency
+ * easily. However for CCM provided clocks (like IPG) each GPT
+ * timer can have its own clock root.
+ * This means we need additional information when calling this
+ * function to know the requester's identity.
 */
- qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Not implemented\n",
- TYPE_IMX7_CCM, __func__);
- return 0;
+ uint32_t freq = 0;
+
+ switch (clock) {
+ case CLK_NONE:
+ break;
+ case CLK_32k:
+ freq = CKIL_FREQ;
+ break;
+ case CLK_HIGH:
+ freq = CKIH_FREQ;
+ break;
+ case CLK_IPG:
+ case CLK_IPG_HIGH:
+ /*
+ * For now we don't have a way to figure out the device this
+ * function is called for. Until then the IPG derived clocks
+ * are left unimplemented.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Clock %d Not implemented\n",
+ TYPE_IMX7_CCM, __func__, clock);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n",
+ TYPE_IMX7_CCM, __func__, clock);
+ break;
+ }
+
+ trace_ccm_clock_freq(clock, freq);
+
+ return freq;
 }

 static void imx7_ccm_class_init(ObjectClass *klass, void *data)
--
2.25.1
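For context on the CCM callback above: its consumer is the GPT timer model, which derives its tick rate from the selected clock source roughly along these lines (a simplified sketch of the logic in hw/timer/imx_gpt.c, not a verbatim quote):

    /* clock source field of the GPT control register selects an IMXClk */
    clksrc = extract32(s->cr, GPT_CR_CLKSRC_SHIFT, 3);
    freq = imx_ccm_get_clock_frequency(s->ccm, s->clocks[clksrc]);
    if (freq) {
        ptimer_set_freq(s->timer, freq);
    }

so a zero frequency from the CCM (as for the still-unimplemented IPG clocks) simply leaves the timer stopped rather than misbehaving.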
From: Richard Henderson <richard.henderson@linaro.org>

Split out a helper function from mte_checkN to perform
all of the checking and address manipulation. So far,
just use this in mte_checkN itself.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 52 +++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 12 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
 return n;
 }

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
- uint64_t ptr, uintptr_t ra)
+/**
+ * mte_probe_int() - helper for mte_probe and mte_check
+ * @env: CPU environment
+ * @desc: MTEDESC descriptor
+ * @ptr: virtual address of the base of the access
+ * @fault: return virtual address of the first check failure
+ *
+ * Internal routine for both mte_probe and mte_check.
+ * Return zero on failure, filling in *fault.
+ * Return negative on trivial success for tbi disabled.
+ * Return positive on success with tbi enabled.
+ */
+static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+ uintptr_t ra, uint32_t total, uint64_t *fault)
 {
 int mmu_idx, ptr_tag, bit55;
 uint64_t ptr_last, prev_page, next_page;
 uint64_t tag_first, tag_last;
 uint64_t tag_byte_first, tag_byte_last;
- uint32_t total, tag_count, tag_size, n, c;
+ uint32_t tag_count, tag_size, n, c;
 uint8_t *mem1, *mem2;
 MMUAccessType type;

 bit55 = extract64(ptr, 55, 1);
+ *fault = ptr;

 /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
 if (unlikely(!tbi_check(desc, bit55))) {
- return ptr;
+ return -1;
 }

 ptr_tag = allocation_tag_from_addr(ptr);

 if (tcma_check(desc, bit55, ptr_tag)) {
- goto done;
+ return 1;
 }

 mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
 type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
- total = FIELD_EX32(desc, MTEDESC, TSIZE);

 /* Find the addr of the end of the access */
 ptr_last = ptr + total - 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
 mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
 MMU_DATA_LOAD, tag_size, ra);
 if (!mem1) {
- goto done;
+ return 1;
 }
 /* Perform all of the comparisons. */
 n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
 }
 if (n == c) {
 if (!mem2) {
- goto done;
+ return 1;
 }
 n += checkN(mem2, 0, ptr_tag, tag_count - c);
 }
 }

+ if (likely(n == tag_count)) {
+ return 1;
+ }
+
 /*
 * If we failed, we know which granule. For the first granule, the
 * failure address is @ptr, the first byte accessed. Otherwise the
 * failure address is the first byte of the nth granule.
 */
- if (unlikely(n < tag_count)) {
- uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
- mte_check_fail(env, desc, fault, ra);
+ if (n > 0) {
+ *fault = tag_first + n * TAG_GRANULE;
 }
+ return 0;
+}

- done:
+uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
+ uint64_t ptr, uintptr_t ra)
+{
+ uint64_t fault;
+ uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
+ int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+ if (unlikely(ret == 0)) {
+ mte_check_fail(env, desc, fault, ra);
+ } else if (ret < 0) {
+ return ptr;
+ }
 return useronly_clean_ptr(ptr);
 }

--
2.20.1

From: Jean-Christophe Dubois <jcd@tribudubois.net>

The i.MX6UL doesn't support the CLK_HIGH or CLK_HIGH_DIV clock sources.

Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/timer/imx_gpt.h | 1 +
 hw/arm/fsl-imx6ul.c | 2 +-
 hw/misc/imx6ul_ccm.c | 6 ------
 hw/timer/imx_gpt.c | 25 +++++++++++++++++++++++++
 4 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/include/hw/timer/imx_gpt.h b/include/hw/timer/imx_gpt.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/timer/imx_gpt.h
+++ b/include/hw/timer/imx_gpt.h
@@ -XXX,XX +XXX,XX @@
 #define TYPE_IMX25_GPT "imx25.gpt"
 #define TYPE_IMX31_GPT "imx31.gpt"
 #define TYPE_IMX6_GPT "imx6.gpt"
+#define TYPE_IMX6UL_GPT "imx6ul.gpt"
 #define TYPE_IMX7_GPT "imx7.gpt"

 #define TYPE_IMX_GPT TYPE_IMX25_GPT
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/fsl-imx6ul.c
+++ b/hw/arm/fsl-imx6ul.c
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
 */
 for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) {
 snprintf(name, NAME_SIZE, "gpt%d", i);
- object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX7_GPT);
+ object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX6UL_GPT);
 }

 /*
diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx6ul_ccm.c
+++ b/hw/misc/imx6ul_ccm.c
@@ -XXX,XX +XXX,XX @@ static uint32_t imx6ul_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
 case CLK_32k:
 freq = CKIL_FREQ;
 break;
- case CLK_HIGH:
- freq = CKIH_FREQ;
- break;
- case CLK_HIGH_DIV:
- freq = CKIH_FREQ / 8;
- break;
 default:
 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n",
 TYPE_IMX6UL_CCM, __func__, clock);
diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/imx_gpt.c
+++ b/hw/timer/imx_gpt.c
@@ -XXX,XX +XXX,XX @@ static const IMXClk imx6_gpt_clocks[] = {
 CLK_HIGH, /* 111 reference clock */
 };

+static const IMXClk imx6ul_gpt_clocks[] = {
+ CLK_NONE, /* 000 No clock source */
+ CLK_IPG, /* 001 ipg_clk, 532MHz*/
+ CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */
+ CLK_EXT, /* 011 External clock */
+ CLK_32k, /* 100 ipg_clk_32k */
+ CLK_NONE, /* 101 not defined */
+ CLK_NONE, /* 110 not defined */
+ CLK_NONE, /* 111 not defined */
+};
+
 static const IMXClk imx7_gpt_clocks[] = {
 CLK_NONE, /* 000 No clock source */
 CLK_IPG, /* 001 ipg_clk, 532MHz*/
@@ -XXX,XX +XXX,XX @@ static void imx6_gpt_init(Object *obj)
 s->clocks = imx6_gpt_clocks;
 }

+static void imx6ul_gpt_init(Object *obj)
+{
+ IMXGPTState *s = IMX_GPT(obj);
+
+ s->clocks = imx6ul_gpt_clocks;
+}
+
 static void imx7_gpt_init(Object *obj)
 {
 IMXGPTState *s = IMX_GPT(obj);
@@ -XXX,XX +XXX,XX @@ static const TypeInfo imx6_gpt_info = {
 .instance_init = imx6_gpt_init,
 };

+static const TypeInfo imx6ul_gpt_info = {
+ .name = TYPE_IMX6UL_GPT,
+ .parent = TYPE_IMX25_GPT,
+ .instance_init = imx6ul_gpt_init,
+};
+
 static const TypeInfo imx7_gpt_info = {
 .name = TYPE_IMX7_GPT,
 .parent = TYPE_IMX25_GPT,
@@ -XXX,XX +XXX,XX @@ static void imx_gpt_register_types(void)
 type_register_static(&imx25_gpt_info);
 type_register_static(&imx31_gpt_info);
 type_register_static(&imx6_gpt_info);
+ type_register_static(&imx6ul_gpt_info);
 type_register_static(&imx7_gpt_info);
--
2.25.1
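The tri-state return convention of mte_probe_int() is easiest to see from a caller. The no-fault probe wrapper in the same file consumes it roughly like this (a sketch assembled from the hunks in this series, not a new API):

    bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
    {
        uint64_t fault;
        int ret = mte_probe_int(env, desc, ptr, 0, &fault);

        /* negative (TBI off) and positive (checked OK) both mean success */
        return ret != 0;
    }

Only the mte_check-style callers additionally raise the failure via mte_check_fail() using the *fault address filled in on the zero return.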
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

Therefore, the first failure is always either the first byte of the
access, or the first byte of the granule.

In addition, some of the arithmetic is off for last-first -> count.
This does not become directly visible until a later patch that passes
single bytes into this function, so ptr == ptr_last.

Buglink: https://bugs.launchpad.net/bugs/1921948
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: tweaked a comment]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     int mmu_idx, ptr_tag, bit55;
-    uint64_t ptr_last, ptr_end, prev_page, next_page;
-    uint64_t tag_first, tag_end;
-    uint64_t tag_byte_first, tag_byte_end;
-    uint32_t esize, total, tag_count, tag_size, n, c;
+    uint64_t ptr_last, prev_page, next_page;
+    uint64_t tag_first, tag_last;
+    uint64_t tag_byte_first, tag_byte_last;
+    uint32_t total, tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
     total = FIELD_EX32(desc, MTEDESC, TSIZE);

-    /* Find the addr of the end of the access, and of the last element. */
-    ptr_end = ptr + total;
-    ptr_last = ptr_end - esize;
+    /* Find the addr of the end of the access */
+    ptr_last = ptr + total - 1;

     /* Round the bounds to the tag granule, and compute the number of tags. */
     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
-    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
-    tag_count = (tag_end - tag_first) / TAG_GRANULE;
+    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
+    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

     /* Round the bounds to twice the tag granule, and compute the bytes. */
     tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
-    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
+    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

     /* Locate the page boundaries. */
     prev_page = ptr & TARGET_PAGE_MASK;
     next_page = prev_page + TARGET_PAGE_SIZE;

-    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
+    if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
         /* Memory access stays on one page. */
-        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                   MMU_DATA_LOAD, tag_size, ra);

-        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
-                                  ptr_end - next_page,
+                                  ptr_last - next_page + 1,
                                   MMU_DATA_LOAD, tag_size, ra);

         /*
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     }

     /*
-     * If we failed, we know which granule.  Compute the element that
-     * is first in that granule, and signal failure on that element.
+     * If we failed, we know which granule.  For the first granule, the
+     * failure address is @ptr, the first byte accessed.  Otherwise the
+     * failure address is the first byte of the nth granule.
      */
     if (unlikely(n < tag_count)) {
-        uint64_t fail_ofs;
-
-        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
-        fail_ofs = ROUND_UP(fail_ofs, esize);
-        mte_check_fail(env, desc, ptr + fail_ofs, ra);
+        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
+        mte_check_fail(env, desc, fault, ra);
     }

 done:
--
2.20.1
--- a/hw/arm/fsl-imx7.c
54
+ /* Find the addr of the end of the access */
44
+++ b/hw/arm/fsl-imx7.c
55
+ ptr_last = ptr + total - 1;
45
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
56
46
FSL_IMX7_GPIO7_ADDR,
57
/* Round the bounds to the tag granule, and compute the number of tags. */
47
};
58
tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
48
59
- tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
49
+ static const int FSL_IMX7_GPIOn_LOW_IRQ[FSL_IMX7_NUM_GPIOS] = {
60
- tag_count = (tag_end - tag_first) / TAG_GRANULE;
50
+ FSL_IMX7_GPIO1_LOW_IRQ,
61
+ tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
51
+ FSL_IMX7_GPIO2_LOW_IRQ,
62
+ tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
52
+ FSL_IMX7_GPIO3_LOW_IRQ,
63
53
+ FSL_IMX7_GPIO4_LOW_IRQ,
64
/* Round the bounds to twice the tag granule, and compute the bytes. */
54
+ FSL_IMX7_GPIO5_LOW_IRQ,
65
tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
55
+ FSL_IMX7_GPIO6_LOW_IRQ,
66
- tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
56
+ FSL_IMX7_GPIO7_LOW_IRQ,
67
+ tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);
57
+ };
68
58
+
69
/* Locate the page boundaries. */
59
+ static const int FSL_IMX7_GPIOn_HIGH_IRQ[FSL_IMX7_NUM_GPIOS] = {
70
prev_page = ptr & TARGET_PAGE_MASK;
60
+ FSL_IMX7_GPIO1_HIGH_IRQ,
71
next_page = prev_page + TARGET_PAGE_SIZE;
61
+ FSL_IMX7_GPIO2_HIGH_IRQ,
72
62
+ FSL_IMX7_GPIO3_HIGH_IRQ,
73
- if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
63
+ FSL_IMX7_GPIO4_HIGH_IRQ,
74
+ if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
64
+ FSL_IMX7_GPIO5_HIGH_IRQ,
75
/* Memory access stays on one page. */
65
+ FSL_IMX7_GPIO6_HIGH_IRQ,
76
- tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
66
+ FSL_IMX7_GPIO7_HIGH_IRQ,
77
+ tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
67
+ };
78
mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
68
+
79
MMU_DATA_LOAD, tag_size, ra);
69
sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), &error_abort);
80
if (!mem1) {
70
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, FSL_IMX7_GPIOn_ADDR[i]);
81
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
71
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0,
82
mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
72
+ FSL_IMX7_GPIOn_ADDR[i]);
83
MMU_DATA_LOAD, tag_size, ra);
73
+
84
74
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
85
- tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
75
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
86
+ tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
76
+ FSL_IMX7_GPIOn_LOW_IRQ[i]));
87
mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
77
+
88
- ptr_end - next_page,
78
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
89
+ ptr_last - next_page + 1,
79
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
90
MMU_DATA_LOAD, tag_size, ra);
80
+ FSL_IMX7_GPIOn_HIGH_IRQ[i]));
91
92
/*
93
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
94
}
81
}
95
82
96
/*
83
/*
97
- * If we failed, we know which granule. Compute the element that
98
- * is first in that granule, and signal failure on that element.
99
+ * If we failed, we know which granule. For the first granule, the
100
+ * failure address is @ptr, the first byte accessed. Otherwise the
101
+ * failure address is the first byte of the nth granule.
102
*/
103
if (unlikely(n < tag_count)) {
104
- uint64_t fail_ofs;
105
-
106
- fail_ofs = tag_first + n * TAG_GRANULE - ptr;
107
- fail_ofs = ROUND_UP(fail_ofs, esize);
108
- mte_check_fail(env, desc, ptr + fail_ofs, ra);
109
+ uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
110
+ mte_check_fail(env, desc, fault, ra);
111
}
112
113
done:
114
--
84
--
115
2.20.1
85
2.25.1
116
117
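For background on what is being wired here: each i.MX GPIO bank reports its
32 pins on two combined interrupt lines, one for pins 0-15 and one for pins
16-31, which is why the patch connects sysbus IRQs 0 and 1 for every bank.
A rough standalone model of that split (GpioBank and both helpers are
hypothetical, not QEMU API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t pending;   /* one interrupt-status bit per pin */
    } GpioBank;

    /* Pins 0-15 feed the "low" line, pins 16-31 the "high" line. */
    static bool low_irq(const GpioBank *b)  { return (b->pending & 0x0000ffffu) != 0; }
    static bool high_irq(const GpioBank *b) { return (b->pending & 0xffff0000u) != 0; }

    int main(void)
    {
        GpioBank bank = { .pending = 1u << 20 };   /* pin 20 pending */
        printf("low=%d high=%d\n", low_irq(&bank), high_irq(&bank));  /* low=0 high=1 */
        return 0;
    }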
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Now that these bits have been moved out of tb->flags,
where TBFLAG_ANY was filling from the top, move AM32
to fill from the top, and A32 and M32 to fill from the
bottom. This means fewer changes when adding new bits.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  *
  * The bits for 32-bit A-profile and M-profile partially overlap:
  *
- *  18              9              0
- * +----------------+--------------+
- * |   TBFLAG_A32   |              |
- * +-----+----------+  TBFLAG_AM32 |
- * |     |TBFLAG_M32|              |
- * +-----+----------+--------------+
- *  14    9         0
+ *  31            23         11 10            0
+ * +-------------+----------+----------------+
+ * |             |          |   TBFLAG_A32   |
+ * | TBFLAG_AM32 |          +-----+----------+
+ * |             |                |TBFLAG_M32|
+ * +-------------+----------------+----------+
+ *  31            23             5 4         0
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
 /*
  * Bit usage when in AArch32 state, both A- and M-profile.
  */
-FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
-FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
+FIELD(TBFLAG_AM32, CONDEXEC, 24, 8)     /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 23, 1)        /* Not cached. */

 /*
  * Bit usage when in AArch32 state, for A-profile only.
  */
-FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
+FIELD(TBFLAG_A32, VECLEN, 0, 3)         /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 3, 2)      /* Not cached. */
 /*
  * We store the bottom two bits of the CPAR as TB flags and handle
  * checks on the other bits at runtime. This shares the same bits as
  * VECSTRIDE, which is OK as no XScale CPU has VFP.
  * Not cached, because VECLEN+VECSTRIDE are not cached.
  */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
-FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, SCTLR__B, 15, 1)      /* Cannot overlap with SCTLR_B */
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
+FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR__B, 8, 1)       /* Cannot overlap with SCTLR_B */
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
 /*
  * Indicates whether cp register reads and writes by guest code should access
  * the secure or nonsecure bank of banked registers; note that this is not
  * the same thing as the current security state of the processor!
  */
-FIELD(TBFLAG_A32, NS, 17, 1)
+FIELD(TBFLAG_A32, NS, 10, 1)

 /*
  * Bit usage when in AArch32 state, for M-profile only.
  */
 /* Handler (ie not Thread) mode */
-FIELD(TBFLAG_M32, HANDLER, 9, 1)
+FIELD(TBFLAG_M32, HANDLER, 0, 1)
 /* Whether we should generate stack-limit checks */
-FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
 /* Set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_M32, LSPACT, 11, 1)        /* Not cached. */
+FIELD(TBFLAG_M32, LSPACT, 2, 1)         /* Not cached. */
 /* Set if we must create a new FP context */
-FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1)   /* Not cached. */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)    /* Not cached. */
 /* Set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)  /* Not cached. */

 /*
  * Bit usage when in AArch64 state
--
2.20.1
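The FIELD() lines being shuffled are QEMU's registerfields.h idiom: each
declaration names a (shift, length) pair and the FIELD_EX32/FIELD_DP32
macros extract or deposit that bit range. A self-contained approximation
follows — extract32/deposit32 are simplified rewrites, not the originals,
and the two R_* constant pairs mirror the new CONDEXEC/THUMB placement
above:

    #include <assert.h>
    #include <stdint.h>

    #define R_CONDEXEC_SHIFT  24
    #define R_CONDEXEC_LENGTH 8
    #define R_THUMB_SHIFT     23
    #define R_THUMB_LENGTH    1

    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & (~0u >> (32 - len));
    }

    static uint32_t deposit32(uint32_t v, int start, int len, uint32_t field)
    {
        uint32_t mask = (~0u >> (32 - len)) << start;
        return (v & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint32_t flags = 0;
        flags = deposit32(flags, R_CONDEXEC_SHIFT, R_CONDEXEC_LENGTH, 0xa5);
        flags = deposit32(flags, R_THUMB_SHIFT, R_THUMB_LENGTH, 1);
        /* Disjoint bit ranges: neither deposit clobbers the other. */
        assert(extract32(flags, R_CONDEXEC_SHIFT, R_CONDEXEC_LENGTH) == 0xa5);
        assert(extract32(flags, R_THUMB_SHIFT, R_THUMB_LENGTH) == 1);
        return 0;
    }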
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Now that other bits have been moved out of tb->flags,
there's no point in filling from the top.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
-FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
-FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
-FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1)    /* Not cached. */
-FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
-FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
+FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
+FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
+FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
 /* Target EL if we take a floating-point-disabled exception */
-FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
+FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
 /* For A-profile only, target EL for debug exceptions. */
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)

 /*
  * Bit usage when in AArch32 state, both A- and M-profile.
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Just because operating on a TCGv_i64 temporary does not
mean that we're performing a 64-bit operation. Restrict
the frobbing to actual 64-bit operations.

This bug is not currently visible because all current
users of these two functions always pass MO_64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     tcg_gen_qemu_ld_i64(val, addr, index, opc);

     /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         tcg_gen_rotri_i64(val, val, 32);
     }

@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     TCGv addr = gen_aa32_addr(s, a32, opc);

     /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         TCGv_i64 tmp = tcg_temp_new_i64();
         tcg_gen_rotri_i64(tmp, val, 32);
         tcg_gen_qemu_st_i64(tmp, addr, index, opc);
--
2.20.1
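The point of the fix in one compilable sketch: the BE32 word swap must key
off the access size encoded in the MemOp, not off the fact that the value
travels in a 64-bit temporary. The MemOp values below are illustrative
stand-ins, not QEMU's definitions:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
        MO_SIZE = 3,   /* mask covering the size field */
    } MemOp;

    static uint64_t rotr64(uint64_t v, unsigned r)
    {
        return (v >> r) | (v << (64 - r));
    }

    /* SCTLR.B (BE32) swaps the two words of a 64-bit access; narrower
     * accesses carried in an i64 temporary must be left untouched. */
    static uint64_t be32_fixup(uint64_t val, MemOp opc, bool sctlr_b)
    {
        if (sctlr_b && (opc & MO_SIZE) == MO_64) {
            val = rotr64(val, 32);
        }
        return val;
    }

    int main(void)
    {
        assert(be32_fixup(0x1122334455667788ull, MO_64, true) ==
               0x5566778811223344ull);
        assert(be32_fixup(0xdeadbeefull, MO_32, true) == 0xdeadbeefull);
        return 0;
    }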
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/qemu/+bug/1905356
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt + 1, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, rt2, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     op_addr_ri_post(s, a, addr, -4);
--
2.20.1
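What MO_ALIGN buys in the hunks above is a check that the address is a
multiple of the access size before the load/store is performed. A minimal
sketch of that predicate, assuming natural alignment (this is not the QEMU
implementation):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* An access of (1 << size_log2) bytes is aligned if the low bits are 0;
     * each 32-bit half of an LDRD/STRD is checked with size_log2 == 2. */
    static bool is_aligned(uint32_t addr, unsigned size_log2)
    {
        return (addr & ((1u << size_log2) - 1)) == 0;
    }

    int main(void)
    {
        assert(is_aligned(0x1000, 2));    /* word-aligned: access proceeds */
        assert(!is_aligned(0x1002, 2));   /* would raise an alignment fault */
        return 0;
    }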
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
         } else {
             tmp = load_reg(s, i);
         }
-        gen_aa32_st32(s, tmp, addr, mem_idx);
+        gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         tcg_temp_free_i32(tmp);

         /* No need to add after the last transfer. */
@@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
         }

         tmp = tcg_temp_new_i32();
-        gen_aa32_ld32u(s, tmp, addr, mem_idx);
+        gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         if (user) {
             tmp2 = tcg_const_i32(i);
             gen_helper_set_user_reg(cpu_env, tmp2, tmp);
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_RFE(DisasContext *s, arg_RFE *a)

     /* Load PC into tmp and CPSR into tmp2. */
     t1 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
     tcg_gen_addi_i32(addr, addr, 4);
     t2 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);

     if (a->w) {
         /* Base writeback. */
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-vfp.c.inc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
             vfp_store_reg32(tmp, a->vd + i);
         } else {
             /* store */
             vfp_load_reg32(tmp, a->vd + i);
-            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
             vfp_store_reg64(tmp, a->vd + i);
         } else {
             /* store */
             vfp_load_reg64(tmp, a->vd + i);
-            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
+            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-vfp.c.inc | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
     addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i32();
     if (a->l) {
-        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
         vfp_store_reg32(tmp, a->vd);
     } else {
         vfp_load_reg32(tmp, a->vd);
-        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
     }
     tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(addr);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
     addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i32();
     if (a->l) {
-        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
         vfp_store_reg32(tmp, a->vd);
     } else {
         vfp_load_reg32(tmp, a->vd);
-        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
     }
     tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(addr);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
     addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i64();
     if (a->l) {
-        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
         vfp_store_reg64(tmp, a->vd);
     } else {
         vfp_load_reg64(tmp, a->vd);
-        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
+        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
     }
     tcg_temp_free_i64(tmp);
     tcg_temp_free_i32(addr);
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h          |  1 +
 target/arm/translate.c          | 15 +++++++++++++
 target/arm/translate-neon.c.inc | 37 +++++++++++++++++++++--------
 3 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ void arm_test_cc(DisasCompare *cmp, int cc);
 void arm_free_cc(DisasCompare *cmp);
 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
 void arm_gen_test_cc(int cc, TCGLabel *label);
+MemOp pow2_align(unsigned i);

 /* Return state of Alternate Half-precision flag, caller frees result */
 static inline TCGv_i32 get_ahp_flag(void)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 #define IS_USER_ONLY 0
 #endif

+MemOp pow2_align(unsigned i)
+{
+    static const MemOp mop_align[] = {
+        0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
+        /*
+         * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
+         * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
+         * see get_alignment_bits(). Enforce only 128-bit alignment for now.
+         */
+        MO_ALIGN_16
+    };
+    g_assert(i < ARRAY_SIZE(mop_align));
+    return mop_align[i];
+}
+
 /*
  * Abstractions of "generate code to do a guest load/store for
  * AArch32", where a vaddr is always 32 bits (and is zero
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
     int size = a->size;
     int nregs = a->n + 1;
     TCGv_i32 addr, tmp;
+    MemOp mop, align;

     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
         return false;
     }

+    align = 0;
     if (size == 3) {
         if (nregs != 4 || a->a == 0) {
             return false;
         }
         /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
-        size = 2;
-    }
-    if (nregs == 1 && a->a == 1 && size == 0) {
-        return false;
-    }
-    if (nregs == 3 && a->a == 1) {
-        return false;
+        size = MO_32;
+        align = MO_ALIGN_16;
+    } else if (a->a) {
+        switch (nregs) {
+        case 1:
+            if (size == 0) {
+                return false;
+            }
+            align = MO_ALIGN;
+            break;
+        case 2:
+            align = pow2_align(size + 1);
+            break;
+        case 3:
+            return false;
+        case 4:
+            align = pow2_align(size + 2);
+            break;
+        default:
+            g_assert_not_reached();
+        }
     }

     if (!vfp_access_check(s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
      */
     stride = a->t ? 2 : 1;
     vec_size = nregs == 1 ? stride * 8 : 8;
-
+    mop = size | align;
     tmp = tcg_temp_new_i32();
     addr = tcg_temp_new_i32();
     load_reg_var(s, addr, a->rn);
     for (reg = 0; reg < nregs; reg++) {
-        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
         if ((vd & 1) && vec_size == 16) {
             /*
              * We cannot write 16 bytes at once because the
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
         }
         tcg_gen_addi_i32(addr, addr, 1 << size);
         vd += stride;
+
+        /* Subsequent memory operations inherit alignment */
+        mop &= ~MO_AMASK;
     }
     tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(addr);

--
2.20.1
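pow2_align() maps log2 of an access footprint in bytes to the alignment
that should be enforced, clamping at 128 bits for the reason spelled out in
the FIXME. A compilable model with stand-in constants (here ALIGN_* just
records log2 bytes; the real function packs MO_ALIGN_* bits into a MemOp):

    #include <assert.h>

    enum { ALIGN_NONE = 0, ALIGN_2 = 1, ALIGN_4 = 2, ALIGN_8 = 3, ALIGN_16 = 4 };

    static int pow2_align_model(unsigned i)
    {
        /* Index 5 (32-byte footprint) is clamped to 16-byte alignment. */
        static const int align[] = {
            ALIGN_NONE, ALIGN_2, ALIGN_4, ALIGN_8, ALIGN_16, ALIGN_16,
        };
        assert(i < sizeof(align) / sizeof(align[0]));
        return align[i];
    }

    int main(void)
    {
        /* VLD2, 16-bit elements: size == 1, nregs == 2 -> pow2_align(size + 1). */
        assert(pow2_align_model(1 + 1) == ALIGN_4);
        /* VLD4, 32-bit elements: size == 2, nregs == 4 -> pow2_align(size + 2). */
        assert(pow2_align_model(2 + 2) == ALIGN_16);
        return 0;
    }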
From: Stephen Longfield <slongfield@google.com>

Size is used at lines 1088/1188 for the loop, which reads the last 4
bytes from the crc_ptr, so it does need to get increased; however, it
shouldn't be increased before the buffer is passed to CRC computation,
or the crc32 function will access uninitialized memory.

This was pointed out to me by clg@kaod.org during the code review of
a similar patch to hw/net/ftgmac100.c

Change-Id: Ib0464303b191af1e28abeb2f5105eb25aadb5e9b
Signed-off-by: Stephen Longfield <slongfield@google.com>
Reviewed-by: Patrick Venture <venture@google.com>
Message-id: 20221221183202.3788132-1-slongfield@google.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/net/imx_fec.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -XXX,XX +XXX,XX @@ static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
         return 0;
     }

-    /* 4 bytes for the CRC.  */
-    size += 4;
     crc = cpu_to_be32(crc32(~0, buf, size));
+    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
+    size += 4;
     crc_ptr = (uint8_t *) &crc;

     /* Huge frames are truncated. */
@@ -XXX,XX +XXX,XX @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
         return 0;
     }

-    /* 4 bytes for the CRC.  */
-    size += 4;
     crc = cpu_to_be32(crc32(~0, buf, size));
+    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
+    size += 4;
     crc_ptr = (uint8_t *) &crc;

     if (shift16) {
--
2.25.1
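The ordering rule in the imx_fec fix can be demonstrated directly with
zlib's crc32(), the same function the device model calls. A standalone
sketch of the corrected sequence (buffer size and fill byte are arbitrary;
QEMU's cpu_to_be32() byte-swap is omitted; build with -lz):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        uint8_t frame[64 + 4];     /* received payload plus room for the FCS */
        size_t size = 64;          /* bytes actually received off the wire   */
        memset(frame, 0xab, size);

        /* CRC the received bytes FIRST; only then grow size so that the
         * copy loop also covers the 4 CRC bytes.  Doing "size += 4" before
         * crc32() would make it read 4 uninitialized bytes. */
        uint32_t crc = crc32(~0u, frame, size);
        size += 4;
        memcpy(frame + size - 4, &crc, 4);   /* stand-in for the crc_ptr loop */

        printf("frame of %zu bytes, crc=0x%08x\n", size, (unsigned)crc);
        return 0;
    }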