The big thing here is RTH's patchset implementing ARMv8.1-VHE
emulation; otherwise just a handful of smaller fixes.

thanks
-- PMM

The following changes since commit 346ed3151f1c43e72c40cb55b392a1d4cface62c:

  Merge remote-tracking branch 'remotes/awilliam/tags/vfio-update-20200206.0' into staging (2020-02-07 11:52:15 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200207

for you to fetch changes up to af6c91b490e9b1bce7a168f8a9c848f3e60f616e:

  stellaris: delay timer_new to avoid memleaks (2020-02-07 14:04:28 +0000)

----------------------------------------------------------------
target-arm queue:
 * monitor: fix query-cpu-model-expansion crash when using machine type none
 * Support emulation of the ARMv8.1-VHE architecture feature
 * bcm2835_dma: fix bugs in TD mode handling
 * docs/arm-cpu-features: Make kvm-no-adjvtime comment clearer
 * stellaris, stm32f2xx_timer, armv7m_systick: fix minor memory leaks

----------------------------------------------------------------
Alex Bennée (1):
      target/arm: check TGE and E2H flags for EL0 pauth traps

Liang Yan (1):
      target/arm/monitor: query-cpu-model-expansion crashed qemu when using machine type none

Pan Nengyuan (3):
      armv7m_systick: delay timer_new to avoid memleaks
      stm32f2xx_timer: delay timer_new to avoid memleaks
      stellaris: delay timer_new to avoid memleaks

Philippe Mathieu-Daudé (1):
      docs/arm-cpu-features: Make kvm-no-adjvtime comment clearer

Rene Stange (2):
      bcm2835_dma: Fix the ylen loop in TD mode
      bcm2835_dma: Re-initialize xlen in TD mode

Richard Henderson (40):
      target/arm: Define isar_feature_aa64_vh
      target/arm: Enable HCR_E2H for VHE
      target/arm: Add CONTEXTIDR_EL2
      target/arm: Add TTBR1_EL2
      target/arm: Update CNTVCT_EL0 for VHE
      target/arm: Split out vae1_tlbmask
      target/arm: Split out alle1_tlbmask
      target/arm: Simplify tlb_force_broadcast alternatives
      target/arm: Rename ARMMMUIdx*_S12NSE* to ARMMMUIdx*_E10_*
      target/arm: Rename ARMMMUIdx_S2NS to ARMMMUIdx_Stage2
      target/arm: Rename ARMMMUIdx_S1NSE* to ARMMMUIdx_Stage1_E*
      target/arm: Rename ARMMMUIdx_S1SE[01] to ARMMMUIdx_SE10_[01]
      target/arm: Rename ARMMMUIdx*_S1E3 to ARMMMUIdx*_SE3
      target/arm: Rename ARMMMUIdx_S1E2 to ARMMMUIdx_E2
      target/arm: Recover 4 bits from TBFLAGs
      target/arm: Expand TBFLAG_ANY.MMUIDX to 4 bits
      target/arm: Rearrange ARMMMUIdxBit
      target/arm: Tidy ARMMMUIdx m-profile definitions
      target/arm: Reorganize ARMMMUIdx
      target/arm: Add regime_has_2_ranges
      target/arm: Update arm_mmu_idx for VHE
      target/arm: Update arm_sctlr for VHE
      target/arm: Update aa64_zva_access for EL2
      target/arm: Update ctr_el0_access for EL2
      target/arm: Add the hypervisor virtual counter
      target/arm: Update timer access for VHE
      target/arm: Update define_one_arm_cp_reg_with_opaque for VHE
      target/arm: Add VHE system register redirection and aliasing
      target/arm: Add VHE timer register redirection and aliasing
      target/arm: Flush tlb for ASID changes in EL2&0 translation regime
      target/arm: Flush tlbs for E2&0 translation regime
      target/arm: Update arm_phys_excp_target_el for TGE
      target/arm: Update {fp,sve}_exception_el for VHE
      target/arm: Update get_a64_user_mem_index for VHE
      target/arm: Update arm_cpu_do_interrupt_aarch64 for VHE
      target/arm: Enable ARMv8.1-VHE in -cpu max
      target/arm: Move arm_excp_unmasked to cpu.c
      target/arm: Pass more cpu state to arm_excp_unmasked
      target/arm: Use bool for unmasked in arm_excp_unmasked
      target/arm: Raise only one interrupt in arm_cpu_exec_interrupt

 target/arm/cpu-param.h | 2 +-
 target/arm/cpu-qom.h | 1 +
 target/arm/cpu.h | 423 ++++++----------
 target/arm/internals.h | 73 ++-
 target/arm/translate.h | 4 +-
 hw/arm/stellaris.c | 7 +-
 hw/dma/bcm2835_dma.c | 8 +-
 hw/timer/armv7m_systick.c | 6 +
 hw/timer/stm32f2xx_timer.c | 5 +
 target/arm/cpu.c | 162 +++++-
 target/arm/cpu64.c | 1 +
 target/arm/debug_helper.c | 50 +-
 target/arm/helper-a64.c | 2 +-
 target/arm/helper.c | 1211 ++++++++++++++++++++++++++++++++------------
 target/arm/monitor.c | 15 +-
 target/arm/pauth_helper.c | 14 +-
 target/arm/translate-a64.c | 47 +-
 target/arm/translate.c | 74 +--
 docs/arm-cpu-features.rst | 2 +-
 19 files changed, 1415 insertions(+), 692 deletions(-)

Hi; here's the latest round of arm patches. I have included also
my patchset for the RTC devices to avoid keeping time_t and
time_t diffs in 32-bit variables.

thanks
-- PMM

The following changes since commit 156618d9ea67f2f2e31d9dedd97f2dcccbe6808c:

  Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging (2023-08-30 09:20:27 -0400)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230831

for you to fetch changes up to e73b8bb8a3e9a162f70e9ffbf922d4fafc96bbfb:

  hw/arm: Set number of MPU regions correctly for an505, an521, an524 (2023-08-31 11:07:02 +0100)

----------------------------------------------------------------
target-arm queue:
 * Some of the preliminary patches for Cortex-A710 support
 * i.MX7 and i.MX6UL refactoring
 * Implement SRC device for i.MX7
 * Catch illegal-exception-return from EL3 with bad NSE/NS
 * Use 64-bit offsets for holding time_t differences in RTC devices
 * Model correct number of MPU regions for an505, an521, an524 boards

----------------------------------------------------------------
Alex Bennée (1):
      target/arm: properly document FEAT_CRC32

Jean-Christophe Dubois (6):
      Remove i.MX7 IOMUX GPR device from i.MX6UL
      Refactor i.MX6UL processor code
      Add i.MX6UL missing devices.
      Refactor i.MX7 processor code
      Add i.MX7 missing TZ devices and memory regions
      Add i.MX7 SRC device implementation

Peter Maydell (8):
      target/arm: Catch illegal-exception-return from EL3 with bad NSE/NS
      hw/rtc/m48t59: Use 64-bit arithmetic in set_alarm()
      hw/rtc/twl92230: Use int64_t for sec_offset and alm_sec
      hw/rtc/aspeed_rtc: Use 64-bit offset for holding time_t difference
      rtc: Use time_t for passing and returning time offsets
      target/arm: Do all "ARM_FEATURE_X implies Y" checks in post_init
      hw/arm/armv7m: Add mpu-ns-regions and mpu-s-regions properties
      hw/arm: Set number of MPU regions correctly for an505, an521, an524

Richard Henderson (9):
      target/arm: Reduce dcz_blocksize to uint8_t
      target/arm: Allow cpu to configure GM blocksize
      target/arm: Support more GM blocksizes
      target/arm: When tag memory is not present, set MTE=1
      target/arm: Introduce make_ccsidr64
      target/arm: Apply access checks to neoverse-n1 special registers
      target/arm: Apply access checks to neoverse-v1 special registers
      target/arm: Suppress FEAT_TRBE (Trace Buffer Extension)
      target/arm: Implement FEAT_HPDS2 as a no-op

 docs/system/arm/emulation.rst | 2 +
 include/hw/arm/armsse.h | 5 +
 include/hw/arm/armv7m.h | 8 +
 include/hw/arm/fsl-imx6ul.h | 158 ++++++++++++++++---
 include/hw/arm/fsl-imx7.h | 338 ++++++++++++++++++++++++++++++-----------
 include/hw/misc/imx7_src.h | 66 ++++++++
 include/hw/rtc/aspeed_rtc.h | 2 +-
 include/sysemu/rtc.h | 4 +-
 target/arm/cpregs.h | 2 +
 target/arm/cpu.h | 5 +-
 target/arm/internals.h | 6 -
 target/arm/tcg/translate.h | 2 +
 hw/arm/armsse.c | 16 ++
 hw/arm/armv7m.c | 21 +++
 hw/arm/fsl-imx6ul.c | 174 +++++++++++++--------
 hw/arm/fsl-imx7.c | 201 +++++++++++++++++++-----
 hw/arm/mps2-tz.c | 29 ++++
 hw/misc/imx7_src.c | 276 +++++++++++++++++++++++++++++++++
 hw/rtc/aspeed_rtc.c | 5 +-
 hw/rtc/m48t59.c | 2 +-
 hw/rtc/twl92230.c | 4 +-
 softmmu/rtc.c | 4 +-
 target/arm/cpu.c | 207 ++++++++++++++-----
 target/arm/helper.c | 15 +-
 target/arm/tcg/cpu32.c | 2 +-
 target/arm/tcg/cpu64.c | 102 +++++++----
 target/arm/tcg/helper-a64.c | 9 ++
 target/arm/tcg/mte_helper.c | 90 ++++++---
 target/arm/tcg/translate-a64.c | 5 +-
 hw/misc/meson.build | 1 +
 hw/misc/trace-events | 4 +
 31 files changed, 1393 insertions(+), 372 deletions(-)
 create mode 100644 include/hw/misc/imx7_src.h
 create mode 100644 hw/misc/imx7_src.c
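(Background sketch, not part of either series: the hazard the RTC time_t patches above
guard against is plain 32-bit truncation of a time_t difference. Assuming a host where
time_t is 64 bits, the problem looks roughly like this:)

    #include <stdint.h>
    #include <time.h>

    int main(void)
    {
        time_t now = time(NULL);
        time_t target = now + 80LL * 365 * 24 * 3600;   /* guest RTC set ~80 years ahead */
        int32_t narrow = (int32_t)(target - now);       /* 2522880000 does not fit in int32_t */
        time_t wide = target - now;                     /* a 64-bit offset keeps the full value */
        return wide == narrow ? 0 : 1;                  /* exits 1: the two offsets differ */
    }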
Deleted patch
From: Liang Yan <lyan@suse.com>

Commit e19afd566781 mentioned that target-arm only supports queryable
cpu models 'max', 'host', and the current type when KVM is in use.
The logic works well until machine type none is used.

For machine type none, cpu_type will be NULL if the cpu option is not
set on the command line, and strlen(cpu_type) will terminate the process.
So we add a check above it.

This won't affect i386 and s390x since they do not use current_cpu.

Signed-off-by: Liang Yan <lyan@suse.com>
Message-id: 20200203134251.12986-1-lyan@suse.com
Reviewed-by: Andrew Jones <drjones@redhat.com>
Tested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/monitor.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/target/arm/monitor.c b/target/arm/monitor.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/monitor.c
+++ b/target/arm/monitor.c
@@ -XXX,XX +XXX,XX @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
     }
 
     if (kvm_enabled()) {
-        const char *cpu_type = current_machine->cpu_type;
-        int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
         bool supported = false;
 
         if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) {
             /* These are kvmarm's recommended cpu types */
             supported = true;
-        } else if (strlen(model->name) == len &&
-                   !strncmp(model->name, cpu_type, len)) {
-            /* KVM is enabled and we're using this type, so it works. */
-            supported = true;
+        } else if (current_machine->cpu_type) {
+            const char *cpu_type = current_machine->cpu_type;
+            int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
+
+            if (strlen(model->name) == len &&
+                !strncmp(model->name, cpu_type, len)) {
+                /* KVM is enabled and we're using this type, so it works. */
+                supported = true;
+            }
         }
         if (!supported) {
             error_setg(errp, "We cannot guarantee the CPU type '%s' works "
-- 
2.20.1
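(For context — my sketch, not text from the patch: with "-M none" and no -cpu option,
current_machine->cpu_type is NULL, so the two removed lines at the top of the
kvm_enabled() block dereferenced NULL before any model name was even compared:)

    /* Pre-patch failure path, hypothetical annotation: */
    const char *cpu_type = current_machine->cpu_type;          /* NULL for "-M none" with no -cpu */
    int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);  /* strlen(NULL) -> crash */

Moving that computation under "else if (current_machine->cpu_type)" skips the NULL case
while keeping the behaviour unchanged for real machine types.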
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
 }
 
+static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+}
+
 static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
-- 
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 7 -------
 target/arm/helper.c | 6 +++++-
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define HCR_ATA (1ULL << 56)
 #define HCR_DCT (1ULL << 57)
 
-/*
- * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
- * HCR_MASK and then clear it again if the feature bit is not set in
- * hcr_write().
- */
-#define HCR_MASK ((1ULL << 34) - 1)
-
 #define SCR_NS (1U << 0)
 #define SCR_IRQ (1U << 1)
 #define SCR_FIQ (1U << 2)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     ARMCPU *cpu = env_archcpu(env);
-    uint64_t valid_mask = HCR_MASK;
+    /* Begin with bits defined in base ARMv8.0. */
+    uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);
 
     if (arm_feature(env, ARM_FEATURE_EL3)) {
         valid_mask &= ~HCR_HCD;
@@ -XXX,XX +XXX,XX @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
          */
         valid_mask &= ~HCR_TSC;
     }
+    if (cpu_isar_feature(aa64_vh, cpu)) {
+        valid_mask |= HCR_E2H;
+    }
     if (cpu_isar_feature(aa64_lor, cpu)) {
         valid_mask |= HCR_TLOR;
     }
-- 
2.20.1
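(Illustration only, not code from the series: the practical effect of building valid_mask
this way — assuming hcr_write() continues to and-mask the written value, as the old
HCR_MASK comment described — is that HCR_EL2.E2H becomes writable only on VHE-capable CPUs:)

    uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);   /* ARMv8.0-defined bits [33:0] */
    if (cpu_isar_feature(aa64_vh, cpu)) {
        valid_mask |= HCR_E2H;                      /* bit 34, only when FEAT_VHE is present */
    }
    value &= valid_mask;  /* without VHE, a guest write of E2H is dropped and reads back as 0 */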
From: Richard Henderson <richard.henderson@linaro.org>

The EL2&0 translation regime is affected by Load Register (unpriv).

The code structure used here will facilitate later changes in this
area for implementing UAO and NV.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-36-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 9 ++++----
 target/arm/translate.h | 2 ++
 target/arm/helper.c | 22 +++++++++++++++++++
 target/arm/translate-a64.c | 44 ++++++++++++++++++++++++--------------
 4 files changed, 57 insertions(+), 20 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

This value is only 4 bits wide.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230811214031.171020-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
16
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
18
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
25
* | | | TBFLAG_A32 | |
19
bool prop_lpa2;
26
* | | +-----+----------+ TBFLAG_AM32 |
20
27
* | TBFLAG_ANY | |TBFLAG_M32| |
21
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
28
- * | | +-------------------------|
22
- uint32_t dcz_blocksize;
29
- * | | | TBFLAG_A64 |
23
+ uint8_t dcz_blocksize;
30
- * +--------------+-----------+-------------------------+
31
- * 31 20 14 0
32
+ * | | +-+----------+--------------|
33
+ * | | | TBFLAG_A64 |
34
+ * +--------------+---------+---------------------------+
35
+ * 31 20 15 0
36
*
37
* Unless otherwise noted, these bits are cached in env->hflags.
38
*/
39
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
40
FIELD(TBFLAG_A64, BT, 9, 1)
41
FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */
42
FIELD(TBFLAG_A64, TBID, 12, 2)
43
+FIELD(TBFLAG_A64, UNPRIV, 14, 1)
44
45
static inline bool bswap_code(bool sctlr_b)
46
{
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate.h
50
+++ b/target/arm/translate.h
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
52
* ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
53
*/
54
bool is_ldex;
55
+ /* True if AccType_UNPRIV should be used for LDTR et al */
56
+ bool unpriv;
57
/* True if v8.3-PAuth is active. */
58
bool pauth_active;
59
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
60
diff --git a/target/arm/helper.c b/target/arm/helper.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/helper.c
63
+++ b/target/arm/helper.c
64
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
65
}
66
}
67
68
+ /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
69
+ /* TODO: ARMv8.2-UAO */
70
+ switch (mmu_idx) {
71
+ case ARMMMUIdx_E10_1:
72
+ case ARMMMUIdx_SE10_1:
73
+ /* TODO: ARMv8.3-NV */
74
+ flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
75
+ break;
76
+ case ARMMMUIdx_E20_2:
77
+ /* TODO: ARMv8.4-SecEL2 */
78
+ /*
79
+ * Note that E20_2 is gated by HCR_EL2.E2H == 1, but E20_0 is
80
+ * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
81
+ */
82
+ if (env->cp15.hcr_el2 & HCR_TGE) {
83
+ flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
84
+ }
85
+ break;
86
+ default:
87
+ break;
88
+ }
89
+
24
+
90
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
25
uint64_t rvbar_prop; /* Property/input signals. */
91
}
26
92
27
/* Configurable aspects of GIC cpu interface (which is part of the CPU) */
93
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-a64.c
96
+++ b/target/arm/translate-a64.c
97
@@ -XXX,XX +XXX,XX @@ void a64_translate_init(void)
98
offsetof(CPUARMState, exclusive_high), "exclusive_high");
99
}
100
101
-static inline int get_a64_user_mem_index(DisasContext *s)
102
+/*
103
+ * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
104
+ */
105
+static int get_a64_user_mem_index(DisasContext *s)
106
{
107
- /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
108
- * if EL1, access as if EL0; otherwise access at current EL
109
+ /*
110
+ * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
111
+ * which is the usual mmu_idx for this cpu state.
112
*/
113
- ARMMMUIdx useridx;
114
+ ARMMMUIdx useridx = s->mmu_idx;
115
116
- switch (s->mmu_idx) {
117
- case ARMMMUIdx_E10_1:
118
- useridx = ARMMMUIdx_E10_0;
119
- break;
120
- case ARMMMUIdx_SE10_1:
121
- useridx = ARMMMUIdx_SE10_0;
122
- break;
123
- case ARMMMUIdx_Stage2:
124
- g_assert_not_reached();
125
- default:
126
- useridx = s->mmu_idx;
127
- break;
128
+ if (s->unpriv) {
129
+ /*
130
+ * We have pre-computed the condition for AccType_UNPRIV.
131
+ * Therefore we should never get here with a mmu_idx for
132
+ * which we do not know the corresponding user mmu_idx.
133
+ */
134
+ switch (useridx) {
135
+ case ARMMMUIdx_E10_1:
136
+ useridx = ARMMMUIdx_E10_0;
137
+ break;
138
+ case ARMMMUIdx_E20_2:
139
+ useridx = ARMMMUIdx_E20_0;
140
+ break;
141
+ case ARMMMUIdx_SE10_1:
142
+ useridx = ARMMMUIdx_SE10_0;
143
+ break;
144
+ default:
145
+ g_assert_not_reached();
146
+ }
147
}
148
return arm_to_core_mmu_idx(useridx);
149
}
150
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
151
dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
152
dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
153
dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
154
+ dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
155
dc->vec_len = 0;
156
dc->vec_stride = 0;
157
dc->cp_regs = arm_cpu->cp_regs;
158
--
28
--
159
2.20.1
29
2.34.1
160
30
161
31
diff view generated by jsdifflib
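(Condensing the conditions the "Update get_a64_user_mem_index for VHE" patch above
encodes — the mapping is taken from its hunks, the summary wording is mine:)

    /* UNPRIV hflag: when do LDTR/STTR really use the "user" mmu_idx? */
    unpriv = (mmu_idx == ARMMMUIdx_E10_1) ||
             (mmu_idx == ARMMMUIdx_SE10_1) ||
             (mmu_idx == ARMMMUIdx_E20_2 && (hcr_el2 & HCR_TGE));

    /* get_a64_user_mem_index() then maps:
     *   E10_1 -> E10_0,  E20_2 -> E20_0,  SE10_1 -> SE10_0
     */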
From: Richard Henderson <richard.henderson@linaro.org>

This is part of a reorganization to the set of mmu_idx.
The EL3 regime only has a single stage translation, and
is always secure.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 4 ++--
 target/arm/internals.h | 2 +-
 target/arm/helper.c | 14 +++++++-------
 target/arm/translate.c | 2 +-
 4 files changed, 11 insertions(+), 11 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Previously we hard-coded the blocksize with GMID_EL1_BS.
But the value we choose for -cpu max does not match the
value that cortex-a710 uses.

Mirror the way we handle dcz_blocksize.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230811214031.171020-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 2 ++
 target/arm/internals.h | 6 -----
 target/arm/tcg/translate.h | 2 ++
 target/arm/helper.c | 11 +++++---
 target/arm/tcg/cpu64.c | 1 +
 target/arm/tcg/mte_helper.c | 46 ++++++++++++++++++++++------------
 target/arm/tcg/translate-a64.c | 5 ++--
 7 files changed, 45 insertions(+), 28 deletions(-)
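(For reference, the encoding that "Allow cpu to configure GM blocksize" parameterises —
the numbers are worked out from the 4 << BS rule and the 16-byte tag granule used in the
patch, so treat this as my arithmetic rather than text from the series:)

    BS = GMID_EL1.BS = log2(block size in 32-bit words)
    block bytes = 4 << BS;  tags per block = bytes / 16;  LDGM/STGM data bits = tags * 4

      BS = 4  ->  64 bytes  ->  4 tags  -> 16 bits   (Cortex-A710, matching its 64-byte cacheline)
      BS = 6  -> 256 bytes  -> 16 tags  -> 64 bits   (the value -cpu max keeps using)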
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
23
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
25
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
26
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
27
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
24
ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
28
25
ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
29
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
26
ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
30
uint8_t dcz_blocksize;
27
- ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
31
+ /* GM blocksize, in log_2(words), ie low 4 bits of GMID_EL0 */
28
+ ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
32
+ uint8_t gm_blocksize;
29
ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
33
30
ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
34
uint64_t rvbar_prop; /* Property/input signals. */
31
ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
35
32
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
33
ARMMMUIdxBit_E10_0 = 1 << 0,
34
ARMMMUIdxBit_E10_1 = 1 << 1,
35
ARMMMUIdxBit_S1E2 = 1 << 2,
36
- ARMMMUIdxBit_S1E3 = 1 << 3,
37
+ ARMMMUIdxBit_SE3 = 1 << 3,
38
ARMMMUIdxBit_SE10_0 = 1 << 4,
39
ARMMMUIdxBit_SE10_1 = 1 << 5,
40
ARMMMUIdxBit_Stage2 = 1 << 6,
41
diff --git a/target/arm/internals.h b/target/arm/internals.h
36
diff --git a/target/arm/internals.h b/target/arm/internals.h
42
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/internals.h
38
--- a/target/arm/internals.h
44
+++ b/target/arm/internals.h
39
+++ b/target/arm/internals.h
45
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
40
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs);
46
case ARMMMUIdx_MPriv:
41
47
case ARMMMUIdx_MUser:
42
#endif /* !CONFIG_USER_ONLY */
48
return false;
43
49
- case ARMMMUIdx_S1E3:
44
-/*
50
+ case ARMMMUIdx_SE3:
45
- * The log2 of the words in the tag block, for GMID_EL1.BS.
51
case ARMMMUIdx_SE10_0:
46
- * The is the maximum, 256 bytes, which manipulates 64-bits of tags.
52
case ARMMMUIdx_SE10_1:
47
- */
53
case ARMMMUIdx_MSPrivNegPri:
48
-#define GMID_EL1_BS 6
49
-
50
/*
51
* SVE predicates are 1/8 the size of SVE vectors, and cannot use
52
* the same simd_desc() encoding due to restrictions on size.
53
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/tcg/translate.h
56
+++ b/target/arm/tcg/translate.h
57
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
58
int8_t btype;
59
/* A copy of cpu->dcz_blocksize. */
60
uint8_t dcz_blocksize;
61
+ /* A copy of cpu->gm_blocksize. */
62
+ uint8_t gm_blocksize;
63
/* True if this page is guarded. */
64
bool guarded_page;
65
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
54
diff --git a/target/arm/helper.c b/target/arm/helper.c
66
diff --git a/target/arm/helper.c b/target/arm/helper.c
55
index XXXXXXX..XXXXXXX 100644
67
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/helper.c
68
--- a/target/arm/helper.c
57
+++ b/target/arm/helper.c
69
+++ b/target/arm/helper.c
58
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
70
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_reginfo[] = {
59
/* stage 1 current state PL1: ATS1CPR, ATS1CPW */
71
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
60
switch (el) {
72
.access = PL1_RW, .accessfn = access_mte,
61
case 3:
73
.fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
62
- mmu_idx = ARMMMUIdx_S1E3;
74
- { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
63
+ mmu_idx = ARMMMUIdx_SE3;
75
- .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
64
break;
76
- .access = PL1_R, .accessfn = access_aa64_tid5,
65
case 2:
77
- .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
66
mmu_idx = ARMMMUIdx_Stage1_E1;
78
{ .name = "TCO", .state = ARM_CP_STATE_AA64,
67
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
79
.opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
68
mmu_idx = ARMMMUIdx_S1E2;
80
.type = ARM_CP_NO_RAW,
69
break;
81
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
70
case 6: /* AT S1E3R, AT S1E3W */
82
* then define only a RAZ/WI version of PSTATE.TCO.
71
- mmu_idx = ARMMMUIdx_S1E3;
83
*/
72
+ mmu_idx = ARMMMUIdx_SE3;
84
if (cpu_isar_feature(aa64_mte, cpu)) {
73
break;
85
+ ARMCPRegInfo gmid_reginfo = {
74
default:
86
+ .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
75
g_assert_not_reached();
87
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
76
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
88
+ .access = PL1_R, .accessfn = access_aa64_tid5,
77
ARMCPU *cpu = env_archcpu(env);
89
+ .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
78
CPUState *cs = CPU(cpu);
90
+ };
79
91
+ define_one_arm_cp_reg(cpu, &gmid_reginfo);
80
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
92
define_arm_cp_regs(cpu, mte_reginfo);
81
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
93
define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
94
} else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
95
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/target/arm/tcg/cpu64.c
98
+++ b/target/arm/tcg/cpu64.c
99
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
100
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
101
cpu->dcz_blocksize = 7; /* 512 bytes */
102
#endif
103
+ cpu->gm_blocksize = 6; /* 256 bytes */
104
105
cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
106
cpu->sme_vq.supported = SVE_VQ_POW2_MAP;
107
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/target/arm/tcg/mte_helper.c
110
+++ b/target/arm/tcg/mte_helper.c
111
@@ -XXX,XX +XXX,XX @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
112
}
82
}
113
}
83
114
84
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
115
-#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)
85
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
116
-
117
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
86
{
118
{
87
CPUState *cs = env_cpu(env);
119
int mmu_idx = cpu_mmu_index(env, false);
88
120
uintptr_t ra = GETPC();
89
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
121
+ int gm_bs = env_archcpu(env)->gm_blocksize;
90
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
122
+ int gm_bs_bytes = 4 << gm_bs;
123
void *tag_mem;
124
125
- ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
126
+ ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
127
128
/* Trap if accessing an invalid page. */
129
tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
130
- LDGM_STGM_SIZE, MMU_DATA_LOAD,
131
- LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
132
+ gm_bs_bytes, MMU_DATA_LOAD,
133
+ gm_bs_bytes / (2 * TAG_GRANULE), ra);
134
135
/* The tag is squashed to zero if the page does not support tags. */
136
if (!tag_mem) {
137
return 0;
138
}
139
140
- QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
141
/*
142
- * We are loading 64-bits worth of tags. The ordering of elements
143
- * within the word corresponds to a 64-bit little-endian operation.
144
+ * The ordering of elements within the word corresponds to
145
+ * a little-endian operation.
146
*/
147
- return ldq_le_p(tag_mem);
148
+ switch (gm_bs) {
149
+ case 6:
150
+ /* 256 bytes -> 16 tags -> 64 result bits */
151
+ return ldq_le_p(tag_mem);
152
+ default:
153
+ /* cpu configured with unsupported gm blocksize. */
154
+ g_assert_not_reached();
155
+ }
91
}
156
}
92
157
93
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
158
void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
94
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
159
{
95
CPUState *cs = CPU(cpu);
160
int mmu_idx = cpu_mmu_index(env, false);
96
uint64_t pageaddr = sextract64(value << 12, 0, 56);
161
uintptr_t ra = GETPC();
97
162
+ int gm_bs = env_archcpu(env)->gm_blocksize;
98
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
163
+ int gm_bs_bytes = 4 << gm_bs;
99
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
164
void *tag_mem;
165
166
- ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
167
+ ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
168
169
/* Trap if accessing an invalid page. */
170
tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
171
- LDGM_STGM_SIZE, MMU_DATA_LOAD,
172
- LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
173
+ gm_bs_bytes, MMU_DATA_LOAD,
174
+ gm_bs_bytes / (2 * TAG_GRANULE), ra);
175
176
/*
177
* Tag store only happens if the page support tags,
178
@@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
179
return;
180
}
181
182
- QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
183
/*
184
- * We are storing 64-bits worth of tags. The ordering of elements
185
- * within the word corresponds to a 64-bit little-endian operation.
186
+ * The ordering of elements within the word corresponds to
187
+ * a little-endian operation.
188
*/
189
- stq_le_p(tag_mem, val);
190
+ switch (gm_bs) {
191
+ case 6:
192
+ stq_le_p(tag_mem, val);
193
+ break;
194
+ default:
195
+ /* cpu configured with unsupported gm blocksize. */
196
+ g_assert_not_reached();
197
+ }
100
}
198
}
101
199
102
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
200
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
103
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
201
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
104
uint64_t pageaddr = sextract64(value << 12, 0, 56);
202
index XXXXXXX..XXXXXXX 100644
105
203
--- a/target/arm/tcg/translate-a64.c
106
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
204
+++ b/target/arm/tcg/translate-a64.c
107
- ARMMMUIdxBit_S1E3);
205
@@ -XXX,XX +XXX,XX @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
108
+ ARMMMUIdxBit_SE3);
206
gen_helper_stgm(cpu_env, addr, tcg_rt);
109
}
207
} else {
110
208
MMUAccessType acc = MMU_DATA_STORE;
111
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
209
- int size = 4 << GMID_EL1_BS;
112
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
210
+ int size = 4 << s->gm_blocksize;
113
case ARMMMUIdx_Stage2:
211
114
case ARMMMUIdx_S1E2:
212
clean_addr = clean_data_tbi(s, addr);
115
return 2;
213
tcg_gen_andi_i64(clean_addr, clean_addr, -size);
116
- case ARMMMUIdx_S1E3:
214
@@ -XXX,XX +XXX,XX @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
117
+ case ARMMMUIdx_SE3:
215
gen_helper_ldgm(tcg_rt, cpu_env, addr);
118
return 3;
216
} else {
119
case ARMMMUIdx_SE10_0:
217
MMUAccessType acc = MMU_DATA_LOAD;
120
return arm_el_is_aa64(env, 3) ? 1 : 3;
218
- int size = 4 << GMID_EL1_BS;
121
diff --git a/target/arm/translate.c b/target/arm/translate.c
219
+ int size = 4 << s->gm_blocksize;
122
index XXXXXXX..XXXXXXX 100644
220
123
--- a/target/arm/translate.c
221
clean_addr = clean_data_tbi(s, addr);
124
+++ b/target/arm/translate.c
222
tcg_gen_andi_i64(clean_addr, clean_addr, -size);
125
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
223
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
126
case ARMMMUIdx_E10_0:
224
dc->cp_regs = arm_cpu->cp_regs;
127
case ARMMMUIdx_E10_1:
225
dc->features = env->features;
128
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
226
dc->dcz_blocksize = arm_cpu->dcz_blocksize;
129
- case ARMMMUIdx_S1E3:
227
+ dc->gm_blocksize = arm_cpu->gm_blocksize;
130
+ case ARMMMUIdx_SE3:
228
131
case ARMMMUIdx_SE10_0:
229
#ifdef CONFIG_USER_ONLY
132
case ARMMMUIdx_SE10_1:
230
/* In sve_probe_page, we assume TBI is enabled. */
133
return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
134
--
231
--
135
2.20.1
232
2.34.1
136
137
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Avoid redundant computation of cpu state by passing it in
3
Support all of the easy GM block sizes.
4
from the caller, which has already computed it for itself.
4
Use direct memory operations, since the pointers are aligned.
5
5
6
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
While BS=2 (16 bytes, 1 tag) is a legal setting, that requires
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
an atomic store of one nibble. This is not difficult, but there
8
is also no point in supporting it until required.
9
10
Note that cortex-a710 sets GM blocksize to match its cacheline
11
size of 64 bytes. I expect many implementations will also
12
match the cacheline, which makes 16 bytes very unlikely.
13
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200206105448.4726-40-richard.henderson@linaro.org
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Message-id: 20230811214031.171020-4-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
18
---
12
target/arm/cpu.c | 22 ++++++++++++----------
19
target/arm/cpu.c | 18 +++++++++---
13
1 file changed, 12 insertions(+), 10 deletions(-)
20
target/arm/tcg/mte_helper.c | 56 +++++++++++++++++++++++++++++++------
21
2 files changed, 62 insertions(+), 12 deletions(-)
14
22
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
23
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.c
25
--- a/target/arm/cpu.c
18
+++ b/target/arm/cpu.c
26
+++ b/target/arm/cpu.c
19
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
27
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
28
ID_PFR1, VIRTUALIZATION, 0);
29
}
30
31
+ if (cpu_isar_feature(aa64_mte, cpu)) {
32
+ /*
33
+ * The architectural range of GM blocksize is 2-6, however qemu
34
+ * doesn't support blocksize of 2 (see HELPER(ldgm)).
35
+ */
36
+ if (tcg_enabled()) {
37
+ assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6);
38
+ }
39
+
40
#ifndef CONFIG_USER_ONLY
41
- if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
42
/*
43
* Disable the MTE feature bits if we do not have tag-memory
44
* provided by the machine.
45
*/
46
- cpu->isar.id_aa64pfr1 =
47
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
48
- }
49
+ if (cpu->tag_memory == NULL) {
50
+ cpu->isar.id_aa64pfr1 =
51
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
52
+ }
53
#endif
54
+ }
55
56
if (tcg_enabled()) {
57
/*
58
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/tcg/mte_helper.c
61
+++ b/target/arm/tcg/mte_helper.c
62
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
63
int gm_bs = env_archcpu(env)->gm_blocksize;
64
int gm_bs_bytes = 4 << gm_bs;
65
void *tag_mem;
66
+ uint64_t ret;
67
+ int shift;
68
69
ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
70
71
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
72
73
/*
74
* The ordering of elements within the word corresponds to
75
- * a little-endian operation.
76
+ * a little-endian operation. Computation of shift comes from
77
+ *
78
+ * index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
79
+ * data<index*4+3:index*4> = tag
80
+ *
81
+ * Because of the alignment of ptr above, BS=6 has shift=0.
82
+ * All memory operations are aligned. Defer support for BS=2,
83
+ * requiring insertion or extraction of a nibble, until we
84
+ * support a cpu that requires it.
85
*/
86
switch (gm_bs) {
87
+ case 3:
88
+ /* 32 bytes -> 2 tags -> 8 result bits */
89
+ ret = *(uint8_t *)tag_mem;
90
+ break;
91
+ case 4:
92
+ /* 64 bytes -> 4 tags -> 16 result bits */
93
+ ret = cpu_to_le16(*(uint16_t *)tag_mem);
94
+ break;
95
+ case 5:
96
+ /* 128 bytes -> 8 tags -> 32 result bits */
97
+ ret = cpu_to_le32(*(uint32_t *)tag_mem);
98
+ break;
99
case 6:
100
/* 256 bytes -> 16 tags -> 64 result bits */
101
- return ldq_le_p(tag_mem);
102
+ return cpu_to_le64(*(uint64_t *)tag_mem);
103
default:
104
- /* cpu configured with unsupported gm blocksize. */
105
+ /*
106
+ * CPU configured with unsupported/invalid gm blocksize.
107
+ * This is detected early in arm_cpu_realizefn.
108
+ */
109
g_assert_not_reached();
110
}
111
+ shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
112
+ return ret << shift;
20
}
113
}
21
114
22
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
115
void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
23
- unsigned int target_el)
116
@@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
24
+ unsigned int target_el,
117
int gm_bs = env_archcpu(env)->gm_blocksize;
25
+ unsigned int cur_el, bool secure,
118
int gm_bs_bytes = 4 << gm_bs;
26
+ uint64_t hcr_el2)
119
void *tag_mem;
27
{
120
+ int shift;
28
CPUARMState *env = cs->env_ptr;
121
29
- unsigned int cur_el = arm_current_el(env);
122
ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
30
- bool secure = arm_is_secure(env);
123
31
bool pstate_unmasked;
124
@@ -XXX,XX +XXX,XX @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
32
int8_t unmasked = 0;
125
return;
33
- uint64_t hcr_el2;
34
35
/*
36
* Don't take exceptions if they target a lower EL.
37
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
38
return false;
39
}
126
}
40
127
41
- hcr_el2 = arm_hcr_el2_eff(env);
128
- /*
42
-
129
- * The ordering of elements within the word corresponds to
43
switch (excp_idx) {
130
- * a little-endian operation.
44
case EXCP_FIQ:
131
- */
45
pstate_unmasked = !(env->daif & PSTATE_F);
132
+ /* See LDGM for comments on BS and on shift. */
46
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
133
+ shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
47
CPUARMState *env = cs->env_ptr;
134
+ val >>= shift;
48
uint32_t cur_el = arm_current_el(env);
135
switch (gm_bs) {
49
bool secure = arm_is_secure(env);
136
+ case 3:
50
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
137
+ /* 32 bytes -> 2 tags -> 8 result bits */
51
uint32_t target_el;
138
+ *(uint8_t *)tag_mem = val;
52
uint32_t excp_idx;
139
+ break;
53
bool ret = false;
140
+ case 4:
54
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
141
+ /* 64 bytes -> 4 tags -> 16 result bits */
55
if (interrupt_request & CPU_INTERRUPT_FIQ) {
142
+ *(uint16_t *)tag_mem = cpu_to_le16(val);
56
excp_idx = EXCP_FIQ;
143
+ break;
57
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
144
+ case 5:
58
- if (arm_excp_unmasked(cs, excp_idx, target_el)) {
145
+ /* 128 bytes -> 8 tags -> 32 result bits */
59
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
146
+ *(uint32_t *)tag_mem = cpu_to_le32(val);
60
+ cur_el, secure, hcr_el2)) {
147
+ break;
61
cs->exception_index = excp_idx;
148
case 6:
62
env->exception.target_el = target_el;
149
- stq_le_p(tag_mem, val);
63
cc->do_interrupt(cs);
150
+ /* 256 bytes -> 16 tags -> 64 result bits */
64
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
151
+ *(uint64_t *)tag_mem = cpu_to_le64(val);
65
if (interrupt_request & CPU_INTERRUPT_HARD) {
152
break;
66
excp_idx = EXCP_IRQ;
153
default:
67
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
154
/* cpu configured with unsupported gm blocksize. */
68
- if (arm_excp_unmasked(cs, excp_idx, target_el)) {
69
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
70
+ cur_el, secure, hcr_el2)) {
71
cs->exception_index = excp_idx;
72
env->exception.target_el = target_el;
73
cc->do_interrupt(cs);
74
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
75
if (interrupt_request & CPU_INTERRUPT_VIRQ) {
76
excp_idx = EXCP_VIRQ;
77
target_el = 1;
78
- if (arm_excp_unmasked(cs, excp_idx, target_el)) {
79
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
80
+ cur_el, secure, hcr_el2)) {
81
cs->exception_index = excp_idx;
82
env->exception.target_el = target_el;
83
cc->do_interrupt(cs);
84
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
85
if (interrupt_request & CPU_INTERRUPT_VFIQ) {
86
excp_idx = EXCP_VFIQ;
87
target_el = 1;
88
- if (arm_excp_unmasked(cs, excp_idx, target_el)) {
89
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
90
+ cur_el, secure, hcr_el2)) {
91
cs->exception_index = excp_idx;
92
env->exception.target_el = target_el;
93
cc->do_interrupt(cs);
94
--
155
--
95
2.20.1
156
2.34.1
96
97
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The value computed is fully boolean; using int8_t is odd.
3
When the cpu support MTE, but the system does not, reduce cpu
4
support to user instructions at EL0 instead of completely
5
disabling MTE. If we encounter a cpu implementation which does
6
something else, we can revisit this setting.
4
7
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200206105448.4726-41-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Message-id: 20230811214031.171020-5-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
target/arm/cpu.c | 6 +++---
13
target/arm/cpu.c | 7 ++++---
12
1 file changed, 3 insertions(+), 3 deletions(-)
14
1 file changed, 4 insertions(+), 3 deletions(-)
13
15
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.c
18
--- a/target/arm/cpu.c
17
+++ b/target/arm/cpu.c
19
+++ b/target/arm/cpu.c
18
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
20
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
19
{
21
20
CPUARMState *env = cs->env_ptr;
22
#ifndef CONFIG_USER_ONLY
21
bool pstate_unmasked;
23
/*
22
- int8_t unmasked = 0;
24
- * Disable the MTE feature bits if we do not have tag-memory
23
+ bool unmasked = false;
25
- * provided by the machine.
24
26
+ * If we do not have tag-memory provided by the machine,
25
/*
27
+ * reduce MTE support to instructions enabled at EL0.
26
* Don't take exceptions if they target a lower EL.
28
+ * This matches Cortex-A710 BROADCASTMTE input being LOW.
27
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
29
*/
28
* don't affect the masking logic, only the interrupt routing.
30
if (cpu->tag_memory == NULL) {
29
*/
31
cpu->isar.id_aa64pfr1 =
30
if (target_el == 3 || !secure) {
32
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
31
- unmasked = 1;
33
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
32
+ unmasked = true;
33
}
34
} else {
35
/*
36
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
37
}
38
39
if ((scr || hcr) && !secure) {
40
- unmasked = 1;
41
+ unmasked = true;
42
}
43
}
34
}
35
#endif
44
}
36
}
45
--
37
--
46
2.20.1
38
2.34.1
47
48
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Several of the EL1/0 registers are redirected to the EL2 version when in
3
Do not hard-code the constants for Neoverse V1.
4
EL2 and HCR_EL2.E2H is set. Many of these registers have side effects.
5
Link together the two ARMCPRegInfo structures after they have been
6
properly instantiated. Install common dispatch routines to all of the
7
relevant registers.
8
4
9
The same set of registers that are redirected also have additional
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
EL12/EL02 aliases created to access the original register that was
11
redirected.
12
13
Omit the generic timer registers from redirection here, because we'll
14
need multiple kinds of redirection from both EL0 and EL2.
15
16
Tested-by: Alex Bennée <alex.bennee@linaro.org>
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20230811214031.171020-6-richard.henderson@linaro.org
19
Message-id: 20200206105448.4726-29-richard.henderson@linaro.org
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
9
---
22
target/arm/cpu.h | 13 ++++
10
target/arm/tcg/cpu64.c | 48 ++++++++++++++++++++++++++++--------------
23
target/arm/helper.c | 162 ++++++++++++++++++++++++++++++++++++++++++++
11
1 file changed, 32 insertions(+), 16 deletions(-)
24
2 files changed, 175 insertions(+)
25
12
26
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
13
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
27
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/cpu.h
15
--- a/target/arm/tcg/cpu64.c
29
+++ b/target/arm/cpu.h
16
+++ b/target/arm/tcg/cpu64.c
30
@@ -XXX,XX +XXX,XX @@ struct ARMCPRegInfo {
17
@@ -XXX,XX +XXX,XX @@
31
* fieldoffset is 0 then no reset will be done.
18
#include "qemu/module.h"
32
*/
19
#include "qapi/visitor.h"
33
CPResetFn *resetfn;
20
#include "hw/qdev-properties.h"
21
+#include "qemu/units.h"
22
#include "internals.h"
23
#include "cpregs.h"
24
25
+static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize,
26
+ unsigned cachesize)
27
+{
28
+ unsigned lg_linesize = ctz32(linesize);
29
+ unsigned sets;
34
+
30
+
35
+ /*
31
+ /*
36
+ * "Original" writefn and readfn.
32
+ * The 64-bit CCSIDR_EL1 format is:
37
+ * For ARMv8.1-VHE register aliases, we overwrite the read/write
33
+ * [55:32] number of sets - 1
38
+ * accessor functions of various EL1/EL0 to perform the runtime
34
+ * [23:3] associativity - 1
39
+ * check for which sysreg should actually be modified, and then
35
+ * [2:0] log2(linesize) - 4
40
+ * forwards the operation. Before overwriting the accessors,
36
+ * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
41
+ * the original function is copied here, so that accesses that
42
+ * really do go to the EL1/EL0 version proceed normally.
43
+ * (The corresponding EL2 register is linked via opaque.)
44
+ */
37
+ */
45
+ CPReadFn *orig_readfn;
38
+ assert(assoc != 0);
46
+ CPWriteFn *orig_writefn;
39
+ assert(is_power_of_2(linesize));
47
};
40
+ assert(lg_linesize >= 4 && lg_linesize <= 7 + 4);
48
41
+
49
/* Macros which are lvalues for the field in CPUARMState for the
42
+ /* sets * associativity * linesize == cachesize. */
50
diff --git a/target/arm/helper.c b/target/arm/helper.c
43
+ sets = cachesize / (assoc * linesize);
51
index XXXXXXX..XXXXXXX 100644
44
+ assert(cachesize % (assoc * linesize) == 0);
52
--- a/target/arm/helper.c
45
+
53
+++ b/target/arm/helper.c
46
+ return ((uint64_t)(sets - 1) << 32)
54
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
47
+ | ((assoc - 1) << 3)
55
REGINFO_SENTINEL
48
+ | (lg_linesize - 4);
56
};
57
58
+#ifndef CONFIG_USER_ONLY
59
+/* Test if system register redirection is to occur in the current state. */
60
+static bool redirect_for_e2h(CPUARMState *env)
61
+{
62
+ return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
63
+}
49
+}
64
+
50
+
65
+static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
51
static void aarch64_a35_initfn(Object *obj)
66
+{
67
+ CPReadFn *readfn;
68
+
69
+ if (redirect_for_e2h(env)) {
70
+ /* Switch to the saved EL2 version of the register. */
71
+ ri = ri->opaque;
72
+ readfn = ri->readfn;
73
+ } else {
74
+ readfn = ri->orig_readfn;
75
+ }
76
+ if (readfn == NULL) {
77
+ readfn = raw_read;
78
+ }
79
+ return readfn(env, ri);
80
+}
81
+
82
+static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
83
+ uint64_t value)
84
+{
85
+ CPWriteFn *writefn;
86
+
87
+ if (redirect_for_e2h(env)) {
88
+ /* Switch to the saved EL2 version of the register. */
89
+ ri = ri->opaque;
90
+ writefn = ri->writefn;
91
+ } else {
92
+ writefn = ri->orig_writefn;
93
+ }
94
+ if (writefn == NULL) {
95
+ writefn = raw_write;
96
+ }
97
+ writefn(env, ri, value);
98
+}
99
+
100
+static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
101
+{
102
+ struct E2HAlias {
103
+ uint32_t src_key, dst_key, new_key;
104
+ const char *src_name, *dst_name, *new_name;
105
+ bool (*feature)(const ARMISARegisters *id);
106
+ };
107
+
108
+#define K(op0, op1, crn, crm, op2) \
109
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
110
+
111
+ static const struct E2HAlias aliases[] = {
112
+ { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
113
+ "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
114
+ { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
115
+ "CPACR", "CPTR_EL2", "CPACR_EL12" },
116
+ { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
117
+ "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
118
+ { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
119
+ "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
120
+ { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
121
+ "TCR_EL1", "TCR_EL2", "TCR_EL12" },
122
+ { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
123
+ "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
124
+ { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
125
+ "ELR_EL1", "ELR_EL2", "ELR_EL12" },
126
+ { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
127
+ "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
128
+ { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
129
+ "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
130
+ { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
131
+ "ESR_EL1", "ESR_EL2", "ESR_EL12" },
132
+ { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
133
+ "FAR_EL1", "FAR_EL2", "FAR_EL12" },
134
+ { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
135
+ "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
136
+ { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
137
+ "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
138
+ { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
139
+ "VBAR", "VBAR_EL2", "VBAR_EL12" },
140
+ { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
141
+ "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
142
+ { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
143
+ "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
144
+
145
+ /*
146
+ * Note that redirection of ZCR is mentioned in the description
147
+ * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
148
+ * not in the summary table.
149
+ */
150
+ { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
151
+ "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
152
+
153
+ /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
154
+ /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
155
+ };
156
+#undef K
157
+
158
+ size_t i;
159
+
160
+ for (i = 0; i < ARRAY_SIZE(aliases); i++) {
161
+ const struct E2HAlias *a = &aliases[i];
162
+ ARMCPRegInfo *src_reg, *dst_reg;
163
+
164
+ if (a->feature && !a->feature(&cpu->isar)) {
165
+ continue;
166
+ }
167
+
168
+ src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
169
+ dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
170
+ g_assert(src_reg != NULL);
171
+ g_assert(dst_reg != NULL);
172
+
173
+ /* Cross-compare names to detect typos in the keys. */
174
+ g_assert(strcmp(src_reg->name, a->src_name) == 0);
175
+ g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
176
+
177
+ /* None of the core system registers use opaque; we will. */
178
+ g_assert(src_reg->opaque == NULL);
179
+
180
+ /* Create alias before redirection so we dup the right data. */
181
+ if (a->new_key) {
182
+ ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
183
+ uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
184
+ bool ok;
185
+
186
+ new_reg->name = a->new_name;
187
+ new_reg->type |= ARM_CP_ALIAS;
188
+ /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
189
+ new_reg->access &= PL2_RW | PL3_RW;
190
+
191
+ ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
192
+ g_assert(ok);
193
+ }
194
+
195
+ src_reg->opaque = dst_reg;
196
+ src_reg->orig_readfn = src_reg->readfn ?: raw_read;
197
+ src_reg->orig_writefn = src_reg->writefn ?: raw_write;
198
+ if (!src_reg->raw_readfn) {
199
+ src_reg->raw_readfn = raw_read;
200
+ }
201
+ if (!src_reg->raw_writefn) {
202
+ src_reg->raw_writefn = raw_write;
203
+ }
204
+ src_reg->readfn = el2_e2h_read;
205
+ src_reg->writefn = el2_e2h_write;
206
+ }
207
+}
208
+#endif
209
+
210
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
211
bool isread)
212
{
52
{
213
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
53
ARMCPU *cpu = ARM_CPU(obj);
214
: cpu_isar_feature(aa32_predinv, cpu)) {
54
@@ -XXX,XX +XXX,XX @@ static void aarch64_neoverse_v1_initfn(Object *obj)
215
define_arm_cp_regs(cpu, predinv_reginfo);
55
* The Neoverse-V1 r1p2 TRM lists 32-bit format CCSIDR_EL1 values,
216
}
56
* but also says it implements CCIDX, which means they should be
217
+
57
* 64-bit format. So we here use values which are based on the textual
218
+#ifndef CONFIG_USER_ONLY
58
- * information in chapter 2 of the TRM (and on the fact that
219
+ /*
59
- * sets * associativity * linesize == cachesize).
220
+ * Register redirections and aliases must be done last,
60
- *
221
+ * after the registers from the other extensions have been defined.
61
- * The 64-bit CCSIDR_EL1 format is:
222
+ */
62
- * [55:32] number of sets - 1
223
+ if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
63
- * [23:3] associativity - 1
224
+ define_arm_vh_e2h_redirects_aliases(cpu);
64
- * [2:0] log2(linesize) - 4
225
+ }
65
- * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
226
+#endif
66
- *
227
}
67
- * L1: 4-way set associative 64-byte line size, total size 64K,
228
68
- * so sets is 256.
229
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
69
+ * information in chapter 2 of the TRM:
70
*
71
+ * L1: 4-way set associative 64-byte line size, total size 64K.
72
* L2: 8-way set associative, 64 byte line size, either 512K or 1MB.
73
- * We pick 1MB, so this has 2048 sets.
74
- *
75
* L3: No L3 (this matches the CLIDR_EL1 value).
76
*/
77
- cpu->ccsidr[0] = 0x000000ff0000001aull; /* 64KB L1 dcache */
78
- cpu->ccsidr[1] = 0x000000ff0000001aull; /* 64KB L1 icache */
79
- cpu->ccsidr[2] = 0x000007ff0000003aull; /* 1MB L2 cache */
80
+ cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
81
+ cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
82
+ cpu->ccsidr[2] = make_ccsidr64(8, 64, 1 * MiB); /* L2 cache */
83
84
/* From 3.2.115 SCTLR_EL3 */
85
cpu->reset_sctlr = 0x30c50838;
230
--
86
--
231
2.20.1
87
2.34.1
232
233
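As a quick cross-check of the make_ccsidr64() values in the Neoverse-V1 patch above: the removed comment gives the 64-bit CCSIDR_EL1 layout ([55:32] sets - 1, [23:3] associativity - 1, [2:0] log2(linesize) - 4), and the small standalone sketch below (my own illustration, not the actual QEMU helper implementation) reproduces the old hard-coded constants from the cache geometry:

    #include <stdint.h>

    /*
     * Sketch only: rebuild a 64-bit CCSIDR_EL1 value from the cache geometry,
     * following the field layout quoted in the removed comment.
     *   ccsidr64_sketch(4, 64, 64 * 1024)   == 0x000000ff0000001aULL  (L1)
     *   ccsidr64_sketch(8, 64, 1024 * 1024) == 0x000007ff0000003aULL  (L2)
     * which matches the constants the patch replaces.
     */
    static uint64_t ccsidr64_sketch(unsigned assoc, unsigned linesize,
                                    uint64_t cachesize)
    {
        uint64_t sets = cachesize / (assoc * linesize);
        unsigned log2_line = 0;

        while ((1u << log2_line) < linesize) {
            log2_line++;
        }
        return ((sets - 1) << 32) | ((uint64_t)(assoc - 1) << 3)
               | (log2_line - 4);
    }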
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
No functional change, but unify code sequences.
3
Access to many of the special registers is enabled or disabled
4
by ACTLR_EL[23], which we implement as constant 0; this means
5
that all writes outside EL3 should trap.
4
6
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200206105448.4726-8-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20230811214031.171020-7-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
target/arm/helper.c | 86 +++++++++++++--------------------------------
12
target/arm/cpregs.h | 2 ++
13
1 file changed, 24 insertions(+), 62 deletions(-)
13
target/arm/helper.c | 4 ++--
14
target/arm/tcg/cpu64.c | 46 +++++++++++++++++++++++++++++++++---------
15
3 files changed, 41 insertions(+), 11 deletions(-)
14
16
17
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpregs.h
20
+++ b/target/arm/cpregs.h
21
@@ -XXX,XX +XXX,XX @@ static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
22
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
23
#endif
24
25
+CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool);
26
+
27
#endif /* TARGET_ARM_CPREGS_H */
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
28
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
30
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
31
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
32
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
20
tlb_flush_by_mmuidx(cs, mask);
21
}
33
}
22
34
23
-static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
35
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
24
- uint64_t value)
36
-static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
25
+static int alle1_tlbmask(CPUARMState *env)
37
- bool isread)
38
+CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
39
+ bool isread)
26
{
40
{
27
- /* Note that the 'ALL' scope must invalidate both stage 1 and
41
if (arm_current_el(env) == 1) {
28
+ /*
42
uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
29
+ * Note that the 'ALL' scope must invalidate both stage 1 and
43
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
30
* stage 2 translations, whereas most other scopes only invalidate
44
index XXXXXXX..XXXXXXX 100644
31
* stage 1 translations.
45
--- a/target/arm/tcg/cpu64.c
32
*/
46
+++ b/target/arm/tcg/cpu64.c
33
- ARMCPU *cpu = env_archcpu(env);
47
@@ -XXX,XX +XXX,XX @@ static void aarch64_a64fx_initfn(Object *obj)
34
- CPUState *cs = CPU(cpu);
48
/* TODO: Add A64FX specific HPC extension registers */
35
-
36
if (arm_is_secure_below_el3(env)) {
37
- tlb_flush_by_mmuidx(cs,
38
- ARMMMUIdxBit_S1SE1 |
39
- ARMMMUIdxBit_S1SE0);
40
+ return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
41
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
42
+ return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0 | ARMMMUIdxBit_S2NS;
43
} else {
44
- if (arm_feature(env, ARM_FEATURE_EL2)) {
45
- tlb_flush_by_mmuidx(cs,
46
- ARMMMUIdxBit_S12NSE1 |
47
- ARMMMUIdxBit_S12NSE0 |
48
- ARMMMUIdxBit_S2NS);
49
- } else {
50
- tlb_flush_by_mmuidx(cs,
51
- ARMMMUIdxBit_S12NSE1 |
52
- ARMMMUIdxBit_S12NSE0);
53
- }
54
+ return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
55
}
56
}
49
}
57
50
58
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
51
+static CPAccessResult access_actlr_w(CPUARMState *env, const ARMCPRegInfo *r,
59
+ uint64_t value)
52
+ bool read)
60
+{
53
+{
61
+ CPUState *cs = env_cpu(env);
54
+ if (!read) {
62
+ int mask = alle1_tlbmask(env);
55
+ int el = arm_current_el(env);
63
+
56
+
64
+ tlb_flush_by_mmuidx(cs, mask);
57
+ /* Because ACTLR_EL2 is constant 0, writes below EL2 trap to EL2. */
58
+ if (el < 2 && arm_is_el2_enabled(env)) {
59
+ return CP_ACCESS_TRAP_EL2;
60
+ }
61
+ /* Because ACTLR_EL3 is constant 0, writes below EL3 trap to EL3. */
62
+ if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
63
+ return CP_ACCESS_TRAP_EL3;
64
+ }
65
+ }
66
+ return CP_ACCESS_OK;
65
+}
67
+}
66
+
68
+
67
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
69
static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
68
uint64_t value)
70
{ .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
69
{
71
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
70
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
72
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
71
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
73
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
72
uint64_t value)
74
+ /* Traps and enables are the same as for TCR_EL1. */
73
{
75
+ .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, },
74
- /* Note that the 'ALL' scope must invalidate both stage 1 and
76
{ .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
75
- * stage 2 translations, whereas most other scopes only invalidate
77
.opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
76
- * stage 1 translations.
78
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
77
- */
79
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
78
CPUState *cs = env_cpu(env);
80
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
79
- bool sec = arm_is_secure_below_el3(env);
81
{ .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
80
- bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
82
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
81
+ int mask = alle1_tlbmask(env);
83
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
82
84
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
83
- if (sec) {
85
+ .accessfn = access_actlr_w },
84
- tlb_flush_by_mmuidx_all_cpus_synced(cs,
86
{ .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
85
- ARMMMUIdxBit_S1SE1 |
87
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
86
- ARMMMUIdxBit_S1SE0);
88
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
87
- } else if (has_el2) {
89
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
88
- tlb_flush_by_mmuidx_all_cpus_synced(cs,
90
+ .accessfn = access_actlr_w },
89
- ARMMMUIdxBit_S12NSE1 |
91
{ .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
90
- ARMMMUIdxBit_S12NSE0 |
92
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
91
- ARMMMUIdxBit_S2NS);
93
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
92
- } else {
94
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
93
- tlb_flush_by_mmuidx_all_cpus_synced(cs,
95
+ .accessfn = access_actlr_w },
94
- ARMMMUIdxBit_S12NSE1 |
96
/*
95
- ARMMMUIdxBit_S12NSE0);
97
* Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
96
- }
98
* (and in particular its system registers).
97
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
99
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
98
}
100
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
99
101
{ .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
100
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
102
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
101
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
103
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 },
102
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
104
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010,
103
uint64_t value)
105
+ .accessfn = access_actlr_w },
104
{
106
{ .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
105
- ARMCPU *cpu = env_archcpu(env);
107
.opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
106
- CPUState *cs = CPU(cpu);
108
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
107
- bool sec = arm_is_secure_below_el3(env);
109
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
108
+ CPUState *cs = env_cpu(env);
110
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
109
+ int mask = vae1_tlbmask(env);
111
{ .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
110
uint64_t pageaddr = sextract64(value << 12, 0, 56);
112
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
111
113
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
112
- if (sec) {
114
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
113
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
115
+ .accessfn = access_actlr_w },
114
- ARMMMUIdxBit_S1SE1 |
116
{ .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64,
115
- ARMMMUIdxBit_S1SE0);
117
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2,
116
- } else {
118
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
117
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
119
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
118
- ARMMMUIdxBit_S12NSE1 |
120
+ .accessfn = access_actlr_w },
119
- ARMMMUIdxBit_S12NSE0);
121
{ .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64,
120
- }
122
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1,
121
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
123
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
122
}
124
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
123
125
+ .accessfn = access_actlr_w },
124
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
126
{ .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64,
125
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
127
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
126
* since we don't support flush-for-specific-ASID-only or
128
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
127
* flush-last-level-only.
129
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
128
*/
130
+ .accessfn = access_actlr_w },
129
- ARMCPU *cpu = env_archcpu(env);
131
};
130
- CPUState *cs = CPU(cpu);
132
131
+ CPUState *cs = env_cpu(env);
133
static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
132
+ int mask = vae1_tlbmask(env);
133
uint64_t pageaddr = sextract64(value << 12, 0, 56);
134
135
if (tlb_force_broadcast(env)) {
136
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
137
return;
138
}
139
140
- if (arm_is_secure_below_el3(env)) {
141
- tlb_flush_page_by_mmuidx(cs, pageaddr,
142
- ARMMMUIdxBit_S1SE1 |
143
- ARMMMUIdxBit_S1SE0);
144
- } else {
145
- tlb_flush_page_by_mmuidx(cs, pageaddr,
146
- ARMMMUIdxBit_S12NSE1 |
147
- ARMMMUIdxBit_S12NSE0);
148
- }
149
+ tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
150
}
151
152
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
153
--
134
--
154
2.20.1
135
2.34.1
155
156
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
There is only one additional EL1 register modeled, which
4
also needs to use access_actlr_w.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20230811214031.171020-8-richard.henderson@linaro.org
6
Message-id: 20200206105448.4726-38-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
10
---
9
target/arm/cpu64.c | 1 +
11
target/arm/tcg/cpu64.c | 3 ++-
10
1 file changed, 1 insertion(+)
12
1 file changed, 2 insertions(+), 1 deletion(-)
11
13
12
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
14
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/cpu64.c
16
--- a/target/arm/tcg/cpu64.c
15
+++ b/target/arm/cpu64.c
17
+++ b/target/arm/tcg/cpu64.c
16
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
18
@@ -XXX,XX +XXX,XX @@ static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
17
t = cpu->isar.id_aa64mmfr1;
19
static const ARMCPRegInfo neoverse_v1_cp_reginfo[] = {
18
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
20
{ .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
19
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
21
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
20
+ t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
22
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
21
cpu->isar.id_aa64mmfr1 = t;
23
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
22
24
+ .accessfn = access_actlr_w },
23
/* Replicate the same data to the 32-bit id registers. */
25
{ .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
26
.opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
27
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
24
--
28
--
25
2.20.1
29
2.34.1
26
27
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The fall-through organization of this function meant that we
3
Like FEAT_TRF (Self-hosted Trace Extension), suppress tracing
4
would raise an interrupt, then might overwrite that with another.
4
external to the cpu, which is out of scope for QEMU.
5
Since interrupt prioritization is IMPLEMENTATION DEFINED, we
6
can recognize these in any order we choose.
7
5
8
Unify the code to raise the interrupt in a block at the end.
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
10
Tested-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20230811214031.171020-10-richard.henderson@linaro.org
13
Message-id: 20200206105448.4726-42-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
10
---
16
target/arm/cpu.c | 30 ++++++++++++------------------
11
target/arm/cpu.c | 3 +++
17
1 file changed, 12 insertions(+), 18 deletions(-)
12
1 file changed, 3 insertions(+)
18
13
19
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.c
16
--- a/target/arm/cpu.c
22
+++ b/target/arm/cpu.c
17
+++ b/target/arm/cpu.c
23
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
18
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
24
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
19
/* FEAT_SPE (Statistical Profiling Extension) */
25
uint32_t target_el;
20
cpu->isar.id_aa64dfr0 =
26
uint32_t excp_idx;
21
FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
27
- bool ret = false;
22
+ /* FEAT_TRBE (Trace Buffer Extension) */
28
+
23
+ cpu->isar.id_aa64dfr0 =
29
+ /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
24
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
30
25
/* FEAT_TRF (Self-hosted Trace Extension) */
31
if (interrupt_request & CPU_INTERRUPT_FIQ) {
26
cpu->isar.id_aa64dfr0 =
32
excp_idx = EXCP_FIQ;
27
FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
33
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
34
if (arm_excp_unmasked(cs, excp_idx, target_el,
35
cur_el, secure, hcr_el2)) {
36
- cs->exception_index = excp_idx;
37
- env->exception.target_el = target_el;
38
- cc->do_interrupt(cs);
39
- ret = true;
40
+ goto found;
41
}
42
}
43
if (interrupt_request & CPU_INTERRUPT_HARD) {
44
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
45
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
46
if (arm_excp_unmasked(cs, excp_idx, target_el,
47
cur_el, secure, hcr_el2)) {
48
- cs->exception_index = excp_idx;
49
- env->exception.target_el = target_el;
50
- cc->do_interrupt(cs);
51
- ret = true;
52
+ goto found;
53
}
54
}
55
if (interrupt_request & CPU_INTERRUPT_VIRQ) {
56
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
57
target_el = 1;
58
if (arm_excp_unmasked(cs, excp_idx, target_el,
59
cur_el, secure, hcr_el2)) {
60
- cs->exception_index = excp_idx;
61
- env->exception.target_el = target_el;
62
- cc->do_interrupt(cs);
63
- ret = true;
64
+ goto found;
65
}
66
}
67
if (interrupt_request & CPU_INTERRUPT_VFIQ) {
68
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
69
target_el = 1;
70
if (arm_excp_unmasked(cs, excp_idx, target_el,
71
cur_el, secure, hcr_el2)) {
72
- cs->exception_index = excp_idx;
73
- env->exception.target_el = target_el;
74
- cc->do_interrupt(cs);
75
- ret = true;
76
+ goto found;
77
}
78
}
79
+ return false;
80
81
- return ret;
82
+ found:
83
+ cs->exception_index = excp_idx;
84
+ env->exception.target_el = target_el;
85
+ cc->do_interrupt(cs);
86
+ return true;
87
}
88
89
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
90
--
28
--
91
2.20.1
29
2.34.1
92
93
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
When VHE is enabled, the exception level below EL2 is not EL1,
3
This feature allows the operating system to set TCR_ELx.HWU*
4
but EL0, and so to identify the entry vector offset for exceptions
4
to allow the implementation to use the PBHA bits from the
5
targeting EL2 we need to look at the width of EL0, not of EL1.
5
block and page descriptors for IMPLEMENTATION DEFINED
6
purposes. Since QEMU has no need to use these bits, we may
7
simply ignore them.
6
8
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20230811214031.171020-11-richard.henderson@linaro.org
10
Message-id: 20200206105448.4726-37-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
---
13
target/arm/helper.c | 9 +++++++--
14
docs/system/arm/emulation.rst | 1 +
14
1 file changed, 7 insertions(+), 2 deletions(-)
15
target/arm/tcg/cpu32.c | 2 +-
16
target/arm/tcg/cpu64.c | 2 +-
17
3 files changed, 3 insertions(+), 2 deletions(-)
15
18
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
17
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper.c
21
--- a/docs/system/arm/emulation.rst
19
+++ b/target/arm/helper.c
22
+++ b/docs/system/arm/emulation.rst
20
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
21
* immediately lower than the target level is using AArch32 or AArch64
24
- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state)
22
*/
25
- FEAT_HCX (Support for the HCRX_EL2 register)
23
bool is_aa64;
26
- FEAT_HPDS (Hierarchical permission disables)
24
+ uint64_t hcr;
27
+- FEAT_HPDS2 (Translation table page-based hardware attributes)
25
28
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
26
switch (new_el) {
29
- FEAT_IDST (ID space trap handling)
27
case 3:
30
- FEAT_IESB (Implicit error synchronization event)
28
is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
31
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
29
break;
32
index XXXXXXX..XXXXXXX 100644
30
case 2:
33
--- a/target/arm/tcg/cpu32.c
31
- is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
34
+++ b/target/arm/tcg/cpu32.c
32
- break;
35
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
33
+ hcr = arm_hcr_el2_eff(env);
36
cpu->isar.id_mmfr3 = t;
34
+ if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
37
35
+ is_aa64 = (hcr & HCR_RW) != 0;
38
t = cpu->isar.id_mmfr4;
36
+ break;
39
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */
37
+ }
40
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */
38
+ /* fall through */
41
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
39
case 1:
42
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
40
is_aa64 = is_a64(env);
43
t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */
41
break;
44
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/tcg/cpu64.c
47
+++ b/target/arm/tcg/cpu64.c
48
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
49
t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
50
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
51
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
52
- t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
53
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 2); /* FEAT_HPDS2 */
54
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
55
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3); /* FEAT_PAN3 */
56
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
42
--
57
--
43
2.20.1
58
2.34.1
44
45
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From: Alex Bennée <alex.bennee@linaro.org>
2
2
3
According to ARM ARM we should only trap from the EL1&0 regime.
3
This is a mandatory feature for Armv8.1 architectures but we don't
4
state the feature clearly in our emulation list. Also include
5
FEAT_CRC32 comment in aarch64_max_tcg_initfn for ease of grepping.
4
6
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20230824075406.1515566-1-alex.bennee@linaro.org
9
Message-id: 20200206105448.4726-35-richard.henderson@linaro.org
10
Cc: qemu-stable@nongnu.org
11
Message-Id: <20230222110104.3996971-1-alex.bennee@linaro.org>
12
[PMM: pluralize 'instructions' in docs]
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
14
---
12
target/arm/pauth_helper.c | 5 ++++-
15
docs/system/arm/emulation.rst | 1 +
13
1 file changed, 4 insertions(+), 1 deletion(-)
16
target/arm/tcg/cpu64.c | 2 +-
17
2 files changed, 2 insertions(+), 1 deletion(-)
14
18
15
diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/pauth_helper.c
21
--- a/docs/system/arm/emulation.rst
18
+++ b/target/arm/pauth_helper.c
22
+++ b/docs/system/arm/emulation.rst
19
@@ -XXX,XX +XXX,XX @@ static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
20
if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
24
- FEAT_BBM at level 2 (Translation table break-before-make levels)
21
uint64_t hcr = arm_hcr_el2_eff(env);
25
- FEAT_BF16 (AArch64 BFloat16 instructions)
22
bool trap = !(hcr & HCR_API);
26
- FEAT_BTI (Branch Target Identification)
23
- /* FIXME: ARMv8.1-VHE: trap only applies to EL1&0 regime. */
27
+- FEAT_CRC32 (CRC32 instructions)
24
+ if (el == 0) {
28
- FEAT_CSV2 (Cache speculation variant 2)
25
+ /* Trap only applies to EL1&0 regime. */
29
- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1)
26
+ trap &= (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE);
30
- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
27
+ }
31
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
28
/* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB]. */
32
index XXXXXXX..XXXXXXX 100644
29
if (trap) {
33
--- a/target/arm/tcg/cpu64.c
30
pauth_trap(env, 2, ra);
34
+++ b/target/arm/tcg/cpu64.c
35
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
36
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
37
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
38
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
39
- t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
40
+ t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); /* FEAT_CRC32 */
41
t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */
42
t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */
43
t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */
31
--
44
--
32
2.20.1
45
2.34.1
33
46
34
47
1
From: Pan Nengyuan <pannengyuan@huawei.com>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
There is a memory leak when we call 'device_list_properties' with typename = stellaris-gptm. It's easy to reproduce as follows:
3
The i.MX7 IOMUX GPR device is not equivalent to the i.MX6UL IOMUXC GPR device.
4
In particular, register 22 is not present on i.MX6UL and this is actually
5
the only register that is really emulated in the i.MX7 IOMUX GPR device.
4
6
5
virsh qemu-monitor-command vm1 --pretty '{"execute": "device-list-properties", "arguments": {"typename": "stellaris-gptm"}}'
7
Note: The i.MX6UL code is actually also implementing the IOMUX GPR device
8
as an unimplemented device at the same bus address and the 2 instantiations
9
were actually colliding. So we go back to the unimplemented device for now.
6
10
7
This patch delays timer_new until realize to fix it.
11
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
8
12
Message-id: 48681bf51ee97646479bb261bee19abebbc8074e.1692964892.git.jcd@tribudubois.net
9
Reported-by: Euler Robot <euler.robot@huawei.com>
10
Signed-off-by: Pan Nengyuan <pannengyuan@huawei.com>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
Message-id: 20200205070659.22488-4-pannengyuan@huawei.com
13
Cc: qemu-arm@nongnu.org
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
15
---
17
hw/arm/stellaris.c | 7 ++++++-
16
include/hw/arm/fsl-imx6ul.h | 2 --
18
1 file changed, 6 insertions(+), 1 deletion(-)
17
hw/arm/fsl-imx6ul.c | 11 -----------
18
2 files changed, 13 deletions(-)
19
19
20
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
20
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
21
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/stellaris.c
22
--- a/include/hw/arm/fsl-imx6ul.h
23
+++ b/hw/arm/stellaris.c
23
+++ b/include/hw/arm/fsl-imx6ul.h
24
@@ -XXX,XX +XXX,XX @@ static void stellaris_gptm_init(Object *obj)
24
@@ -XXX,XX +XXX,XX @@
25
sysbus_init_mmio(sbd, &s->iomem);
25
#include "hw/misc/imx6ul_ccm.h"
26
26
#include "hw/misc/imx6_src.h"
27
s->opaque[0] = s->opaque[1] = s;
27
#include "hw/misc/imx7_snvs.h"
28
+}
28
-#include "hw/misc/imx7_gpr.h"
29
+
29
#include "hw/intc/imx_gpcv2.h"
30
+static void stellaris_gptm_realize(DeviceState *dev, Error **errp)
30
#include "hw/watchdog/wdt_imx2.h"
31
+{
31
#include "hw/gpio/imx_gpio.h"
32
+ gptm_state *s = STELLARIS_GPTM(dev);
32
@@ -XXX,XX +XXX,XX @@ struct FslIMX6ULState {
33
s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]);
33
IMX6SRCState src;
34
s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]);
34
IMX7SNVSState snvs;
35
}
35
IMXGPCv2State gpcv2;
36
36
- IMX7GPRState gpr;
37
IMXSPIState spi[FSL_IMX6UL_NUM_ECSPIS];
38
IMXI2CState i2c[FSL_IMX6UL_NUM_I2CS];
39
IMXSerialState uart[FSL_IMX6UL_NUM_UARTS];
40
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/arm/fsl-imx6ul.c
43
+++ b/hw/arm/fsl-imx6ul.c
44
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
45
*/
46
object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS);
47
48
- /*
49
- * GPR
50
- */
51
- object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR);
37
-
52
-
38
/* System controller. */
53
/*
39
54
* GPIOs 1 to 5
40
typedef struct {
55
*/
41
@@ -XXX,XX +XXX,XX @@ static void stellaris_gptm_class_init(ObjectClass *klass, void *data)
56
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
42
DeviceClass *dc = DEVICE_CLASS(klass);
57
FSL_IMX6UL_WDOGn_IRQ[i]));
43
58
}
44
dc->vmsd = &vmstate_stellaris_gptm;
59
45
+ dc->realize = stellaris_gptm_realize;
60
- /*
46
}
61
- * GPR
47
62
- */
48
static const TypeInfo stellaris_gptm_info = {
63
- sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort);
64
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX6UL_IOMUXC_GPR_ADDR);
65
-
66
/*
67
* SDMA
68
*/
49
--
69
--
50
2.20.1
70
2.34.1
51
52
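For reference, the general shape of the stellaris-gptm leak fix above, as a sketch with a hypothetical FooState/FOO/foo_tick rather than the real gptm_state: instance_init also runs for bare introspection (device-list-properties creates and finalizes the object without realizing it), so a timer allocated there is never freed; keeping instance_init side-effect free and creating the timer in realize avoids the leak.

    /*
     * Sketch only (FooState, FOO() and foo_tick are hypothetical names);
     * the real change is in hw/arm/stellaris.c.
     */
    static void foo_instance_init(Object *obj)
    {
        FooState *s = FOO(obj);

        /* cheap setup only; nothing that must be freed on finalize */
        sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
    }

    static void foo_realize(DeviceState *dev, Error **errp)
    {
        FooState *s = FOO(dev);

        /* the timer is only created once the device is actually realized */
        s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, foo_tick, s);
    }

    static void foo_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        dc->realize = foo_realize;
    }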
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
The virtual offset may be 0 depending on EL, E2H and TGE.
3
* Add Addr and size definitions for most i.MX6UL devices in the i.MX6UL header file.
4
* Use those newly defined named constants whenever possible.
5
* Standardize the way we init a family of unimplemented devices
6
- SAI
7
- PWM
8
- CAN
9
* Add/rework a few comments
4
10
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Message-id: d579043fbd4e4b490370783fda43fc02c8e9be75.1692964892.git.jcd@tribudubois.net
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20200206105448.4726-6-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
15
---
11
target/arm/helper.c | 40 +++++++++++++++++++++++++++++++++++++---
16
include/hw/arm/fsl-imx6ul.h | 156 +++++++++++++++++++++++++++++++-----
12
1 file changed, 37 insertions(+), 3 deletions(-)
17
hw/arm/fsl-imx6ul.c | 147 ++++++++++++++++++++++-----------
18
2 files changed, 232 insertions(+), 71 deletions(-)
13
19
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
20
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
15
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
22
--- a/include/hw/arm/fsl-imx6ul.h
17
+++ b/target/arm/helper.c
23
+++ b/include/hw/arm/fsl-imx6ul.h
18
@@ -XXX,XX +XXX,XX @@ static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
24
@@ -XXX,XX +XXX,XX @@
19
return gt_get_countervalue(env);
25
#include "exec/memory.h"
20
}
26
#include "cpu.h"
21
27
#include "qom/object.h"
22
+static uint64_t gt_virt_cnt_offset(CPUARMState *env)
28
+#include "qemu/units.h"
23
+{
29
24
+ uint64_t hcr;
30
#define TYPE_FSL_IMX6UL "fsl-imx6ul"
25
+
31
OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6ULState, FSL_IMX6UL)
26
+ switch (arm_current_el(env)) {
32
@@ -XXX,XX +XXX,XX @@ enum FslIMX6ULConfiguration {
27
+ case 2:
33
FSL_IMX6UL_NUM_ADCS = 2,
28
+ hcr = arm_hcr_el2_eff(env);
34
FSL_IMX6UL_NUM_USB_PHYS = 2,
29
+ if (hcr & HCR_E2H) {
35
FSL_IMX6UL_NUM_USBS = 2,
30
+ return 0;
36
+ FSL_IMX6UL_NUM_SAIS = 3,
31
+ }
37
+ FSL_IMX6UL_NUM_CANS = 2,
32
+ break;
38
+ FSL_IMX6UL_NUM_PWMS = 4,
33
+ case 0:
39
};
34
+ hcr = arm_hcr_el2_eff(env);
40
35
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
41
struct FslIMX6ULState {
36
+ return 0;
42
@@ -XXX,XX +XXX,XX @@ struct FslIMX6ULState {
37
+ }
43
38
+ break;
44
enum FslIMX6ULMemoryMap {
45
FSL_IMX6UL_MMDC_ADDR = 0x80000000,
46
- FSL_IMX6UL_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
47
+ FSL_IMX6UL_MMDC_SIZE = (2 * GiB),
48
49
FSL_IMX6UL_QSPI1_MEM_ADDR = 0x60000000,
50
- FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000,
51
- FSL_IMX6UL_EIM_CS_ADDR = 0x50000000,
52
- FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000,
53
- FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000,
54
+ FSL_IMX6UL_QSPI1_MEM_SIZE = (256 * MiB),
55
56
- /* AIPS-2 */
57
+ FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000,
58
+ FSL_IMX6UL_EIM_ALIAS_SIZE = (128 * MiB),
59
+
60
+ FSL_IMX6UL_EIM_CS_ADDR = 0x50000000,
61
+ FSL_IMX6UL_EIM_CS_SIZE = (128 * MiB),
62
+
63
+ FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000,
64
+ FSL_IMX6UL_AES_ENCRYPT_SIZE = (1 * MiB),
65
+
66
+ FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000,
67
+ FSL_IMX6UL_QSPI1_RX_SIZE = (32 * MiB),
68
+
69
+ /* AIPS-2 Begin */
70
FSL_IMX6UL_UART6_ADDR = 0x021FC000,
71
+
72
FSL_IMX6UL_I2C4_ADDR = 0x021F8000,
73
+
74
FSL_IMX6UL_UART5_ADDR = 0x021F4000,
75
FSL_IMX6UL_UART4_ADDR = 0x021F0000,
76
FSL_IMX6UL_UART3_ADDR = 0x021EC000,
77
FSL_IMX6UL_UART2_ADDR = 0x021E8000,
78
+
79
FSL_IMX6UL_WDOG3_ADDR = 0x021E4000,
80
+
81
FSL_IMX6UL_QSPI_ADDR = 0x021E0000,
82
+ FSL_IMX6UL_QSPI_SIZE = 0x500,
83
+
84
FSL_IMX6UL_SYS_CNT_CTRL_ADDR = 0x021DC000,
85
+ FSL_IMX6UL_SYS_CNT_CTRL_SIZE = (16 * KiB),
86
+
87
FSL_IMX6UL_SYS_CNT_CMP_ADDR = 0x021D8000,
88
+ FSL_IMX6UL_SYS_CNT_CMP_SIZE = (16 * KiB),
89
+
90
FSL_IMX6UL_SYS_CNT_RD_ADDR = 0x021D4000,
91
+ FSL_IMX6UL_SYS_CNT_RD_SIZE = (16 * KiB),
92
+
93
FSL_IMX6UL_TZASC_ADDR = 0x021D0000,
94
+ FSL_IMX6UL_TZASC_SIZE = (16 * KiB),
95
+
96
FSL_IMX6UL_PXP_ADDR = 0x021CC000,
97
+ FSL_IMX6UL_PXP_SIZE = (16 * KiB),
98
+
99
FSL_IMX6UL_LCDIF_ADDR = 0x021C8000,
100
+ FSL_IMX6UL_LCDIF_SIZE = 0x100,
101
+
102
FSL_IMX6UL_CSI_ADDR = 0x021C4000,
103
+ FSL_IMX6UL_CSI_SIZE = 0x100,
104
+
105
FSL_IMX6UL_CSU_ADDR = 0x021C0000,
106
+ FSL_IMX6UL_CSU_SIZE = (16 * KiB),
107
+
108
FSL_IMX6UL_OCOTP_CTRL_ADDR = 0x021BC000,
109
+ FSL_IMX6UL_OCOTP_CTRL_SIZE = (4 * KiB),
110
+
111
FSL_IMX6UL_EIM_ADDR = 0x021B8000,
112
+ FSL_IMX6UL_EIM_SIZE = 0x100,
113
+
114
FSL_IMX6UL_SIM2_ADDR = 0x021B4000,
115
+
116
FSL_IMX6UL_MMDC_CFG_ADDR = 0x021B0000,
117
+ FSL_IMX6UL_MMDC_CFG_SIZE = (4 * KiB),
118
+
119
FSL_IMX6UL_ROMCP_ADDR = 0x021AC000,
120
+ FSL_IMX6UL_ROMCP_SIZE = 0x300,
121
+
122
FSL_IMX6UL_I2C3_ADDR = 0x021A8000,
123
FSL_IMX6UL_I2C2_ADDR = 0x021A4000,
124
FSL_IMX6UL_I2C1_ADDR = 0x021A0000,
125
+
126
FSL_IMX6UL_ADC2_ADDR = 0x0219C000,
127
FSL_IMX6UL_ADC1_ADDR = 0x02198000,
128
+ FSL_IMX6UL_ADCn_SIZE = 0x100,
129
+
130
FSL_IMX6UL_USDHC2_ADDR = 0x02194000,
131
FSL_IMX6UL_USDHC1_ADDR = 0x02190000,
132
- FSL_IMX6UL_SIM1_ADDR = 0x0218C000,
133
- FSL_IMX6UL_ENET1_ADDR = 0x02188000,
134
- FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800,
135
- FSL_IMX6UL_USBO2_USB_ADDR = 0x02184000,
136
- FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000,
137
- FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000,
138
- FSL_IMX6UL_CAAM_ADDR = 0x02140000,
139
- FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000,
140
141
- /* AIPS-1 */
142
+ FSL_IMX6UL_SIM1_ADDR = 0x0218C000,
143
+ FSL_IMX6UL_SIMn_SIZE = (16 * KiB),
144
+
145
+ FSL_IMX6UL_ENET1_ADDR = 0x02188000,
146
+
147
+ FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800,
148
+ FSL_IMX6UL_USBO2_USB1_ADDR = 0x02184000,
149
+ FSL_IMX6UL_USBO2_USB2_ADDR = 0x02184200,
150
+
151
+ FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000,
152
+ FSL_IMX6UL_USBO2_PL301_SIZE = (16 * KiB),
153
+
154
+ FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000,
155
+ FSL_IMX6UL_AIPS2_CFG_SIZE = 0x100,
156
+
157
+ FSL_IMX6UL_CAAM_ADDR = 0x02140000,
158
+ FSL_IMX6UL_CAAM_SIZE = (16 * KiB),
159
+
160
+ FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000,
161
+ FSL_IMX6UL_A7MPCORE_DAP_SIZE = (4 * KiB),
162
+ /* AIPS-2 End */
163
+
164
+ /* AIPS-1 Begin */
165
FSL_IMX6UL_PWM8_ADDR = 0x020FC000,
166
FSL_IMX6UL_PWM7_ADDR = 0x020F8000,
167
FSL_IMX6UL_PWM6_ADDR = 0x020F4000,
168
FSL_IMX6UL_PWM5_ADDR = 0x020F0000,
169
+
170
FSL_IMX6UL_SDMA_ADDR = 0x020EC000,
171
+ FSL_IMX6UL_SDMA_SIZE = 0x300,
172
+
173
FSL_IMX6UL_GPT2_ADDR = 0x020E8000,
174
+
175
FSL_IMX6UL_IOMUXC_GPR_ADDR = 0x020E4000,
176
+ FSL_IMX6UL_IOMUXC_GPR_SIZE = 0x40,
177
+
178
FSL_IMX6UL_IOMUXC_ADDR = 0x020E0000,
179
+ FSL_IMX6UL_IOMUXC_SIZE = 0x700,
180
+
181
FSL_IMX6UL_GPC_ADDR = 0x020DC000,
182
+
183
FSL_IMX6UL_SRC_ADDR = 0x020D8000,
184
+
185
FSL_IMX6UL_EPIT2_ADDR = 0x020D4000,
186
FSL_IMX6UL_EPIT1_ADDR = 0x020D0000,
187
+
188
FSL_IMX6UL_SNVS_HP_ADDR = 0x020CC000,
189
+
190
FSL_IMX6UL_USBPHY2_ADDR = 0x020CA000,
191
- FSL_IMX6UL_USBPHY2_SIZE = (4 * 1024),
192
FSL_IMX6UL_USBPHY1_ADDR = 0x020C9000,
193
- FSL_IMX6UL_USBPHY1_SIZE = (4 * 1024),
194
+
195
FSL_IMX6UL_ANALOG_ADDR = 0x020C8000,
196
+ FSL_IMX6UL_ANALOG_SIZE = 0x300,
197
+
198
FSL_IMX6UL_CCM_ADDR = 0x020C4000,
199
+
200
FSL_IMX6UL_WDOG2_ADDR = 0x020C0000,
201
FSL_IMX6UL_WDOG1_ADDR = 0x020BC000,
202
+
203
FSL_IMX6UL_KPP_ADDR = 0x020B8000,
204
+ FSL_IMX6UL_KPP_SIZE = 0x10,
205
+
206
FSL_IMX6UL_ENET2_ADDR = 0x020B4000,
207
+
208
FSL_IMX6UL_SNVS_LP_ADDR = 0x020B0000,
209
+ FSL_IMX6UL_SNVS_LP_SIZE = (16 * KiB),
210
+
211
FSL_IMX6UL_GPIO5_ADDR = 0x020AC000,
212
FSL_IMX6UL_GPIO4_ADDR = 0x020A8000,
213
FSL_IMX6UL_GPIO3_ADDR = 0x020A4000,
214
FSL_IMX6UL_GPIO2_ADDR = 0x020A0000,
215
FSL_IMX6UL_GPIO1_ADDR = 0x0209C000,
216
+
217
FSL_IMX6UL_GPT1_ADDR = 0x02098000,
218
+
219
FSL_IMX6UL_CAN2_ADDR = 0x02094000,
220
FSL_IMX6UL_CAN1_ADDR = 0x02090000,
221
+ FSL_IMX6UL_CANn_SIZE = (4 * KiB),
222
+
223
FSL_IMX6UL_PWM4_ADDR = 0x0208C000,
224
FSL_IMX6UL_PWM3_ADDR = 0x02088000,
225
FSL_IMX6UL_PWM2_ADDR = 0x02084000,
226
FSL_IMX6UL_PWM1_ADDR = 0x02080000,
227
+ FSL_IMX6UL_PWMn_SIZE = 0x20,
228
+
229
FSL_IMX6UL_AIPS1_CFG_ADDR = 0x0207C000,
230
+ FSL_IMX6UL_AIPS1_CFG_SIZE = (16 * KiB),
231
+
232
FSL_IMX6UL_BEE_ADDR = 0x02044000,
233
+ FSL_IMX6UL_BEE_SIZE = (16 * KiB),
234
+
235
FSL_IMX6UL_TOUCH_CTRL_ADDR = 0x02040000,
236
+ FSL_IMX6UL_TOUCH_CTRL_SIZE = 0x100,
237
+
238
FSL_IMX6UL_SPBA_ADDR = 0x0203C000,
239
+ FSL_IMX6UL_SPBA_SIZE = 0x100,
240
+
241
FSL_IMX6UL_ASRC_ADDR = 0x02034000,
242
+ FSL_IMX6UL_ASRC_SIZE = 0x100,
243
+
244
FSL_IMX6UL_SAI3_ADDR = 0x02030000,
245
FSL_IMX6UL_SAI2_ADDR = 0x0202C000,
246
FSL_IMX6UL_SAI1_ADDR = 0x02028000,
247
+ FSL_IMX6UL_SAIn_SIZE = 0x200,
248
+
249
FSL_IMX6UL_UART8_ADDR = 0x02024000,
250
FSL_IMX6UL_UART1_ADDR = 0x02020000,
251
FSL_IMX6UL_UART7_ADDR = 0x02018000,
252
+
253
FSL_IMX6UL_ECSPI4_ADDR = 0x02014000,
254
FSL_IMX6UL_ECSPI3_ADDR = 0x02010000,
255
FSL_IMX6UL_ECSPI2_ADDR = 0x0200C000,
256
FSL_IMX6UL_ECSPI1_ADDR = 0x02008000,
257
+
258
FSL_IMX6UL_SPDIF_ADDR = 0x02004000,
259
+ FSL_IMX6UL_SPDIF_SIZE = 0x100,
260
+ /* AIPS-1 End */
261
+
262
+ FSL_IMX6UL_BCH_ADDR = 0x01808000,
263
+ FSL_IMX6UL_BCH_SIZE = 0x200,
264
+
265
+ FSL_IMX6UL_GPMI_ADDR = 0x01806000,
266
+ FSL_IMX6UL_GPMI_SIZE = 0x200,
267
268
FSL_IMX6UL_APBH_DMA_ADDR = 0x01804000,
269
- FSL_IMX6UL_APBH_DMA_SIZE = (32 * 1024),
270
+ FSL_IMX6UL_APBH_DMA_SIZE = (4 * KiB),
271
272
FSL_IMX6UL_A7MPCORE_ADDR = 0x00A00000,
273
274
FSL_IMX6UL_OCRAM_ALIAS_ADDR = 0x00920000,
275
- FSL_IMX6UL_OCRAM_ALIAS_SIZE = 0x00060000,
276
+ FSL_IMX6UL_OCRAM_ALIAS_SIZE = (384 * KiB),
277
+
278
FSL_IMX6UL_OCRAM_MEM_ADDR = 0x00900000,
279
- FSL_IMX6UL_OCRAM_MEM_SIZE = 0x00020000,
280
+ FSL_IMX6UL_OCRAM_MEM_SIZE = (128 * KiB),
281
+
282
FSL_IMX6UL_CAAM_MEM_ADDR = 0x00100000,
283
- FSL_IMX6UL_CAAM_MEM_SIZE = 0x00008000,
284
+ FSL_IMX6UL_CAAM_MEM_SIZE = (32 * KiB),
285
+
286
FSL_IMX6UL_ROM_ADDR = 0x00000000,
287
- FSL_IMX6UL_ROM_SIZE = 0x00018000,
288
+ FSL_IMX6UL_ROM_SIZE = (96 * KiB),
289
};
290
291
enum FslIMX6ULIRQs {
292
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
293
index XXXXXXX..XXXXXXX 100644
294
--- a/hw/arm/fsl-imx6ul.c
295
+++ b/hw/arm/fsl-imx6ul.c
296
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
297
object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS);
298
299
/*
300
- * GPIOs 1 to 5
301
+ * GPIOs
302
*/
303
for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) {
304
snprintf(name, NAME_SIZE, "gpio%d", i);
305
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
306
}
307
308
/*
309
- * GPT 1, 2
310
+ * GPTs
311
*/
312
for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) {
313
snprintf(name, NAME_SIZE, "gpt%d", i);
314
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
315
}
316
317
/*
318
- * EPIT 1, 2
319
+ * EPITs
320
*/
321
for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) {
322
snprintf(name, NAME_SIZE, "epit%d", i + 1);
323
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
324
}
325
326
/*
327
- * eCSPI
328
+ * eCSPIs
329
*/
330
for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) {
331
snprintf(name, NAME_SIZE, "spi%d", i + 1);
332
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
333
}
334
335
/*
336
- * I2C
337
+ * I2Cs
338
*/
339
for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) {
340
snprintf(name, NAME_SIZE, "i2c%d", i + 1);
341
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
342
}
343
344
/*
345
- * UART
346
+ * UARTs
347
*/
348
for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) {
349
snprintf(name, NAME_SIZE, "uart%d", i);
350
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
351
}
352
353
/*
354
- * Ethernet
355
+ * Ethernets
356
*/
357
for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) {
358
snprintf(name, NAME_SIZE, "eth%d", i);
359
object_initialize_child(obj, name, &s->eth[i], TYPE_IMX_ENET);
360
}
361
362
- /* USB */
363
+ /*
364
+ * USB PHYs
365
+ */
366
for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) {
367
snprintf(name, NAME_SIZE, "usbphy%d", i);
368
object_initialize_child(obj, name, &s->usbphy[i], TYPE_IMX_USBPHY);
369
}
370
+
371
+ /*
372
+ * USBs
373
+ */
374
for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) {
375
snprintf(name, NAME_SIZE, "usb%d", i);
376
object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA);
377
}
378
379
/*
380
- * SDHCI
381
+ * SDHCIs
382
*/
383
for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) {
384
snprintf(name, NAME_SIZE, "usdhc%d", i);
385
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
386
}
387
388
/*
389
- * Watchdog
390
+ * Watchdogs
391
*/
392
for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) {
393
snprintf(name, NAME_SIZE, "wdt%d", i);
394
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
395
* A7MPCORE DAP
396
*/
397
create_unimplemented_device("a7mpcore-dap", FSL_IMX6UL_A7MPCORE_DAP_ADDR,
398
- 0x100000);
399
+ FSL_IMX6UL_A7MPCORE_DAP_SIZE);
400
401
/*
402
- * GPT 1, 2
403
+ * GPTs
404
*/
405
for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) {
406
static const hwaddr FSL_IMX6UL_GPTn_ADDR[FSL_IMX6UL_NUM_GPTS] = {
407
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
408
}
409
410
/*
411
- * EPIT 1, 2
412
+ * EPITs
413
*/
414
for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) {
415
static const hwaddr FSL_IMX6UL_EPITn_ADDR[FSL_IMX6UL_NUM_EPITS] = {
416
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
417
}
418
419
/*
420
- * GPIO
421
+ * GPIOs
422
*/
423
for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) {
424
static const hwaddr FSL_IMX6UL_GPIOn_ADDR[FSL_IMX6UL_NUM_GPIOS] = {
425
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
426
}
427
428
/*
429
- * IOMUXC and IOMUXC_GPR
430
+ * IOMUXC
431
*/
432
- for (i = 0; i < 1; i++) {
433
- static const hwaddr FSL_IMX6UL_IOMUXCn_ADDR[FSL_IMX6UL_NUM_IOMUXCS] = {
434
- FSL_IMX6UL_IOMUXC_ADDR,
435
- FSL_IMX6UL_IOMUXC_GPR_ADDR,
436
- };
437
-
438
- snprintf(name, NAME_SIZE, "iomuxc%d", i);
439
- create_unimplemented_device(name, FSL_IMX6UL_IOMUXCn_ADDR[i], 0x4000);
440
- }
441
+ create_unimplemented_device("iomuxc", FSL_IMX6UL_IOMUXC_ADDR,
442
+ FSL_IMX6UL_IOMUXC_SIZE);
443
+ create_unimplemented_device("iomuxc_gpr", FSL_IMX6UL_IOMUXC_GPR_ADDR,
444
+ FSL_IMX6UL_IOMUXC_GPR_SIZE);
445
446
/*
447
* CCM
448
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
449
sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort);
450
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX6UL_GPC_ADDR);
451
452
- /* Initialize all ECSPI */
453
+ /*
454
+ * ECSPIs
455
+ */
456
for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) {
457
static const hwaddr FSL_IMX6UL_SPIn_ADDR[FSL_IMX6UL_NUM_ECSPIS] = {
458
FSL_IMX6UL_ECSPI1_ADDR,
459
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
460
}
461
462
/*
463
- * I2C
464
+ * I2Cs
465
*/
466
for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) {
467
static const hwaddr FSL_IMX6UL_I2Cn_ADDR[FSL_IMX6UL_NUM_I2CS] = {
468
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
469
}
470
471
/*
472
- * UART
473
+ * UARTs
474
*/
475
for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) {
476
static const hwaddr FSL_IMX6UL_UARTn_ADDR[FSL_IMX6UL_NUM_UARTS] = {
477
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
478
}
479
480
/*
481
- * Ethernet
482
+ * Ethernets
483
*
484
* We must use two loops since phy_connected affects the other interface
485
* and we have to set all properties before calling sysbus_realize().
486
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
487
FSL_IMX6UL_ENETn_TIMER_IRQ[i]));
488
}
489
490
- /* USB */
491
+ /*
492
+ * USB PHYs
493
+ */
494
for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) {
495
+ static const hwaddr
496
+ FSL_IMX6UL_USB_PHYn_ADDR[FSL_IMX6UL_NUM_USB_PHYS] = {
497
+ FSL_IMX6UL_USBPHY1_ADDR,
498
+ FSL_IMX6UL_USBPHY2_ADDR,
499
+ };
500
+
501
sysbus_realize(SYS_BUS_DEVICE(&s->usbphy[i]), &error_abort);
502
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0,
503
- FSL_IMX6UL_USBPHY1_ADDR + i * 0x1000);
504
+ FSL_IMX6UL_USB_PHYn_ADDR[i]);
505
}
506
507
+ /*
508
+ * USBs
509
+ */
510
for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) {
511
+ static const hwaddr FSL_IMX6UL_USB02_USBn_ADDR[FSL_IMX6UL_NUM_USBS] = {
512
+ FSL_IMX6UL_USBO2_USB1_ADDR,
513
+ FSL_IMX6UL_USBO2_USB2_ADDR,
514
+ };
515
+
516
static const int FSL_IMX6UL_USBn_IRQ[] = {
517
FSL_IMX6UL_USB1_IRQ,
518
FSL_IMX6UL_USB2_IRQ,
519
};
520
+
521
sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort);
522
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
523
- FSL_IMX6UL_USBO2_USB_ADDR + i * 0x200);
524
+ FSL_IMX6UL_USB02_USBn_ADDR[i]);
525
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0,
526
qdev_get_gpio_in(DEVICE(&s->a7mpcore),
527
FSL_IMX6UL_USBn_IRQ[i]));
528
}
529
530
/*
531
- * USDHC
532
+ * USDHCs
533
*/
534
for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) {
535
static const hwaddr FSL_IMX6UL_USDHCn_ADDR[FSL_IMX6UL_NUM_USDHCS] = {
536
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
537
sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6UL_SNVS_HP_ADDR);
538
539
/*
540
- * Watchdog
541
+ * Watchdogs
542
*/
543
for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) {
544
static const hwaddr FSL_IMX6UL_WDOGn_ADDR[FSL_IMX6UL_NUM_WDTS] = {
545
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
546
FSL_IMX6UL_WDOG2_ADDR,
547
FSL_IMX6UL_WDOG3_ADDR,
548
};
549
+
550
static const int FSL_IMX6UL_WDOGn_IRQ[FSL_IMX6UL_NUM_WDTS] = {
551
FSL_IMX6UL_WDOG1_IRQ,
552
FSL_IMX6UL_WDOG2_IRQ,
553
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
554
/*
555
* SDMA
556
*/
557
- create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000);
558
+ create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR,
559
+ FSL_IMX6UL_SDMA_SIZE);
560
561
/*
562
- * SAI (Audio SSI (Synchronous Serial Interface))
563
+ * SAIs (Audio SSI (Synchronous Serial Interface))
564
*/
565
- create_unimplemented_device("sai1", FSL_IMX6UL_SAI1_ADDR, 0x4000);
566
- create_unimplemented_device("sai2", FSL_IMX6UL_SAI2_ADDR, 0x4000);
567
- create_unimplemented_device("sai3", FSL_IMX6UL_SAI3_ADDR, 0x4000);
568
+ for (i = 0; i < FSL_IMX6UL_NUM_SAIS; i++) {
569
+ static const hwaddr FSL_IMX6UL_SAIn_ADDR[FSL_IMX6UL_NUM_SAIS] = {
570
+ FSL_IMX6UL_SAI1_ADDR,
571
+ FSL_IMX6UL_SAI2_ADDR,
572
+ FSL_IMX6UL_SAI3_ADDR,
573
+ };
574
+
575
+ snprintf(name, NAME_SIZE, "sai%d", i);
576
+ create_unimplemented_device(name, FSL_IMX6UL_SAIn_ADDR[i],
577
+ FSL_IMX6UL_SAIn_SIZE);
39
+ }
578
+ }
40
+
579
41
+ return env->cp15.cntvoff_el2;
580
/*
42
+}
581
- * PWM
43
+
582
+ * PWMs
44
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
583
*/
45
{
584
- create_unimplemented_device("pwm1", FSL_IMX6UL_PWM1_ADDR, 0x4000);
46
- return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
585
- create_unimplemented_device("pwm2", FSL_IMX6UL_PWM2_ADDR, 0x4000);
47
+ return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
586
- create_unimplemented_device("pwm3", FSL_IMX6UL_PWM3_ADDR, 0x4000);
48
}
587
- create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000);
49
588
+ for (i = 0; i < FSL_IMX6UL_NUM_PWMS; i++) {
50
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
589
+ static const hwaddr FSL_IMX6UL_PWMn_ADDR[FSL_IMX6UL_NUM_PWMS] = {
51
@@ -XXX,XX +XXX,XX @@ static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
590
+ FSL_IMX6UL_PWM1_ADDR,
52
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
591
+ FSL_IMX6UL_PWM2_ADDR,
53
int timeridx)
592
+ FSL_IMX6UL_PWM3_ADDR,
54
{
593
+ FSL_IMX6UL_PWM4_ADDR,
55
- uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
594
+ };
56
+ uint64_t offset = 0;
595
+
57
+
596
+ snprintf(name, NAME_SIZE, "pwm%d", i);
58
+ switch (timeridx) {
597
+ create_unimplemented_device(name, FSL_IMX6UL_PWMn_ADDR[i],
59
+ case GTIMER_VIRT:
598
+ FSL_IMX6UL_PWMn_SIZE);
60
+ offset = gt_virt_cnt_offset(env);
61
+ break;
62
+ }
599
+ }
63
600
64
return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
601
/*
65
(gt_get_countervalue(env) - offset));
602
* Audio ASRC (asynchronous sample rate converter)
66
@@ -XXX,XX +XXX,XX @@ static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
603
*/
67
int timeridx,
604
- create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, 0x4000);
68
uint64_t value)
605
+ create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR,
69
{
606
+ FSL_IMX6UL_ASRC_SIZE);
70
- uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
607
71
+ uint64_t offset = 0;
608
/*
72
+
609
- * CAN
73
+ switch (timeridx) {
610
+ * CANs
74
+ case GTIMER_VIRT:
611
*/
75
+ offset = gt_virt_cnt_offset(env);
612
- create_unimplemented_device("can1", FSL_IMX6UL_CAN1_ADDR, 0x4000);
76
+ break;
613
- create_unimplemented_device("can2", FSL_IMX6UL_CAN2_ADDR, 0x4000);
614
+ for (i = 0; i < FSL_IMX6UL_NUM_CANS; i++) {
615
+ static const hwaddr FSL_IMX6UL_CANn_ADDR[FSL_IMX6UL_NUM_CANS] = {
616
+ FSL_IMX6UL_CAN1_ADDR,
617
+ FSL_IMX6UL_CAN2_ADDR,
618
+ };
619
+
620
+ snprintf(name, NAME_SIZE, "can%d", i);
621
+ create_unimplemented_device(name, FSL_IMX6UL_CANn_ADDR[i],
622
+ FSL_IMX6UL_CANn_SIZE);
77
+ }
623
+ }
78
624
79
trace_arm_gt_tval_write(timeridx, value);
625
/*
80
env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
626
* APHB_DMA
627
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
628
};
629
630
snprintf(name, NAME_SIZE, "adc%d", i);
631
- create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i], 0x4000);
632
+ create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i],
633
+ FSL_IMX6UL_ADCn_SIZE);
634
}
635
636
/*
637
* LCD
638
*/
639
- create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, 0x4000);
640
+ create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR,
641
+ FSL_IMX6UL_LCDIF_SIZE);
642
643
/*
644
* ROM memory
81
--
645
--
82
2.20.1
646
2.34.1
83
84
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
The TGE bit routes all asynchronous exceptions to EL2.
3
* Add TZASC as an unimplemented device.
4
- Allow bare metal applications to access this (unimplemented) device
5
* Add CSU as an unimplemented device.
6
- Allow bare metal applications to access this (unimplemented) device
7
* Add 4 missing PWM devices
4
8
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 59e4dc56e14eccfefd379275ec19048dff9c10b3.1692964892.git.jcd@tribudubois.net
8
Message-id: 20200206105448.4726-33-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
13
---
11
target/arm/helper.c | 6 ++++++
14
include/hw/arm/fsl-imx6ul.h | 2 +-
12
1 file changed, 6 insertions(+)
15
hw/arm/fsl-imx6ul.c | 16 ++++++++++++++++
16
2 files changed, 17 insertions(+), 1 deletion(-)
13
17
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
20
--- a/include/hw/arm/fsl-imx6ul.h
17
+++ b/target/arm/helper.c
21
+++ b/include/hw/arm/fsl-imx6ul.h
18
@@ -XXX,XX +XXX,XX @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
22
@@ -XXX,XX +XXX,XX @@ enum FslIMX6ULConfiguration {
19
break;
23
FSL_IMX6UL_NUM_USBS = 2,
20
};
24
FSL_IMX6UL_NUM_SAIS = 3,
25
FSL_IMX6UL_NUM_CANS = 2,
26
- FSL_IMX6UL_NUM_PWMS = 4,
27
+ FSL_IMX6UL_NUM_PWMS = 8,
28
};
29
30
struct FslIMX6ULState {
31
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/hw/arm/fsl-imx6ul.c
34
+++ b/hw/arm/fsl-imx6ul.c
35
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
36
FSL_IMX6UL_PWM2_ADDR,
37
FSL_IMX6UL_PWM3_ADDR,
38
FSL_IMX6UL_PWM4_ADDR,
39
+ FSL_IMX6UL_PWM5_ADDR,
40
+ FSL_IMX6UL_PWM6_ADDR,
41
+ FSL_IMX6UL_PWM7_ADDR,
42
+ FSL_IMX6UL_PWM8_ADDR,
43
};
44
45
snprintf(name, NAME_SIZE, "pwm%d", i);
46
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
47
create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR,
48
FSL_IMX6UL_LCDIF_SIZE);
21
49
22
+ /*
50
+ /*
23
+ * For these purposes, TGE and AMO/IMO/FMO both force the
51
+ * CSU
24
+ * interrupt to EL2. Fold TGE into the bit extracted above.
25
+ */
52
+ */
26
+ hcr |= (hcr_el2 & HCR_TGE) != 0;
53
+ create_unimplemented_device("csu", FSL_IMX6UL_CSU_ADDR,
54
+ FSL_IMX6UL_CSU_SIZE);
27
+
55
+
28
/* Perform a table-lookup for the target EL given the current state */
56
+ /*
29
target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
57
+ * TZASC
30
58
+ */
59
+ create_unimplemented_device("tzasc", FSL_IMX6UL_TZASC_ADDR,
60
+ FSL_IMX6UL_TZASC_SIZE);
61
+
62
/*
63
* ROM memory
64
*/
31
--
65
--
32
2.20.1
66
2.34.1
33
67
34
68
1
From: Pan Nengyuan <pannengyuan@huawei.com>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
There is a memory leak when we call 'device_list_properties' with typename = stm32f2xx_timer. It's easy to reproduce as follows:
3
* Add Addr and size definitions for all i.MX7 devices in the i.MX7 header file.
4
* Use those newly defined named constants whenever possible.
5
* Standardize the way we init a family of unimplemented devices
6
- SAI
7
- PWM
8
- CAN
9
* Add/rework a few comments
4
10
5
virsh qemu-monitor-command vm1 --pretty '{"execute": "device-list-properties", "arguments": {"typename": "stm32f2xx_timer"}}'
11
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
6
12
Message-id: 59e195d33e4d486a8d131392acd46633c8c10ed7.1692964892.git.jcd@tribudubois.net
7
This patch delays timer_new to fix this memleak.
8
9
Reported-by: Euler Robot <euler.robot@huawei.com>
10
Signed-off-by: Pan Nengyuan <pannengyuan@huawei.com>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-id: 20200205070659.22488-3-pannengyuan@huawei.com
14
Cc: Alistair Francis <alistair@alistair23.me>
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
15
---
18
hw/timer/stm32f2xx_timer.c | 5 +++++
16
include/hw/arm/fsl-imx7.h | 330 ++++++++++++++++++++++++++++----------
19
1 file changed, 5 insertions(+)
17
hw/arm/fsl-imx7.c | 130 ++++++++++-----
18
2 files changed, 335 insertions(+), 125 deletions(-)
20
19
21
diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c
20
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
22
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/timer/stm32f2xx_timer.c
22
--- a/include/hw/arm/fsl-imx7.h
24
+++ b/hw/timer/stm32f2xx_timer.c
23
+++ b/include/hw/arm/fsl-imx7.h
25
@@ -XXX,XX +XXX,XX @@ static void stm32f2xx_timer_init(Object *obj)
24
@@ -XXX,XX +XXX,XX @@
26
memory_region_init_io(&s->iomem, obj, &stm32f2xx_timer_ops, s,
25
#include "hw/misc/imx7_ccm.h"
27
"stm32f2xx_timer", 0x400);
26
#include "hw/misc/imx7_snvs.h"
28
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
27
#include "hw/misc/imx7_gpr.h"
29
+}
28
-#include "hw/misc/imx6_src.h"
30
29
#include "hw/watchdog/wdt_imx2.h"
31
+static void stm32f2xx_timer_realize(DeviceState *dev, Error **errp)
30
#include "hw/gpio/imx_gpio.h"
32
+{
31
#include "hw/char/imx_serial.h"
33
+ STM32F2XXTimerState *s = STM32F2XXTIMER(dev);
32
@@ -XXX,XX +XXX,XX @@
34
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, stm32f2xx_timer_interrupt, s);
33
#include "hw/usb/chipidea.h"
34
#include "cpu.h"
35
#include "qom/object.h"
36
+#include "qemu/units.h"
37
38
#define TYPE_FSL_IMX7 "fsl-imx7"
39
OBJECT_DECLARE_SIMPLE_TYPE(FslIMX7State, FSL_IMX7)
40
@@ -XXX,XX +XXX,XX @@ enum FslIMX7Configuration {
41
FSL_IMX7_NUM_ECSPIS = 4,
42
FSL_IMX7_NUM_USBS = 3,
43
FSL_IMX7_NUM_ADCS = 2,
44
+ FSL_IMX7_NUM_SAIS = 3,
45
+ FSL_IMX7_NUM_CANS = 2,
46
+ FSL_IMX7_NUM_PWMS = 4,
47
};
48
49
struct FslIMX7State {
50
@@ -XXX,XX +XXX,XX @@ struct FslIMX7State {
51
52
enum FslIMX7MemoryMap {
53
FSL_IMX7_MMDC_ADDR = 0x80000000,
54
- FSL_IMX7_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
55
+ FSL_IMX7_MMDC_SIZE = (2 * GiB),
56
57
- FSL_IMX7_GPIO1_ADDR = 0x30200000,
58
- FSL_IMX7_GPIO2_ADDR = 0x30210000,
59
- FSL_IMX7_GPIO3_ADDR = 0x30220000,
60
- FSL_IMX7_GPIO4_ADDR = 0x30230000,
61
- FSL_IMX7_GPIO5_ADDR = 0x30240000,
62
- FSL_IMX7_GPIO6_ADDR = 0x30250000,
63
- FSL_IMX7_GPIO7_ADDR = 0x30260000,
64
+ FSL_IMX7_QSPI1_MEM_ADDR = 0x60000000,
65
+ FSL_IMX7_QSPI1_MEM_SIZE = (256 * MiB),
66
67
- FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
68
+ FSL_IMX7_PCIE1_MEM_ADDR = 0x40000000,
69
+ FSL_IMX7_PCIE1_MEM_SIZE = (256 * MiB),
70
71
- FSL_IMX7_WDOG1_ADDR = 0x30280000,
72
- FSL_IMX7_WDOG2_ADDR = 0x30290000,
73
- FSL_IMX7_WDOG3_ADDR = 0x302A0000,
74
- FSL_IMX7_WDOG4_ADDR = 0x302B0000,
75
+ FSL_IMX7_QSPI1_RX_BUF_ADDR = 0x34000000,
76
+ FSL_IMX7_QSPI1_RX_BUF_SIZE = (32 * MiB),
77
78
- FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
79
+ /* PCIe Peripherals */
80
+ FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
81
82
- FSL_IMX7_GPT1_ADDR = 0x302D0000,
83
- FSL_IMX7_GPT2_ADDR = 0x302E0000,
84
- FSL_IMX7_GPT3_ADDR = 0x302F0000,
85
- FSL_IMX7_GPT4_ADDR = 0x30300000,
86
+ /* MMAP Peripherals */
87
+ FSL_IMX7_DMA_APBH_ADDR = 0x33000000,
88
+ FSL_IMX7_DMA_APBH_SIZE = 0x8000,
89
90
- FSL_IMX7_IOMUXC_ADDR = 0x30330000,
91
- FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
92
- FSL_IMX7_IOMUXCn_SIZE = 0x1000,
93
+ /* GPV configuration */
94
+ FSL_IMX7_GPV6_ADDR = 0x32600000,
95
+ FSL_IMX7_GPV5_ADDR = 0x32500000,
96
+ FSL_IMX7_GPV4_ADDR = 0x32400000,
97
+ FSL_IMX7_GPV3_ADDR = 0x32300000,
98
+ FSL_IMX7_GPV2_ADDR = 0x32200000,
99
+ FSL_IMX7_GPV1_ADDR = 0x32100000,
100
+ FSL_IMX7_GPV0_ADDR = 0x32000000,
101
+ FSL_IMX7_GPVn_SIZE = (1 * MiB),
102
103
- FSL_IMX7_OCOTP_ADDR = 0x30350000,
104
- FSL_IMX7_OCOTP_SIZE = 0x10000,
105
+ /* Arm Peripherals */
106
+ FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
107
108
- FSL_IMX7_ANALOG_ADDR = 0x30360000,
109
- FSL_IMX7_SNVS_ADDR = 0x30370000,
110
- FSL_IMX7_CCM_ADDR = 0x30380000,
111
+ /* AIPS-3 Begin */
112
113
- FSL_IMX7_SRC_ADDR = 0x30390000,
114
- FSL_IMX7_SRC_SIZE = 0x1000,
115
+ FSL_IMX7_ENET2_ADDR = 0x30BF0000,
116
+ FSL_IMX7_ENET1_ADDR = 0x30BE0000,
117
118
- FSL_IMX7_ADC1_ADDR = 0x30610000,
119
- FSL_IMX7_ADC2_ADDR = 0x30620000,
120
- FSL_IMX7_ADCn_SIZE = 0x1000,
121
+ FSL_IMX7_SDMA_ADDR = 0x30BD0000,
122
+ FSL_IMX7_SDMA_SIZE = (4 * KiB),
123
124
- FSL_IMX7_PWM1_ADDR = 0x30660000,
125
- FSL_IMX7_PWM2_ADDR = 0x30670000,
126
- FSL_IMX7_PWM3_ADDR = 0x30680000,
127
- FSL_IMX7_PWM4_ADDR = 0x30690000,
128
- FSL_IMX7_PWMn_SIZE = 0x10000,
129
+ FSL_IMX7_EIM_ADDR = 0x30BC0000,
130
+ FSL_IMX7_EIM_SIZE = (4 * KiB),
131
132
- FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000,
133
- FSL_IMX7_PCIE_PHY_SIZE = 0x10000,
134
+ FSL_IMX7_QSPI_ADDR = 0x30BB0000,
135
+ FSL_IMX7_QSPI_SIZE = 0x8000,
136
137
- FSL_IMX7_GPC_ADDR = 0x303A0000,
138
+ FSL_IMX7_SIM2_ADDR = 0x30BA0000,
139
+ FSL_IMX7_SIM1_ADDR = 0x30B90000,
140
+ FSL_IMX7_SIMn_SIZE = (4 * KiB),
141
+
142
+ FSL_IMX7_USDHC3_ADDR = 0x30B60000,
143
+ FSL_IMX7_USDHC2_ADDR = 0x30B50000,
144
+ FSL_IMX7_USDHC1_ADDR = 0x30B40000,
145
+
146
+ FSL_IMX7_USB3_ADDR = 0x30B30000,
147
+ FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
148
+ FSL_IMX7_USB2_ADDR = 0x30B20000,
149
+ FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
150
+ FSL_IMX7_USB1_ADDR = 0x30B10000,
151
+ FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
152
+ FSL_IMX7_USBMISCn_SIZE = 0x200,
153
+
154
+ FSL_IMX7_USB_PL301_ADDR = 0x30AD0000,
155
+ FSL_IMX7_USB_PL301_SIZE = (64 * KiB),
156
+
157
+ FSL_IMX7_SEMAPHORE_HS_ADDR = 0x30AC0000,
158
+ FSL_IMX7_SEMAPHORE_HS_SIZE = (64 * KiB),
159
+
160
+ FSL_IMX7_MUB_ADDR = 0x30AB0000,
161
+ FSL_IMX7_MUA_ADDR = 0x30AA0000,
162
+ FSL_IMX7_MUn_SIZE = (KiB),
163
+
164
+ FSL_IMX7_UART7_ADDR = 0x30A90000,
165
+ FSL_IMX7_UART6_ADDR = 0x30A80000,
166
+ FSL_IMX7_UART5_ADDR = 0x30A70000,
167
+ FSL_IMX7_UART4_ADDR = 0x30A60000,
168
+
169
+ FSL_IMX7_I2C4_ADDR = 0x30A50000,
170
+ FSL_IMX7_I2C3_ADDR = 0x30A40000,
171
+ FSL_IMX7_I2C2_ADDR = 0x30A30000,
172
+ FSL_IMX7_I2C1_ADDR = 0x30A20000,
173
+
174
+ FSL_IMX7_CAN2_ADDR = 0x30A10000,
175
+ FSL_IMX7_CAN1_ADDR = 0x30A00000,
176
+ FSL_IMX7_CANn_SIZE = (4 * KiB),
177
+
178
+ FSL_IMX7_AIPS3_CONF_ADDR = 0x309F0000,
179
+ FSL_IMX7_AIPS3_CONF_SIZE = (64 * KiB),
180
181
FSL_IMX7_CAAM_ADDR = 0x30900000,
182
- FSL_IMX7_CAAM_SIZE = 0x40000,
183
+ FSL_IMX7_CAAM_SIZE = (256 * KiB),
184
185
- FSL_IMX7_CAN1_ADDR = 0x30A00000,
186
- FSL_IMX7_CAN2_ADDR = 0x30A10000,
187
- FSL_IMX7_CANn_SIZE = 0x10000,
188
+ FSL_IMX7_SPBA_ADDR = 0x308F0000,
189
+ FSL_IMX7_SPBA_SIZE = (4 * KiB),
190
191
- FSL_IMX7_I2C1_ADDR = 0x30A20000,
192
- FSL_IMX7_I2C2_ADDR = 0x30A30000,
193
- FSL_IMX7_I2C3_ADDR = 0x30A40000,
194
- FSL_IMX7_I2C4_ADDR = 0x30A50000,
195
+ FSL_IMX7_SAI3_ADDR = 0x308C0000,
196
+ FSL_IMX7_SAI2_ADDR = 0x308B0000,
197
+ FSL_IMX7_SAI1_ADDR = 0x308A0000,
198
+ FSL_IMX7_SAIn_SIZE = (4 * KiB),
199
200
- FSL_IMX7_ECSPI1_ADDR = 0x30820000,
201
- FSL_IMX7_ECSPI2_ADDR = 0x30830000,
202
- FSL_IMX7_ECSPI3_ADDR = 0x30840000,
203
- FSL_IMX7_ECSPI4_ADDR = 0x30630000,
204
-
205
- FSL_IMX7_LCDIF_ADDR = 0x30730000,
206
- FSL_IMX7_LCDIF_SIZE = 0x1000,
207
-
208
- FSL_IMX7_UART1_ADDR = 0x30860000,
209
+ FSL_IMX7_UART3_ADDR = 0x30880000,
210
/*
211
* Some versions of the reference manual claim that UART2 is @
212
* 0x30870000, but experiments with HW + DT files in upstream
213
@@ -XXX,XX +XXX,XX @@ enum FslIMX7MemoryMap {
214
* actually located @ 0x30890000
215
*/
216
FSL_IMX7_UART2_ADDR = 0x30890000,
217
- FSL_IMX7_UART3_ADDR = 0x30880000,
218
- FSL_IMX7_UART4_ADDR = 0x30A60000,
219
- FSL_IMX7_UART5_ADDR = 0x30A70000,
220
- FSL_IMX7_UART6_ADDR = 0x30A80000,
221
- FSL_IMX7_UART7_ADDR = 0x30A90000,
222
+ FSL_IMX7_UART1_ADDR = 0x30860000,
223
224
- FSL_IMX7_SAI1_ADDR = 0x308A0000,
225
- FSL_IMX7_SAI2_ADDR = 0x308B0000,
226
- FSL_IMX7_SAI3_ADDR = 0x308C0000,
227
- FSL_IMX7_SAIn_SIZE = 0x10000,
228
+ FSL_IMX7_ECSPI3_ADDR = 0x30840000,
229
+ FSL_IMX7_ECSPI2_ADDR = 0x30830000,
230
+ FSL_IMX7_ECSPI1_ADDR = 0x30820000,
231
+ FSL_IMX7_ECSPIn_SIZE = (4 * KiB),
232
233
- FSL_IMX7_ENET1_ADDR = 0x30BE0000,
234
- FSL_IMX7_ENET2_ADDR = 0x30BF0000,
235
+ /* AIPS-3 End */
236
237
- FSL_IMX7_USB1_ADDR = 0x30B10000,
238
- FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
239
- FSL_IMX7_USB2_ADDR = 0x30B20000,
240
- FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
241
- FSL_IMX7_USB3_ADDR = 0x30B30000,
242
- FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
243
- FSL_IMX7_USBMISCn_SIZE = 0x200,
244
+ /* AIPS-2 Begin */
245
246
- FSL_IMX7_USDHC1_ADDR = 0x30B40000,
247
- FSL_IMX7_USDHC2_ADDR = 0x30B50000,
248
- FSL_IMX7_USDHC3_ADDR = 0x30B60000,
249
+ FSL_IMX7_AXI_DEBUG_MON_ADDR = 0x307E0000,
250
+ FSL_IMX7_AXI_DEBUG_MON_SIZE = (64 * KiB),
251
252
- FSL_IMX7_SDMA_ADDR = 0x30BD0000,
253
- FSL_IMX7_SDMA_SIZE = 0x1000,
254
+ FSL_IMX7_PERFMON2_ADDR = 0x307D0000,
255
+ FSL_IMX7_PERFMON1_ADDR = 0x307C0000,
256
+ FSL_IMX7_PERFMONn_SIZE = (64 * KiB),
257
+
258
+ FSL_IMX7_DDRC_ADDR = 0x307A0000,
259
+ FSL_IMX7_DDRC_SIZE = (4 * KiB),
260
+
261
+ FSL_IMX7_DDRC_PHY_ADDR = 0x30790000,
262
+ FSL_IMX7_DDRC_PHY_SIZE = (4 * KiB),
263
+
264
+ FSL_IMX7_TZASC_ADDR = 0x30780000,
265
+ FSL_IMX7_TZASC_SIZE = (64 * KiB),
266
+
267
+ FSL_IMX7_MIPI_DSI_ADDR = 0x30760000,
268
+ FSL_IMX7_MIPI_DSI_SIZE = (4 * KiB),
269
+
270
+ FSL_IMX7_MIPI_CSI_ADDR = 0x30750000,
271
+ FSL_IMX7_MIPI_CSI_SIZE = 0x4000,
272
+
273
+ FSL_IMX7_LCDIF_ADDR = 0x30730000,
274
+ FSL_IMX7_LCDIF_SIZE = 0x8000,
275
+
276
+ FSL_IMX7_CSI_ADDR = 0x30710000,
277
+ FSL_IMX7_CSI_SIZE = (4 * KiB),
278
+
279
+ FSL_IMX7_PXP_ADDR = 0x30700000,
280
+ FSL_IMX7_PXP_SIZE = 0x4000,
281
+
282
+ FSL_IMX7_EPDC_ADDR = 0x306F0000,
283
+ FSL_IMX7_EPDC_SIZE = (4 * KiB),
284
+
285
+ FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000,
286
+ FSL_IMX7_PCIE_PHY_SIZE = (4 * KiB),
287
+
288
+ FSL_IMX7_SYSCNT_CTRL_ADDR = 0x306C0000,
289
+ FSL_IMX7_SYSCNT_CMP_ADDR = 0x306B0000,
290
+ FSL_IMX7_SYSCNT_RD_ADDR = 0x306A0000,
291
+
292
+ FSL_IMX7_PWM4_ADDR = 0x30690000,
293
+ FSL_IMX7_PWM3_ADDR = 0x30680000,
294
+ FSL_IMX7_PWM2_ADDR = 0x30670000,
295
+ FSL_IMX7_PWM1_ADDR = 0x30660000,
296
+ FSL_IMX7_PWMn_SIZE = (4 * KiB),
297
+
298
+ FSL_IMX7_FlEXTIMER2_ADDR = 0x30650000,
299
+ FSL_IMX7_FlEXTIMER1_ADDR = 0x30640000,
300
+ FSL_IMX7_FLEXTIMERn_SIZE = (4 * KiB),
301
+
302
+ FSL_IMX7_ECSPI4_ADDR = 0x30630000,
303
+
304
+ FSL_IMX7_ADC2_ADDR = 0x30620000,
305
+ FSL_IMX7_ADC1_ADDR = 0x30610000,
306
+ FSL_IMX7_ADCn_SIZE = (4 * KiB),
307
+
308
+ FSL_IMX7_AIPS2_CONF_ADDR = 0x305F0000,
309
+ FSL_IMX7_AIPS2_CONF_SIZE = (64 * KiB),
310
+
311
+ /* AIPS-2 End */
312
+
313
+ /* AIPS-1 Begin */
314
+
315
+ FSL_IMX7_CSU_ADDR = 0x303E0000,
316
+ FSL_IMX7_CSU_SIZE = (64 * KiB),
317
+
318
+ FSL_IMX7_RDC_ADDR = 0x303D0000,
319
+ FSL_IMX7_RDC_SIZE = (4 * KiB),
320
+
321
+ FSL_IMX7_SEMAPHORE2_ADDR = 0x303C0000,
322
+ FSL_IMX7_SEMAPHORE1_ADDR = 0x303B0000,
323
+ FSL_IMX7_SEMAPHOREn_SIZE = (4 * KiB),
324
+
325
+ FSL_IMX7_GPC_ADDR = 0x303A0000,
326
+
327
+ FSL_IMX7_SRC_ADDR = 0x30390000,
328
+ FSL_IMX7_SRC_SIZE = (4 * KiB),
329
+
330
+ FSL_IMX7_CCM_ADDR = 0x30380000,
331
+
332
+ FSL_IMX7_SNVS_HP_ADDR = 0x30370000,
333
+
334
+ FSL_IMX7_ANALOG_ADDR = 0x30360000,
335
+
336
+ FSL_IMX7_OCOTP_ADDR = 0x30350000,
337
+ FSL_IMX7_OCOTP_SIZE = 0x10000,
338
+
339
+ FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
340
+ FSL_IMX7_IOMUXC_GPR_SIZE = (4 * KiB),
341
+
342
+ FSL_IMX7_IOMUXC_ADDR = 0x30330000,
343
+ FSL_IMX7_IOMUXC_SIZE = (4 * KiB),
344
+
345
+ FSL_IMX7_KPP_ADDR = 0x30320000,
346
+ FSL_IMX7_KPP_SIZE = (4 * KiB),
347
+
348
+ FSL_IMX7_ROMCP_ADDR = 0x30310000,
349
+ FSL_IMX7_ROMCP_SIZE = (4 * KiB),
350
+
351
+ FSL_IMX7_GPT4_ADDR = 0x30300000,
352
+ FSL_IMX7_GPT3_ADDR = 0x302F0000,
353
+ FSL_IMX7_GPT2_ADDR = 0x302E0000,
354
+ FSL_IMX7_GPT1_ADDR = 0x302D0000,
355
+
356
+ FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
357
+ FSL_IMX7_IOMUXC_LPSR_SIZE = (4 * KiB),
358
+
359
+ FSL_IMX7_WDOG4_ADDR = 0x302B0000,
360
+ FSL_IMX7_WDOG3_ADDR = 0x302A0000,
361
+ FSL_IMX7_WDOG2_ADDR = 0x30290000,
362
+ FSL_IMX7_WDOG1_ADDR = 0x30280000,
363
+
364
+ FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
365
+
366
+ FSL_IMX7_GPIO7_ADDR = 0x30260000,
367
+ FSL_IMX7_GPIO6_ADDR = 0x30250000,
368
+ FSL_IMX7_GPIO5_ADDR = 0x30240000,
369
+ FSL_IMX7_GPIO4_ADDR = 0x30230000,
370
+ FSL_IMX7_GPIO3_ADDR = 0x30220000,
371
+ FSL_IMX7_GPIO2_ADDR = 0x30210000,
372
+ FSL_IMX7_GPIO1_ADDR = 0x30200000,
373
+
374
+ FSL_IMX7_AIPS1_CONF_ADDR = 0x301F0000,
375
+ FSL_IMX7_AIPS1_CONF_SIZE = (64 * KiB),
376
377
- FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
378
FSL_IMX7_A7MPCORE_DAP_ADDR = 0x30000000,
379
+ FSL_IMX7_A7MPCORE_DAP_SIZE = (1 * MiB),
380
381
- FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
382
- FSL_IMX7_PCIE_REG_SIZE = 16 * 1024,
383
+ /* AIPS-1 End */
384
385
- FSL_IMX7_GPR_ADDR = 0x30340000,
386
+ FSL_IMX7_EIM_CS0_ADDR = 0x28000000,
387
+ FSL_IMX7_EIM_CS0_SIZE = (128 * MiB),
388
389
- FSL_IMX7_DMA_APBH_ADDR = 0x33000000,
390
- FSL_IMX7_DMA_APBH_SIZE = 0x2000,
391
+ FSL_IMX7_OCRAM_PXP_ADDR = 0x00940000,
392
+ FSL_IMX7_OCRAM_PXP_SIZE = (32 * KiB),
393
+
394
+ FSL_IMX7_OCRAM_EPDC_ADDR = 0x00920000,
395
+ FSL_IMX7_OCRAM_EPDC_SIZE = (128 * KiB),
396
+
397
+ FSL_IMX7_OCRAM_MEM_ADDR = 0x00900000,
398
+ FSL_IMX7_OCRAM_MEM_SIZE = (128 * KiB),
399
+
400
+ FSL_IMX7_TCMU_ADDR = 0x00800000,
401
+ FSL_IMX7_TCMU_SIZE = (32 * KiB),
402
+
403
+ FSL_IMX7_TCML_ADDR = 0x007F8000,
404
+ FSL_IMX7_TCML_SIZE = (32 * KiB),
405
+
406
+ FSL_IMX7_OCRAM_S_ADDR = 0x00180000,
407
+ FSL_IMX7_OCRAM_S_SIZE = (32 * KiB),
408
+
409
+ FSL_IMX7_CAAM_MEM_ADDR = 0x00100000,
410
+ FSL_IMX7_CAAM_MEM_SIZE = (32 * KiB),
411
+
412
+ FSL_IMX7_ROM_ADDR = 0x00000000,
413
+ FSL_IMX7_ROM_SIZE = (96 * KiB),
414
};
415
416
enum FslIMX7IRQs {
417
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
418
index XXXXXXX..XXXXXXX 100644
419
--- a/hw/arm/fsl-imx7.c
420
+++ b/hw/arm/fsl-imx7.c
421
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
422
char name[NAME_SIZE];
423
int i;
424
425
+ /*
426
+ * CPUs
427
+ */
428
for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX7_NUM_CPUS); i++) {
429
snprintf(name, NAME_SIZE, "cpu%d", i);
430
object_initialize_child(obj, name, &s->cpu[i],
431
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
432
TYPE_A15MPCORE_PRIV);
433
434
/*
435
- * GPIOs 1 to 7
436
+ * GPIOs
437
*/
438
for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) {
439
snprintf(name, NAME_SIZE, "gpio%d", i);
440
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
441
}
442
443
/*
444
- * GPT1, 2, 3, 4
445
+ * GPTs
446
*/
447
for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) {
448
snprintf(name, NAME_SIZE, "gpt%d", i);
449
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
450
*/
451
object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2);
452
453
+ /*
454
+ * ECSPIs
455
+ */
456
for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) {
457
snprintf(name, NAME_SIZE, "spi%d", i + 1);
458
object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI);
459
}
460
461
-
462
+ /*
463
+ * I2Cs
464
+ */
465
for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) {
466
snprintf(name, NAME_SIZE, "i2c%d", i + 1);
467
object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C);
468
}
469
470
/*
471
- * UART
472
+ * UARTs
473
*/
474
for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) {
475
snprintf(name, NAME_SIZE, "uart%d", i);
476
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
477
}
478
479
/*
480
- * Ethernet
481
+ * Ethernets
482
*/
483
for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) {
484
snprintf(name, NAME_SIZE, "eth%d", i);
485
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
486
}
487
488
/*
489
- * SDHCI
490
+ * SDHCIs
491
*/
492
for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) {
493
snprintf(name, NAME_SIZE, "usdhc%d", i);
494
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
495
object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS);
496
497
/*
498
- * Watchdog
499
+ * Watchdogs
500
*/
501
for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) {
502
snprintf(name, NAME_SIZE, "wdt%d", i);
503
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
504
*/
505
object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR);
506
507
+ /*
508
+ * PCIE
509
+ */
510
object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST);
511
512
+ /*
513
+ * USBs
514
+ */
515
for (i = 0; i < FSL_IMX7_NUM_USBS; i++) {
516
snprintf(name, NAME_SIZE, "usb%d", i);
517
object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA);
518
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
519
return;
520
}
521
522
+ /*
523
+ * CPUs
524
+ */
525
for (i = 0; i < smp_cpus; i++) {
526
o = OBJECT(&s->cpu[i]);
527
528
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
529
* A7MPCORE DAP
530
*/
531
create_unimplemented_device("a7mpcore-dap", FSL_IMX7_A7MPCORE_DAP_ADDR,
532
- 0x100000);
533
+ FSL_IMX7_A7MPCORE_DAP_SIZE);
534
535
/*
536
- * GPT1, 2, 3, 4
537
+ * GPTs
538
*/
539
for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) {
540
static const hwaddr FSL_IMX7_GPTn_ADDR[FSL_IMX7_NUM_GPTS] = {
541
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
542
FSL_IMX7_GPTn_IRQ[i]));
543
}
544
545
+ /*
546
+ * GPIOs
547
+ */
548
for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) {
549
static const hwaddr FSL_IMX7_GPIOn_ADDR[FSL_IMX7_NUM_GPIOS] = {
550
FSL_IMX7_GPIO1_ADDR,
551
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
552
/*
553
* IOMUXC and IOMUXC_LPSR
554
*/
555
- for (i = 0; i < FSL_IMX7_NUM_IOMUXCS; i++) {
556
- static const hwaddr FSL_IMX7_IOMUXCn_ADDR[FSL_IMX7_NUM_IOMUXCS] = {
557
- FSL_IMX7_IOMUXC_ADDR,
558
- FSL_IMX7_IOMUXC_LPSR_ADDR,
559
- };
560
-
561
- snprintf(name, NAME_SIZE, "iomuxc%d", i);
562
- create_unimplemented_device(name, FSL_IMX7_IOMUXCn_ADDR[i],
563
- FSL_IMX7_IOMUXCn_SIZE);
564
- }
565
+ create_unimplemented_device("iomuxc", FSL_IMX7_IOMUXC_ADDR,
566
+ FSL_IMX7_IOMUXC_SIZE);
567
+ create_unimplemented_device("iomuxc_lspr", FSL_IMX7_IOMUXC_LPSR_ADDR,
568
+ FSL_IMX7_IOMUXC_LPSR_SIZE);
569
570
/*
571
* CCM
572
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
573
sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort);
574
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX7_GPC_ADDR);
575
576
- /* Initialize all ECSPI */
577
+ /*
578
+ * ECSPIs
579
+ */
580
for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) {
581
static const hwaddr FSL_IMX7_SPIn_ADDR[FSL_IMX7_NUM_ECSPIS] = {
582
FSL_IMX7_ECSPI1_ADDR,
583
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
584
FSL_IMX7_SPIn_IRQ[i]));
585
}
586
587
+ /*
588
+ * I2Cs
589
+ */
590
for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) {
591
static const hwaddr FSL_IMX7_I2Cn_ADDR[FSL_IMX7_NUM_I2CS] = {
592
FSL_IMX7_I2C1_ADDR,
593
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
594
}
595
596
/*
597
- * UART
598
+ * UARTs
599
*/
600
for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) {
601
static const hwaddr FSL_IMX7_UARTn_ADDR[FSL_IMX7_NUM_UARTS] = {
602
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
603
}
604
605
/*
606
- * Ethernet
607
+ * Ethernets
608
*
609
* We must use two loops since phy_connected affects the other interface
610
* and we have to set all properties before calling sysbus_realize().
611
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
612
}
613
614
/*
615
- * USDHC
616
+ * USDHCs
617
*/
618
for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) {
619
static const hwaddr FSL_IMX7_USDHCn_ADDR[FSL_IMX7_NUM_USDHCS] = {
620
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
621
* SNVS
622
*/
623
sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort);
624
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_ADDR);
625
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_HP_ADDR);
626
627
/*
628
* SRC
629
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
630
create_unimplemented_device("src", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE);
631
632
/*
633
- * Watchdog
634
+ * Watchdogs
635
*/
636
for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) {
637
static const hwaddr FSL_IMX7_WDOGn_ADDR[FSL_IMX7_NUM_WDTS] = {
638
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
639
create_unimplemented_device("caam", FSL_IMX7_CAAM_ADDR, FSL_IMX7_CAAM_SIZE);
640
641
/*
642
- * PWM
643
+ * PWMs
644
*/
645
- create_unimplemented_device("pwm1", FSL_IMX7_PWM1_ADDR, FSL_IMX7_PWMn_SIZE);
646
- create_unimplemented_device("pwm2", FSL_IMX7_PWM2_ADDR, FSL_IMX7_PWMn_SIZE);
647
- create_unimplemented_device("pwm3", FSL_IMX7_PWM3_ADDR, FSL_IMX7_PWMn_SIZE);
648
- create_unimplemented_device("pwm4", FSL_IMX7_PWM4_ADDR, FSL_IMX7_PWMn_SIZE);
649
+ for (i = 0; i < FSL_IMX7_NUM_PWMS; i++) {
650
+ static const hwaddr FSL_IMX7_PWMn_ADDR[FSL_IMX7_NUM_PWMS] = {
651
+ FSL_IMX7_PWM1_ADDR,
652
+ FSL_IMX7_PWM2_ADDR,
653
+ FSL_IMX7_PWM3_ADDR,
654
+ FSL_IMX7_PWM4_ADDR,
655
+ };
656
+
657
+ snprintf(name, NAME_SIZE, "pwm%d", i);
658
+ create_unimplemented_device(name, FSL_IMX7_PWMn_ADDR[i],
659
+ FSL_IMX7_PWMn_SIZE);
660
+ }
661
662
/*
663
- * CAN
664
+ * CANs
665
*/
666
- create_unimplemented_device("can1", FSL_IMX7_CAN1_ADDR, FSL_IMX7_CANn_SIZE);
667
- create_unimplemented_device("can2", FSL_IMX7_CAN2_ADDR, FSL_IMX7_CANn_SIZE);
668
+ for (i = 0; i < FSL_IMX7_NUM_CANS; i++) {
669
+ static const hwaddr FSL_IMX7_CANn_ADDR[FSL_IMX7_NUM_CANS] = {
670
+ FSL_IMX7_CAN1_ADDR,
671
+ FSL_IMX7_CAN2_ADDR,
672
+ };
673
+
674
+ snprintf(name, NAME_SIZE, "can%d", i);
675
+ create_unimplemented_device(name, FSL_IMX7_CANn_ADDR[i],
676
+ FSL_IMX7_CANn_SIZE);
677
+ }
678
679
/*
680
- * SAI (Audio SSI (Synchronous Serial Interface))
681
+ * SAIs (Audio SSI (Synchronous Serial Interface))
682
*/
683
- create_unimplemented_device("sai1", FSL_IMX7_SAI1_ADDR, FSL_IMX7_SAIn_SIZE);
684
- create_unimplemented_device("sai2", FSL_IMX7_SAI2_ADDR, FSL_IMX7_SAIn_SIZE);
685
- create_unimplemented_device("sai2", FSL_IMX7_SAI3_ADDR, FSL_IMX7_SAIn_SIZE);
686
+ for (i = 0; i < FSL_IMX7_NUM_SAIS; i++) {
687
+ static const hwaddr FSL_IMX7_SAIn_ADDR[FSL_IMX7_NUM_SAIS] = {
688
+ FSL_IMX7_SAI1_ADDR,
689
+ FSL_IMX7_SAI2_ADDR,
690
+ FSL_IMX7_SAI3_ADDR,
691
+ };
692
+
693
+ snprintf(name, NAME_SIZE, "sai%d", i);
694
+ create_unimplemented_device(name, FSL_IMX7_SAIn_ADDR[i],
695
+ FSL_IMX7_SAIn_SIZE);
696
+ }
697
698
/*
699
* OCOTP
700
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
701
create_unimplemented_device("ocotp", FSL_IMX7_OCOTP_ADDR,
702
FSL_IMX7_OCOTP_SIZE);
703
704
+ /*
705
+ * GPR
706
+ */
707
sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort);
708
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_GPR_ADDR);
709
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_IOMUXC_GPR_ADDR);
710
711
+ /*
712
+ * PCIE
713
+ */
714
sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort);
715
sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR);
716
717
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
718
irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ);
719
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq);
720
721
-
722
+ /*
723
+ * USBs
724
+ */
725
for (i = 0; i < FSL_IMX7_NUM_USBS; i++) {
726
static const hwaddr FSL_IMX7_USBMISCn_ADDR[FSL_IMX7_NUM_USBS] = {
727
FSL_IMX7_USBMISC1_ADDR,
728
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
729
*/
730
create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR,
731
FSL_IMX7_PCIE_PHY_SIZE);
732
+
35
}
733
}
36
734
37
@@ -XXX,XX +XXX,XX @@ static void stm32f2xx_timer_class_init(ObjectClass *klass, void *data)
735
static Property fsl_imx7_properties[] = {
38
dc->reset = stm32f2xx_timer_reset;
39
device_class_set_props(dc, stm32f2xx_timer_properties);
40
dc->vmsd = &vmstate_stm32f2xx_timer;
41
+ dc->realize = stm32f2xx_timer_realize;
42
}
43
44
static const TypeInfo stm32f2xx_timer_info = {
45
--
736
--
46
2.20.1
737
2.34.1
47
48
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
Prepare for, but do not yet implement, the EL2&0 regime.
3
* Add TZASC as unimplemented device.
4
This involves adding the new MMUIdx enumerators and adjusting
4
- Allow bare metal applications to access this (unimplemented) device
5
some of the MMUIdx related predicates to match.
5
* Add CSU as unimplemented device.
6
- Allow bare metal applications to access this (unimplemented) device
7
* Add various memory segments
8
- OCRAM
9
- OCRAM EPDC
10
- OCRAM PXP
11
- OCRAM S
12
- ROM
13
- CAAM
6
14
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
15
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
16
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
Message-id: f887a3483996ba06d40bd62ffdfb0ecf68621987.1692964892.git.jcd@tribudubois.net
10
Message-id: 20200206105448.4726-20-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
19
---
13
target/arm/cpu-param.h | 2 +-
20
include/hw/arm/fsl-imx7.h | 7 +++++
14
target/arm/cpu.h | 134 ++++++++++++++++++-----------------------
21
hw/arm/fsl-imx7.c | 63 +++++++++++++++++++++++++++++++++++++++
15
target/arm/internals.h | 35 +++++++++++
22
2 files changed, 70 insertions(+)
16
target/arm/helper.c | 66 +++++++++++++++++---
17
target/arm/translate.c | 1 -
18
5 files changed, 152 insertions(+), 86 deletions(-)
19
23
20
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
24
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
21
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu-param.h
26
--- a/include/hw/arm/fsl-imx7.h
23
+++ b/target/arm/cpu-param.h
27
+++ b/include/hw/arm/fsl-imx7.h
24
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@ struct FslIMX7State {
25
# define TARGET_PAGE_BITS_MIN 10
29
IMX7GPRState gpr;
26
#endif
30
ChipideaState usb[FSL_IMX7_NUM_USBS];
27
31
DesignwarePCIEHost pcie;
28
-#define NB_MMU_MODES 8
32
+ MemoryRegion rom;
29
+#define NB_MMU_MODES 9
33
+ MemoryRegion caam;
30
34
+ MemoryRegion ocram;
31
#endif
35
+ MemoryRegion ocram_epdc;
32
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
36
+ MemoryRegion ocram_pxp;
37
+ MemoryRegion ocram_s;
38
+
39
uint32_t phy_num[FSL_IMX7_NUM_ETHS];
40
bool phy_connected[FSL_IMX7_NUM_ETHS];
41
};
42
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
33
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/cpu.h
44
--- a/hw/arm/fsl-imx7.c
35
+++ b/target/arm/cpu.h
45
+++ b/hw/arm/fsl-imx7.c
36
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
46
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
37
* + NonSecure EL1 & 0 stage 1
47
create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR,
38
* + NonSecure EL1 & 0 stage 2
48
FSL_IMX7_PCIE_PHY_SIZE);
39
* + NonSecure EL2
49
40
- * + Secure EL1 & EL0
41
+ * + NonSecure EL2 & 0 (ARMv8.1-VHE)
42
+ * + Secure EL1 & 0
43
* + Secure EL3
44
* If EL3 is 32-bit:
45
* + NonSecure PL1 & 0 stage 1
46
* + NonSecure PL1 & 0 stage 2
47
* + NonSecure PL2
48
- * + Secure PL0 & PL1
49
+ * + Secure PL0
50
+ * + Secure PL1
51
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
52
*
53
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
54
- * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
55
- * may differ in access permissions even if the VA->PA map is the same
56
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
57
+ * because they may differ in access permissions even if the VA->PA map is
58
+ * the same
59
* 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
60
* translation, which means that we have one mmu_idx that deals with two
61
* concatenated translation regimes [this sort of combined s1+2 TLB is
62
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
63
* 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
64
* translation regimes, because they map reasonably well to each other
65
* and they can't both be active at the same time.
66
- * This gives us the following list of mmu_idx values:
67
+ * 5. we want to be able to use the TLB for accesses done as part of a
68
+ * stage1 page table walk, rather than having to walk the stage2 page
69
+ * table over and over.
70
*
71
- * NS EL0 (aka NS PL0) stage 1+2
72
- * NS EL1 (aka NS PL1) stage 1+2
73
+ * This gives us the following list of cases:
74
+ *
75
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
76
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
77
+ * NS EL0 EL2&0
78
+ * NS EL2 EL2&0
79
* NS EL2 (aka NS PL2)
80
+ * S EL0 EL1&0 (aka S PL0)
81
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
82
* S EL3 (aka S PL1)
83
- * S EL0 (aka S PL0)
84
- * S EL1 (not used if EL3 is 32 bit)
85
- * NS EL0+1 stage 2
86
+ * NS EL1&0 stage 2
87
*
88
- * (The last of these is an mmu_idx because we want to be able to use the TLB
89
- * for the accesses done as part of a stage 1 page table walk, rather than
90
- * having to walk the stage 2 page table over and over.)
91
+ * for a total of 9 different mmu_idx.
92
*
93
* R profile CPUs have an MPU, but can use the same set of MMU indexes
94
* as A profile. They only need to distinguish NS EL0 and NS EL1 (and
95
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
96
* For M profile we arrange them to have a bit for priv, a bit for negpri
97
* and a bit for secure.
98
*/
99
-#define ARM_MMU_IDX_A 0x10 /* A profile */
100
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
101
-#define ARM_MMU_IDX_M 0x40 /* M profile */
102
+#define ARM_MMU_IDX_A 0x10 /* A profile */
103
+#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
104
+#define ARM_MMU_IDX_M 0x40 /* M profile */
105
106
-/* meanings of the bits for M profile mmu idx values */
107
-#define ARM_MMU_IDX_M_PRIV 0x1
108
+/* Meanings of the bits for M profile mmu idx values */
109
+#define ARM_MMU_IDX_M_PRIV 0x1
110
#define ARM_MMU_IDX_M_NEGPRI 0x2
111
-#define ARM_MMU_IDX_M_S 0x4
112
+#define ARM_MMU_IDX_M_S 0x4 /* Secure */
113
114
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
115
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
116
+#define ARM_MMU_IDX_TYPE_MASK \
117
+ (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
118
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
119
120
typedef enum ARMMMUIdx {
121
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
122
- ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
123
- ARMMMUIdx_E2 = 2 | ARM_MMU_IDX_A,
124
- ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
125
- ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
126
- ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
127
- ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
128
+ /*
50
+ /*
129
+ * A-profile.
51
+ * CSU
130
+ */
52
+ */
131
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
53
+ create_unimplemented_device("csu", FSL_IMX7_CSU_ADDR,
132
+ ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
54
+ FSL_IMX7_CSU_SIZE);
133
+
134
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
135
+
136
+ ARMMMUIdx_E2 = 3 | ARM_MMU_IDX_A,
137
+ ARMMMUIdx_E20_2 = 4 | ARM_MMU_IDX_A,
138
+
139
+ ARMMMUIdx_SE10_0 = 5 | ARM_MMU_IDX_A,
140
+ ARMMMUIdx_SE10_1 = 6 | ARM_MMU_IDX_A,
141
+ ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
142
+
143
+ ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
144
+
55
+
145
+ /*
56
+ /*
146
+ * These are not allocated TLBs and are used only for AT system
57
+ * TZASC
147
+ * instructions or for the first stage of an S12 page table walk.
148
+ */
58
+ */
149
+ ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
59
+ create_unimplemented_device("tzasc", FSL_IMX7_TZASC_ADDR,
150
+ ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
60
+ FSL_IMX7_TZASC_SIZE);
151
+
61
+
152
+ /*
62
+ /*
153
+ * M-profile.
63
+ * OCRAM memory
154
+ */
64
+ */
155
ARMMMUIdx_MUser = ARM_MMU_IDX_M,
65
+ memory_region_init_ram(&s->ocram, NULL, "imx7.ocram",
156
ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
66
+ FSL_IMX7_OCRAM_MEM_SIZE,
157
ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
67
+ &error_abort);
158
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
68
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_MEM_ADDR,
159
ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
69
+ &s->ocram);
160
ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
161
ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
162
- /* Indexes below here don't have TLBs and are used only for AT system
163
- * instructions or for the first stage of an S12 page table walk.
164
- */
165
- ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
166
- ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
167
} ARMMMUIdx;
168
169
/*
170
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
171
172
typedef enum ARMMMUIdxBit {
173
TO_CORE_BIT(E10_0),
174
+ TO_CORE_BIT(E20_0),
175
TO_CORE_BIT(E10_1),
176
TO_CORE_BIT(E2),
177
+ TO_CORE_BIT(E20_2),
178
TO_CORE_BIT(SE10_0),
179
TO_CORE_BIT(SE10_1),
180
TO_CORE_BIT(SE3),
181
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
182
183
#define MMU_USER_IDX 0
184
185
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
186
-{
187
- return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
188
-}
189
-
190
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
191
-{
192
- if (arm_feature(env, ARM_FEATURE_M)) {
193
- return mmu_idx | ARM_MMU_IDX_M;
194
- } else {
195
- return mmu_idx | ARM_MMU_IDX_A;
196
- }
197
-}
198
-
199
-/* Return the exception level we're running at if this is our mmu_idx */
200
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
201
-{
202
- switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
203
- case ARM_MMU_IDX_A:
204
- return mmu_idx & 3;
205
- case ARM_MMU_IDX_M:
206
- return mmu_idx & ARM_MMU_IDX_M_PRIV;
207
- default:
208
- g_assert_not_reached();
209
- }
210
-}
211
-
212
-/*
213
- * Return the MMU index for a v7M CPU with all relevant information
214
- * manually specified.
215
- */
216
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
217
- bool secstate, bool priv, bool negpri);
218
-
219
-/* Return the MMU index for a v7M CPU in the specified security and
220
- * privilege state.
221
- */
222
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
223
- bool secstate, bool priv);
224
-
225
-/* Return the MMU index for a v7M CPU in the specified security state */
226
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
227
-
228
/**
229
* cpu_mmu_index:
230
* @env: The cpu environment
231
diff --git a/target/arm/internals.h b/target/arm/internals.h
232
index XXXXXXX..XXXXXXX 100644
233
--- a/target/arm/internals.h
234
+++ b/target/arm/internals.h
235
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
236
MMUAccessType access_type, int mmu_idx,
237
bool probe, uintptr_t retaddr);
238
239
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
240
+{
241
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
242
+}
243
+
70
+
244
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
71
+ /*
245
+{
72
+ * OCRAM EPDC memory
246
+ if (arm_feature(env, ARM_FEATURE_M)) {
73
+ */
247
+ return mmu_idx | ARM_MMU_IDX_M;
74
+ memory_region_init_ram(&s->ocram_epdc, NULL, "imx7.ocram_epdc",
248
+ } else {
75
+ FSL_IMX7_OCRAM_EPDC_SIZE,
249
+ return mmu_idx | ARM_MMU_IDX_A;
76
+ &error_abort);
250
+ }
77
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_EPDC_ADDR,
251
+}
78
+ &s->ocram_epdc);
252
+
79
+
253
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
80
+ /*
81
+ * OCRAM PXP memory
82
+ */
83
+ memory_region_init_ram(&s->ocram_pxp, NULL, "imx7.ocram_pxp",
84
+ FSL_IMX7_OCRAM_PXP_SIZE,
85
+ &error_abort);
86
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_PXP_ADDR,
87
+ &s->ocram_pxp);
254
+
88
+
255
+/*
89
+ /*
256
+ * Return the MMU index for a v7M CPU with all relevant information
90
+ * OCRAM_S memory
257
+ * manually specified.
91
+ */
258
+ */
92
+ memory_region_init_ram(&s->ocram_s, NULL, "imx7.ocram_s",
259
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
93
+ FSL_IMX7_OCRAM_S_SIZE,
260
+ bool secstate, bool priv, bool negpri);
94
+ &error_abort);
95
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_OCRAM_S_ADDR,
96
+ &s->ocram_s);
261
+
97
+
262
+/*
98
+ /*
263
+ * Return the MMU index for a v7M CPU in the specified security and
99
+ * ROM memory
264
+ * privilege state.
100
+ */
265
+ */
101
+ memory_region_init_rom(&s->rom, OBJECT(dev), "imx7.rom",
266
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
102
+ FSL_IMX7_ROM_SIZE, &error_abort);
267
+ bool secstate, bool priv);
103
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_ROM_ADDR,
104
+ &s->rom);
268
+
105
+
269
+/* Return the MMU index for a v7M CPU in the specified security state */
106
+ /*
270
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
107
+ * CAAM memory
271
+
108
+ */
272
/* Return true if the stage 1 translation regime is using LPAE format page
109
+ memory_region_init_rom(&s->caam, OBJECT(dev), "imx7.caam",
273
* tables */
110
+ FSL_IMX7_CAAM_MEM_SIZE, &error_abort);
274
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
111
+ memory_region_add_subregion(get_system_memory(), FSL_IMX7_CAAM_MEM_ADDR,
275
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
112
+ &s->caam);
276
switch (mmu_idx) {
277
case ARMMMUIdx_E10_0:
278
case ARMMMUIdx_E10_1:
279
+ case ARMMMUIdx_E20_0:
280
+ case ARMMMUIdx_E20_2:
281
case ARMMMUIdx_Stage1_E0:
282
case ARMMMUIdx_Stage1_E1:
283
case ARMMMUIdx_E2:
284
diff --git a/target/arm/helper.c b/target/arm/helper.c
285
index XXXXXXX..XXXXXXX 100644
286
--- a/target/arm/helper.c
287
+++ b/target/arm/helper.c
288
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
289
#endif /* !CONFIG_USER_ONLY */
290
291
/* Return the exception level which controls this address translation regime */
292
-static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
293
+static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
294
{
295
switch (mmu_idx) {
296
+ case ARMMMUIdx_E20_0:
297
+ case ARMMMUIdx_E20_2:
298
case ARMMMUIdx_Stage2:
299
case ARMMMUIdx_E2:
300
return 2;
301
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
302
case ARMMMUIdx_SE10_1:
303
case ARMMMUIdx_Stage1_E0:
304
case ARMMMUIdx_Stage1_E1:
305
+ case ARMMMUIdx_E10_0:
306
+ case ARMMMUIdx_E10_1:
307
case ARMMMUIdx_MPrivNegPri:
308
case ARMMMUIdx_MUserNegPri:
309
case ARMMMUIdx_MPriv:
310
@@ -XXX,XX +XXX,XX @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
311
*/
312
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
313
{
314
- if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
315
- mmu_idx += (ARMMMUIdx_Stage1_E0 - ARMMMUIdx_E10_0);
316
+ switch (mmu_idx) {
317
+ case ARMMMUIdx_E10_0:
318
+ return ARMMMUIdx_Stage1_E0;
319
+ case ARMMMUIdx_E10_1:
320
+ return ARMMMUIdx_Stage1_E1;
321
+ default:
322
+ return mmu_idx;
323
}
324
- return mmu_idx;
325
}
113
}
326
114
327
/* Return true if the translation regime is using LPAE format page tables */
115
static Property fsl_imx7_properties[] = {
328
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
329
{
330
switch (mmu_idx) {
331
case ARMMMUIdx_SE10_0:
332
+ case ARMMMUIdx_E20_0:
333
case ARMMMUIdx_Stage1_E0:
334
case ARMMMUIdx_MUser:
335
case ARMMMUIdx_MSUser:
336
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
337
return 0;
338
}
339
340
+/* Return the exception level we're running at if this is our mmu_idx */
341
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
342
+{
343
+ if (mmu_idx & ARM_MMU_IDX_M) {
344
+ return mmu_idx & ARM_MMU_IDX_M_PRIV;
345
+ }
346
+
347
+ switch (mmu_idx) {
348
+ case ARMMMUIdx_E10_0:
349
+ case ARMMMUIdx_E20_0:
350
+ case ARMMMUIdx_SE10_0:
351
+ return 0;
352
+ case ARMMMUIdx_E10_1:
353
+ case ARMMMUIdx_SE10_1:
354
+ return 1;
355
+ case ARMMMUIdx_E2:
356
+ case ARMMMUIdx_E20_2:
357
+ return 2;
358
+ case ARMMMUIdx_SE3:
359
+ return 3;
360
+ default:
361
+ g_assert_not_reached();
362
+ }
363
+}
364
+
365
#ifndef CONFIG_TCG
366
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
367
{
368
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
369
return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
370
}
371
372
- if (el < 2 && arm_is_secure_below_el3(env)) {
373
- return ARMMMUIdx_SE10_0 + el;
374
- } else {
375
- return ARMMMUIdx_E10_0 + el;
376
+ switch (el) {
377
+ case 0:
378
+ /* TODO: ARMv8.1-VHE */
379
+ if (arm_is_secure_below_el3(env)) {
380
+ return ARMMMUIdx_SE10_0;
381
+ }
382
+ return ARMMMUIdx_E10_0;
383
+ case 1:
384
+ if (arm_is_secure_below_el3(env)) {
385
+ return ARMMMUIdx_SE10_1;
386
+ }
387
+ return ARMMMUIdx_E10_1;
388
+ case 2:
389
+ /* TODO: ARMv8.1-VHE */
390
+ /* TODO: ARMv8.4-SecEL2 */
391
+ return ARMMMUIdx_E2;
392
+ case 3:
393
+ return ARMMMUIdx_SE3;
394
+ default:
395
+ g_assert_not_reached();
396
}
397
}
398
399
diff --git a/target/arm/translate.c b/target/arm/translate.c
400
index XXXXXXX..XXXXXXX 100644
401
--- a/target/arm/translate.c
402
+++ b/target/arm/translate.c
403
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
404
case ARMMMUIdx_MSUserNegPri:
405
case ARMMMUIdx_MSPrivNegPri:
406
return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
407
- case ARMMMUIdx_Stage2:
408
default:
409
g_assert_not_reached();
410
}
411
--
116
--
412
2.20.1
117
2.34.1
413
118
414
119
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Jean-Christophe Dubois <jcd@tribudubois.net>
2
2
3
Apart from the wholesale redirection that HCR_EL2.E2H performs
3
The SRC device is normally used to start the secondary CPU.
4
for EL2, there's a separate redirection specific to the timers
4
5
that happens for EL0 when running in the EL2&0 regime.
5
When running Linux directly, QEMU is emulating a PSCI interface that UBOOT
6
6
is installing at boot time and therefore the fact that the SRC device is
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
7
unimplemented is hidden as Qemu respond directly to PSCI requets without
8
using the SRC device.
9
10
But if you try to run a more bare metal application (maybe uboot itself),
11
then it is not possible to start the secondary CPU as the SRC is an
12
unimplemented device.
13
14
This patch adds the ability to start the secondary CPU through the SRC
15
device so that you can use this feature in bare metal applications.
16
17
Signed-off-by: Jean-Christophe Dubois <jcd@tribudubois.net>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: ce9a0162defd2acee5dc7f8a674743de0cded569.1692964892.git.jcd@tribudubois.net
10
Message-id: 20200206105448.4726-30-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
21
---
13
target/arm/helper.c | 181 +++++++++++++++++++++++++++++++++++++++++---
22
include/hw/arm/fsl-imx7.h | 3 +-
14
1 file changed, 169 insertions(+), 12 deletions(-)
23
include/hw/misc/imx7_src.h | 66 +++++++++
15
24
hw/arm/fsl-imx7.c | 8 +-
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
25
hw/misc/imx7_src.c | 276 +++++++++++++++++++++++++++++++++++++
26
hw/misc/meson.build | 1 +
27
hw/misc/trace-events | 4 +
28
6 files changed, 356 insertions(+), 2 deletions(-)
29
create mode 100644 include/hw/misc/imx7_src.h
30
create mode 100644 hw/misc/imx7_src.c
31
32
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
17
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper.c
34
--- a/include/hw/arm/fsl-imx7.h
19
+++ b/target/arm/helper.c
35
+++ b/include/hw/arm/fsl-imx7.h
20
@@ -XXX,XX +XXX,XX @@ static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
36
@@ -XXX,XX +XXX,XX @@
21
gt_ctl_write(env, ri, GTIMER_PHYS, value);
37
#include "hw/misc/imx7_ccm.h"
22
}
38
#include "hw/misc/imx7_snvs.h"
23
39
#include "hw/misc/imx7_gpr.h"
24
+static int gt_phys_redir_timeridx(CPUARMState *env)
40
+#include "hw/misc/imx7_src.h"
25
+{
41
#include "hw/watchdog/wdt_imx2.h"
26
+ switch (arm_mmu_idx(env)) {
42
#include "hw/gpio/imx_gpio.h"
27
+ case ARMMMUIdx_E20_0:
43
#include "hw/char/imx_serial.h"
28
+ case ARMMMUIdx_E20_2:
44
@@ -XXX,XX +XXX,XX @@ struct FslIMX7State {
29
+ return GTIMER_HYP;
45
IMX7CCMState ccm;
46
IMX7AnalogState analog;
47
IMX7SNVSState snvs;
48
+ IMX7SRCState src;
49
IMXGPCv2State gpcv2;
50
IMXSPIState spi[FSL_IMX7_NUM_ECSPIS];
51
IMXI2CState i2c[FSL_IMX7_NUM_I2CS];
52
@@ -XXX,XX +XXX,XX @@ enum FslIMX7MemoryMap {
53
FSL_IMX7_GPC_ADDR = 0x303A0000,
54
55
FSL_IMX7_SRC_ADDR = 0x30390000,
56
- FSL_IMX7_SRC_SIZE = (4 * KiB),
57
58
FSL_IMX7_CCM_ADDR = 0x30380000,
59
60
diff --git a/include/hw/misc/imx7_src.h b/include/hw/misc/imx7_src.h
61
new file mode 100644
62
index XXXXXXX..XXXXXXX
63
--- /dev/null
64
+++ b/include/hw/misc/imx7_src.h
65
@@ -XXX,XX +XXX,XX @@
66
+/*
67
+ * IMX7 System Reset Controller
68
+ *
69
+ * Copyright (C) 2023 Jean-Christophe Dubois <jcd@tribudubois.net>
70
+ *
71
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
72
+ * See the COPYING file in the top-level directory.
73
+ */
74
+
75
+#ifndef IMX7_SRC_H
76
+#define IMX7_SRC_H
77
+
78
+#include "hw/sysbus.h"
79
+#include "qemu/bitops.h"
80
+#include "qom/object.h"
81
+
82
+#define SRC_SCR 0
83
+#define SRC_A7RCR0 1
84
+#define SRC_A7RCR1 2
85
+#define SRC_M4RCR 3
86
+#define SRC_ERCR 5
87
+#define SRC_HSICPHY_RCR 7
88
+#define SRC_USBOPHY1_RCR 8
89
+#define SRC_USBOPHY2_RCR 9
90
+#define SRC_MPIPHY_RCR 10
91
+#define SRC_PCIEPHY_RCR 11
92
+#define SRC_SBMR1 22
93
+#define SRC_SRSR 23
94
+#define SRC_SISR 26
95
+#define SRC_SIMR 27
96
+#define SRC_SBMR2 28
97
+#define SRC_GPR1 29
98
+#define SRC_GPR2 30
99
+#define SRC_GPR3 31
100
+#define SRC_GPR4 32
101
+#define SRC_GPR5 33
102
+#define SRC_GPR6 34
103
+#define SRC_GPR7 35
104
+#define SRC_GPR8 36
105
+#define SRC_GPR9 37
106
+#define SRC_GPR10 38
107
+#define SRC_MAX 39
108
+
109
+/* SRC_A7SCR1 */
110
+#define R_CORE1_ENABLE_SHIFT 1
111
+#define R_CORE1_ENABLE_LENGTH 1
112
+/* SRC_A7SCR0 */
113
+#define R_CORE1_RST_SHIFT 5
114
+#define R_CORE1_RST_LENGTH 1
115
+#define R_CORE0_RST_SHIFT 4
116
+#define R_CORE0_RST_LENGTH 1
117
+
118
+#define TYPE_IMX7_SRC "imx7.src"
119
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7SRCState, IMX7_SRC)
120
+
121
+struct IMX7SRCState {
122
+ /* <private> */
123
+ SysBusDevice parent_obj;
124
+
125
+ /* <public> */
126
+ MemoryRegion iomem;
127
+
128
+ uint32_t regs[SRC_MAX];
129
+};
130
+
131
+#endif /* IMX7_SRC_H */
132
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/hw/arm/fsl-imx7.c
135
+++ b/hw/arm/fsl-imx7.c
136
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_init(Object *obj)
137
*/
138
object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2);
139
140
+ /*
141
+ * SRC
142
+ */
143
+ object_initialize_child(obj, "src", &s->src, TYPE_IMX7_SRC);
144
+
145
/*
146
* ECSPIs
147
*/
148
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
149
/*
150
* SRC
151
*/
152
- create_unimplemented_device("src", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE);
153
+ sysbus_realize(SYS_BUS_DEVICE(&s->src), &error_abort);
154
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX7_SRC_ADDR);
155
156
/*
157
* Watchdogs
158
diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c
159
new file mode 100644
160
index XXXXXXX..XXXXXXX
161
--- /dev/null
162
+++ b/hw/misc/imx7_src.c
163
@@ -XXX,XX +XXX,XX @@
164
+/*
165
+ * IMX7 System Reset Controller
166
+ *
167
+ * Copyright (c) 2023 Jean-Christophe Dubois <jcd@tribudubois.net>
168
+ *
169
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
170
+ * See the COPYING file in the top-level directory.
171
+ *
172
+ */
173
+
174
+#include "qemu/osdep.h"
175
+#include "hw/misc/imx7_src.h"
176
+#include "migration/vmstate.h"
177
+#include "qemu/bitops.h"
178
+#include "qemu/log.h"
179
+#include "qemu/main-loop.h"
180
+#include "qemu/module.h"
181
+#include "target/arm/arm-powerctl.h"
182
+#include "hw/core/cpu.h"
183
+#include "hw/registerfields.h"
184
+
185
+#include "trace.h"
186
+
187
+static const char *imx7_src_reg_name(uint32_t reg)
188
+{
189
+ static char unknown[20];
190
+
191
+ switch (reg) {
192
+ case SRC_SCR:
193
+ return "SRC_SCR";
194
+ case SRC_A7RCR0:
195
+ return "SRC_A7RCR0";
196
+ case SRC_A7RCR1:
197
+ return "SRC_A7RCR1";
198
+ case SRC_M4RCR:
199
+ return "SRC_M4RCR";
200
+ case SRC_ERCR:
201
+ return "SRC_ERCR";
202
+ case SRC_HSICPHY_RCR:
203
+ return "SRC_HSICPHY_RCR";
204
+ case SRC_USBOPHY1_RCR:
205
+ return "SRC_USBOPHY1_RCR";
206
+ case SRC_USBOPHY2_RCR:
207
+ return "SRC_USBOPHY2_RCR";
208
+ case SRC_PCIEPHY_RCR:
209
+ return "SRC_PCIEPHY_RCR";
210
+ case SRC_SBMR1:
211
+ return "SRC_SBMR1";
212
+ case SRC_SRSR:
213
+ return "SRC_SRSR";
214
+ case SRC_SISR:
215
+ return "SRC_SISR";
216
+ case SRC_SIMR:
217
+ return "SRC_SIMR";
218
+ case SRC_SBMR2:
219
+ return "SRC_SBMR2";
220
+ case SRC_GPR1:
221
+ return "SRC_GPR1";
222
+ case SRC_GPR2:
223
+ return "SRC_GPR2";
224
+ case SRC_GPR3:
225
+ return "SRC_GPR3";
226
+ case SRC_GPR4:
227
+ return "SRC_GPR4";
228
+ case SRC_GPR5:
229
+ return "SRC_GPR5";
230
+ case SRC_GPR6:
231
+ return "SRC_GPR6";
232
+ case SRC_GPR7:
233
+ return "SRC_GPR7";
234
+ case SRC_GPR8:
235
+ return "SRC_GPR8";
236
+ case SRC_GPR9:
237
+ return "SRC_GPR9";
238
+ case SRC_GPR10:
239
+ return "SRC_GPR10";
30
+ default:
240
+ default:
31
+ return GTIMER_PHYS;
241
+ sprintf(unknown, "%u ?", reg);
242
+ return unknown;
32
+ }
243
+ }
33
+}
244
+}
34
+
245
+
35
+static int gt_virt_redir_timeridx(CPUARMState *env)
246
+static const VMStateDescription vmstate_imx7_src = {
36
+{
247
+ .name = TYPE_IMX7_SRC,
37
+ switch (arm_mmu_idx(env)) {
248
+ .version_id = 1,
38
+ case ARMMMUIdx_E20_0:
249
+ .minimum_version_id = 1,
39
+ case ARMMMUIdx_E20_2:
250
+ .fields = (VMStateField[]) {
40
+ return GTIMER_HYPVIRT;
251
+ VMSTATE_UINT32_ARRAY(regs, IMX7SRCState, SRC_MAX),
252
+ VMSTATE_END_OF_LIST()
253
+ },
254
+};
255
+
256
+static void imx7_src_reset(DeviceState *dev)
257
+{
258
+ IMX7SRCState *s = IMX7_SRC(dev);
259
+
260
+ memset(s->regs, 0, sizeof(s->regs));
261
+
262
+ /* Set reset values */
263
+ s->regs[SRC_SCR] = 0xA0;
264
+ s->regs[SRC_SRSR] = 0x1;
265
+ s->regs[SRC_SIMR] = 0x1F;
266
+}
267
+
268
+static uint64_t imx7_src_read(void *opaque, hwaddr offset, unsigned size)
269
+{
270
+ uint32_t value = 0;
271
+ IMX7SRCState *s = (IMX7SRCState *)opaque;
272
+ uint32_t index = offset >> 2;
273
+
274
+ if (index < SRC_MAX) {
275
+ value = s->regs[index];
276
+ } else {
277
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
278
+ HWADDR_PRIx "\n", TYPE_IMX7_SRC, __func__, offset);
279
+ }
280
+
281
+ trace_imx7_src_read(imx7_src_reg_name(index), value);
282
+
283
+ return value;
284
+}
285
+
286
+
287
+/*
288
+ * The reset is asynchronous so we need to defer clearing the reset
289
+ * bit until the work is completed.
290
+ */
291
+
292
+struct SRCSCRResetInfo {
293
+ IMX7SRCState *s;
294
+ uint32_t reset_bit;
295
+};
296
+
297
+static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
298
+{
299
+ struct SRCSCRResetInfo *ri = data.host_ptr;
300
+ IMX7SRCState *s = ri->s;
301
+
302
+ assert(qemu_mutex_iothread_locked());
303
+
304
+ s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);
305
+
306
+ trace_imx7_src_write(imx7_src_reg_name(SRC_A7RCR0), s->regs[SRC_A7RCR0]);
307
+
308
+ g_free(ri);
309
+}
310
+
311
+static void imx7_defer_clear_reset_bit(uint32_t cpuid,
312
+ IMX7SRCState *s,
313
+ uint32_t reset_shift)
314
+{
315
+ struct SRCSCRResetInfo *ri;
316
+ CPUState *cpu = arm_get_cpu_by_id(cpuid);
317
+
318
+ if (!cpu) {
319
+ return;
320
+ }
321
+
322
+ ri = g_new(struct SRCSCRResetInfo, 1);
323
+ ri->s = s;
324
+ ri->reset_bit = reset_shift;
325
+
326
+ async_run_on_cpu(cpu, imx7_clear_reset_bit, RUN_ON_CPU_HOST_PTR(ri));
327
+}
328
+
329
+
330
+static void imx7_src_write(void *opaque, hwaddr offset, uint64_t value,
331
+ unsigned size)
332
+{
333
+ IMX7SRCState *s = (IMX7SRCState *)opaque;
334
+ uint32_t index = offset >> 2;
335
+ long unsigned int change_mask;
336
+ uint32_t current_value = value;
337
+
338
+ if (index >= SRC_MAX) {
339
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
340
+ HWADDR_PRIx "\n", TYPE_IMX7_SRC, __func__, offset);
341
+ return;
342
+ }
343
+
344
+ trace_imx7_src_write(imx7_src_reg_name(SRC_A7RCR0), s->regs[SRC_A7RCR0]);
345
+
346
+ change_mask = s->regs[index] ^ (uint32_t)current_value;
347
+
348
+ switch (index) {
349
+ case SRC_A7RCR0:
350
+ if (FIELD_EX32(change_mask, CORE0, RST)) {
351
+ arm_reset_cpu(0);
352
+ imx7_defer_clear_reset_bit(0, s, R_CORE0_RST_SHIFT);
353
+ }
354
+ if (FIELD_EX32(change_mask, CORE1, RST)) {
355
+ arm_reset_cpu(1);
356
+ imx7_defer_clear_reset_bit(1, s, R_CORE1_RST_SHIFT);
357
+ }
358
+ s->regs[index] = current_value;
359
+ break;
360
+ case SRC_A7RCR1:
361
+ /*
362
+ * On real hardware when the system reset controller starts a
363
+ * secondary CPU it runs through some boot ROM code which reads
364
+ * the SRC_GPRX registers controlling the start address and branches
365
+ * to it.
366
+ * Here we are taking a short cut and branching directly to the
367
+ * requested address (we don't want to run the boot ROM code inside
368
+ * QEMU)
369
+ */
370
+ if (FIELD_EX32(change_mask, CORE1, ENABLE)) {
371
+ if (FIELD_EX32(current_value, CORE1, ENABLE)) {
372
+ /* CORE 1 is brought up */
373
+ arm_set_cpu_on(1, s->regs[SRC_GPR3], s->regs[SRC_GPR4],
374
+ 3, false);
375
+ } else {
376
+ /* CORE 1 is shut down */
377
+ arm_set_cpu_off(1);
378
+ }
379
+ /* We clear the reset bits as the processor changed state */
380
+ imx7_defer_clear_reset_bit(1, s, R_CORE1_RST_SHIFT);
381
+ clear_bit(R_CORE1_RST_SHIFT, &change_mask);
382
+ }
383
+ s->regs[index] = current_value;
384
+ break;
41
+ default:
385
+ default:
42
+ return GTIMER_VIRT;
386
+ s->regs[index] = current_value;
387
+ break;
43
+ }
388
+ }
44
+}
389
+}
45
+
390
+
46
+static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
391
+static const struct MemoryRegionOps imx7_src_ops = {
47
+ const ARMCPRegInfo *ri)
392
+ .read = imx7_src_read,
48
+{
393
+ .write = imx7_src_write,
49
+ int timeridx = gt_phys_redir_timeridx(env);
394
+ .endianness = DEVICE_NATIVE_ENDIAN,
50
+ return env->cp15.c14_timer[timeridx].cval;
395
+ .valid = {
51
+}
396
+ /*
52
+
397
+ * Our device would not work correctly if the guest was doing
53
+static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
398
+ * unaligned access. This might not be a limitation on the real
54
+ uint64_t value)
399
+ * device but in practice there is no reason for a guest to access
55
+{
400
+ * this device unaligned.
56
+ int timeridx = gt_phys_redir_timeridx(env);
401
+ */
57
+ gt_cval_write(env, ri, timeridx, value);
402
+ .min_access_size = 4,
58
+}
403
+ .max_access_size = 4,
59
+
404
+ .unaligned = false,
60
+static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
405
+ },
61
+ const ARMCPRegInfo *ri)
406
+};
62
+{
407
+
63
+ int timeridx = gt_phys_redir_timeridx(env);
408
+static void imx7_src_realize(DeviceState *dev, Error **errp)
64
+ return gt_tval_read(env, ri, timeridx);
409
+{
65
+}
410
+ IMX7SRCState *s = IMX7_SRC(dev);
66
+
411
+
67
+static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
412
+ memory_region_init_io(&s->iomem, OBJECT(dev), &imx7_src_ops, s,
68
+ uint64_t value)
413
+ TYPE_IMX7_SRC, 0x1000);
69
+{
414
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
70
+ int timeridx = gt_phys_redir_timeridx(env);
415
+}
71
+ gt_tval_write(env, ri, timeridx, value);
416
+
72
+}
417
+static void imx7_src_class_init(ObjectClass *klass, void *data)
73
+
418
+{
74
+static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
419
+ DeviceClass *dc = DEVICE_CLASS(klass);
75
+ const ARMCPRegInfo *ri)
420
+
76
+{
421
+ dc->realize = imx7_src_realize;
77
+ int timeridx = gt_phys_redir_timeridx(env);
422
+ dc->reset = imx7_src_reset;
78
+ return env->cp15.c14_timer[timeridx].ctl;
423
+ dc->vmsd = &vmstate_imx7_src;
79
+}
424
+ dc->desc = "i.MX6 System Reset Controller";
80
+
425
+}
81
+static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
426
+
82
+ uint64_t value)
427
+static const TypeInfo imx7_src_info = {
83
+{
428
+ .name = TYPE_IMX7_SRC,
84
+ int timeridx = gt_phys_redir_timeridx(env);
429
+ .parent = TYPE_SYS_BUS_DEVICE,
85
+ gt_ctl_write(env, ri, timeridx, value);
430
+ .instance_size = sizeof(IMX7SRCState),
86
+}
431
+ .class_init = imx7_src_class_init,
87
+
432
+};
88
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
433
+
89
{
434
+static void imx7_src_register_types(void)
90
gt_timer_reset(env, ri, GTIMER_VIRT);
435
+{
91
@@ -XXX,XX +XXX,XX @@ static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
436
+ type_register_static(&imx7_src_info);
92
gt_recalc_timer(cpu, GTIMER_VIRT);
437
+}
93
}
438
+
94
439
+type_init(imx7_src_register_types)
95
+static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
440
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
96
+ const ARMCPRegInfo *ri)
441
index XXXXXXX..XXXXXXX 100644
97
+{
442
--- a/hw/misc/meson.build
98
+ int timeridx = gt_virt_redir_timeridx(env);
443
+++ b/hw/misc/meson.build
99
+ return env->cp15.c14_timer[timeridx].cval;
444
@@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_IMX', if_true: files(
100
+}
445
'imx6_src.c',
101
+
446
'imx6ul_ccm.c',
102
+static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
447
'imx7_ccm.c',
103
+ uint64_t value)
448
+ 'imx7_src.c',
104
+{
449
'imx7_gpr.c',
105
+ int timeridx = gt_virt_redir_timeridx(env);
450
'imx7_snvs.c',
106
+ gt_cval_write(env, ri, timeridx, value);
451
'imx_ccm.c',
107
+}
452
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
108
+
453
index XXXXXXX..XXXXXXX 100644
109
+static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
454
--- a/hw/misc/trace-events
110
+ const ARMCPRegInfo *ri)
455
+++ b/hw/misc/trace-events
111
+{
456
@@ -XXX,XX +XXX,XX @@ ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d"
112
+ int timeridx = gt_virt_redir_timeridx(env);
457
ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32
113
+ return gt_tval_read(env, ri, timeridx);
458
ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32
114
+}
459
115
+
460
+# imx7_src.c
116
+static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
461
+imx7_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32
117
+ uint64_t value)
462
+imx7_src_write(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32
118
+{
463
+
119
+ int timeridx = gt_virt_redir_timeridx(env);
464
# iotkit-sysinfo.c
120
+ gt_tval_write(env, ri, timeridx, value);
465
iotkit_sysinfo_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
121
+}
466
iotkit_sysinfo_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
122
+
123
+static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
124
+ const ARMCPRegInfo *ri)
125
+{
126
+ int timeridx = gt_virt_redir_timeridx(env);
127
+ return env->cp15.c14_timer[timeridx].ctl;
128
+}
129
+
130
+static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
131
+ uint64_t value)
132
+{
133
+ int timeridx = gt_virt_redir_timeridx(env);
134
+ gt_ctl_write(env, ri, timeridx, value);
135
+}
136
+
137
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
138
{
139
gt_timer_reset(env, ri, GTIMER_HYP);
140
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
141
.accessfn = gt_ptimer_access,
142
.fieldoffset = offsetoflow32(CPUARMState,
143
cp15.c14_timer[GTIMER_PHYS].ctl),
144
- .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
145
+ .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
146
+ .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
147
},
148
{ .name = "CNTP_CTL_S",
149
.cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
150
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
151
.accessfn = gt_ptimer_access,
152
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
153
.resetvalue = 0,
154
- .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
155
+ .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
156
+ .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
157
},
158
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
159
.type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
160
.accessfn = gt_vtimer_access,
161
.fieldoffset = offsetoflow32(CPUARMState,
162
cp15.c14_timer[GTIMER_VIRT].ctl),
163
- .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
164
+ .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
165
+ .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
166
},
167
{ .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
168
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
169
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
170
.accessfn = gt_vtimer_access,
171
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
172
.resetvalue = 0,
173
- .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
174
+ .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
175
+ .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
176
},
177
/* TimerValue views: a 32 bit downcounting view of the underlying state */
178
{ .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
179
.secure = ARM_CP_SECSTATE_NS,
180
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
181
.accessfn = gt_ptimer_access,
182
- .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
183
+ .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
184
},
185
{ .name = "CNTP_TVAL_S",
186
.cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
187
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
188
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
189
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
190
.accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
191
- .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
192
+ .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
193
},
194
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
195
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
196
.accessfn = gt_vtimer_access,
197
- .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
198
+ .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
199
},
200
{ .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
201
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
202
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
203
.accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
204
- .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
205
+ .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
206
},
207
/* The counter itself */
208
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
209
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
210
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
211
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
212
.accessfn = gt_ptimer_access,
213
- .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
214
+ .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
215
+ .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
216
},
217
{ .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
218
.secure = ARM_CP_SECSTATE_S,
219
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
220
.type = ARM_CP_IO,
221
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
222
.resetvalue = 0, .accessfn = gt_ptimer_access,
223
- .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
224
+ .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
225
+ .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
226
},
227
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
228
.access = PL0_RW,
229
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
230
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
231
.accessfn = gt_vtimer_access,
232
- .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
233
+ .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
234
+ .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
235
},
236
{ .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
237
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
238
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
239
.type = ARM_CP_IO,
240
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
241
.resetvalue = 0, .accessfn = gt_vtimer_access,
242
- .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
243
+ .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
244
+ .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
245
},
246
/* Secure timer -- this is actually restricted to only EL3
247
* and configurably Secure-EL1 via the accessfn.
248
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
249
REGINFO_SENTINEL
250
};
251
252
+static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
253
+ bool isread)
254
+{
255
+ if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
256
+ return CP_ACCESS_TRAP;
257
+ }
258
+ return CP_ACCESS_OK;
259
+}
260
+
261
#else
262
263
/* In user-mode most of the generic timer registers are inaccessible
264
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vhe_reginfo[] = {
265
.access = PL2_RW,
266
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
267
.writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
268
+ { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
269
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
270
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
271
+ .access = PL2_RW, .accessfn = e2h_access,
272
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
273
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
274
+ { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
275
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
276
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
277
+ .access = PL2_RW, .accessfn = e2h_access,
278
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
279
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
280
+ { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
281
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
282
+ .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
283
+ .access = PL2_RW, .accessfn = e2h_access,
284
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
285
+ { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
286
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
287
+ .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
288
+ .access = PL2_RW, .accessfn = e2h_access,
289
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
290
+ { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
291
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
292
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
293
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
294
+ .access = PL2_RW, .accessfn = e2h_access,
295
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
296
+ { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
297
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
298
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
299
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
300
+ .access = PL2_RW, .accessfn = e2h_access,
301
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
302
#endif
303
REGINFO_SENTINEL
304
};
305
--
2.20.1

--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Not all of the breakpoint types are supported, but those that
only examine contextidr are extended to support the new register.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/debug_helper.c | 50 +++++++++++++++++++++----------
target/arm/helper.c | 12 ++++++++++
2 files changed, 50 insertions(+), 12 deletions(-)

The architecture requires (R_TYTWB) that an attempt to return from EL3
when SCR_EL3.{NSE,NS} are {1,0} is an illegal exception return. (This
enforces that the CPU can't ever be executing below EL3 with the
NSE,NS bits indicating an invalid security state.)

We were missing this check; add it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230807150618.101357-1-peter.maydell@linaro.org
---
target/arm/tcg/helper-a64.c | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
17
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/debug_helper.c
17
--- a/target/arm/tcg/helper-a64.c
19
+++ b/target/arm/debug_helper.c
18
+++ b/target/arm/tcg/helper-a64.c
20
@@ -XXX,XX +XXX,XX @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
19
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
21
int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
20
spsr &= ~PSTATE_SS;
22
int bt;
23
uint32_t contextidr;
24
+ uint64_t hcr_el2;
25
26
/*
27
* Links to unimplemented or non-context aware breakpoints are
28
@@ -XXX,XX +XXX,XX @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
29
}
21
}
30
22
31
bt = extract64(bcr, 20, 4);
32
-
33
- /*
34
- * We match the whole register even if this is AArch32 using the
35
- * short descriptor format (in which case it holds both PROCID and ASID),
36
- * since we don't implement the optional v7 context ID masking.
37
- */
38
- contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
39
+ hcr_el2 = arm_hcr_el2_eff(env);
40
41
switch (bt) {
42
case 3: /* linked context ID match */
43
- if (arm_current_el(env) > 1) {
44
- /* Context matches never fire in EL2 or (AArch64) EL3 */
45
+ switch (arm_current_el(env)) {
46
+ default:
47
+ /* Context matches never fire in AArch64 EL3 */
48
return false;
49
+ case 2:
50
+ if (!(hcr_el2 & HCR_E2H)) {
51
+ /* Context matches never fire in EL2 without E2H enabled. */
52
+ return false;
53
+ }
54
+ contextidr = env->cp15.contextidr_el[2];
55
+ break;
56
+ case 1:
57
+ contextidr = env->cp15.contextidr_el[1];
58
+ break;
59
+ case 0:
60
+ if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
61
+ contextidr = env->cp15.contextidr_el[2];
62
+ } else {
63
+ contextidr = env->cp15.contextidr_el[1];
64
+ }
65
+ break;
66
}
67
- return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
68
- case 5: /* linked address mismatch (reserved in AArch64) */
69
+ break;
70
+
71
+ case 7: /* linked contextidr_el1 match */
72
+ contextidr = env->cp15.contextidr_el[1];
73
+ break;
74
+ case 13: /* linked contextidr_el2 match */
75
+ contextidr = env->cp15.contextidr_el[2];
76
+ break;
77
+
78
case 9: /* linked VMID match (reserved if no EL2) */
79
case 11: /* linked context ID and VMID match (reserved if no EL2) */
80
+ case 15: /* linked full context ID match */
81
default:
82
/*
83
* Links to Unlinked context breakpoints must generate no
84
@@ -XXX,XX +XXX,XX @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
85
return false;
86
}
87
88
- return false;
89
+ /*
23
+ /*
90
+ * We match the whole register even if this is AArch32 using the
24
+ * FEAT_RME forbids return from EL3 with an invalid security state.
91
+ * short descriptor format (in which case it holds both PROCID and ASID),
25
+ * We don't need an explicit check for FEAT_RME here because we enforce
92
+ * since we don't implement the optional v7 context ID masking.
26
+ * in scr_write() that you can't set the NSE bit without it.
93
+ */
27
+ */
94
+ return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
28
+ if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
95
}
29
+ goto illegal_return;
96
97
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
98
diff --git a/target/arm/helper.c b/target/arm/helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/helper.c
101
+++ b/target/arm/helper.c
102
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo jazelle_regs[] = {
103
REGINFO_SENTINEL
104
};
105
106
+static const ARMCPRegInfo vhe_reginfo[] = {
107
+ { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
108
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
109
+ .access = PL2_RW,
110
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
111
+ REGINFO_SENTINEL
112
+};
113
+
114
void register_cp_regs_for_features(ARMCPU *cpu)
115
{
116
/* Register all the coprocessor registers based on feature bits */
117
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
118
define_arm_cp_regs(cpu, lor_reginfo);
119
}
120
121
+ if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
122
+ define_arm_cp_regs(cpu, vhe_reginfo);
123
+ }
30
+ }
124
+
31
+
125
if (cpu_isar_feature(aa64_sve, cpu)) {
32
new_el = el_from_spsr(spsr);
126
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
33
if (new_el == -1) {
127
if (arm_feature(env, ARM_FEATURE_EL2)) {
34
goto illegal_return;
128
--
35
--
129
2.20.1
36
2.34.1
130
131
From: Philippe Mathieu-Daudé <philmd@redhat.com>

The bold text sounds like 'knock knock'. Only bolding the
second 'not' makes it easier to read.

Fixes: dea101a1ae
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-id: 20200206225148.23923-1-philmd@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
docs/arm-cpu-features.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/arm-cpu-features.rst
+++ b/docs/arm-cpu-features.rst
@@ -XXX,XX +XXX,XX @@ the list of KVM VCPU features and their descriptions.

kvm-no-adjvtime By default kvm-no-adjvtime is disabled. This
means that by default the virtual time
- adjustment is enabled (vtime is *not not*
+ adjustment is enabled (vtime is not *not*
adjusted).

When virtual time adjustment is enabled each
--
2.20.1

In the m48t59 device we almost always use 64-bit arithmetic when
dealing with time_t deltas. The one exception is in set_alarm(),
which currently uses a plain 'int' to hold the difference between two
time_t values. Switch to int64_t instead to avoid any possible
overflow issues.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
hw/rtc/m48t59.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rtc/m48t59.c
+++ b/hw/rtc/m48t59.c
@@ -XXX,XX +XXX,XX @@ static void alarm_cb (void *opaque)

static void set_alarm(M48t59State *NVRAM)
{
- int diff;
+ int64_t diff;
if (NVRAM->alrm_timer != NULL) {
timer_del(NVRAM->alrm_timer);
diff = qemu_timedate_diff(&NVRAM->alarm) - NVRAM->time_offset;
--
2.34.1
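(Illustrative aside, not part of the series: the overflow these RTC patches guard against is plain C truncation when a time_t difference lands in a 32-bit int. A minimal standalone sketch, assuming 64-bit time_t and made-up timestamps rather than QEMU's qemu_timedate_diff():)

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
    /* An alarm roughly 100 years after an arbitrary reference point. */
    time_t now = 1700000000;
    time_t alarm = now + (time_t)100 * 365 * 24 * 60 * 60;

    int narrow = (int)(alarm - now);     /* truncates: delta > INT_MAX */
    int64_t wide = alarm - now;          /* keeps the full difference  */

    printf("narrow=%d wide=%lld\n", narrow, (long long)wide);
    return 0;
}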
From: Rene Stange <rsta2@o2online.de>

In TD (two dimensions) DMA mode ylen has to be increased by one after
reading it from the TXFR_LEN register, because a value of zero has to
result in one run through of the ylen loop. This has been tested on a
real Raspberry Pi 3 Model B+. In the previous implementation the ylen
loop was not passed at all for a value of zero.

Signed-off-by: Rene Stange <rsta2@o2online.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/dma/bcm2835_dma.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/dma/bcm2835_dma.c
+++ b/hw/dma/bcm2835_dma.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
ch->stride = ldl_le_phys(&s->dma_as, ch->conblk_ad + 16);
ch->nextconbk = ldl_le_phys(&s->dma_as, ch->conblk_ad + 20);

+ ylen = 1;
if (ch->ti & BCM2708_DMA_TDMODE) {
/* 2D transfer mode */
- ylen = (ch->txfr_len >> 16) & 0x3fff;
+ ylen += (ch->txfr_len >> 16) & 0x3fff;
xlen = ch->txfr_len & 0xffff;
dst_stride = ch->stride >> 16;
src_stride = ch->stride & 0xffff;
} else {
- ylen = 1;
xlen = ch->txfr_len;
dst_stride = 0;
src_stride = 0;
--
2.20.1

In the twl92230 device, use int64_t for the two state fields
sec_offset and alm_sec, because we set these to values that
are either time_t or differences between two time_t values.

These fields aren't saved in vmstate anywhere, so we can
safely widen them.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
hw/rtc/twl92230.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rtc/twl92230.c
+++ b/hw/rtc/twl92230.c
@@ -XXX,XX +XXX,XX @@ struct MenelausState {
struct tm tm;
struct tm new;
struct tm alm;
- int sec_offset;
- int alm_sec;
+ int64_t sec_offset;
+ int64_t alm_sec;
int next_comp;
} rtc;
uint16_t rtc_next_vmstate;
--
2.34.1
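(Illustrative aside, not part of the series: a minimal standalone model of the row count the bcm2835_dma fix aims for. The helper name is made up and the field layout is taken from the patch; in TD mode the row loop runs YLENGTH+1 times, so a register value of zero still transfers one row.)

#include <stdint.h>
#include <stdio.h>

/* Toy model of the fixed behaviour, not the QEMU device code. */
static unsigned rows_transferred(uint32_t txfr_len, int td_mode)
{
    unsigned ylen = 1;                      /* always at least one row */
    if (td_mode) {
        ylen += (txfr_len >> 16) & 0x3fff;  /* extra rows from YLENGTH */
    }
    return ylen;
}

int main(void)
{
    printf("%u\n", rows_transferred(0x0000ffff, 1)); /* YLENGTH=0 -> 1 */
    printf("%u\n", rows_transferred(0x0002ffff, 1)); /* YLENGTH=2 -> 3 */
    printf("%u\n", rows_transferred(0x0002ffff, 0)); /* linear mode -> 1 */
    return 0;
}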
From: Richard Henderson <richard.henderson@linaro.org>

At the same time, add writefn to TTBR0_EL2 and TCR_EL2.
A later patch will update any ASID therein.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)

In the aspeed_rtc device we store a difference between two time_t
values in an 'int'. This is not really correct when time_t could
be 64 bits. Enlarge the field to 'int64_t'.

This is a migration compatibility break for the aspeed boards.
While we are changing the vmstate, remove the accidental
duplicate of the offset field.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
---
include/hw/rtc/aspeed_rtc.h | 2 +-
hw/rtc/aspeed_rtc.c | 5 ++---
2 files changed, 3 insertions(+), 4 deletions(-)
14
15
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
diff --git a/include/hw/rtc/aspeed_rtc.h b/include/hw/rtc/aspeed_rtc.h
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
--- a/include/hw/rtc/aspeed_rtc.h
18
+++ b/target/arm/helper.c
19
+++ b/include/hw/rtc/aspeed_rtc.h
19
@@ -XXX,XX +XXX,XX @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
20
@@ -XXX,XX +XXX,XX @@ struct AspeedRtcState {
20
raw_write(env, ri, value);
21
qemu_irq irq;
21
}
22
22
23
uint32_t reg[0x18];
23
+static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
24
- int offset;
24
+ uint64_t value)
25
+ int64_t offset;
25
+{
26
26
+ /* TODO: There are ASID fields in here with HCR_EL2.E2H */
27
+ raw_write(env, ri, value);
28
+}
29
+
30
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
31
uint64_t value)
32
{
33
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
34
.fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
35
{ .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
36
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
37
- .access = PL2_RW, .resetvalue = 0,
38
+ .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
39
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
40
{ .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
41
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
42
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vhe_reginfo[] = {
43
.opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
44
.access = PL2_RW,
45
.fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
46
+ { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
47
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
48
+ .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
49
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
50
REGINFO_SENTINEL
51
};
27
};
52
28
29
diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/rtc/aspeed_rtc.c
32
+++ b/hw/rtc/aspeed_rtc.c
33
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps aspeed_rtc_ops = {
34
35
static const VMStateDescription vmstate_aspeed_rtc = {
36
.name = TYPE_ASPEED_RTC,
37
- .version_id = 1,
38
+ .version_id = 2,
39
.fields = (VMStateField[]) {
40
VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18),
41
- VMSTATE_INT32(offset, AspeedRtcState),
42
- VMSTATE_INT32(offset, AspeedRtcState),
43
+ VMSTATE_INT64(offset, AspeedRtcState),
44
VMSTATE_END_OF_LIST()
45
}
46
};
53
--
47
--
54
2.20.1
48
2.34.1
55
49
56
50
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
No functional change, but unify code sequences.
4
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200206105448.4726-7-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/helper.c | 32 +++++++++++++-------------------
13
1 file changed, 13 insertions(+), 19 deletions(-)
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
20
* Page D4-1736 (DDI0487A.b)
21
*/
22
23
+static int vae1_tlbmask(CPUARMState *env)
24
+{
25
+ if (arm_is_secure_below_el3(env)) {
26
+ return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
27
+ } else {
28
+ return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
29
+ }
30
+}
31
+
32
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
33
uint64_t value)
34
{
35
CPUState *cs = env_cpu(env);
36
- bool sec = arm_is_secure_below_el3(env);
37
+ int mask = vae1_tlbmask(env);
38
39
- if (sec) {
40
- tlb_flush_by_mmuidx_all_cpus_synced(cs,
41
- ARMMMUIdxBit_S1SE1 |
42
- ARMMMUIdxBit_S1SE0);
43
- } else {
44
- tlb_flush_by_mmuidx_all_cpus_synced(cs,
45
- ARMMMUIdxBit_S12NSE1 |
46
- ARMMMUIdxBit_S12NSE0);
47
- }
48
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
49
}
50
51
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
52
uint64_t value)
53
{
54
CPUState *cs = env_cpu(env);
55
+ int mask = vae1_tlbmask(env);
56
57
if (tlb_force_broadcast(env)) {
58
tlbi_aa64_vmalle1is_write(env, NULL, value);
59
return;
60
}
61
62
- if (arm_is_secure_below_el3(env)) {
63
- tlb_flush_by_mmuidx(cs,
64
- ARMMMUIdxBit_S1SE1 |
65
- ARMMMUIdxBit_S1SE0);
66
- } else {
67
- tlb_flush_by_mmuidx(cs,
68
- ARMMMUIdxBit_S12NSE1 |
69
- ARMMMUIdxBit_S12NSE0);
70
- }
71
+ tlb_flush_by_mmuidx(cs, mask);
72
}
73
74
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
75
--
76
2.20.1
77
78
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Rather than call to a separate function and re-compute any
4
parameters for the flush, simply use the correct flush
5
function directly.
6
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20200206105448.4726-9-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
target/arm/helper.c | 52 +++++++++++++++++++++------------------------
14
1 file changed, 24 insertions(+), 28 deletions(-)
15
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper.c
19
+++ b/target/arm/helper.c
20
@@ -XXX,XX +XXX,XX @@ static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
21
uint64_t value)
22
{
23
/* Invalidate all (TLBIALL) */
24
- ARMCPU *cpu = env_archcpu(env);
25
+ CPUState *cs = env_cpu(env);
26
27
if (tlb_force_broadcast(env)) {
28
- tlbiall_is_write(env, NULL, value);
29
- return;
30
+ tlb_flush_all_cpus_synced(cs);
31
+ } else {
32
+ tlb_flush(cs);
33
}
34
-
35
- tlb_flush(CPU(cpu));
36
}
37
38
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
39
uint64_t value)
40
{
41
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
42
- ARMCPU *cpu = env_archcpu(env);
43
+ CPUState *cs = env_cpu(env);
44
45
+ value &= TARGET_PAGE_MASK;
46
if (tlb_force_broadcast(env)) {
47
- tlbimva_is_write(env, NULL, value);
48
- return;
49
+ tlb_flush_page_all_cpus_synced(cs, value);
50
+ } else {
51
+ tlb_flush_page(cs, value);
52
}
53
-
54
- tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
55
}
56
57
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
58
uint64_t value)
59
{
60
/* Invalidate by ASID (TLBIASID) */
61
- ARMCPU *cpu = env_archcpu(env);
62
+ CPUState *cs = env_cpu(env);
63
64
if (tlb_force_broadcast(env)) {
65
- tlbiasid_is_write(env, NULL, value);
66
- return;
67
+ tlb_flush_all_cpus_synced(cs);
68
+ } else {
69
+ tlb_flush(cs);
70
}
71
-
72
- tlb_flush(CPU(cpu));
73
}
74
75
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
76
uint64_t value)
77
{
78
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
79
- ARMCPU *cpu = env_archcpu(env);
80
+ CPUState *cs = env_cpu(env);
81
82
+ value &= TARGET_PAGE_MASK;
83
if (tlb_force_broadcast(env)) {
84
- tlbimvaa_is_write(env, NULL, value);
85
- return;
86
+ tlb_flush_page_all_cpus_synced(cs, value);
87
+ } else {
88
+ tlb_flush_page(cs, value);
89
}
90
-
91
- tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
92
}
93
94
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
95
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
96
int mask = vae1_tlbmask(env);
97
98
if (tlb_force_broadcast(env)) {
99
- tlbi_aa64_vmalle1is_write(env, NULL, value);
100
- return;
101
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
102
+ } else {
103
+ tlb_flush_by_mmuidx(cs, mask);
104
}
105
-
106
- tlb_flush_by_mmuidx(cs, mask);
107
}
108
109
static int alle1_tlbmask(CPUARMState *env)
110
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
111
uint64_t pageaddr = sextract64(value << 12, 0, 56);
112
113
if (tlb_force_broadcast(env)) {
114
- tlbi_aa64_vae1is_write(env, NULL, value);
115
- return;
116
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
117
+ } else {
118
+ tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
119
}
120
-
121
- tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
122
}
123
124
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
125
--
126
2.20.1
127
128
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This is part of a reorganization to the set of mmu_idx.
4
This emphasizes that they apply to the EL1&0 regime.
5
6
The ultimate goal is
7
8
-- Non-secure regimes:
9
ARMMMUIdx_E10_0,
10
ARMMMUIdx_E20_0,
11
ARMMMUIdx_E10_1,
12
ARMMMUIdx_E2,
13
ARMMMUIdx_E20_2,
14
15
-- Secure regimes:
16
ARMMMUIdx_SE10_0,
17
ARMMMUIdx_SE10_1,
18
ARMMMUIdx_SE3,
19
20
-- Helper mmu_idx for non-secure EL1&0 stage1 and stage2
21
ARMMMUIdx_Stage2,
22
ARMMMUIdx_Stage1_E0,
23
ARMMMUIdx_Stage1_E1,
24
25
The 'S' prefix is reserved for "Secure". Unless otherwise specified,
26
each mmu_idx represents all stages of translation.
27
28
Tested-by: Alex Bennée <alex.bennee@linaro.org>
29
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
30
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
31
Message-id: 20200206105448.4726-10-richard.henderson@linaro.org
32
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
33
---
34
target/arm/cpu.h | 8 ++++----
35
target/arm/internals.h | 4 ++--
36
target/arm/helper.c | 40 +++++++++++++++++++-------------------
37
target/arm/translate-a64.c | 4 ++--
38
target/arm/translate.c | 6 +++---
39
5 files changed, 31 insertions(+), 31 deletions(-)
40
41
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/cpu.h
44
+++ b/target/arm/cpu.h
45
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
46
#define ARM_MMU_IDX_COREIDX_MASK 0x7
47
48
typedef enum ARMMMUIdx {
49
- ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
50
- ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
51
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
52
+ ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
53
ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
54
ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
55
ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
56
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
57
* for use when calling tlb_flush_by_mmuidx() and friends.
58
*/
59
typedef enum ARMMMUIdxBit {
60
- ARMMMUIdxBit_S12NSE0 = 1 << 0,
61
- ARMMMUIdxBit_S12NSE1 = 1 << 1,
62
+ ARMMMUIdxBit_E10_0 = 1 << 0,
63
+ ARMMMUIdxBit_E10_1 = 1 << 1,
64
ARMMMUIdxBit_S1E2 = 1 << 2,
65
ARMMMUIdxBit_S1E3 = 1 << 3,
66
ARMMMUIdxBit_S1SE0 = 1 << 4,
67
diff --git a/target/arm/internals.h b/target/arm/internals.h
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/internals.h
70
+++ b/target/arm/internals.h
71
@@ -XXX,XX +XXX,XX @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
72
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
73
{
74
switch (mmu_idx) {
75
- case ARMMMUIdx_S12NSE0:
76
- case ARMMMUIdx_S12NSE1:
77
+ case ARMMMUIdx_E10_0:
78
+ case ARMMMUIdx_E10_1:
79
case ARMMMUIdx_S1NSE0:
80
case ARMMMUIdx_S1NSE1:
81
case ARMMMUIdx_S1E2:
82
diff --git a/target/arm/helper.c b/target/arm/helper.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/arm/helper.c
85
+++ b/target/arm/helper.c
86
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
87
CPUState *cs = env_cpu(env);
88
89
tlb_flush_by_mmuidx(cs,
90
- ARMMMUIdxBit_S12NSE1 |
91
- ARMMMUIdxBit_S12NSE0 |
92
+ ARMMMUIdxBit_E10_1 |
93
+ ARMMMUIdxBit_E10_0 |
94
ARMMMUIdxBit_S2NS);
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
98
CPUState *cs = env_cpu(env);
99
100
tlb_flush_by_mmuidx_all_cpus_synced(cs,
101
- ARMMMUIdxBit_S12NSE1 |
102
- ARMMMUIdxBit_S12NSE0 |
103
+ ARMMMUIdxBit_E10_1 |
104
+ ARMMMUIdxBit_E10_0 |
105
ARMMMUIdxBit_S2NS);
106
}
107
108
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
109
format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
110
111
if (arm_feature(env, ARM_FEATURE_EL2)) {
112
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
113
+ if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
114
format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
115
} else {
116
format64 |= arm_current_el(env) == 2;
117
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
118
break;
119
case 4:
120
/* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
121
- mmu_idx = ARMMMUIdx_S12NSE1;
122
+ mmu_idx = ARMMMUIdx_E10_1;
123
break;
124
case 6:
125
/* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
126
- mmu_idx = ARMMMUIdx_S12NSE0;
127
+ mmu_idx = ARMMMUIdx_E10_0;
128
break;
129
default:
130
g_assert_not_reached();
131
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
132
mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
133
break;
134
case 4: /* AT S12E1R, AT S12E1W */
135
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
136
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_E10_1;
137
break;
138
case 6: /* AT S12E0R, AT S12E0W */
139
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
140
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_E10_0;
141
break;
142
default:
143
g_assert_not_reached();
144
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
145
/* Accesses to VTTBR may change the VMID so we must flush the TLB. */
146
if (raw_read(env, ri) != value) {
147
tlb_flush_by_mmuidx(cs,
148
- ARMMMUIdxBit_S12NSE1 |
149
- ARMMMUIdxBit_S12NSE0 |
150
+ ARMMMUIdxBit_E10_1 |
151
+ ARMMMUIdxBit_E10_0 |
152
ARMMMUIdxBit_S2NS);
153
raw_write(env, ri, value);
154
}
155
@@ -XXX,XX +XXX,XX @@ static int vae1_tlbmask(CPUARMState *env)
156
if (arm_is_secure_below_el3(env)) {
157
return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
158
} else {
159
- return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
160
+ return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
161
}
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
165
if (arm_is_secure_below_el3(env)) {
166
return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
167
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
168
- return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0 | ARMMMUIdxBit_S2NS;
169
+ return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_S2NS;
170
} else {
171
- return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
172
+ return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
173
}
174
}
175
176
@@ -XXX,XX +XXX,XX @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
177
*/
178
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
179
{
180
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
181
- mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
182
+ if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
183
+ mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_E10_0);
184
}
185
return mmu_idx;
186
}
187
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
188
return true;
189
default:
190
return false;
191
- case ARMMMUIdx_S12NSE0:
192
- case ARMMMUIdx_S12NSE1:
193
+ case ARMMMUIdx_E10_0:
194
+ case ARMMMUIdx_E10_1:
195
g_assert_not_reached();
196
}
197
}
198
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
199
target_ulong *page_size,
200
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
201
{
202
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
203
+ if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
204
/* Call ourselves recursively to do the stage 1 and then stage 2
205
* translations.
206
*/
207
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
208
if (el < 2 && arm_is_secure_below_el3(env)) {
209
return ARMMMUIdx_S1SE0 + el;
210
} else {
211
- return ARMMMUIdx_S12NSE0 + el;
212
+ return ARMMMUIdx_E10_0 + el;
213
}
214
}
215
216
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
217
index XXXXXXX..XXXXXXX 100644
218
--- a/target/arm/translate-a64.c
219
+++ b/target/arm/translate-a64.c
220
@@ -XXX,XX +XXX,XX @@ static inline int get_a64_user_mem_index(DisasContext *s)
221
ARMMMUIdx useridx;
222
223
switch (s->mmu_idx) {
224
- case ARMMMUIdx_S12NSE1:
225
- useridx = ARMMMUIdx_S12NSE0;
226
+ case ARMMMUIdx_E10_1:
227
+ useridx = ARMMMUIdx_E10_0;
228
break;
229
case ARMMMUIdx_S1SE1:
230
useridx = ARMMMUIdx_S1SE0;
231
diff --git a/target/arm/translate.c b/target/arm/translate.c
232
index XXXXXXX..XXXXXXX 100644
233
--- a/target/arm/translate.c
234
+++ b/target/arm/translate.c
235
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
236
*/
237
switch (s->mmu_idx) {
238
case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
239
- case ARMMMUIdx_S12NSE0:
240
- case ARMMMUIdx_S12NSE1:
241
- return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
242
+ case ARMMMUIdx_E10_0:
243
+ case ARMMMUIdx_E10_1:
244
+ return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
245
case ARMMMUIdx_S1E3:
246
case ARMMMUIdx_S1SE0:
247
case ARMMMUIdx_S1SE1:
248
--
249
2.20.1
250
251
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
The EL1&0 regime is the only one that uses 2-stage translation.
4
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200206105448.4726-11-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 4 +--
12
target/arm/internals.h | 2 +-
13
target/arm/helper.c | 57 ++++++++++++++++++++------------------
14
target/arm/translate-a64.c | 2 +-
15
target/arm/translate.c | 2 +-
16
5 files changed, 35 insertions(+), 32 deletions(-)
17
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
23
ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
24
ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
25
ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
26
- ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
27
+ ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
28
ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
29
ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
30
ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
31
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
32
ARMMMUIdxBit_S1E3 = 1 << 3,
33
ARMMMUIdxBit_S1SE0 = 1 << 4,
34
ARMMMUIdxBit_S1SE1 = 1 << 5,
35
- ARMMMUIdxBit_S2NS = 1 << 6,
36
+ ARMMMUIdxBit_Stage2 = 1 << 6,
37
ARMMMUIdxBit_MUser = 1 << 0,
38
ARMMMUIdxBit_MPriv = 1 << 1,
39
ARMMMUIdxBit_MUserNegPri = 1 << 2,
40
diff --git a/target/arm/internals.h b/target/arm/internals.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/internals.h
43
+++ b/target/arm/internals.h
44
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
45
case ARMMMUIdx_S1NSE0:
46
case ARMMMUIdx_S1NSE1:
47
case ARMMMUIdx_S1E2:
48
- case ARMMMUIdx_S2NS:
49
+ case ARMMMUIdx_Stage2:
50
case ARMMMUIdx_MPrivNegPri:
51
case ARMMMUIdx_MUserNegPri:
52
case ARMMMUIdx_MPriv:
53
diff --git a/target/arm/helper.c b/target/arm/helper.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/helper.c
56
+++ b/target/arm/helper.c
57
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
58
tlb_flush_by_mmuidx(cs,
59
ARMMMUIdxBit_E10_1 |
60
ARMMMUIdxBit_E10_0 |
61
- ARMMMUIdxBit_S2NS);
62
+ ARMMMUIdxBit_Stage2);
63
}
64
65
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
66
@@ -XXX,XX +XXX,XX @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
67
tlb_flush_by_mmuidx_all_cpus_synced(cs,
68
ARMMMUIdxBit_E10_1 |
69
ARMMMUIdxBit_E10_0 |
70
- ARMMMUIdxBit_S2NS);
71
+ ARMMMUIdxBit_Stage2);
72
}
73
74
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
75
@@ -XXX,XX +XXX,XX @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
76
77
pageaddr = sextract64(value << 12, 0, 40);
78
79
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
80
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
81
}
82
83
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
84
@@ -XXX,XX +XXX,XX @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
85
pageaddr = sextract64(value << 12, 0, 40);
86
87
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
88
- ARMMMUIdxBit_S2NS);
89
+ ARMMMUIdxBit_Stage2);
90
}
91
92
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
93
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
94
ARMCPU *cpu = env_archcpu(env);
95
CPUState *cs = CPU(cpu);
96
97
- /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
98
+ /*
99
+ * A change in VMID to the stage2 page table (Stage2) invalidates
100
+ * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
101
+ */
102
if (raw_read(env, ri) != value) {
103
tlb_flush_by_mmuidx(cs,
104
ARMMMUIdxBit_E10_1 |
105
ARMMMUIdxBit_E10_0 |
106
- ARMMMUIdxBit_S2NS);
107
+ ARMMMUIdxBit_Stage2);
108
raw_write(env, ri, value);
109
}
110
}
111
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
112
if (arm_is_secure_below_el3(env)) {
113
return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
114
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
115
- return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_S2NS;
116
+ return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2;
117
} else {
118
return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
119
}
120
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
121
122
pageaddr = sextract64(value << 12, 0, 48);
123
124
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
125
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
126
}
127
128
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
129
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
130
pageaddr = sextract64(value << 12, 0, 48);
131
132
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
133
- ARMMMUIdxBit_S2NS);
134
+ ARMMMUIdxBit_Stage2);
135
}
136
137
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
138
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
139
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
140
{
141
switch (mmu_idx) {
142
- case ARMMMUIdx_S2NS:
143
+ case ARMMMUIdx_Stage2:
144
case ARMMMUIdx_S1E2:
145
return 2;
146
case ARMMMUIdx_S1E3:
147
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env,
148
}
149
}
150
151
- if (mmu_idx == ARMMMUIdx_S2NS) {
152
+ if (mmu_idx == ARMMMUIdx_Stage2) {
153
/* HCR.DC means HCR.VM behaves as 1 */
154
return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
155
}
156
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_big_endian(CPUARMState *env,
157
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
158
int ttbrn)
159
{
160
- if (mmu_idx == ARMMMUIdx_S2NS) {
161
+ if (mmu_idx == ARMMMUIdx_Stage2) {
162
return env->cp15.vttbr_el2;
163
}
164
if (ttbrn == 0) {
165
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
166
/* Return the TCR controlling this translation regime */
167
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
168
{
169
- if (mmu_idx == ARMMMUIdx_S2NS) {
170
+ if (mmu_idx == ARMMMUIdx_Stage2) {
171
return &env->cp15.vtcr_el2;
172
}
173
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
174
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
175
bool have_wxn;
176
int wxn = 0;
177
178
- assert(mmu_idx != ARMMMUIdx_S2NS);
179
+ assert(mmu_idx != ARMMMUIdx_Stage2);
180
181
user_rw = simple_ap_to_rw_prot_is_user(ap, true);
182
if (is_user) {
183
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
184
ARMMMUFaultInfo *fi)
185
{
186
if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
187
- !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
188
+ !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
189
target_ulong s2size;
190
hwaddr s2pa;
191
int s2prot;
192
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
193
pcacheattrs = &cacheattrs;
194
}
195
196
- ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
197
+ ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
198
&txattrs, &s2prot, &s2size, fi, pcacheattrs);
199
if (ret) {
200
assert(fi->type != ARMFault_None);
201
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
202
tsz = extract32(tcr, 0, 6);
203
using64k = extract32(tcr, 14, 1);
204
using16k = extract32(tcr, 15, 1);
205
- if (mmu_idx == ARMMMUIdx_S2NS) {
206
+ if (mmu_idx == ARMMMUIdx_Stage2) {
207
/* VTCR_EL2 */
208
tbi = tbid = hpd = false;
209
} else {
210
@@ -XXX,XX +XXX,XX @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
211
int select, tsz;
212
bool epd, hpd;
213
214
- if (mmu_idx == ARMMMUIdx_S2NS) {
215
+ if (mmu_idx == ARMMMUIdx_Stage2) {
216
/* VTCR */
217
bool sext = extract32(tcr, 4, 1);
218
bool sign = extract32(tcr, 3, 1);
219
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
220
level = 1;
221
/* There is no TTBR1 for EL2 */
222
ttbr1_valid = (el != 2);
223
- addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
224
+ addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
225
inputsize = addrsize - param.tsz;
226
}
227
228
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
229
goto do_fault;
230
}
231
232
- if (mmu_idx != ARMMMUIdx_S2NS) {
233
+ if (mmu_idx != ARMMMUIdx_Stage2) {
234
/* The starting level depends on the virtual address size (which can
235
* be up to 48 bits) and the translation granule size. It indicates
236
* the number of strides (stride bits at a time) needed to
237
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
238
attrs = extract64(descriptor, 2, 10)
239
| (extract64(descriptor, 52, 12) << 10);
240
241
- if (mmu_idx == ARMMMUIdx_S2NS) {
242
+ if (mmu_idx == ARMMMUIdx_Stage2) {
243
/* Stage 2 table descriptors do not include any attribute fields */
244
break;
245
}
246
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
247
ap = extract32(attrs, 4, 2);
248
xn = extract32(attrs, 12, 1);
249
250
- if (mmu_idx == ARMMMUIdx_S2NS) {
251
+ if (mmu_idx == ARMMMUIdx_Stage2) {
252
ns = true;
253
*prot = get_S2prot(env, ap, xn);
254
} else {
255
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
256
}
257
258
if (cacheattrs != NULL) {
259
- if (mmu_idx == ARMMMUIdx_S2NS) {
260
+ if (mmu_idx == ARMMMUIdx_Stage2) {
261
cacheattrs->attrs = convert_stage2_attrs(env,
262
extract32(attrs, 0, 4));
263
} else {
264
@@ -XXX,XX +XXX,XX @@ do_fault:
265
fi->type = fault_type;
266
fi->level = level;
267
/* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
268
- fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
269
+ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
270
return true;
271
}
272
273
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
274
prot, page_size, fi, cacheattrs);
275
276
/* If S1 fails or S2 is disabled, return early. */
277
- if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
278
+ if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
279
*phys_ptr = ipa;
280
return ret;
281
}
282
283
/* S1 is done. Now do S2 translation. */
284
- ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
285
+ ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
286
phys_ptr, attrs, &s2_prot,
287
page_size, fi,
288
cacheattrs != NULL ? &cacheattrs2 : NULL);
289
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
290
/* Fast Context Switch Extension. This doesn't exist at all in v8.
291
* In v7 and earlier it affects all stage 1 translations.
292
*/
293
- if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
294
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
295
&& !arm_feature(env, ARM_FEATURE_V8)) {
296
if (regime_el(env, mmu_idx) == 3) {
297
address += env->cp15.fcseidr_s;
298
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
299
index XXXXXXX..XXXXXXX 100644
300
--- a/target/arm/translate-a64.c
301
+++ b/target/arm/translate-a64.c
302
@@ -XXX,XX +XXX,XX @@ static inline int get_a64_user_mem_index(DisasContext *s)
303
case ARMMMUIdx_S1SE1:
304
useridx = ARMMMUIdx_S1SE0;
305
break;
306
- case ARMMMUIdx_S2NS:
307
+ case ARMMMUIdx_Stage2:
308
g_assert_not_reached();
309
default:
310
useridx = s->mmu_idx;
311
diff --git a/target/arm/translate.c b/target/arm/translate.c
312
index XXXXXXX..XXXXXXX 100644
313
--- a/target/arm/translate.c
314
+++ b/target/arm/translate.c
315
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
316
case ARMMMUIdx_MSUserNegPri:
317
case ARMMMUIdx_MSPrivNegPri:
318
return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
319
- case ARMMMUIdx_S2NS:
320
+ case ARMMMUIdx_Stage2:
321
default:
322
g_assert_not_reached();
323
}
324
--
325
2.20.1
326
327
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This is part of a reorganization to the set of mmu_idx.
4
The EL1&0 regime is the only one that uses 2-stage translation.
5
Spelling out Stage avoids confusion with Secure.
6
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20200206105448.4726-12-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
target/arm/cpu.h | 4 ++--
14
target/arm/internals.h | 6 +++---
15
target/arm/helper.c | 27 ++++++++++++++-------------
16
3 files changed, 19 insertions(+), 18 deletions(-)
17
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
23
/* Indexes below here don't have TLBs and are used only for AT system
24
* instructions or for the first stage of an S12 page table walk.
25
*/
26
- ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
27
- ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
28
+ ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
29
+ ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
30
} ARMMMUIdx;
31
32
/* Bit macros for the core-mmu-index values for each index,
33
diff --git a/target/arm/internals.h b/target/arm/internals.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/internals.h
36
+++ b/target/arm/internals.h
37
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
38
switch (mmu_idx) {
39
case ARMMMUIdx_E10_0:
40
case ARMMMUIdx_E10_1:
41
- case ARMMMUIdx_S1NSE0:
42
- case ARMMMUIdx_S1NSE1:
43
+ case ARMMMUIdx_Stage1_E0:
44
+ case ARMMMUIdx_Stage1_E1:
45
case ARMMMUIdx_S1E2:
46
case ARMMMUIdx_Stage2:
47
case ARMMMUIdx_MPrivNegPri:
48
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env);
49
#ifdef CONFIG_USER_ONLY
50
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
51
{
52
- return ARMMMUIdx_S1NSE0;
53
+ return ARMMMUIdx_Stage1_E0;
54
}
55
#else
56
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
57
diff --git a/target/arm/helper.c b/target/arm/helper.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/helper.c
60
+++ b/target/arm/helper.c
61
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
62
bool take_exc = false;
63
64
if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
65
- && (mmu_idx == ARMMMUIdx_S1NSE1 || mmu_idx == ARMMMUIdx_S1NSE0)) {
66
+ && (mmu_idx == ARMMMUIdx_Stage1_E1 ||
67
+ mmu_idx == ARMMMUIdx_Stage1_E0)) {
68
/*
69
* Synchronous stage 2 fault on an access made as part of the
70
* translation table walk for AT S1E0* or AT S1E1* insn
71
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
72
mmu_idx = ARMMMUIdx_S1E3;
73
break;
74
case 2:
75
- mmu_idx = ARMMMUIdx_S1NSE1;
76
+ mmu_idx = ARMMMUIdx_Stage1_E1;
77
break;
78
case 1:
79
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
80
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_Stage1_E1;
81
break;
82
default:
83
g_assert_not_reached();
84
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
85
mmu_idx = ARMMMUIdx_S1SE0;
86
break;
87
case 2:
88
- mmu_idx = ARMMMUIdx_S1NSE0;
89
+ mmu_idx = ARMMMUIdx_Stage1_E0;
90
break;
91
case 1:
92
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
93
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_Stage1_E0;
94
break;
95
default:
96
g_assert_not_reached();
97
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
98
case 0:
99
switch (ri->opc1) {
100
case 0: /* AT S1E1R, AT S1E1W */
101
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
102
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_Stage1_E1;
103
break;
104
case 4: /* AT S1E2R, AT S1E2W */
105
mmu_idx = ARMMMUIdx_S1E2;
106
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
107
}
108
break;
109
case 2: /* AT S1E0R, AT S1E0W */
110
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
111
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_Stage1_E0;
112
break;
113
case 4: /* AT S12E1R, AT S12E1W */
114
mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_E10_1;
115
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
116
case ARMMMUIdx_S1SE0:
117
return arm_el_is_aa64(env, 3) ? 1 : 3;
118
case ARMMMUIdx_S1SE1:
119
- case ARMMMUIdx_S1NSE0:
120
- case ARMMMUIdx_S1NSE1:
121
+ case ARMMMUIdx_Stage1_E0:
122
+ case ARMMMUIdx_Stage1_E1:
123
case ARMMMUIdx_MPrivNegPri:
124
case ARMMMUIdx_MUserNegPri:
125
case ARMMMUIdx_MPriv:
126
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env,
127
}
128
129
if ((env->cp15.hcr_el2 & HCR_DC) &&
130
- (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
131
+ (mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1)) {
132
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
133
return true;
134
}
135
@@ -XXX,XX +XXX,XX @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
136
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
137
{
138
if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
139
- mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_E10_0);
140
+ mmu_idx += (ARMMMUIdx_Stage1_E0 - ARMMMUIdx_E10_0);
141
}
142
return mmu_idx;
143
}
144
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
145
{
146
switch (mmu_idx) {
147
case ARMMMUIdx_S1SE0:
148
- case ARMMMUIdx_S1NSE0:
149
+ case ARMMMUIdx_Stage1_E0:
150
case ARMMMUIdx_MUser:
151
case ARMMMUIdx_MSUser:
152
case ARMMMUIdx_MUserNegPri:
153
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
154
hwaddr addr, MemTxAttrs txattrs,
155
ARMMMUFaultInfo *fi)
156
{
157
- if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
158
+ if ((mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1) &&
159
!regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
160
target_ulong s2size;
161
hwaddr s2pa;
162
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

This is part of a reorganization of the set of mmu_idx.
This emphasizes that they apply to the Secure EL1&0 regime.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
12
target/arm/cpu.h | 8 ++++----
13
target/arm/internals.h | 4 ++--
14
target/arm/translate.h | 2 +-
15
target/arm/helper.c | 26 +++++++++++++-------------
16
target/arm/translate-a64.c | 4 ++--
17
target/arm/translate.c | 6 +++---
18
6 files changed, 25 insertions(+), 25 deletions(-)
19
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
25
ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
26
ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
27
ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
28
- ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
29
- ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
30
+ ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
31
+ ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
32
ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
33
ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
34
ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
35
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
36
ARMMMUIdxBit_E10_1 = 1 << 1,
37
ARMMMUIdxBit_S1E2 = 1 << 2,
38
ARMMMUIdxBit_S1E3 = 1 << 3,
39
- ARMMMUIdxBit_S1SE0 = 1 << 4,
40
- ARMMMUIdxBit_S1SE1 = 1 << 5,
41
+ ARMMMUIdxBit_SE10_0 = 1 << 4,
42
+ ARMMMUIdxBit_SE10_1 = 1 << 5,
43
ARMMMUIdxBit_Stage2 = 1 << 6,
44
ARMMMUIdxBit_MUser = 1 << 0,
45
ARMMMUIdxBit_MPriv = 1 << 1,
46
diff --git a/target/arm/internals.h b/target/arm/internals.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/internals.h
49
+++ b/target/arm/internals.h
50
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
51
case ARMMMUIdx_MUser:
52
return false;
53
case ARMMMUIdx_S1E3:
54
- case ARMMMUIdx_S1SE0:
55
- case ARMMMUIdx_S1SE1:
56
+ case ARMMMUIdx_SE10_0:
57
+ case ARMMMUIdx_SE10_1:
58
case ARMMMUIdx_MSPrivNegPri:
59
case ARMMMUIdx_MSUserNegPri:
60
case ARMMMUIdx_MSPriv:
61
diff --git a/target/arm/translate.h b/target/arm/translate.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/translate.h
64
+++ b/target/arm/translate.h
65
@@ -XXX,XX +XXX,XX @@ static inline int default_exception_el(DisasContext *s)
66
* exceptions can only be routed to ELs above 1, so we target the higher of
67
* 1 or the current EL.
68
*/
69
- return (s->mmu_idx == ARMMMUIdx_S1SE0 && s->secure_routed_to_el3)
70
+ return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
71
? 3 : MAX(1, s->current_el);
72
}
73
74
diff --git a/target/arm/helper.c b/target/arm/helper.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/helper.c
77
+++ b/target/arm/helper.c
78
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
79
mmu_idx = ARMMMUIdx_Stage1_E1;
80
break;
81
case 1:
82
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_Stage1_E1;
83
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
84
break;
85
default:
86
g_assert_not_reached();
87
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
88
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
89
switch (el) {
90
case 3:
91
- mmu_idx = ARMMMUIdx_S1SE0;
92
+ mmu_idx = ARMMMUIdx_SE10_0;
93
break;
94
case 2:
95
mmu_idx = ARMMMUIdx_Stage1_E0;
96
break;
97
case 1:
98
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_Stage1_E0;
99
+ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
100
break;
101
default:
102
g_assert_not_reached();
103
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
104
case 0:
105
switch (ri->opc1) {
106
case 0: /* AT S1E1R, AT S1E1W */
107
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_Stage1_E1;
108
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
109
break;
110
case 4: /* AT S1E2R, AT S1E2W */
111
mmu_idx = ARMMMUIdx_S1E2;
112
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
113
}
114
break;
115
case 2: /* AT S1E0R, AT S1E0W */
116
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_Stage1_E0;
117
+ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
118
break;
119
case 4: /* AT S12E1R, AT S12E1W */
120
- mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_E10_1;
121
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
122
break;
123
case 6: /* AT S12E0R, AT S12E0W */
124
- mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_E10_0;
125
+ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
126
break;
127
default:
128
g_assert_not_reached();
129
@@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
130
static int vae1_tlbmask(CPUARMState *env)
131
{
132
if (arm_is_secure_below_el3(env)) {
133
- return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
134
+ return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
135
} else {
136
return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
137
}
138
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
139
* stage 1 translations.
140
*/
141
if (arm_is_secure_below_el3(env)) {
142
- return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
143
+ return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
144
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
145
return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2;
146
} else {
147
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
148
return 2;
149
case ARMMMUIdx_S1E3:
150
return 3;
151
- case ARMMMUIdx_S1SE0:
152
+ case ARMMMUIdx_SE10_0:
153
return arm_el_is_aa64(env, 3) ? 1 : 3;
154
- case ARMMMUIdx_S1SE1:
155
+ case ARMMMUIdx_SE10_1:
156
case ARMMMUIdx_Stage1_E0:
157
case ARMMMUIdx_Stage1_E1:
158
case ARMMMUIdx_MPrivNegPri:
159
@@ -XXX,XX +XXX,XX @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
160
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
161
{
162
switch (mmu_idx) {
163
- case ARMMMUIdx_S1SE0:
164
+ case ARMMMUIdx_SE10_0:
165
case ARMMMUIdx_Stage1_E0:
166
case ARMMMUIdx_MUser:
167
case ARMMMUIdx_MSUser:
168
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
169
}
170
171
if (el < 2 && arm_is_secure_below_el3(env)) {
172
- return ARMMMUIdx_S1SE0 + el;
173
+ return ARMMMUIdx_SE10_0 + el;
174
} else {
175
return ARMMMUIdx_E10_0 + el;
176
}
177
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
178
index XXXXXXX..XXXXXXX 100644
179
--- a/target/arm/translate-a64.c
180
+++ b/target/arm/translate-a64.c
181
@@ -XXX,XX +XXX,XX @@ static inline int get_a64_user_mem_index(DisasContext *s)
182
case ARMMMUIdx_E10_1:
183
useridx = ARMMMUIdx_E10_0;
184
break;
185
- case ARMMMUIdx_S1SE1:
186
- useridx = ARMMMUIdx_S1SE0;
187
+ case ARMMMUIdx_SE10_1:
188
+ useridx = ARMMMUIdx_SE10_0;
189
break;
190
case ARMMMUIdx_Stage2:
191
g_assert_not_reached();
192
diff --git a/target/arm/translate.c b/target/arm/translate.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/target/arm/translate.c
195
+++ b/target/arm/translate.c
196
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
197
case ARMMMUIdx_E10_1:
198
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
199
case ARMMMUIdx_S1E3:
200
- case ARMMMUIdx_S1SE0:
201
- case ARMMMUIdx_S1SE1:
202
- return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
203
+ case ARMMMUIdx_SE10_0:
204
+ case ARMMMUIdx_SE10_1:
205
+ return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
206
case ARMMMUIdx_MUser:
207
case ARMMMUIdx_MPriv:
208
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
209
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

This is part of a reorganization of the set of mmu_idx.
The non-secure EL2 regime only has a single-stage translation;
there is no point in pointing out that the idx is for stage1.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
13
target/arm/cpu.h | 4 ++--
14
target/arm/internals.h | 2 +-
15
target/arm/helper.c | 22 +++++++++++-----------
16
target/arm/translate.c | 2 +-
17
4 files changed, 15 insertions(+), 15 deletions(-)
18
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
24
typedef enum ARMMMUIdx {
25
ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
26
ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
27
- ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
28
+ ARMMMUIdx_E2 = 2 | ARM_MMU_IDX_A,
29
ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
30
ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
31
ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
32
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
33
typedef enum ARMMMUIdxBit {
34
ARMMMUIdxBit_E10_0 = 1 << 0,
35
ARMMMUIdxBit_E10_1 = 1 << 1,
36
- ARMMMUIdxBit_S1E2 = 1 << 2,
37
+ ARMMMUIdxBit_E2 = 1 << 2,
38
ARMMMUIdxBit_SE3 = 1 << 3,
39
ARMMMUIdxBit_SE10_0 = 1 << 4,
40
ARMMMUIdxBit_SE10_1 = 1 << 5,
41
diff --git a/target/arm/internals.h b/target/arm/internals.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/internals.h
44
+++ b/target/arm/internals.h
45
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
46
case ARMMMUIdx_E10_1:
47
case ARMMMUIdx_Stage1_E0:
48
case ARMMMUIdx_Stage1_E1:
49
- case ARMMMUIdx_S1E2:
50
+ case ARMMMUIdx_E2:
51
case ARMMMUIdx_Stage2:
52
case ARMMMUIdx_MPrivNegPri:
53
case ARMMMUIdx_MUserNegPri:
54
diff --git a/target/arm/helper.c b/target/arm/helper.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/helper.c
57
+++ b/target/arm/helper.c
58
@@ -XXX,XX +XXX,XX @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
59
{
60
CPUState *cs = env_cpu(env);
61
62
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
63
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
64
}
65
66
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
67
@@ -XXX,XX +XXX,XX @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
68
{
69
CPUState *cs = env_cpu(env);
70
71
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
72
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
73
}
74
75
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
76
@@ -XXX,XX +XXX,XX @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
77
CPUState *cs = env_cpu(env);
78
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
79
80
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
81
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
82
}
83
84
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
85
@@ -XXX,XX +XXX,XX @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
86
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
87
88
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
89
- ARMMMUIdxBit_S1E2);
90
+ ARMMMUIdxBit_E2);
91
}
92
93
static const ARMCPRegInfo cp_reginfo[] = {
94
@@ -XXX,XX +XXX,XX @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
95
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
96
uint64_t par64;
97
98
- par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
99
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
100
101
A32_BANKED_CURRENT_REG_SET(env, par, par64);
102
}
103
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
104
mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
105
break;
106
case 4: /* AT S1E2R, AT S1E2W */
107
- mmu_idx = ARMMMUIdx_S1E2;
108
+ mmu_idx = ARMMMUIdx_E2;
109
break;
110
case 6: /* AT S1E3R, AT S1E3W */
111
mmu_idx = ARMMMUIdx_SE3;
112
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
113
ARMCPU *cpu = env_archcpu(env);
114
CPUState *cs = CPU(cpu);
115
116
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
117
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
118
}
119
120
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
121
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
122
{
123
CPUState *cs = env_cpu(env);
124
125
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
126
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
127
}
128
129
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
130
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
131
CPUState *cs = CPU(cpu);
132
uint64_t pageaddr = sextract64(value << 12, 0, 56);
133
134
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
135
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
136
}
137
138
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
139
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
140
uint64_t pageaddr = sextract64(value << 12, 0, 56);
141
142
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
143
- ARMMMUIdxBit_S1E2);
144
+ ARMMMUIdxBit_E2);
145
}
146
147
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
148
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
149
{
150
switch (mmu_idx) {
151
case ARMMMUIdx_Stage2:
152
- case ARMMMUIdx_S1E2:
153
+ case ARMMMUIdx_E2:
154
return 2;
155
case ARMMMUIdx_SE3:
156
return 3;
157
diff --git a/target/arm/translate.c b/target/arm/translate.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/target/arm/translate.c
160
+++ b/target/arm/translate.c
161
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
162
* otherwise, access as if at PL0.
163
*/
164
switch (s->mmu_idx) {
165
- case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
166
+ case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
167
case ARMMMUIdx_E10_0:
168
case ARMMMUIdx_E10_1:
169
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
170
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

We had completely run out of TBFLAG bits.
Split A- and M-profile bits into two overlapping buckets.
This results in 4 free bits.

We used to initialize all of the a32 and m32 fields in DisasContext
by assignment, in arm_tr_init_disas_context. Now we only initialize
either the a32 or m32 fields by assignment, because the bits overlap
in tbflags. So zero the entire structure in gen_intermediate_code.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
18
target/arm/cpu.h | 68 ++++++++++++++++++++++++++----------------
19
target/arm/helper.c | 17 +++++------
20
target/arm/translate.c | 57 +++++++++++++++++++----------------
21
3 files changed, 82 insertions(+), 60 deletions(-)
22
23
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/cpu.h
26
+++ b/target/arm/cpu.h
27
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
28
* We put flags which are shared between 32 and 64 bit mode at the top
29
* of the word, and flags which apply to only one mode at the bottom.
30
*
31
+ * 31 21 18 14 9 0
32
+ * +--------------+-----+-----+----------+--------------+
33
+ * | | | TBFLAG_A32 | |
34
+ * | | +-----+----------+ TBFLAG_AM32 |
35
+ * | TBFLAG_ANY | |TBFLAG_M32| |
36
+ * | | +-------------------------|
37
+ * | | | TBFLAG_A64 |
38
+ * +--------------+-----------+-------------------------+
39
+ * 31 21 14 0
40
+ *
41
* Unless otherwise noted, these bits are cached in env->hflags.
42
*/
43
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
44
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1) /* Not cached. */
45
/* Target EL if we take a floating-point-disabled exception */
46
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
47
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
48
-/*
49
- * For A-profile only, target EL for debug exceptions.
50
- * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
51
- */
52
+/* For A-profile only, target EL for debug exceptions. */
53
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
54
55
-/* Bit usage when in AArch32 state: */
56
-FIELD(TBFLAG_A32, THUMB, 0, 1) /* Not cached. */
57
-FIELD(TBFLAG_A32, VECLEN, 1, 3) /* Not cached. */
58
-FIELD(TBFLAG_A32, VECSTRIDE, 4, 2) /* Not cached. */
59
+/*
60
+ * Bit usage when in AArch32 state, both A- and M-profile.
61
+ */
62
+FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */
63
+FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */
64
+
65
+/*
66
+ * Bit usage when in AArch32 state, for A-profile only.
67
+ */
68
+FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. */
69
+FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
70
/*
71
* We store the bottom two bits of the CPAR as TB flags and handle
72
* checks on the other bits at runtime. This shares the same bits as
73
* VECSTRIDE, which is OK as no XScale CPU has VFP.
74
* Not cached, because VECLEN+VECSTRIDE are not cached.
75
*/
76
-FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
77
+FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
78
+FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
79
+FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
80
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
81
/*
82
* Indicates whether cp register reads and writes by guest code should access
83
* the secure or nonsecure bank of banked registers; note that this is not
84
* the same thing as the current security state of the processor!
85
*/
86
-FIELD(TBFLAG_A32, NS, 6, 1)
87
-FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
88
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8) /* Not cached. */
89
-FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
90
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 17, 1)
91
+FIELD(TBFLAG_A32, NS, 17, 1)
92
93
-/* For M profile only, set if FPCCR.LSPACT is set */
94
-FIELD(TBFLAG_A32, LSPACT, 18, 1) /* Not cached. */
95
-/* For M profile only, set if we must create a new FP context */
96
-FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1) /* Not cached. */
97
-/* For M profile only, set if FPCCR.S does not match current security state */
98
-FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1) /* Not cached. */
99
-/* For M profile only, Handler (ie not Thread) mode */
100
-FIELD(TBFLAG_A32, HANDLER, 21, 1)
101
-/* For M profile only, whether we should generate stack-limit checks */
102
-FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
103
+/*
104
+ * Bit usage when in AArch32 state, for M-profile only.
105
+ */
106
+/* Handler (ie not Thread) mode */
107
+FIELD(TBFLAG_M32, HANDLER, 9, 1)
108
+/* Whether we should generate stack-limit checks */
109
+FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
110
+/* Set if FPCCR.LSPACT is set */
111
+FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */
112
+/* Set if we must create a new FP context */
113
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */
114
+/* Set if FPCCR.S does not match current security state */
115
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
116
117
-/* Bit usage when in AArch64 state */
118
+/*
119
+ * Bit usage when in AArch64 state
120
+ */
121
FIELD(TBFLAG_A64, TBII, 0, 2)
122
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
123
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
124
diff --git a/target/arm/helper.c b/target/arm/helper.c
125
index XXXXXXX..XXXXXXX 100644
126
--- a/target/arm/helper.c
127
+++ b/target/arm/helper.c
128
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
129
{
130
uint32_t flags = 0;
131
132
- /* v8M always enables the fpu. */
133
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
134
-
135
if (arm_v7m_is_handler_mode(env)) {
136
- flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
137
+ flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
138
}
139
140
/*
141
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
142
if (arm_feature(env, ARM_FEATURE_V8) &&
143
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
144
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
145
- flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
146
+ flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
147
}
148
149
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
150
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
151
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
152
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
153
!= env->v7m.secure) {
154
- flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
155
+ flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
156
}
157
158
if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
159
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
160
* active FP context; we must create a new FP context before
161
* executing any FP insn.
162
*/
163
- flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
164
+ flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
165
}
166
167
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
168
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
169
- flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
170
+ flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
171
}
172
} else {
173
/*
174
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
175
}
176
}
177
178
- flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
179
- flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
180
+ flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
181
+ flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
182
pstate_for_ss = env->uncached_cpsr;
183
}
184
185
diff --git a/target/arm/translate.c b/target/arm/translate.c
186
index XXXXXXX..XXXXXXX 100644
187
--- a/target/arm/translate.c
188
+++ b/target/arm/translate.c
189
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
190
*/
191
dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
192
!arm_el_is_aa64(env, 3);
193
- dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
194
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
195
- dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
196
+ dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
197
dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
198
- condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
199
+ condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
200
dc->condexec_mask = (condexec & 0xf) << 1;
201
dc->condexec_cond = condexec >> 4;
202
+
203
core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
204
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
205
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
206
#if !defined(CONFIG_USER_ONLY)
207
dc->user = (dc->current_el == 0);
208
#endif
209
- dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
210
dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
211
- dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
212
- dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
213
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
214
- dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
215
- dc->vec_stride = 0;
216
+
217
+ if (arm_feature(env, ARM_FEATURE_M)) {
218
+ dc->vfp_enabled = 1;
219
+ dc->be_data = MO_TE;
220
+ dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
221
+ dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
222
+ regime_is_secure(env, dc->mmu_idx);
223
+ dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
224
+ dc->v8m_fpccr_s_wrong =
225
+ FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
226
+ dc->v7m_new_fp_ctxt_needed =
227
+ FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
228
+ dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
229
} else {
230
- dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
231
- dc->c15_cpar = 0;
232
+ dc->be_data =
233
+ FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
234
+ dc->debug_target_el =
235
+ FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
236
+ dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
237
+ dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
238
+ dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
239
+ dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
240
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
241
+ dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
242
+ } else {
243
+ dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
244
+ dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
245
+ }
246
}
247
- dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
248
- dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
249
- regime_is_secure(env, dc->mmu_idx);
250
- dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
251
- dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
252
- dc->v7m_new_fp_ctxt_needed =
253
- FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
254
- dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
255
dc->cp_regs = cpu->cp_regs;
256
dc->features = env->features;
257
258
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
259
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
260
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
261
dc->is_ldex = false;
262
- if (!arm_feature(env, ARM_FEATURE_M)) {
263
- dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
264
- }
265
266
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
267
268
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
269
/* generate intermediate code for basic block 'tb'. */
270
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
271
{
272
- DisasContext dc;
273
+ DisasContext dc = { };
274
const TranslatorOps *ops = &arm_translator_ops;
275
276
- if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
277
+ if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
278
ops = &thumb_translator_ops;
279
}
280
#ifdef TARGET_AARCH64
281
--
2.20.1

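As a rough illustration of the overlapping-bucket scheme described above
(a minimal standalone sketch: the deposit32/extract32 helpers below are
simplified stand-ins for QEMU's FIELD_DP32/FIELD_EX32 macros, not the
real definitions), the same bit positions can carry either A-profile or
M-profile meaning because any given TB is decoded as exactly one of the
two profiles:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for QEMU's FIELD_DP32/FIELD_EX32 helpers. */
    static uint32_t deposit32(uint32_t word, int start, int len, uint32_t val)
    {
        uint32_t mask = ((1u << len) - 1) << start;
        return (word & ~mask) | ((val << start) & mask);
    }

    static uint32_t extract32(uint32_t word, int start, int len)
    {
        return (word >> start) & ((1u << len) - 1);
    }

    int main(void)
    {
        /* Shared AArch32 bits: CONDEXEC at [7:0], THUMB at bit 8.
         * A-profile-only VECLEN at [11:9] overlaps M-profile-only HANDLER
         * at bit 9; that is safe because a TB is either A-profile or
         * M-profile, never both. */
        uint32_t a_flags = 0, m_flags = 0;

        a_flags = deposit32(a_flags, 8, 1, 1);   /* THUMB */
        a_flags = deposit32(a_flags, 9, 3, 5);   /* VECLEN (A-profile meaning) */

        m_flags = deposit32(m_flags, 8, 1, 1);   /* THUMB */
        m_flags = deposit32(m_flags, 9, 1, 1);   /* HANDLER (M-profile meaning) */

        printf("A-profile VECLEN  = %u\n", extract32(a_flags, 9, 3));
        printf("M-profile HANDLER = %u\n", extract32(m_flags, 9, 1));
        return 0;
    }
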
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

We are about to expand the number of mmuidx to 10, and so need 4 bits.
For the benefit of reading the number out of -d exec, align it to the
penultimate nibble.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
13
target/arm/cpu.h | 16 ++++++++--------
14
1 file changed, 8 insertions(+), 8 deletions(-)
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
21
* We put flags which are shared between 32 and 64 bit mode at the top
22
* of the word, and flags which apply to only one mode at the bottom.
23
*
24
- * 31 21 18 14 9 0
25
+ * 31 20 18 14 9 0
26
* +--------------+-----+-----+----------+--------------+
27
* | | | TBFLAG_A32 | |
28
* | | +-----+----------+ TBFLAG_AM32 |
29
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
30
* | | +-------------------------|
31
* | | | TBFLAG_A64 |
32
* +--------------+-----------+-------------------------+
33
- * 31 21 14 0
34
+ * 31 20 14 0
35
*
36
* Unless otherwise noted, these bits are cached in env->hflags.
37
*/
38
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
39
-FIELD(TBFLAG_ANY, MMUIDX, 28, 3)
40
-FIELD(TBFLAG_ANY, SS_ACTIVE, 27, 1)
41
-FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1) /* Not cached. */
42
+FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
43
+FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */
44
+FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
45
+FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
46
/* Target EL if we take a floating-point-disabled exception */
47
-FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
48
-FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
49
+FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
50
/* For A-profile only, target EL for debug exceptions. */
51
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
52
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
53
54
/*
55
* Bit usage when in AArch32 state, both A- and M-profile.
56
--
2.20.1

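To see why parking the (now 4-bit) MMUIDX field at bits [27:24] helps when
reading flags out of -d exec logs, note that those bits are exactly the
penultimate hex digit of the printed 32-bit value. A quick standalone
sketch (not QEMU code; the flag layout is only illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* With MMUIDX occupying bits [27:24], the index is simply the
         * second-from-top nibble of the printed flags word. */
        uint32_t flags = 0x80000000u;   /* some unrelated high flag, for illustration */
        uint32_t mmu_idx = 9;           /* any value 0..15 now fits in the field */

        flags |= (mmu_idx & 0xf) << 24;

        printf("flags = %08x, mmu_idx nibble = %x\n",
               (unsigned)flags, (unsigned)((flags >> 24) & 0xf));
        /* prints: flags = 89000000, mmu_idx nibble = 9 */
        return 0;
    }
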
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Define via macro expansion, so that renumbering of the base ARMMMUIdx
symbols is automatically reflected in the bit definitions.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
13
target/arm/cpu.h | 39 +++++++++++++++++++++++----------------
14
1 file changed, 23 insertions(+), 16 deletions(-)
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
21
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
22
} ARMMMUIdx;
23
24
-/* Bit macros for the core-mmu-index values for each index,
25
+/*
26
+ * Bit macros for the core-mmu-index values for each index,
27
* for use when calling tlb_flush_by_mmuidx() and friends.
28
*/
29
+#define TO_CORE_BIT(NAME) \
30
+ ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
31
+
32
typedef enum ARMMMUIdxBit {
33
- ARMMMUIdxBit_E10_0 = 1 << 0,
34
- ARMMMUIdxBit_E10_1 = 1 << 1,
35
- ARMMMUIdxBit_E2 = 1 << 2,
36
- ARMMMUIdxBit_SE3 = 1 << 3,
37
- ARMMMUIdxBit_SE10_0 = 1 << 4,
38
- ARMMMUIdxBit_SE10_1 = 1 << 5,
39
- ARMMMUIdxBit_Stage2 = 1 << 6,
40
- ARMMMUIdxBit_MUser = 1 << 0,
41
- ARMMMUIdxBit_MPriv = 1 << 1,
42
- ARMMMUIdxBit_MUserNegPri = 1 << 2,
43
- ARMMMUIdxBit_MPrivNegPri = 1 << 3,
44
- ARMMMUIdxBit_MSUser = 1 << 4,
45
- ARMMMUIdxBit_MSPriv = 1 << 5,
46
- ARMMMUIdxBit_MSUserNegPri = 1 << 6,
47
- ARMMMUIdxBit_MSPrivNegPri = 1 << 7,
48
+ TO_CORE_BIT(E10_0),
49
+ TO_CORE_BIT(E10_1),
50
+ TO_CORE_BIT(E2),
51
+ TO_CORE_BIT(SE10_0),
52
+ TO_CORE_BIT(SE10_1),
53
+ TO_CORE_BIT(SE3),
54
+ TO_CORE_BIT(Stage2),
55
+
56
+ TO_CORE_BIT(MUser),
57
+ TO_CORE_BIT(MPriv),
58
+ TO_CORE_BIT(MUserNegPri),
59
+ TO_CORE_BIT(MPrivNegPri),
60
+ TO_CORE_BIT(MSUser),
61
+ TO_CORE_BIT(MSPriv),
62
+ TO_CORE_BIT(MSUserNegPri),
63
+ TO_CORE_BIT(MSPrivNegPri),
64
} ARMMMUIdxBit;
65
66
+#undef TO_CORE_BIT
67
+
68
#define MMU_USER_IDX 0
69
70
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
71
--
2.20.1

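The macro trick above can be seen in isolation with a toy enum (a hedged,
self-contained sketch rather than the real ARMMMUIdx definitions): the bit
enumerators are derived from the index enumerators, so renumbering an
index automatically moves its bit.

    #include <stdio.h>

    #define COREIDX_MASK 0x7    /* stand-in for ARM_MMU_IDX_COREIDX_MASK */

    typedef enum Idx {
        Idx_E10_0 = 0,
        Idx_E10_1 = 1,
        Idx_E2    = 2,
    } Idx;

    /* Each bit value is computed from the corresponding index, so changing
     * the numbering above automatically updates the masks below. */
    #define TO_CORE_BIT(NAME) IdxBit_##NAME = 1 << (Idx_##NAME & COREIDX_MASK)

    typedef enum IdxBit {
        TO_CORE_BIT(E10_0),
        TO_CORE_BIT(E10_1),
        TO_CORE_BIT(E2),
    } IdxBit;

    #undef TO_CORE_BIT

    int main(void)
    {
        printf("IdxBit_E2 = 0x%x\n", (unsigned)IdxBit_E2);   /* prints 0x4 */
        return 0;
    }
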
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Replace the magic numbers with the relevant ARM_MMU_IDX_M_* constants.
Keep the definitions short by referencing previous symbols.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
12
target/arm/cpu.h | 16 ++++++++--------
13
1 file changed, 8 insertions(+), 8 deletions(-)
14
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
20
ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
21
ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
22
ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
23
- ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
24
- ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
25
- ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
26
- ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M,
27
- ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M,
28
- ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M,
29
- ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M,
30
- ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M,
31
+ ARMMMUIdx_MUser = ARM_MMU_IDX_M,
32
+ ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
33
+ ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
34
+ ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
35
+ ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
36
+ ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
37
+ ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
38
+ ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
39
/* Indexes below here don't have TLBs and are used only for AT system
40
* instructions or for the first stage of an S12 page table walk.
41
*/
42
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Create a predicate to indicate whether the regime has
both positive and negative addresses.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
12
target/arm/internals.h | 18 ++++++++++++++++++
13
target/arm/helper.c | 23 ++++++-----------------
14
target/arm/translate-a64.c | 3 +--
15
3 files changed, 25 insertions(+), 19 deletions(-)
16
17
diff --git a/target/arm/internals.h b/target/arm/internals.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/internals.h
20
+++ b/target/arm/internals.h
21
@@ -XXX,XX +XXX,XX @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
22
}
23
}
24
25
+/* Return true if this address translation regime has two ranges. */
26
+static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
27
+{
28
+ switch (mmu_idx) {
29
+ case ARMMMUIdx_Stage1_E0:
30
+ case ARMMMUIdx_Stage1_E1:
31
+ case ARMMMUIdx_E10_0:
32
+ case ARMMMUIdx_E10_1:
33
+ case ARMMMUIdx_E20_0:
34
+ case ARMMMUIdx_E20_2:
35
+ case ARMMMUIdx_SE10_0:
36
+ case ARMMMUIdx_SE10_1:
37
+ return true;
38
+ default:
39
+ return false;
40
+ }
41
+}
42
+
43
/* Return true if this address translation regime is secure */
44
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
45
{
46
diff --git a/target/arm/helper.c b/target/arm/helper.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/helper.c
49
+++ b/target/arm/helper.c
50
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
51
}
52
53
if (is_aa64) {
54
- switch (regime_el(env, mmu_idx)) {
55
- case 1:
56
- if (!is_user) {
57
- xn = pxn || (user_rw & PAGE_WRITE);
58
- }
59
- break;
60
- case 2:
61
- case 3:
62
- break;
63
+ if (regime_has_2_ranges(mmu_idx) && !is_user) {
64
+ xn = pxn || (user_rw & PAGE_WRITE);
65
}
66
} else if (arm_feature(env, ARM_FEATURE_V7)) {
67
switch (regime_el(env, mmu_idx)) {
68
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
69
ARMMMUIdx mmu_idx)
70
{
71
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
72
- uint32_t el = regime_el(env, mmu_idx);
73
bool tbi, tbid, epd, hpd, using16k, using64k;
74
int select, tsz;
75
76
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
77
*/
78
select = extract64(va, 55, 1);
79
80
- if (el > 1) {
81
+ if (!regime_has_2_ranges(mmu_idx)) {
82
tsz = extract32(tcr, 0, 6);
83
using64k = extract32(tcr, 14, 1);
84
using16k = extract32(tcr, 15, 1);
85
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
86
param = aa64_va_parameters(env, address, mmu_idx,
87
access_type != MMU_INST_FETCH);
88
level = 0;
89
- /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
90
- * invalid.
91
- */
92
- ttbr1_valid = (el < 2);
93
+ ttbr1_valid = regime_has_2_ranges(mmu_idx);
94
addrsize = 64 - 8 * param.tbi;
95
inputsize = 64 - param.tsz;
96
} else {
97
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
98
99
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
100
101
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
102
- if (regime_el(env, stage1) < 2) {
103
+ /* Get control bits for tagged addresses. */
104
+ if (regime_has_2_ranges(mmu_idx)) {
105
ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
106
tbid = (p1.tbi << 1) | p0.tbi;
107
tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
108
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/arm/translate-a64.c
111
+++ b/target/arm/translate-a64.c
112
@@ -XXX,XX +XXX,XX @@ static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
113
if (tbi == 0) {
114
/* Load unmodified address */
115
tcg_gen_mov_i64(dst, src);
116
- } else if (s->current_el >= 2) {
117
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
118
+ } else if (!regime_has_2_ranges(s->mmu_idx)) {
119
/* Force tag byte to all zero */
120
tcg_gen_extract_i64(dst, src, 0, 56);
121
} else {
122
--
2.20.1

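For the regimes that regime_has_2_ranges() reports as having two address
ranges (the EL1&0, EL2&0 and Secure EL1&0 regimes), the choice between the
low and high VA range is keyed off bit 55 of the address. A minimal sketch
of just that selection, in plain C rather than the QEMU helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* In a two-range regime, VA bit 55 picks the low range (TTBR0,
     * select == 0) or the high range (TTBR1, select == 1); single-range
     * regimes always use the low range. */
    static int va_range_select(uint64_t va, int has_two_ranges)
    {
        return has_two_ranges ? (int)((va >> 55) & 1) : 0;
    }

    int main(void)
    {
        printf("0x0000ffff00000000 -> range %d\n",
               va_range_select(0x0000ffff00000000ull, 1));   /* 0: TTBR0 */
        printf("0xffffffff00000000 -> range %d\n",
               va_range_select(0xffffffff00000000ull, 1));   /* 1: TTBR1 */
        return 0;
    }
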
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Return the indexes for the EL2&0 regime when the appropriate bits
are set within HCR_EL2.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
12
target/arm/helper.c | 11 +++++++++--
13
1 file changed, 9 insertions(+), 2 deletions(-)
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
20
return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
21
}
22
23
+ /* See ARM pseudo-function ELIsInHost. */
24
switch (el) {
25
case 0:
26
- /* TODO: ARMv8.1-VHE */
27
if (arm_is_secure_below_el3(env)) {
28
return ARMMMUIdx_SE10_0;
29
}
30
+ if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
31
+ && arm_el_is_aa64(env, 2)) {
32
+ return ARMMMUIdx_E20_0;
33
+ }
34
return ARMMMUIdx_E10_0;
35
case 1:
36
if (arm_is_secure_below_el3(env)) {
37
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
38
}
39
return ARMMMUIdx_E10_1;
40
case 2:
41
- /* TODO: ARMv8.1-VHE */
42
/* TODO: ARMv8.4-SecEL2 */
43
+ /* Note that TGE does not apply at EL2. */
44
+ if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
45
+ return ARMMMUIdx_E20_2;
46
+ }
47
return ARMMMUIdx_E2;
48
case 3:
49
return ARMMMUIdx_SE3;
50
--
2.20.1

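The selection in arm_mmu_idx_el() follows the architectural ELIsInHost()
pseudo-function: EL0 uses the EL2&0 regime only when HCR_EL2.E2H and
HCR_EL2.TGE are both set and EL2 is AArch64, while EL2 itself only needs
E2H. A rough standalone restatement of that decision (hypothetical helper
name and illustrative bit positions, not QEMU's API):

    #include <stdbool.h>
    #include <stdio.h>

    #define HCR_E2H (1u << 0)   /* illustrative bit positions only */
    #define HCR_TGE (1u << 1)

    /* Return true if the given EL runs in the EL2&0 (VHE "host") regime. */
    static bool el_is_in_host(unsigned el, unsigned hcr, bool el2_is_aa64)
    {
        if (!el2_is_aa64) {
            return false;
        }
        if (el == 0) {
            return (hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
        }
        if (el == 2) {
            return hcr & HCR_E2H;   /* TGE does not matter at EL2 */
        }
        return false;
    }

    int main(void)
    {
        printf("EL0, E2H only: %d\n", el_is_in_host(0, HCR_E2H, true));
        printf("EL0, E2H+TGE:  %d\n", el_is_in_host(0, HCR_E2H | HCR_TGE, true));
        printf("EL2, E2H:      %d\n", el_is_in_host(2, HCR_E2H, true));
        return 0;
    }
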
1
From: Richard Henderson <richard.henderson@linaro.org>
1
The functions qemu_get_timedate() and qemu_timedate_diff() take
2
and return a time offset as an integer. Coverity points out that
3
means that when an RTC device implementation holds an offset
4
as a time_t, as the m48t59 does, the time_t will get truncated.
5
(CID 1507157, 1517772).
2
6
3
Use the correct sctlr for EL2&0 regime. Due to header ordering,
7
The functions work with time_t internally, so make them use that type
4
and where arm_mmu_idx_el is declared, we need to move the function
8
in their APIs.
5
out of line. Use the function in many more places in order to
6
select the correct control.
7
9
8
Tested-by: Alex Bennée <alex.bennee@linaro.org>
10
Note that this won't help any Y2038 issues where either the device
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
model itself is keeping the offset in a 32-bit integer, or where the
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
hardware under emulation has Y2038 or other rollover problems. If we
11
Message-id: 20200206105448.4726-23-richard.henderson@linaro.org
13
missed any cases of the former then hopefully Coverity will warn us
14
about them since after this patch we'd be truncating a time_t in
15
assignments from qemu_timedate_diff().)
16
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
13
---
19
---
14
target/arm/cpu.h | 10 +---------
20
include/sysemu/rtc.h | 4 ++--
15
target/arm/helper-a64.c | 2 +-
21
softmmu/rtc.c | 4 ++--
16
target/arm/helper.c | 20 +++++++++++++++-----
22
2 files changed, 4 insertions(+), 4 deletions(-)
17
target/arm/pauth_helper.c | 9 +--------
18
4 files changed, 18 insertions(+), 23 deletions(-)
19
23
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
24
diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h
21
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
26
--- a/include/sysemu/rtc.h
23
+++ b/target/arm/cpu.h
27
+++ b/include/sysemu/rtc.h
24
@@ -XXX,XX +XXX,XX @@ static inline bool arm_sctlr_b(CPUARMState *env)
28
@@ -XXX,XX +XXX,XX @@
25
(env->cp15.sctlr_el[1] & SCTLR_B) != 0;
29
* The behaviour of the clock whose value this function returns will
30
* depend on the -rtc command line option passed by the user.
31
*/
32
-void qemu_get_timedate(struct tm *tm, int offset);
33
+void qemu_get_timedate(struct tm *tm, time_t offset);
34
35
/**
36
* qemu_timedate_diff: Return difference between a struct tm and the RTC
37
@@ -XXX,XX +XXX,XX @@ void qemu_get_timedate(struct tm *tm, int offset);
38
* a timestamp one hour further ahead than the current RTC time
39
* then this function will return 3600.
40
*/
41
-int qemu_timedate_diff(struct tm *tm);
42
+time_t qemu_timedate_diff(struct tm *tm);
43
44
#endif
45
diff --git a/softmmu/rtc.c b/softmmu/rtc.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/softmmu/rtc.c
48
+++ b/softmmu/rtc.c
49
@@ -XXX,XX +XXX,XX @@ static time_t qemu_ref_timedate(QEMUClockType clock)
50
return value;
26
}
51
}
27
52
28
-static inline uint64_t arm_sctlr(CPUARMState *env, int el)
53
-void qemu_get_timedate(struct tm *tm, int offset)
29
-{
54
+void qemu_get_timedate(struct tm *tm, time_t offset)
30
- if (el == 0) {
31
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
32
- return env->cp15.sctlr_el[1];
33
- } else {
34
- return env->cp15.sctlr_el[el];
35
- }
36
-}
37
+uint64_t arm_sctlr(CPUARMState *env, int el);
38
39
static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
40
bool sctlr_b)
41
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/helper-a64.c
44
+++ b/target/arm/helper-a64.c
45
@@ -XXX,XX +XXX,XX @@ static void daif_check(CPUARMState *env, uint32_t op,
46
uint32_t imm, uintptr_t ra)
47
{
55
{
48
/* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
56
time_t ti = qemu_ref_timedate(rtc_clock);
49
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
57
50
+ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
58
@@ -XXX,XX +XXX,XX @@ void qemu_get_timedate(struct tm *tm, int offset)
51
raise_exception_ra(env, EXCP_UDEF,
52
syn_aa64_sysregtrap(0, extract32(op, 0, 3),
53
extract32(op, 3, 3), 4,
54
diff --git a/target/arm/helper.c b/target/arm/helper.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/helper.c
57
+++ b/target/arm/helper.c
58
@@ -XXX,XX +XXX,XX @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
59
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
60
bool isread)
61
{
62
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
63
+ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
64
return CP_ACCESS_TRAP;
65
}
66
return CP_ACCESS_OK;
67
@@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
68
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
69
* SCTLR_EL1.UCI is set.
70
*/
71
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
72
+ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
73
return CP_ACCESS_TRAP;
74
}
75
return CP_ACCESS_OK;
76
@@ -XXX,XX +XXX,XX @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
77
}
59
}
78
}
60
}
79
61
80
-#ifndef CONFIG_USER_ONLY
62
-int qemu_timedate_diff(struct tm *tm)
81
+uint64_t arm_sctlr(CPUARMState *env, int el)
63
+time_t qemu_timedate_diff(struct tm *tm)
82
+{
83
+ /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
84
+ if (el == 0) {
85
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
86
+ el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
87
+ }
88
+ return env->cp15.sctlr_el[el];
89
+}
90
91
/* Return the SCTLR value which controls this address translation regime */
92
-static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
93
+static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
94
{
64
{
95
return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
65
time_t seconds;
96
}
66
97
98
+#ifndef CONFIG_USER_ONLY
99
+
100
/* Return true if the specified stage of address translation is disabled */
101
static inline bool regime_translation_disabled(CPUARMState *env,
102
ARMMMUIdx mmu_idx)
103
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
104
flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
105
}
106
107
- sctlr = arm_sctlr(env, el);
108
+ sctlr = regime_sctlr(env, stage1);
109
110
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
111
flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
112
diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/target/arm/pauth_helper.c
115
+++ b/target/arm/pauth_helper.c
116
@@ -XXX,XX +XXX,XX @@ static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
117
118
static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit)
119
{
120
- uint32_t sctlr;
121
- if (el == 0) {
122
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
123
- sctlr = env->cp15.sctlr_el[1];
124
- } else {
125
- sctlr = env->cp15.sctlr_el[el];
126
- }
127
- return (sctlr & bit) != 0;
128
+ return (arm_sctlr(env, el) & bit) != 0;
129
}
130
131
uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y)
132
--
67
--
133
2.20.1
68
2.34.1
134
69
135
70
diff view generated by jsdifflib
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

The comment that we don't support EL2 is somewhat out of date.
Update to include checks against HCR_EL2.TDZ.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
12
target/arm/helper.c | 26 +++++++++++++++++++++-----
13
1 file changed, 21 insertions(+), 5 deletions(-)
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
20
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
21
bool isread)
22
{
23
- /* We don't implement EL2, so the only control on DC ZVA is the
24
- * bit in the SCTLR which can prohibit access for EL0.
25
- */
26
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
27
- return CP_ACCESS_TRAP;
28
+ int cur_el = arm_current_el(env);
29
+
30
+ if (cur_el < 2) {
31
+ uint64_t hcr = arm_hcr_el2_eff(env);
32
+
33
+ if (cur_el == 0) {
34
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
35
+ if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
36
+ return CP_ACCESS_TRAP_EL2;
37
+ }
38
+ } else {
39
+ if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
40
+ return CP_ACCESS_TRAP;
41
+ }
42
+ if (hcr & HCR_TDZ) {
43
+ return CP_ACCESS_TRAP_EL2;
44
+ }
45
+ }
46
+ } else if (hcr & HCR_TDZ) {
47
+ return CP_ACCESS_TRAP_EL2;
48
+ }
49
}
50
return CP_ACCESS_OK;
51
}
52
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Update to include checks against HCR_EL2.TID2.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
11
target/arm/helper.c | 26 +++++++++++++++++++++-----
12
1 file changed, 21 insertions(+), 5 deletions(-)
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
19
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
20
bool isread)
21
{
22
- /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
23
- * but the AArch32 CTR has its own reginfo struct)
24
- */
25
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
26
- return CP_ACCESS_TRAP;
27
+ int cur_el = arm_current_el(env);
28
+
29
+ if (cur_el < 2) {
30
+ uint64_t hcr = arm_hcr_el2_eff(env);
31
+
32
+ if (cur_el == 0) {
33
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
34
+ if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
35
+ return CP_ACCESS_TRAP_EL2;
36
+ }
37
+ } else {
38
+ if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
39
+ return CP_ACCESS_TRAP;
40
+ }
41
+ if (hcr & HCR_TID2) {
42
+ return CP_ACCESS_TRAP_EL2;
43
+ }
44
+ }
45
+ } else if (hcr & HCR_TID2) {
46
+ return CP_ACCESS_TRAP_EL2;
47
+ }
48
}
49
50
if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
51
--
2.20.1

1
From: Richard Henderson <richard.henderson@linaro.org>
1
Where architecturally one ARM_FEATURE_X flag implies another
2
2
ARM_FEATURE_Y, we allow the CPU init function to only set X, and then
3
This inline function has one user in cpu.c, and need not be exposed
3
set Y for it. Currently we do this in two places -- we set a few
4
otherwise. Code movement only, with fixups for checkpatch.
4
flags in arm_cpu_post_init() because we need them to decide which
5
5
properties to create on the CPU object, and then we do the rest in
6
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
arm_cpu_realizefn(). However, this is fragile, because it's easy to
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
add a new property and not notice that this means that an X-implies-Y
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
check now has to move from realize to post-init.
9
Message-id: 20200206105448.4726-39-richard.henderson@linaro.org
9
10
As a specific example, the pmsav7-dregion property is conditional
11
on ARM_FEATURE_PMSA && ARM_FEATURE_V7, which means it won't appear
12
on the Cortex-M33 and -M55, because they set ARM_FEATURE_V8 and
13
rely on V8-implies-V7, which doesn't happen until the realizefn.
14
15
Move all of these X-implies-Y checks into a new function, which
16
we call at the top of arm_cpu_post_init(), so the feature bits
17
are available at that point.
18
19
This does now give us the reverse issue, that if there's a feature
20
bit which is enabled or disabled by the setting of a property then
21
then X-implies-Y features that are dependent on that property need to
22
be in realize, not in this new function. But the only one of those
23
is the "EL3 implies VBAR" which is already in the right place, so
24
putting things this way round seems better to me.
25
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
26
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
27
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
28
Message-id: 20230724174335.2150499-2-peter.maydell@linaro.org
11
---
29
---
12
target/arm/cpu.h | 111 -------------------------------------------
30
target/arm/cpu.c | 179 +++++++++++++++++++++++++----------------------
13
target/arm/cpu.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++
31
1 file changed, 97 insertions(+), 82 deletions(-)
14
2 files changed, 119 insertions(+), 111 deletions(-)
32
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
21
#define ARM_CPUID_TI915T 0x54029152
22
#define ARM_CPUID_TI925T 0x54029252
23
24
-static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
25
- unsigned int target_el)
26
-{
27
- CPUARMState *env = cs->env_ptr;
28
- unsigned int cur_el = arm_current_el(env);
29
- bool secure = arm_is_secure(env);
30
- bool pstate_unmasked;
31
- int8_t unmasked = 0;
32
- uint64_t hcr_el2;
33
-
34
- /* Don't take exceptions if they target a lower EL.
35
- * This check should catch any exceptions that would not be taken but left
36
- * pending.
37
- */
38
- if (cur_el > target_el) {
39
- return false;
40
- }
41
-
42
- hcr_el2 = arm_hcr_el2_eff(env);
43
-
44
- switch (excp_idx) {
45
- case EXCP_FIQ:
46
- pstate_unmasked = !(env->daif & PSTATE_F);
47
- break;
48
-
49
- case EXCP_IRQ:
50
- pstate_unmasked = !(env->daif & PSTATE_I);
51
- break;
52
-
53
- case EXCP_VFIQ:
54
- if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
55
- /* VFIQs are only taken when hypervized and non-secure. */
56
- return false;
57
- }
58
- return !(env->daif & PSTATE_F);
59
- case EXCP_VIRQ:
60
- if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
61
- /* VIRQs are only taken when hypervized and non-secure. */
62
- return false;
63
- }
64
- return !(env->daif & PSTATE_I);
65
- default:
66
- g_assert_not_reached();
67
- }
68
-
69
- /* Use the target EL, current execution state and SCR/HCR settings to
70
- * determine whether the corresponding CPSR bit is used to mask the
71
- * interrupt.
72
- */
73
- if ((target_el > cur_el) && (target_el != 1)) {
74
- /* Exceptions targeting a higher EL may not be maskable */
75
- if (arm_feature(env, ARM_FEATURE_AARCH64)) {
76
- /* 64-bit masking rules are simple: exceptions to EL3
77
- * can't be masked, and exceptions to EL2 can only be
78
- * masked from Secure state. The HCR and SCR settings
79
- * don't affect the masking logic, only the interrupt routing.
80
- */
81
- if (target_el == 3 || !secure) {
82
- unmasked = 1;
83
- }
84
- } else {
85
- /* The old 32-bit-only environment has a more complicated
86
- * masking setup. HCR and SCR bits not only affect interrupt
87
- * routing but also change the behaviour of masking.
88
- */
89
- bool hcr, scr;
90
-
91
- switch (excp_idx) {
92
- case EXCP_FIQ:
93
- /* If FIQs are routed to EL3 or EL2 then there are cases where
94
- * we override the CPSR.F in determining if the exception is
95
- * masked or not. If neither of these are set then we fall back
96
- * to the CPSR.F setting otherwise we further assess the state
97
- * below.
98
- */
99
- hcr = hcr_el2 & HCR_FMO;
100
- scr = (env->cp15.scr_el3 & SCR_FIQ);
101
-
102
- /* When EL3 is 32-bit, the SCR.FW bit controls whether the
103
- * CPSR.F bit masks FIQ interrupts when taken in non-secure
104
- * state. If SCR.FW is set then FIQs can be masked by CPSR.F
105
- * when non-secure but only when FIQs are only routed to EL3.
106
- */
107
- scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
108
- break;
109
- case EXCP_IRQ:
110
- /* When EL3 execution state is 32-bit, if HCR.IMO is set then
111
- * we may override the CPSR.I masking when in non-secure state.
112
- * The SCR.IRQ setting has already been taken into consideration
113
- * when setting the target EL, so it does not have a further
114
- * affect here.
115
- */
116
- hcr = hcr_el2 & HCR_IMO;
117
- scr = false;
118
- break;
119
- default:
120
- g_assert_not_reached();
121
- }
122
-
123
- if ((scr || hcr) && !secure) {
124
- unmasked = 1;
125
- }
126
- }
127
- }
128
-
129
- /* The PSTATE bits only mask the interrupt if we have not overriden the
130
- * ability above.
131
- */
132
- return unmasked || pstate_unmasked;
133
-}
134
-
135
#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
136
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
137
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU
138
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
33
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
139
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
140
--- a/target/arm/cpu.c
35
--- a/target/arm/cpu.c
141
+++ b/target/arm/cpu.c
36
+++ b/target/arm/cpu.c
142
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
37
@@ -XXX,XX +XXX,XX @@ unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
143
arm_rebuild_hflags(env);
38
NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
144
}
39
}
145
40
146
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
41
+static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
147
+ unsigned int target_el)
148
+{
42
+{
149
+ CPUARMState *env = cs->env_ptr;
43
+ CPUARMState *env = &cpu->env;
150
+ unsigned int cur_el = arm_current_el(env);
44
+ bool no_aa32 = false;
151
+ bool secure = arm_is_secure(env);
152
+ bool pstate_unmasked;
153
+ int8_t unmasked = 0;
154
+ uint64_t hcr_el2;
155
+
45
+
156
+ /*
46
+ /*
157
+ * Don't take exceptions if they target a lower EL.
47
+ * Some features automatically imply others: set the feature
158
+ * This check should catch any exceptions that would not be taken
48
+ * bits explicitly for these cases.
159
+ * but left pending.
160
+ */
49
+ */
161
+ if (cur_el > target_el) {
50
+
162
+ return false;
51
+ if (arm_feature(env, ARM_FEATURE_M)) {
163
+ }
52
+ set_feature(env, ARM_FEATURE_PMSA);
164
+
53
+ }
165
+ hcr_el2 = arm_hcr_el2_eff(env);
54
+
166
+
55
+ if (arm_feature(env, ARM_FEATURE_V8)) {
167
+ switch (excp_idx) {
56
+ if (arm_feature(env, ARM_FEATURE_M)) {
168
+ case EXCP_FIQ:
57
+ set_feature(env, ARM_FEATURE_V7);
169
+ pstate_unmasked = !(env->daif & PSTATE_F);
58
+ } else {
170
+ break;
59
+ set_feature(env, ARM_FEATURE_V7VE);
171
+
172
+ case EXCP_IRQ:
173
+ pstate_unmasked = !(env->daif & PSTATE_I);
174
+ break;
175
+
176
+ case EXCP_VFIQ:
177
+ if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
178
+ /* VFIQs are only taken when hypervized and non-secure. */
179
+ return false;
180
+ }
60
+ }
181
+ return !(env->daif & PSTATE_F);
61
+ }
182
+ case EXCP_VIRQ:
62
+
183
+ if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
63
+ /*
184
+ /* VIRQs are only taken when hypervized and non-secure. */
64
+ * There exist AArch64 cpus without AArch32 support. When KVM
185
+ return false;
65
+ * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
66
+ * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
67
+ * As a general principle, we also do not make ID register
68
+ * consistency checks anywhere unless using TCG, because only
69
+ * for TCG would a consistency-check failure be a QEMU bug.
70
+ */
71
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
72
+ no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
73
+ }
74
+
75
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
76
+ /*
77
+ * v7 Virtualization Extensions. In real hardware this implies
78
+ * EL2 and also the presence of the Security Extensions.
79
+ * For QEMU, for backwards-compatibility we implement some
80
+ * CPUs or CPU configs which have no actual EL2 or EL3 but do
81
+ * include the various other features that V7VE implies.
82
+ * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
83
+ * Security Extensions is ARM_FEATURE_EL3.
84
+ */
85
+ assert(!tcg_enabled() || no_aa32 ||
86
+ cpu_isar_feature(aa32_arm_div, cpu));
87
+ set_feature(env, ARM_FEATURE_LPAE);
88
+ set_feature(env, ARM_FEATURE_V7);
89
+ }
90
+ if (arm_feature(env, ARM_FEATURE_V7)) {
91
+ set_feature(env, ARM_FEATURE_VAPA);
92
+ set_feature(env, ARM_FEATURE_THUMB2);
93
+ set_feature(env, ARM_FEATURE_MPIDR);
94
+ if (!arm_feature(env, ARM_FEATURE_M)) {
95
+ set_feature(env, ARM_FEATURE_V6K);
96
+ } else {
97
+ set_feature(env, ARM_FEATURE_V6);
186
+ }
98
+ }
187
+ return !(env->daif & PSTATE_I);
99
+
188
+ default:
100
+ /*
189
+ g_assert_not_reached();
101
+ * Always define VBAR for V7 CPUs even if it doesn't exist in
190
+ }
102
+ * non-EL3 configs. This is needed by some legacy boards.
191
+
103
+ */
104
+ set_feature(env, ARM_FEATURE_VBAR);
105
+ }
106
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
107
+ set_feature(env, ARM_FEATURE_V6);
108
+ set_feature(env, ARM_FEATURE_MVFR);
109
+ }
110
+ if (arm_feature(env, ARM_FEATURE_V6)) {
111
+ set_feature(env, ARM_FEATURE_V5);
112
+ if (!arm_feature(env, ARM_FEATURE_M)) {
113
+ assert(!tcg_enabled() || no_aa32 ||
114
+ cpu_isar_feature(aa32_jazelle, cpu));
115
+ set_feature(env, ARM_FEATURE_AUXCR);
116
+ }
117
+ }
118
+ if (arm_feature(env, ARM_FEATURE_V5)) {
119
+ set_feature(env, ARM_FEATURE_V4T);
120
+ }
121
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
122
+ set_feature(env, ARM_FEATURE_V7MP);
123
+ }
124
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
125
+ set_feature(env, ARM_FEATURE_CBAR);
126
+ }
127
+ if (arm_feature(env, ARM_FEATURE_THUMB2) &&
128
+ !arm_feature(env, ARM_FEATURE_M)) {
129
+ set_feature(env, ARM_FEATURE_THUMB_DSP);
130
+ }
131
+}
132
+
133
void arm_cpu_post_init(Object *obj)
134
{
135
ARMCPU *cpu = ARM_CPU(obj);
136
137
- /* M profile implies PMSA. We have to do this here rather than
138
- * in realize with the other feature-implication checks because
139
- * we look at the PMSA bit to see if we should add some properties.
192
+ /*
140
+ /*
193
+ * Use the target EL, current execution state and SCR/HCR settings to
141
+ * Some features imply others. Figure this out now, because we
194
+ * determine whether the corresponding CPSR bit is used to mask the
142
+ * are going to look at the feature bits in deciding which
195
+ * interrupt.
143
+ * properties to add.
196
+ */
144
*/
197
+ if ((target_el > cur_el) && (target_el != 1)) {
145
- if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
198
+ /* Exceptions targeting a higher EL may not be maskable */
146
- set_feature(&cpu->env, ARM_FEATURE_PMSA);
199
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
147
- }
200
+ /*
148
+ arm_cpu_propagate_feature_implications(cpu);
201
+ * 64-bit masking rules are simple: exceptions to EL3
149
202
+ * can't be masked, and exceptions to EL2 can only be
150
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
203
+ * masked from Secure state. The HCR and SCR settings
151
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
204
+ * don't affect the masking logic, only the interrupt routing.
152
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
205
+ */
153
CPUARMState *env = &cpu->env;
206
+ if (target_el == 3 || !secure) {
154
int pagebits;
207
+ unmasked = 1;
155
Error *local_err = NULL;
208
+ }
156
- bool no_aa32 = false;
209
+ } else {
157
210
+ /*
158
/* Use pc-relative instructions in system-mode */
211
+ * The old 32-bit-only environment has a more complicated
159
#ifndef CONFIG_USER_ONLY
212
+ * masking setup. HCR and SCR bits not only affect interrupt
160
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
213
+ * routing but also change the behaviour of masking.
161
cpu->isar.id_isar3 = u;
214
+ */
162
}
215
+ bool hcr, scr;
163
216
+
164
- /* Some features automatically imply others: */
217
+ switch (excp_idx) {
165
- if (arm_feature(env, ARM_FEATURE_V8)) {
218
+ case EXCP_FIQ:
166
- if (arm_feature(env, ARM_FEATURE_M)) {
219
+ /*
167
- set_feature(env, ARM_FEATURE_V7);
220
+ * If FIQs are routed to EL3 or EL2 then there are cases where
168
- } else {
221
+ * we override the CPSR.F in determining if the exception is
169
- set_feature(env, ARM_FEATURE_V7VE);
222
+ * masked or not. If neither of these are set then we fall back
170
- }
223
+ * to the CPSR.F setting otherwise we further assess the state
171
- }
224
+ * below.
172
-
225
+ */
173
- /*
226
+ hcr = hcr_el2 & HCR_FMO;
174
- * There exist AArch64 cpus without AArch32 support. When KVM
227
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
175
- * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
228
+
176
- * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
229
+ /*
177
- * As a general principle, we also do not make ID register
230
+ * When EL3 is 32-bit, the SCR.FW bit controls whether the
178
- * consistency checks anywhere unless using TCG, because only
231
+ * CPSR.F bit masks FIQ interrupts when taken in non-secure
179
- * for TCG would a consistency-check failure be a QEMU bug.
232
+ * state. If SCR.FW is set then FIQs can be masked by CPSR.F
180
- */
233
+ * when non-secure but only when FIQs are only routed to EL3.
181
- if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
234
+ */
182
- no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
235
+ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
183
- }
236
+ break;
184
-
237
+ case EXCP_IRQ:
185
- if (arm_feature(env, ARM_FEATURE_V7VE)) {
238
+ /*
186
- /* v7 Virtualization Extensions. In real hardware this implies
239
+ * When EL3 execution state is 32-bit, if HCR.IMO is set then
187
- * EL2 and also the presence of the Security Extensions.
240
+ * we may override the CPSR.I masking when in non-secure state.
188
- * For QEMU, for backwards-compatibility we implement some
241
+ * The SCR.IRQ setting has already been taken into consideration
189
- * CPUs or CPU configs which have no actual EL2 or EL3 but do
242
+ * when setting the target EL, so it does not have a further
190
- * include the various other features that V7VE implies.
243
+ * affect here.
191
- * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
244
+ */
192
- * Security Extensions is ARM_FEATURE_EL3.
245
+ hcr = hcr_el2 & HCR_IMO;
193
- */
246
+ scr = false;
194
- assert(!tcg_enabled() || no_aa32 ||
247
+ break;
195
- cpu_isar_feature(aa32_arm_div, cpu));
248
+ default:
196
- set_feature(env, ARM_FEATURE_LPAE);
249
+ g_assert_not_reached();
197
- set_feature(env, ARM_FEATURE_V7);
250
+ }
198
- }
251
+
199
- if (arm_feature(env, ARM_FEATURE_V7)) {
252
+ if ((scr || hcr) && !secure) {
200
- set_feature(env, ARM_FEATURE_VAPA);
253
+ unmasked = 1;
201
- set_feature(env, ARM_FEATURE_THUMB2);
254
+ }
202
- set_feature(env, ARM_FEATURE_MPIDR);
255
+ }
203
- if (!arm_feature(env, ARM_FEATURE_M)) {
256
+ }
204
- set_feature(env, ARM_FEATURE_V6K);
257
+
205
- } else {
258
+ /*
206
- set_feature(env, ARM_FEATURE_V6);
259
+ * The PSTATE bits only mask the interrupt if we have not overriden the
207
- }
260
+ * ability above.
208
-
261
+ */
209
- /* Always define VBAR for V7 CPUs even if it doesn't exist in
262
+ return unmasked || pstate_unmasked;
210
- * non-EL3 configs. This is needed by some legacy boards.
263
+}
211
- */
264
+
212
- set_feature(env, ARM_FEATURE_VBAR);
265
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
213
- }
266
{
214
- if (arm_feature(env, ARM_FEATURE_V6K)) {
267
CPUClass *cc = CPU_GET_CLASS(cs);
215
- set_feature(env, ARM_FEATURE_V6);
216
- set_feature(env, ARM_FEATURE_MVFR);
217
- }
218
- if (arm_feature(env, ARM_FEATURE_V6)) {
219
- set_feature(env, ARM_FEATURE_V5);
220
- if (!arm_feature(env, ARM_FEATURE_M)) {
221
- assert(!tcg_enabled() || no_aa32 ||
222
- cpu_isar_feature(aa32_jazelle, cpu));
223
- set_feature(env, ARM_FEATURE_AUXCR);
224
- }
225
- }
226
- if (arm_feature(env, ARM_FEATURE_V5)) {
227
- set_feature(env, ARM_FEATURE_V4T);
228
- }
229
- if (arm_feature(env, ARM_FEATURE_LPAE)) {
230
- set_feature(env, ARM_FEATURE_V7MP);
231
- }
232
- if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
233
- set_feature(env, ARM_FEATURE_CBAR);
234
- }
235
- if (arm_feature(env, ARM_FEATURE_THUMB2) &&
236
- !arm_feature(env, ARM_FEATURE_M)) {
237
- set_feature(env, ARM_FEATURE_THUMB_DSP);
238
- }
239
240
/*
241
* We rely on no XScale CPU having VFP so we can use the same bits in the
268
--
242
--
269
2.20.1
243
2.34.1
270
271
From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-qom.h | 1 +
 target/arm/cpu.h | 11 +++++----
 target/arm/cpu.c | 3 ++-
 target/arm/helper.c | 56 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+), 6 deletions(-)

M-profile CPUs generally allow configuration of the number of MPU
regions that they have. We don't currently model this, so our
implementations of some of the board models provide CPUs with the
wrong number of regions. RTOSes like Zephyr that hardcode the
expected number of regions may therefore not run on the model if they
are set up to run on real hardware.

Add properties mpu-ns-regions and mpu-s-regions to the ARMV7M object,
matching the ability of hardware to configure the number of Secure
and NonSecure regions separately. Our actual CPU implementation
doesn't currently support that, and it happens that none of the MPS
boards we model set the number of regions differently for Secure vs
NonSecure, so we provide an interface to the boards and SoCs that
won't need to change if we ever do add that functionality in future,
but make it an error to configure the two properties to different
values.

(The property name on the CPU is the somewhat misnamed-for-M-profile
"pmsav7-dregion", so we don't follow that naming convention for
the properties here. The TRM doesn't say what the CPU configuration
variable names are, so we pick something, and follow the lowercase
convention we already have for properties here.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230724174335.2150499-3-peter.maydell@linaro.org
---
 include/hw/arm/armv7m.h | 8 ++++++++
 hw/arm/armv7m.c | 21 +++++++++++++++++++++
 2 files changed, 29 insertions(+)
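As a usage sketch (hypothetical board code, not part of this patch: the
CPU type and region counts are illustrative, and wiring of the memory
link, clocks and NVIC is omitted), an SoC model can then configure the
region count when it creates the armv7-m container:

    DeviceState *armv7m = qdev_new(TYPE_ARMV7M);

    qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m33"));
    qdev_prop_set_uint32(armv7m, "mpu-ns-regions", 8);
    /* must currently equal mpu-ns-regions when the Security Extension is present */
    qdev_prop_set_uint32(armv7m, "mpu-s-regions", 8);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(armv7m), &error_fatal);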
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
32
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
16
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu-qom.h
34
--- a/include/hw/arm/armv7m.h
18
+++ b/target/arm/cpu-qom.h
35
+++ b/include/hw/arm/armv7m.h
19
@@ -XXX,XX +XXX,XX @@ void arm_gt_ptimer_cb(void *opaque);
36
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M)
20
void arm_gt_vtimer_cb(void *opaque);
37
* + Property "vfp": enable VFP (forwarded to CPU object)
21
void arm_gt_htimer_cb(void *opaque);
38
* + Property "dsp": enable DSP (forwarded to CPU object)
22
void arm_gt_stimer_cb(void *opaque);
39
* + Property "enable-bitband": expose bitbanded IO
23
+void arm_gt_hvtimer_cb(void *opaque);
40
+ * + Property "mpu-ns-regions": number of Non-Secure MPU regions (forwarded
24
41
+ * to CPU object pmsav7-dregion property; default is whatever the default
25
#define ARM_AFF0_SHIFT 0
42
+ * for the CPU is)
26
#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
43
+ * + Property "mpu-s-regions": number of Secure MPU regions (default is
27
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
44
+ * whatever the default for the CPU is; must currently be set to the same
45
+ * value as mpu-ns-regions if the CPU implements the Security Extension)
46
* + Clock input "refclk" is the external reference clock for the systick timers
47
* + Clock input "cpuclk" is the main CPU clock
48
*/
49
@@ -XXX,XX +XXX,XX @@ struct ARMv7MState {
50
Object *idau;
51
uint32_t init_svtor;
52
uint32_t init_nsvtor;
53
+ uint32_t mpu_ns_regions;
54
+ uint32_t mpu_s_regions;
55
bool enable_bitband;
56
bool start_powered_off;
57
bool vfp;
58
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
28
index XXXXXXX..XXXXXXX 100644
59
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/cpu.h
60
--- a/hw/arm/armv7m.c
30
+++ b/target/arm/cpu.h
61
+++ b/hw/arm/armv7m.c
31
@@ -XXX,XX +XXX,XX @@ typedef struct ARMGenericTimer {
62
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
32
uint64_t ctl; /* Timer Control register */
33
} ARMGenericTimer;
34
35
-#define GTIMER_PHYS 0
36
-#define GTIMER_VIRT 1
37
-#define GTIMER_HYP 2
38
-#define GTIMER_SEC 3
39
-#define NUM_GTIMERS 4
40
+#define GTIMER_PHYS 0
41
+#define GTIMER_VIRT 1
42
+#define GTIMER_HYP 2
43
+#define GTIMER_SEC 3
44
+#define GTIMER_HYPVIRT 4
45
+#define NUM_GTIMERS 5
46
47
typedef struct {
48
uint64_t raw_tcr;
49
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/cpu.c
52
+++ b/target/arm/cpu.c
53
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
54
}
63
}
55
}
64
}
56
65
57
-
66
+ /*
58
{
67
+ * Real M-profile hardware can be configured with a different number of
59
uint64_t scale;
68
+ * MPU regions for Secure vs NonSecure. QEMU's CPU implementation doesn't
60
69
+ * support that yet, so catch attempts to select that.
61
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
70
+ */
62
arm_gt_htimer_cb, cpu);
71
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
63
cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
72
+ s->mpu_ns_regions != s->mpu_s_regions) {
64
arm_gt_stimer_cb, cpu);
73
+ error_setg(errp,
65
+ cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
74
+ "mpu-ns-regions and mpu-s-regions properties must have the same value");
66
+ arm_gt_hvtimer_cb, cpu);
75
+ return;
67
}
76
+ }
68
#endif
77
+ if (s->mpu_ns_regions != UINT_MAX &&
69
78
+ object_property_find(OBJECT(s->cpu), "pmsav7-dregion")) {
70
diff --git a/target/arm/helper.c b/target/arm/helper.c
79
+ if (!object_property_set_uint(OBJECT(s->cpu), "pmsav7-dregion",
71
index XXXXXXX..XXXXXXX 100644
80
+ s->mpu_ns_regions, errp)) {
72
--- a/target/arm/helper.c
81
+ return;
73
+++ b/target/arm/helper.c
82
+ }
74
@@ -XXX,XX +XXX,XX @@ static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
83
+ }
75
76
switch (timeridx) {
77
case GTIMER_VIRT:
78
+ case GTIMER_HYPVIRT:
79
offset = gt_virt_cnt_offset(env);
80
break;
81
}
82
@@ -XXX,XX +XXX,XX @@ static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
83
84
switch (timeridx) {
85
case GTIMER_VIRT:
86
+ case GTIMER_HYPVIRT:
87
offset = gt_virt_cnt_offset(env);
88
break;
89
}
90
@@ -XXX,XX +XXX,XX @@ static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
91
gt_ctl_write(env, ri, GTIMER_SEC, value);
92
}
93
94
+static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
95
+{
96
+ gt_timer_reset(env, ri, GTIMER_HYPVIRT);
97
+}
98
+
84
+
99
+static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
85
/*
100
+ uint64_t value)
86
* Tell the CPU where the NVIC is; it will fail realize if it doesn't
101
+{
87
* have one. Similarly, tell the NVIC where its CPU is.
102
+ gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
88
@@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = {
103
+}
89
false),
104
+
90
DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true),
105
+static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
91
DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true),
106
+{
92
+ DEFINE_PROP_UINT32("mpu-ns-regions", ARMv7MState, mpu_ns_regions, UINT_MAX),
107
+ return gt_tval_read(env, ri, GTIMER_HYPVIRT);
93
+ DEFINE_PROP_UINT32("mpu-s-regions", ARMv7MState, mpu_s_regions, UINT_MAX),
108
+}
94
DEFINE_PROP_END_OF_LIST(),
109
+
110
+static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
111
+ uint64_t value)
112
+{
113
+ gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
114
+}
115
+
116
+static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
117
+ uint64_t value)
118
+{
119
+ gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
120
+}
121
+
122
void arm_gt_ptimer_cb(void *opaque)
123
{
124
ARMCPU *cpu = opaque;
125
@@ -XXX,XX +XXX,XX @@ void arm_gt_stimer_cb(void *opaque)
126
gt_recalc_timer(cpu, GTIMER_SEC);
127
}
128
129
+void arm_gt_hvtimer_cb(void *opaque)
130
+{
131
+ ARMCPU *cpu = opaque;
132
+
133
+ gt_recalc_timer(cpu, GTIMER_HYPVIRT);
134
+}
135
+
136
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
137
{
138
ARMCPU *cpu = env_archcpu(env);
139
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vhe_reginfo[] = {
140
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
141
.access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
142
.fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
143
+#ifndef CONFIG_USER_ONLY
144
+ { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
145
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
146
+ .fieldoffset =
147
+ offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
148
+ .type = ARM_CP_IO, .access = PL2_RW,
149
+ .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
150
+ { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
151
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
152
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
153
+ .resetfn = gt_hv_timer_reset,
154
+ .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
155
+ { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
156
+ .type = ARM_CP_IO,
157
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
158
+ .access = PL2_RW,
159
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
160
+ .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
161
+#endif
162
REGINFO_SENTINEL
163
};
95
};
164
96
165
--
97
--
166
2.20.1
98
2.34.1
167
99
168
100
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 102 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 81 insertions(+), 21 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper.c
15
+++ b/target/arm/helper.c
16
@@ -XXX,XX +XXX,XX @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
17
* Writable only at the highest implemented exception level.
18
*/
19
int el = arm_current_el(env);
20
+ uint64_t hcr;
21
+ uint32_t cntkctl;
22
23
switch (el) {
24
case 0:
25
- if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
26
+ hcr = arm_hcr_el2_eff(env);
27
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
28
+ cntkctl = env->cp15.cnthctl_el2;
29
+ } else {
30
+ cntkctl = env->cp15.c14_cntkctl;
31
+ }
32
+ if (!extract32(cntkctl, 0, 2)) {
33
return CP_ACCESS_TRAP;
34
}
35
break;
36
@@ -XXX,XX +XXX,XX @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
37
{
38
unsigned int cur_el = arm_current_el(env);
39
bool secure = arm_is_secure(env);
40
+ uint64_t hcr = arm_hcr_el2_eff(env);
41
42
- /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
43
- if (cur_el == 0 &&
44
- !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
45
- return CP_ACCESS_TRAP;
46
- }
47
+ switch (cur_el) {
48
+ case 0:
49
+ /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
50
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
51
+ return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
52
+ ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
53
+ }
54
55
- if (arm_feature(env, ARM_FEATURE_EL2) &&
56
- timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
57
- !extract32(env->cp15.cnthctl_el2, 0, 1)) {
58
- return CP_ACCESS_TRAP_EL2;
59
+ /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
60
+ if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
61
+ return CP_ACCESS_TRAP;
62
+ }
63
+
64
+ /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
65
+ if (hcr & HCR_E2H) {
66
+ if (timeridx == GTIMER_PHYS &&
67
+ !extract32(env->cp15.cnthctl_el2, 10, 1)) {
68
+ return CP_ACCESS_TRAP_EL2;
69
+ }
70
+ } else {
71
+ /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
72
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
73
+ timeridx == GTIMER_PHYS && !secure &&
74
+ !extract32(env->cp15.cnthctl_el2, 1, 1)) {
75
+ return CP_ACCESS_TRAP_EL2;
76
+ }
77
+ }
78
+ break;
79
+
80
+ case 1:
81
+ /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
82
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
83
+ timeridx == GTIMER_PHYS && !secure &&
84
+ (hcr & HCR_E2H
85
+ ? !extract32(env->cp15.cnthctl_el2, 10, 1)
86
+ : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
87
+ return CP_ACCESS_TRAP_EL2;
88
+ }
89
+ break;
90
}
91
return CP_ACCESS_OK;
92
}
93
@@ -XXX,XX +XXX,XX @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
94
{
95
unsigned int cur_el = arm_current_el(env);
96
bool secure = arm_is_secure(env);
97
+ uint64_t hcr = arm_hcr_el2_eff(env);
98
99
- /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
100
- * EL0[PV]TEN is zero.
101
- */
102
- if (cur_el == 0 &&
103
- !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
104
- return CP_ACCESS_TRAP;
105
- }
106
+ switch (cur_el) {
107
+ case 0:
108
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
109
+ /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
110
+ return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
111
+ ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
112
+ }
113
114
- if (arm_feature(env, ARM_FEATURE_EL2) &&
115
- timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
116
- !extract32(env->cp15.cnthctl_el2, 1, 1)) {
117
- return CP_ACCESS_TRAP_EL2;
118
+ /*
119
+ * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
120
+ * EL0 if EL0[PV]TEN is zero.
121
+ */
122
+ if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
123
+ return CP_ACCESS_TRAP;
124
+ }
125
+ /* fall through */
126
+
127
+ case 1:
128
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
129
+ timeridx == GTIMER_PHYS && !secure) {
130
+ if (hcr & HCR_E2H) {
131
+ /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
132
+ if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
133
+ return CP_ACCESS_TRAP_EL2;
134
+ }
135
+ } else {
136
+ /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
137
+ if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
138
+ return CP_ACCESS_TRAP_EL2;
139
+ }
140
+ }
141
+ }
142
+ break;
143
}
144
return CP_ACCESS_OK;
145
}
146
--
147
2.20.1
148
149
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

For ARMv8.1, op1 == 5 is reserved for EL2 aliases of
EL1 and EL0 registers.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
         mask = PL0_RW;
         break;
     case 4:
+    case 5:
         /* min_EL EL2 */
         mask = PL2_RW;
         break;
-    case 5:
-        /* unallocated encoding, so not possible */
-        assert(false);
-        break;
     case 6:
         /* min_EL EL3 */
         mask = PL3_RW;
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Since we only support a single ASID, flush the tlb when it changes.

Note that TCR_EL2, like TCR_EL1, has the A1 bit that chooses between
the two TTBR* registers for the location of the ASID.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-31-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper.c
20
+++ b/target/arm/helper.c
21
@@ -XXX,XX +XXX,XX @@ static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
22
tcr->base_mask = 0xffffc000u;
23
}
24
25
-static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
26
+static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
27
uint64_t value)
28
{
29
ARMCPU *cpu = env_archcpu(env);
30
@@ -XXX,XX +XXX,XX @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
31
static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
32
uint64_t value)
33
{
34
- /* TODO: There are ASID fields in here with HCR_EL2.E2H */
35
+ /*
36
+ * If we are running with E2&0 regime, then an ASID is active.
37
+ * Flush if that might be changing. Note we're not checking
38
+ * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
39
+ * holds the active ASID, only checking the field that might.
40
+ */
41
+ if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
42
+ (arm_hcr_el2_eff(env) & HCR_E2H)) {
43
+ tlb_flush_by_mmuidx(env_cpu(env),
44
+ ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_0);
45
+ }
46
raw_write(env, ri, value);
47
}
48
49
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
50
offsetof(CPUARMState, cp15.ttbr1_ns) } },
51
{ .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
52
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
53
- .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
54
+ .access = PL1_RW, .writefn = vmsa_tcr_el12_write,
55
.resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
56
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
57
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
58
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
59
.resetvalue = 0 },
60
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
61
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
62
- .access = PL2_RW,
63
- /* no .writefn needed as this can't cause an ASID change;
64
- * no .raw_writefn or .resetfn needed as we never use mask/base_mask
65
- */
66
+ .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
67
+ /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
68
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
69
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
70
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
71
--
72
2.20.1
73
74
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper.c
15
+++ b/target/arm/helper.c
16
@@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
17
18
static int vae1_tlbmask(CPUARMState *env)
19
{
20
+ /* Since we exclude secure first, we may read HCR_EL2 directly. */
21
if (arm_is_secure_below_el3(env)) {
22
return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
23
+ } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
24
+ == (HCR_E2H | HCR_TGE)) {
25
+ return ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_0;
26
} else {
27
return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
28
}
29
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
30
}
31
}
32
33
+static int e2_tlbmask(CPUARMState *env)
34
+{
35
+ /* TODO: ARMv8.4-SecEL2 */
36
+ return ARMMMUIdxBit_E20_0 | ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E2;
37
+}
38
+
39
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
40
uint64_t value)
41
{
42
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
43
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
44
uint64_t value)
45
{
46
- ARMCPU *cpu = env_archcpu(env);
47
- CPUState *cs = CPU(cpu);
48
+ CPUState *cs = env_cpu(env);
49
+ int mask = e2_tlbmask(env);
50
51
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
52
+ tlb_flush_by_mmuidx(cs, mask);
53
}
54
55
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
56
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
57
uint64_t value)
58
{
59
CPUState *cs = env_cpu(env);
60
+ int mask = e2_tlbmask(env);
61
62
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
63
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
64
}
65
66
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
67
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
68
* Currently handles both VAE2 and VALE2, since we don't support
69
* flush-last-level-only.
70
*/
71
- ARMCPU *cpu = env_archcpu(env);
72
- CPUState *cs = CPU(cpu);
73
+ CPUState *cs = env_cpu(env);
74
+ int mask = e2_tlbmask(env);
75
uint64_t pageaddr = sextract64(value << 12, 0, 56);
76
77
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
78
+ tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
79
}
80
81
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
82
--
83
2.20.1
84
85
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

When TGE+E2H are both set, CPACR_EL1 is ignored.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 53 ++++++++++++++++++++++++---------------------
 1 file changed, 28 insertions(+), 25 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
19
int sve_exception_el(CPUARMState *env, int el)
20
{
21
#ifndef CONFIG_USER_ONLY
22
- if (el <= 1) {
23
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
24
+
25
+ if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
26
bool disabled = false;
27
28
/* The CPACR.ZEN controls traps to EL1:
29
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
30
}
31
if (disabled) {
32
/* route_to_el2 */
33
- return (arm_feature(env, ARM_FEATURE_EL2)
34
- && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
35
+ return hcr_el2 & HCR_TGE ? 2 : 1;
36
}
37
38
/* Check CPACR.FPEN. */
39
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
40
int fp_exception_el(CPUARMState *env, int cur_el)
41
{
42
#ifndef CONFIG_USER_ONLY
43
- int fpen;
44
-
45
/* CPACR and the CPTR registers don't exist before v6, so FP is
46
* always accessible
47
*/
48
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
49
* 0, 2 : trap EL0 and EL1/PL1 accesses
50
* 1 : trap only EL0 accesses
51
* 3 : trap no accesses
52
+ * This register is ignored if E2H+TGE are both set.
53
*/
54
- fpen = extract32(env->cp15.cpacr_el1, 20, 2);
55
- switch (fpen) {
56
- case 0:
57
- case 2:
58
- if (cur_el == 0 || cur_el == 1) {
59
- /* Trap to PL1, which might be EL1 or EL3 */
60
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
61
+ if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
62
+ int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
63
+
64
+ switch (fpen) {
65
+ case 0:
66
+ case 2:
67
+ if (cur_el == 0 || cur_el == 1) {
68
+ /* Trap to PL1, which might be EL1 or EL3 */
69
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
70
+ return 3;
71
+ }
72
+ return 1;
73
+ }
74
+ if (cur_el == 3 && !is_a64(env)) {
75
+ /* Secure PL1 running at EL3 */
76
return 3;
77
}
78
- return 1;
79
+ break;
80
+ case 1:
81
+ if (cur_el == 0) {
82
+ return 1;
83
+ }
84
+ break;
85
+ case 3:
86
+ break;
87
}
88
- if (cur_el == 3 && !is_a64(env)) {
89
- /* Secure PL1 running at EL3 */
90
- return 3;
91
- }
92
- break;
93
- case 1:
94
- if (cur_el == 0) {
95
- return 1;
96
- }
97
- break;
98
- case 3:
99
- break;
100
}
101
102
/*
103
--
104
2.20.1
105
106
Deleted patch
From: Rene Stange <rsta2@o2online.de>

TD (two dimensions) DMA mode did not work, because the xlen variable
was not re-initialized before each additional ylen pass through the
loop in bcm2835_dma_update(). Fix it.

Signed-off-by: Rene Stange <rsta2@o2online.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/dma/bcm2835_dma.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/dma/bcm2835_dma.c
+++ b/hw/dma/bcm2835_dma.c
@@ -XXX,XX +XXX,XX @@
 static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
 {
     BCM2835DMAChan *ch = &s->chan[c];
-    uint32_t data, xlen, ylen;
+    uint32_t data, xlen, xlen_td, ylen;
     int16_t dst_stride, src_stride;

     if (!(s->enable & (1 << c))) {
@@ -XXX,XX +XXX,XX @@ static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
         dst_stride = 0;
         src_stride = 0;
     }
+    xlen_td = xlen;

     while (ylen != 0) {
         /* Normal transfer mode */
@@ -XXX,XX +XXX,XX @@ static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
         if (--ylen != 0) {
             ch->source_ad += src_stride;
             ch->dest_ad += dst_stride;
+            xlen = xlen_td;
         }
     }
     ch->cs |= BCM2708_DMA_END;
--
2.20.1

From: Pan Nengyuan <pannengyuan@huawei.com>

There is a memory leak when we call 'device_list_properties' with
typename = armv7m_systick. It's easy to reproduce as follows:

virsh qemu-monitor-command vm1 --pretty '{"execute": "device-list-properties", "arguments": {"typename": "armv7m_systick"}}'

This patch delays timer_new to fix this memleak.

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Pan Nengyuan <pannengyuan@huawei.com>
Message-id: 20200205070659.22488-2-pannengyuan@huawei.com
Cc: qemu-arm@nongnu.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/timer/armv7m_systick.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c

The IoTKit, SSE200 and SSE300 all default to 8 MPU regions. The
MPS2/MPS3 FPGA images don't override these except in the case of
AN547, which uses 16 MPU regions.

Define properties on the ARMSSE object for the MPU regions (using the
same names as the documented RTL configuration settings, and
following the pattern we already have for this device of using
all-caps names as the RTL does), and set them in the board code.

We don't actually need to override the default except on AN547,
but it's simpler code to have the board code set them always
rather than tracking which board subtypes want to set them to
a non-default value separately from what that value is.

The overall effect is that for mps2-an505, mps2-an521 and mps3-an524
we now correctly use 8 MPU regions, while mps3-an547 stays at its
current 16 regions.

It's possible some guest code wrongly depended on the previous
incorrectly modeled number of memory regions. (Such guest code
should ideally check the number of regions via the MPU_TYPE
register.) The old behaviour can be obtained with additional
-global arguments to QEMU:

For mps2-an521 and mps2-an524:
-global sse-200.CPU0_MPU_NS=16 -global sse-200.CPU0_MPU_S=16 -global sse-200.CPU1_MPU_NS=16 -global sse-200.CPU1_MPU_S=16

For mps2-an505:
-global sse-200.CPU0_MPU_NS=16 -global sse-200.CPU0_MPU_S=16

NB that the way the implementation allows this use of -global
is slightly fragile: if the board code explicitly sets the
properties on the sse-200 object, this overrides the -global
command line option. So we rely on:
 - the boards that need fixing all happen to use the SSE defaults
 - we can write the board code to only set the property if it
   is different from the default, rather than having all boards
   explicitly set the property
 - the board that does need to use a non-default value happens
   to need to set it to the same value (16) we previously used
This works, but there are some kinds of refactoring of the
mps2-tz.c code that would break the support for -global here.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1772
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230724174335.2150499-4-peter.maydell@linaro.org
---
 include/hw/arm/armsse.h | 5 +++++
 hw/arm/armsse.c | 16 ++++++++++++++++
 hw/arm/mps2-tz.c | 29 +++++++++++++++++++++++++++++
 3 files changed, 50 insertions(+)
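For reference, guest code that wants to avoid hardcoding the region count
can read it back from the MPU_TYPE register mentioned above; a minimal
Cortex-M snippet (illustrative only, not part of this patch):

    #include <stdint.h>

    /* MPU_TYPE is at 0xE000ED90 in the System Control Space (ARMv7-M/ARMv8-M). */
    #define MPU_TYPE (*(volatile uint32_t *)0xE000ED90UL)

    static inline unsigned mpu_num_regions(void)
    {
        return (MPU_TYPE >> 8) & 0xFF; /* DREGION field, bits [15:8] */
    }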
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
20
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/timer/armv7m_systick.c
57
--- a/include/hw/arm/armsse.h
22
+++ b/hw/timer/armv7m_systick.c
58
+++ b/include/hw/arm/armsse.h
23
@@ -XXX,XX +XXX,XX @@ static void systick_instance_init(Object *obj)
59
@@ -XXX,XX +XXX,XX @@
24
memory_region_init_io(&s->iomem, obj, &systick_ops, s, "systick", 0xe0);
60
* (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
25
sysbus_init_mmio(sbd, &s->iomem);
61
* SSE-200 both are present; CPU0 in an SSE-200 has neither.
26
sysbus_init_irq(sbd, &s->irq);
62
* Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
27
+}
63
+ * + QOM properties "CPU0_MPU_NS", "CPU0_MPU_S", "CPU1_MPU_NS" and "CPU1_MPU_S"
64
+ * which set the number of MPU regions on the CPUs. If there is only one
65
+ * CPU the CPU1 properties are not present.
66
* + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
67
* which are wired to its NVIC lines 32 .. n+32
68
* + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
69
@@ -XXX,XX +XXX,XX @@ struct ARMSSE {
70
uint32_t exp_numirq;
71
uint32_t sram_addr_width;
72
uint32_t init_svtor;
73
+ uint32_t cpu_mpu_ns[SSE_MAX_CPUS];
74
+ uint32_t cpu_mpu_s[SSE_MAX_CPUS];
75
bool cpu_fpu[SSE_MAX_CPUS];
76
bool cpu_dsp[SSE_MAX_CPUS];
77
};
78
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
79
index XXXXXXX..XXXXXXX 100644
80
--- a/hw/arm/armsse.c
81
+++ b/hw/arm/armsse.c
82
@@ -XXX,XX +XXX,XX @@ static Property iotkit_properties[] = {
83
DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
84
DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
85
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
86
+ DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8),
87
+ DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
88
DEFINE_PROP_END_OF_LIST()
89
};
90
91
@@ -XXX,XX +XXX,XX @@ static Property sse200_properties[] = {
92
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false),
93
DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true),
94
DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true),
95
+ DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8),
96
+ DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
97
+ DEFINE_PROP_UINT32("CPU1_MPU_NS", ARMSSE, cpu_mpu_ns[1], 8),
98
+ DEFINE_PROP_UINT32("CPU1_MPU_S", ARMSSE, cpu_mpu_s[1], 8),
99
DEFINE_PROP_END_OF_LIST()
100
};
101
102
@@ -XXX,XX +XXX,XX @@ static Property sse300_properties[] = {
103
DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
104
DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
105
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
106
+ DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8),
107
+ DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
108
DEFINE_PROP_END_OF_LIST()
109
};
110
111
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
112
return;
113
}
114
}
115
+ if (!object_property_set_uint(cpuobj, "mpu-ns-regions",
116
+ s->cpu_mpu_ns[i], errp)) {
117
+ return;
118
+ }
119
+ if (!object_property_set_uint(cpuobj, "mpu-s-regions",
120
+ s->cpu_mpu_s[i], errp)) {
121
+ return;
122
+ }
123
124
if (i > 0) {
125
memory_region_add_subregion_overlap(&s->cpu_container[i], 0,
126
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
127
index XXXXXXX..XXXXXXX 100644
128
--- a/hw/arm/mps2-tz.c
129
+++ b/hw/arm/mps2-tz.c
130
@@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass {
131
int uart_overflow_irq; /* number of the combined UART overflow IRQ */
132
uint32_t init_svtor; /* init-svtor setting for SSE */
133
uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
134
+ uint32_t cpu0_mpu_ns; /* CPU0_MPU_NS setting for SSE */
135
+ uint32_t cpu0_mpu_s; /* CPU0_MPU_S setting for SSE */
136
+ uint32_t cpu1_mpu_ns; /* CPU1_MPU_NS setting for SSE */
137
+ uint32_t cpu1_mpu_s; /* CPU1_MPU_S setting for SSE */
138
const RAMInfo *raminfo;
139
const char *armsse_type;
140
uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */
141
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_TYPE(MPS2TZMachineState, MPS2TZMachineClass, MPS2TZ_MACHINE)
142
#define MPS3_DDR_SIZE (2 * GiB)
143
#endif
144
145
+/* For cpu{0,1}_mpu_{ns,s}, means "leave at SSE's default value" */
146
+#define MPU_REGION_DEFAULT UINT32_MAX
28
+
147
+
29
+static void systick_realize(DeviceState *dev, Error **errp)
148
static const uint32_t an505_oscclk[] = {
30
+{
149
40000000,
31
+ SysTickState *s = SYSTICK(dev);
150
24580000,
32
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, systick_timer_tick, s);
151
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
152
OBJECT(system_memory), &error_abort);
153
qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq);
154
qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor);
155
+ if (mmc->cpu0_mpu_ns != MPU_REGION_DEFAULT) {
156
+ qdev_prop_set_uint32(iotkitdev, "CPU0_MPU_NS", mmc->cpu0_mpu_ns);
157
+ }
158
+ if (mmc->cpu0_mpu_s != MPU_REGION_DEFAULT) {
159
+ qdev_prop_set_uint32(iotkitdev, "CPU0_MPU_S", mmc->cpu0_mpu_s);
160
+ }
161
+ if (object_property_find(OBJECT(iotkitdev), "CPU1_MPU_NS")) {
162
+ if (mmc->cpu1_mpu_ns != MPU_REGION_DEFAULT) {
163
+ qdev_prop_set_uint32(iotkitdev, "CPU1_MPU_NS", mmc->cpu1_mpu_ns);
164
+ }
165
+ if (mmc->cpu1_mpu_s != MPU_REGION_DEFAULT) {
166
+ qdev_prop_set_uint32(iotkitdev, "CPU1_MPU_S", mmc->cpu1_mpu_s);
167
+ }
168
+ }
169
qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
170
qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
171
qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
172
@@ -XXX,XX +XXX,XX @@ static void mps2tz_class_init(ObjectClass *oc, void *data)
173
{
174
MachineClass *mc = MACHINE_CLASS(oc);
175
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(oc);
176
+ MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
177
178
mc->init = mps2tz_common_init;
179
mc->reset = mps2_machine_reset;
180
iic->check = mps2_tz_idau_check;
181
+
182
+ /* Most machines leave these at the SSE defaults */
183
+ mmc->cpu0_mpu_ns = MPU_REGION_DEFAULT;
184
+ mmc->cpu0_mpu_s = MPU_REGION_DEFAULT;
185
+ mmc->cpu1_mpu_ns = MPU_REGION_DEFAULT;
186
+ mmc->cpu1_mpu_s = MPU_REGION_DEFAULT;
33
}
187
}
34
188
35
@@ -XXX,XX +XXX,XX @@ static void systick_class_init(ObjectClass *klass, void *data)
189
static void mps2tz_set_default_ram_info(MPS2TZMachineClass *mmc)
36
190
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
37
dc->vmsd = &vmstate_systick;
191
mmc->numirq = 96;
38
dc->reset = systick_reset;
192
mmc->uart_overflow_irq = 48;
39
+ dc->realize = systick_realize;
193
mmc->init_svtor = 0x00000000;
40
}
194
+ mmc->cpu0_mpu_s = mmc->cpu0_mpu_ns = 16;
41
195
mmc->sram_addr_width = 21;
42
static const TypeInfo armv7m_systick_info = {
196
mmc->raminfo = an547_raminfo;
197
mmc->armsse_type = TYPE_SSE300;
43
--
198
--
44
2.20.1
199
2.34.1
45
200
46
201