1
Arm stuff, mostly patches from RTH.
1
Arm queue; the big bit here is RTH's MTE for user-mode series.
2
2
3
thanks
4
-- PMM
3
-- PMM
5
4
6
The following changes since commit 01a9a51ffaf4699827ea6425cb2b834a356e159d:
5
The following changes since commit 83339e21d05c824ebc9131d644f25c23d0e41ecf:
7
6
8
Merge remote-tracking branch 'remotes/kraxel/tags/ui-20190205-pull-request' into staging (2019-02-05 14:01:29 +0000)
7
Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging (2021-02-10 15:42:20 +0000)
9
8
10
are available in the Git repository at:
9
are available in the Git repository at:
11
10
12
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190205
11
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210211
13
12
14
for you to fetch changes up to a15945d98d3a3390c3da344d1b47218e91e49d8b:
13
for you to fetch changes up to 5213c78932ecf4bae18d62baf8735724e25fb478:
15
14
16
target/arm: Make FPSCR/FPCR trapped-exception bits RAZ/WI (2019-02-05 16:52:42 +0000)
15
target/arm: Correctly initialize MDCR_EL2.HPMN (2021-02-11 11:50:16 +0000)
17
16
18
----------------------------------------------------------------
17
----------------------------------------------------------------
19
target-arm queue:
18
target-arm queue:
20
* Implement Armv8.5-BTI extension for system emulation mode
19
* Correctly initialize MDCR_EL2.HPMN
21
* Implement the PR_PAC_RESET_KEYS prctl() for linux-user mode's Armv8.3-PAuth support
20
* versal: Use nr_apu_cpus in favor of hard coding 2
22
* Support TBI (top-byte-ignore) properly for linux-user mode
21
* npcm7xx: Add ethernet device
23
* gdbstub: allow killing QEMU via vKill command
22
* Enable ARMv8.4-MemTag for user-mode emulation
24
* hw/arm/boot: Support DTB autoload for firmware-only boots
23
* accel/tcg: Add URL of clang bug to comment about our workaround
25
* target/arm: Make FPSCR/FPCR trapped-exception bits RAZ/WI
24
* Add support for FEAT_DIT, Data Independent Timing
25
* Remove GPIO from unimplemented NPCM7XX
26
* Fix SCR RES1 handling
27
* Don't migrate CPUARMState.features
26
28
27
----------------------------------------------------------------
29
----------------------------------------------------------------
28
Max Filippov (1):
30
Aaron Lindsay (1):
29
gdbstub: allow killing QEMU via vKill command
31
target/arm: Don't migrate CPUARMState.features
30
32
31
Peter Maydell (7):
33
Daniel Müller (1):
32
target/arm: Compute TB_FLAGS for TBI for user-only
34
target/arm: Correctly initialize MDCR_EL2.HPMN
33
hw/arm/boot: Fix block comment style in arm_load_kernel()
34
hw/arm/boot: Factor out "direct kernel boot" code into its own function
35
hw/arm/boot: Factor out "set up firmware boot" code
36
hw/arm/boot: Clarify why arm_setup_firmware_boot() doesn't set env->boot_info
37
hw/arm/boot: Support DTB autoload for firmware-only boots
38
target/arm: Make FPSCR/FPCR trapped-exception bits RAZ/WI
39
35
40
Richard Henderson (14):
36
Doug Evans (3):
41
target/arm: Introduce isar_feature_aa64_bti
37
hw/net: Add npcm7xx emc model
42
target/arm: Add PSTATE.BTYPE
38
hw/arm: Add npcm7xx emc model
43
target/arm: Add BT and BTYPE to tb->flags
39
tests/qtests: Add npcm7xx emc model test
44
exec: Add target-specific tlb bits to MemTxAttrs
45
target/arm: Cache the GP bit for a page in MemTxAttrs
46
target/arm: Default handling of BTYPE during translation
47
target/arm: Reset btype for direct branches
48
target/arm: Set btype for indirect branches
49
target/arm: Enable BTI for -cpu max
50
linux-user: Implement PR_PAC_RESET_KEYS
51
tests/tcg/aarch64: Add pauth smoke test
52
target/arm: Add TBFLAG_A64_TBID, split out gen_top_byte_ignore
53
target/arm: Clean TBI for data operations in the translator
54
target/arm: Enable TBI for user-only
55
40
56
tests/tcg/aarch64/Makefile.target | 6 +-
41
Edgar E. Iglesias (1):
57
include/exec/memattrs.h | 10 +
42
hw/arm: versal: Use nr_apu_cpus in favor of hard coding 2
58
linux-user/aarch64/target_syscall.h | 7 +
59
target/arm/cpu.h | 27 +-
60
target/arm/internals.h | 27 +-
61
target/arm/translate.h | 12 +-
62
gdbstub.c | 4 +
63
hw/arm/boot.c | 166 +++++++------
64
linux-user/syscall.c | 36 +++
65
target/arm/cpu.c | 6 +
66
target/arm/cpu64.c | 4 +
67
target/arm/helper.c | 80 +++---
68
target/arm/translate-a64.c | 476 +++++++++++++++++++++++++-----------
69
tests/tcg/aarch64/pauth-1.c | 23 ++
70
14 files changed, 623 insertions(+), 261 deletions(-)
71
create mode 100644 tests/tcg/aarch64/pauth-1.c
72
43
44
Hao Wu (1):
45
hw/arm: Remove GPIO from unimplemented NPCM7XX
46
47
Mike Nawrocki (1):
48
target/arm: Fix SCR RES1 handling
49
50
Peter Maydell (2):
51
arm: Update infocenter.arm.com URLs
52
accel/tcg: Add URL of clang bug to comment about our workaround
53
54
Rebecca Cran (4):
55
target/arm: Add support for FEAT_DIT, Data Independent Timing
56
target/arm: Support AA32 DIT by moving PSTATE_SS from cpsr into env->pstate
57
target/arm: Set ID_AA64PFR0.DIT and ID_PFR0.DIT to 1 for "max" AA64 CPU
58
target/arm: Set ID_PFR0.DIT to 1 for "max" 32-bit CPU
59
60
Richard Henderson (31):
61
tcg: Introduce target-specific page data for user-only
62
linux-user: Introduce PAGE_ANON
63
exec: Use uintptr_t for guest_base
64
exec: Use uintptr_t in cpu_ldst.h
65
exec: Improve types for guest_addr_valid
66
linux-user: Check for overflow in access_ok
67
linux-user: Tidy VERIFY_READ/VERIFY_WRITE
68
bsd-user: Tidy VERIFY_READ/VERIFY_WRITE
69
linux-user: Do not use guest_addr_valid for h2g_valid
70
linux-user: Fix guest_addr_valid vs reserved_va
71
exec: Introduce cpu_untagged_addr
72
exec: Use cpu_untagged_addr in g2h; split out g2h_untagged
73
linux-user: Explicitly untag memory management syscalls
74
linux-user: Use guest_range_valid in access_ok
75
exec: Rename guest_{addr,range}_valid to *_untagged
76
linux-user: Use cpu_untagged_addr in access_ok; split out *_untagged
77
linux-user: Move lock_user et al out of line
78
linux-user: Fix types in uaccess.c
79
linux-user: Handle tags in lock_user/unlock_user
80
linux-user/aarch64: Implement PR_TAGGED_ADDR_ENABLE
81
target/arm: Improve gen_top_byte_ignore
82
target/arm: Use the proper TBI settings for linux-user
83
linux-user/aarch64: Implement PR_MTE_TCF and PR_MTE_TAG
84
linux-user/aarch64: Implement PROT_MTE
85
target/arm: Split out syndrome.h from internals.h
86
linux-user/aarch64: Pass syndrome to EXC_*_ABORT
87
linux-user/aarch64: Signal SEGV_MTESERR for sync tag check fault
88
linux-user/aarch64: Signal SEGV_MTEAERR for async tag check error
89
target/arm: Add allocation tag storage for user mode
90
target/arm: Enable MTE for user-only
91
tests/tcg/aarch64: Add mte smoke tests
92
93
docs/system/arm/nuvoton.rst | 3 +-
94
bsd-user/qemu.h | 9 +-
95
include/exec/cpu-all.h | 47 +-
96
include/exec/cpu_ldst.h | 39 +-
97
include/exec/exec-all.h | 2 +-
98
include/hw/arm/npcm7xx.h | 2 +
99
include/hw/dma/pl080.h | 7 +-
100
include/hw/misc/arm_integrator_debug.h | 2 +-
101
include/hw/net/npcm7xx_emc.h | 286 +++++++++++
102
include/hw/ssi/pl022.h | 5 +-
103
linux-user/aarch64/target_signal.h | 3 +
104
linux-user/aarch64/target_syscall.h | 13 +
105
linux-user/qemu.h | 76 +--
106
linux-user/syscall_defs.h | 1 +
107
target/arm/cpu-param.h | 3 +
108
target/arm/cpu.h | 49 ++
109
target/arm/internals.h | 255 +---------
110
target/arm/syndrome.h | 273 +++++++++++
111
tests/tcg/aarch64/mte.h | 60 +++
112
accel/tcg/cpu-exec.c | 25 +-
113
accel/tcg/translate-all.c | 32 +-
114
accel/tcg/user-exec.c | 51 +-
115
bsd-user/main.c | 4 +-
116
hw/arm/aspeed_ast2600.c | 2 +-
117
hw/arm/musca.c | 4 +-
118
hw/arm/npcm7xx.c | 58 ++-
119
hw/arm/xlnx-versal.c | 4 +-
120
hw/misc/arm_integrator_debug.c | 2 +-
121
hw/net/npcm7xx_emc.c | 857 +++++++++++++++++++++++++++++++++
122
hw/timer/arm_timer.c | 7 +-
123
linux-user/aarch64/cpu_loop.c | 38 +-
124
linux-user/elfload.c | 18 +-
125
linux-user/flatload.c | 2 +-
126
linux-user/hppa/cpu_loop.c | 39 +-
127
linux-user/i386/cpu_loop.c | 6 +-
128
linux-user/i386/signal.c | 5 +-
129
linux-user/main.c | 4 +-
130
linux-user/mmap.c | 86 ++--
131
linux-user/ppc/signal.c | 4 +-
132
linux-user/syscall.c | 165 +++++--
133
linux-user/uaccess.c | 82 +++-
134
target/arm/cpu.c | 29 +-
135
target/arm/cpu64.c | 5 +
136
target/arm/helper-a64.c | 31 +-
137
target/arm/helper.c | 71 ++-
138
target/arm/machine.c | 2 +-
139
target/arm/mte_helper.c | 39 +-
140
target/arm/op_helper.c | 9 +-
141
target/arm/tlb_helper.c | 15 +-
142
target/arm/translate-a64.c | 37 +-
143
target/hppa/op_helper.c | 2 +-
144
target/i386/tcg/mem_helper.c | 2 +-
145
target/s390x/mem_helper.c | 4 +-
146
tests/qtest/npcm7xx_emc-test.c | 812 +++++++++++++++++++++++++++++++
147
tests/tcg/aarch64/mte-1.c | 28 ++
148
tests/tcg/aarch64/mte-2.c | 45 ++
149
tests/tcg/aarch64/mte-3.c | 51 ++
150
tests/tcg/aarch64/mte-4.c | 45 ++
151
tests/tcg/aarch64/pauth-2.c | 1 -
152
hw/net/meson.build | 1 +
153
hw/net/trace-events | 17 +
154
tests/qtest/meson.build | 1 +
155
tests/tcg/aarch64/Makefile.target | 6 +
156
tests/tcg/configure.sh | 4 +
157
64 files changed, 3312 insertions(+), 575 deletions(-)
158
create mode 100644 include/hw/net/npcm7xx_emc.h
159
create mode 100644 target/arm/syndrome.h
160
create mode 100644 tests/tcg/aarch64/mte.h
161
create mode 100644 hw/net/npcm7xx_emc.c
162
create mode 100644 tests/qtest/npcm7xx_emc-test.c
163
create mode 100644 tests/tcg/aarch64/mte-1.c
164
create mode 100644 tests/tcg/aarch64/mte-2.c
165
create mode 100644 tests/tcg/aarch64/mte-3.c
166
create mode 100644 tests/tcg/aarch64/mte-4.c
167
diff view generated by jsdifflib
New patch
1
From: Aaron Lindsay <aaron@os.amperecomputing.com>
1
2
3
As feature flags are added or removed, the meanings of bits in the
4
`features` field can change between QEMU versions, causing migration
5
failures. Additionally, migrating the field is not useful because it is
6
a constant function of the CPU being used.
7
8
Fixes: LP:1914696
9
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
10
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Andrew Jones <drjones@redhat.com>
12
Tested-by: Andrew Jones <drjones@redhat.com>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
target/arm/machine.c | 2 +-
17
1 file changed, 1 insertion(+), 1 deletion(-)
18
19
diff --git a/target/arm/machine.c b/target/arm/machine.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/machine.c
22
+++ b/target/arm/machine.c
23
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_arm_cpu = {
24
VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
25
VMSTATE_UINT64(env.exclusive_val, ARMCPU),
26
VMSTATE_UINT64(env.exclusive_high, ARMCPU),
27
- VMSTATE_UINT64(env.features, ARMCPU),
28
+ VMSTATE_UNUSED(sizeof(uint64_t)),
29
VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
30
VMSTATE_UINT32(env.exception.fsr, ARMCPU),
31
VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
32
--
33
2.20.1
34
35
diff view generated by jsdifflib
1
The {IOE, DZE, OFE, UFE, IXE, IDE} bits in the FPSCR/FPCR are for
1
From: Mike Nawrocki <michael.nawrocki@gtri.gatech.edu>
2
enabling trapped IEEE floating point exceptions (where IEEE exception
3
conditions cause a CPU exception rather than updating the FPSR status
4
bits). QEMU doesn't implement this (and nor does the hardware we're
5
modelling), but for implementations which don't implement trapped
6
exception handling these control bits are supposed to be RAZ/WI.
7
This allows guest code to test for whether the feature is present
8
by trying to write to the bit and checking whether it sticks.
9
2
10
QEMU is incorrectly making these bits read as written. Make them
3
The FW and AW bits of SCR_EL3 are RES1 only in some contexts. Force them
11
RAZ/WI as the architecture requires.
4
to 1 only when there is no support for AArch32 at EL1 or above.
12
5
13
In particular this was causing problems for the NetBSD automatic
6
The reset value will be 0x30 only if the CPU is AArch64-only; if there
14
test suite.
7
is support for AArch32 at EL1 or above, it will be reset to 0.
15
8
16
Reported-by: Martin Husemann <martin@netbsd.org>
9
Also adds helper function isar_feature_aa64_aa32_el1 to check if AArch32
10
is supported at EL1 or above.
11
12
Signed-off-by: Mike Nawrocki <michael.nawrocki@gtri.gatech.edu>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210203165552.16306-2-michael.nawrocki@gtri.gatech.edu
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 20190131130700.28392-1-peter.maydell@linaro.org
20
---
16
---
21
target/arm/cpu.h | 6 ++++++
17
target/arm/cpu.h | 5 +++++
22
target/arm/helper.c | 6 ++++++
18
target/arm/helper.c | 16 ++++++++++++++--
23
2 files changed, 12 insertions(+)
19
2 files changed, 19 insertions(+), 2 deletions(-)
24
20
25
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
26
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/cpu.h
23
--- a/target/arm/cpu.h
28
+++ b/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
29
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
25
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
30
#define FPSR_MASK 0xf800009f
26
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
31
#define FPCR_MASK 0x07ff9f00
27
}
32
28
33
+#define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */
29
+static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
34
+#define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */
30
+{
35
+#define FPCR_OFE (1 << 10) /* Overflow exception trap enable */
31
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
36
+#define FPCR_UFE (1 << 11) /* Underflow exception trap enable */
32
+}
37
+#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
33
+
38
+#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
34
static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
39
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
35
{
40
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
36
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
41
#define FPCR_DN (1 << 25) /* Default NaN enable bit */
42
diff --git a/target/arm/helper.c b/target/arm/helper.c
37
diff --git a/target/arm/helper.c b/target/arm/helper.c
43
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper.c
39
--- a/target/arm/helper.c
45
+++ b/target/arm/helper.c
40
+++ b/target/arm/helper.c
46
@@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
41
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
47
val &= ~FPCR_FZ16;
42
ARMCPU *cpu = env_archcpu(env);
48
}
43
49
44
if (ri->state == ARM_CP_STATE_AA64) {
45
- value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
46
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
47
+ !cpu_isar_feature(aa64_aa32_el1, cpu)) {
48
+ value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
49
+ }
50
valid_mask &= ~SCR_NET;
51
52
if (cpu_isar_feature(aa64_lor, cpu)) {
53
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
54
raw_write(env, ri, value);
55
}
56
57
+static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
58
+{
50
+ /*
59
+ /*
51
+ * We don't implement trapped exception handling, so the
60
+ * scr_write will set the RES1 bits on an AArch64-only CPU.
52
+ * trap enable bits are all RAZ/WI (not RES0!)
61
+ * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
53
+ */
62
+ */
54
+ val &= ~(FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE);
63
+ scr_write(env, ri, 0);
64
+}
55
+
65
+
56
changed = env->vfp.xregs[ARM_VFP_FPSCR];
66
static CPAccessResult access_aa64_tid2(CPUARMState *env,
57
env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
67
const ARMCPRegInfo *ri,
58
env->vfp.vec_len = (val >> 16) & 7;
68
bool isread)
69
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
70
{ .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
71
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
72
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
73
- .resetvalue = 0, .writefn = scr_write },
74
+ .resetfn = scr_reset, .writefn = scr_write },
75
{ .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
76
.cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
77
.access = PL1_RW, .accessfn = access_trap_aa32s_el1,
59
--
78
--
60
2.20.1
79
2.20.1
61
80
62
81
diff view generated by jsdifflib
New patch
1
From: Hao Wu <wuhaotsh@google.com>
1
2
3
NPCM7XX GPIO devices have been implemented in hw/gpio/npcm7xx-gpio.c. So
4
we removed them from the unimplemented devices list.
5
6
Reviewed-by: Doug Evans<dje@google.com>
7
Reviewed-by: Tyrong Ting<kfting@nuvoton.com>
8
Signed-off-by: Hao Wu<wuhaotsh@google.com>
9
Message-id: 20210129005845.416272-2-wuhaotsh@google.com
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
hw/arm/npcm7xx.c | 8 --------
14
1 file changed, 8 deletions(-)
15
16
diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/arm/npcm7xx.c
19
+++ b/hw/arm/npcm7xx.c
20
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
21
create_unimplemented_device("npcm7xx.pcierc", 0xe1000000, 64 * KiB);
22
create_unimplemented_device("npcm7xx.kcs", 0xf0007000, 4 * KiB);
23
create_unimplemented_device("npcm7xx.gfxi", 0xf000e000, 4 * KiB);
24
- create_unimplemented_device("npcm7xx.gpio[0]", 0xf0010000, 4 * KiB);
25
- create_unimplemented_device("npcm7xx.gpio[1]", 0xf0011000, 4 * KiB);
26
- create_unimplemented_device("npcm7xx.gpio[2]", 0xf0012000, 4 * KiB);
27
- create_unimplemented_device("npcm7xx.gpio[3]", 0xf0013000, 4 * KiB);
28
- create_unimplemented_device("npcm7xx.gpio[4]", 0xf0014000, 4 * KiB);
29
- create_unimplemented_device("npcm7xx.gpio[5]", 0xf0015000, 4 * KiB);
30
- create_unimplemented_device("npcm7xx.gpio[6]", 0xf0016000, 4 * KiB);
31
- create_unimplemented_device("npcm7xx.gpio[7]", 0xf0017000, 4 * KiB);
32
create_unimplemented_device("npcm7xx.smbus[0]", 0xf0080000, 4 * KiB);
33
create_unimplemented_device("npcm7xx.smbus[1]", 0xf0081000, 4 * KiB);
34
create_unimplemented_device("npcm7xx.smbus[2]", 0xf0082000, 4 * KiB);
35
--
36
2.20.1
37
38
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Rebecca Cran <rebecca@nuviainc.com>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Add support for FEAT_DIT. DIT (Data Independent Timing) is a required
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
feature for ARMv8.4. Since virtual machine execution is largely
5
Message-id: 20190128223118.5255-4-richard.henderson@linaro.org
5
nondeterministic and TCG is outside of the security domain, it's
6
implemented as a NOP.
7
8
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210208065700.19454-2-rebecca@nuviainc.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
12
---
8
target/arm/cpu.h | 2 ++
13
target/arm/cpu.h | 12 ++++++++++++
9
target/arm/translate.h | 4 ++++
14
target/arm/internals.h | 6 ++++++
10
target/arm/helper.c | 22 +++++++++++++++-------
15
target/arm/helper.c | 22 ++++++++++++++++++++++
11
target/arm/translate-a64.c | 2 ++
16
target/arm/translate-a64.c | 12 ++++++++++++
12
4 files changed, 23 insertions(+), 7 deletions(-)
17
4 files changed, 52 insertions(+)
13
18
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TBII, 0, 2)
23
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
19
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
24
#define CPSR_IT_2_7 (0xfc00U)
20
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
25
#define CPSR_GE (0xfU << 16)
21
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
26
#define CPSR_IL (1U << 20)
22
+FIELD(TBFLAG_A64, BT, 9, 1)
27
+#define CPSR_DIT (1U << 21)
23
+FIELD(TBFLAG_A64, BTYPE, 10, 2)
28
#define CPSR_PAN (1U << 22)
24
29
#define CPSR_J (1U << 24)
25
static inline bool bswap_code(bool sctlr_b)
30
#define CPSR_IT_0_1 (3U << 25)
26
{
31
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
27
diff --git a/target/arm/translate.h b/target/arm/translate.h
32
#define PSTATE_SS (1U << 21)
33
#define PSTATE_PAN (1U << 22)
34
#define PSTATE_UAO (1U << 23)
35
+#define PSTATE_DIT (1U << 24)
36
#define PSTATE_TCO (1U << 25)
37
#define PSTATE_V (1U << 28)
38
#define PSTATE_C (1U << 29)
39
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
40
return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
41
}
42
43
+static inline bool isar_feature_aa32_dit(const ARMISARegisters *id)
44
+{
45
+ return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0;
46
+}
47
+
48
/*
49
* 64-bit feature tests via id registers.
50
*/
51
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
52
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
53
}
54
55
+static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
56
+{
57
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
58
+}
59
+
60
/*
61
* Feature tests for "does this exist in either 32-bit or 64-bit?"
62
*/
63
diff --git a/target/arm/internals.h b/target/arm/internals.h
28
index XXXXXXX..XXXXXXX 100644
64
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate.h
65
--- a/target/arm/internals.h
30
+++ b/target/arm/translate.h
66
+++ b/target/arm/internals.h
31
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
67
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
32
bool ss_same_el;
68
if (isar_feature_aa32_pan(id)) {
33
/* True if v8.3-PAuth is active. */
69
valid |= CPSR_PAN;
34
bool pauth_active;
70
}
35
+ /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
71
+ if (isar_feature_aa32_dit(id)) {
36
+ bool bt;
72
+ valid |= CPSR_DIT;
37
+ /* A copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. */
73
+ }
38
+ uint8_t btype;
74
39
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
75
return valid;
40
int c15_cpar;
76
}
41
/* TCG op of the current insn_start. */
77
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
78
if (isar_feature_aa64_uao(id)) {
79
valid |= PSTATE_UAO;
80
}
81
+ if (isar_feature_aa64_dit(id)) {
82
+ valid |= PSTATE_DIT;
83
+ }
84
if (isar_feature_aa64_mte(id)) {
85
valid |= PSTATE_TCO;
86
}
42
diff --git a/target/arm/helper.c b/target/arm/helper.c
87
diff --git a/target/arm/helper.c b/target/arm/helper.c
43
index XXXXXXX..XXXXXXX 100644
88
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper.c
89
--- a/target/arm/helper.c
45
+++ b/target/arm/helper.c
90
+++ b/target/arm/helper.c
46
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
91
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo uao_reginfo = {
47
92
.readfn = aa64_uao_read, .writefn = aa64_uao_write
48
if (is_a64(env)) {
93
};
49
ARMCPU *cpu = arm_env_get_cpu(env);
94
50
+ uint64_t sctlr;
95
+static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
51
96
+{
52
*pc = env->pc;
97
+ return env->pstate & PSTATE_DIT;
53
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
98
+}
54
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
55
flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
56
}
57
58
+ if (current_el == 0) {
59
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
60
+ sctlr = env->cp15.sctlr_el[1];
61
+ } else {
62
+ sctlr = env->cp15.sctlr_el[current_el];
63
+ }
64
if (cpu_isar_feature(aa64_pauth, cpu)) {
65
/*
66
* In order to save space in flags, we record only whether
67
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
68
* a nop, or "active" when some action must be performed.
69
* The decision of which action to take is left to a helper.
70
*/
71
- uint64_t sctlr;
72
- if (current_el == 0) {
73
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
74
- sctlr = env->cp15.sctlr_el[1];
75
- } else {
76
- sctlr = env->cp15.sctlr_el[current_el];
77
- }
78
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
79
flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
80
}
81
}
82
+
99
+
83
+ if (cpu_isar_feature(aa64_bti, cpu)) {
100
+static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
84
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
101
+ uint64_t value)
85
+ if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
102
+{
86
+ flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
103
+ env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
87
+ }
104
+}
88
+ flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
105
+
89
+ }
106
+static const ARMCPRegInfo dit_reginfo = {
90
} else {
107
+ .name = "DIT", .state = ARM_CP_STATE_AA64,
91
*pc = env->regs[15];
108
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
92
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
109
+ .type = ARM_CP_NO_RAW, .access = PL0_RW,
110
+ .readfn = aa64_dit_read, .writefn = aa64_dit_write
111
+};
112
+
113
static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
114
const ARMCPRegInfo *ri,
115
bool isread)
116
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
117
define_one_arm_cp_reg(cpu, &uao_reginfo);
118
}
119
120
+ if (cpu_isar_feature(aa64_dit, cpu)) {
121
+ define_one_arm_cp_reg(cpu, &dit_reginfo);
122
+ }
123
+
124
if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
125
define_arm_cp_regs(cpu, vhe_reginfo);
126
}
93
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
127
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
94
index XXXXXXX..XXXXXXX 100644
128
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-a64.c
129
--- a/target/arm/translate-a64.c
96
+++ b/target/arm/translate-a64.c
130
+++ b/target/arm/translate-a64.c
97
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
131
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
98
dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
132
tcg_temp_free_i32(t1);
99
dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
133
break;
100
dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
134
101
+ dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
135
+ case 0x1a: /* DIT */
102
+ dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
136
+ if (!dc_isar_feature(aa64_dit, s)) {
103
dc->vec_len = 0;
137
+ goto do_unallocated;
104
dc->vec_stride = 0;
138
+ }
105
dc->cp_regs = arm_cpu->cp_regs;
139
+ if (crm & 1) {
140
+ set_pstate_bits(PSTATE_DIT);
141
+ } else {
142
+ clear_pstate_bits(PSTATE_DIT);
143
+ }
144
+ /* There's no need to rebuild hflags because DIT is a nop */
145
+ break;
146
+
147
case 0x1e: /* DAIFSet */
148
t1 = tcg_const_i32(crm);
149
gen_helper_msr_i_daifset(cpu_env, t1);
106
--
150
--
107
2.20.1
151
2.20.1
108
152
109
153
diff view generated by jsdifflib
1
Factor out the "boot via firmware" code path from arm_load_kernel()
1
From: Rebecca Cran <rebecca@nuviainc.com>
2
into its own function.
3
2
4
This commit only moves code around; no semantic changes.
3
cpsr has been treated as being the same as spsr, but it isn't.
4
Since PSTATE_SS isn't in cpsr, remove it and move it into env->pstate.
5
5
6
This allows us to add support for CPSR_DIT, adding helper functions
7
to merge SPSR_ELx to and from CPSR.
8
9
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210208065700.19454-3-rebecca@nuviainc.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
9
Message-id: 20190131112240.8395-4-peter.maydell@linaro.org
10
---
13
---
11
hw/arm/boot.c | 92 +++++++++++++++++++++++++++------------------------
14
target/arm/helper-a64.c | 27 +++++++++++++++++++++++----
12
1 file changed, 49 insertions(+), 43 deletions(-)
15
target/arm/helper.c | 24 ++++++++++++++++++------
16
target/arm/op_helper.c | 9 +--------
17
3 files changed, 42 insertions(+), 18 deletions(-)
13
18
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
19
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/boot.c
21
--- a/target/arm/helper-a64.c
17
+++ b/hw/arm/boot.c
22
+++ b/target/arm/helper-a64.c
18
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
23
@@ -XXX,XX +XXX,XX @@ static int el_from_spsr(uint32_t spsr)
19
}
24
}
20
}
25
}
21
26
22
+static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info)
27
+static void cpsr_write_from_spsr_elx(CPUARMState *env,
28
+ uint32_t val)
23
+{
29
+{
24
+ /* Set up for booting firmware (which might load a kernel via fw_cfg) */
30
+ uint32_t mask;
25
+
31
+
26
+ if (have_dtb(info)) {
32
+ /* Save SPSR_ELx.SS into PSTATE. */
27
+ /*
33
+ env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
28
+ * If we have a device tree blob, but no kernel to supply it to (or
34
+ val &= ~PSTATE_SS;
29
+ * the kernel is supposed to be loaded by the bootloader), copy the
35
+
30
+ * DTB to the base of RAM for the bootloader to pick up.
36
+ /* Move DIT to the correct location for CPSR */
31
+ */
37
+ if (val & PSTATE_DIT) {
32
+ info->dtb_start = info->loader_start;
38
+ val &= ~PSTATE_DIT;
39
+ val |= CPSR_DIT;
33
+ }
40
+ }
34
+
41
+
35
+ if (info->kernel_filename) {
42
+ mask = aarch32_cpsr_valid_mask(env->features, \
36
+ FWCfgState *fw_cfg;
43
+ &env_archcpu(env)->isar);
37
+ bool try_decompressing_kernel;
44
+ cpsr_write(env, val, mask, CPSRWriteRaw);
38
+
39
+ fw_cfg = fw_cfg_find();
40
+ try_decompressing_kernel = arm_feature(&cpu->env,
41
+ ARM_FEATURE_AARCH64);
42
+
43
+ /*
44
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
45
+ * We don't process them here at all, it's all left to the
46
+ * firmware.
47
+ */
48
+ load_image_to_fw_cfg(fw_cfg,
49
+ FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
50
+ info->kernel_filename,
51
+ try_decompressing_kernel);
52
+ load_image_to_fw_cfg(fw_cfg,
53
+ FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
54
+ info->initrd_filename, false);
55
+
56
+ if (info->kernel_cmdline) {
57
+ fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
58
+ strlen(info->kernel_cmdline) + 1);
59
+ fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
60
+ info->kernel_cmdline);
61
+ }
62
+ }
63
+
64
+ /*
65
+ * We will start from address 0 (typically a boot ROM image) in the
66
+ * same way as hardware.
67
+ */
68
+}
45
+}
69
+
46
+
70
void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
47
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
71
{
48
{
72
CPUState *cs;
49
int cur_el = arm_current_el(env);
73
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
50
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
74
51
- uint32_t mask, spsr = env->banked_spsr[spsr_idx];
75
/* Load the kernel. */
52
+ uint32_t spsr = env->banked_spsr[spsr_idx];
76
if (!info->kernel_filename || info->firmware_loaded) {
53
int new_el;
77
-
54
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
78
- if (have_dtb(info)) {
55
79
- /*
56
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
80
- * If we have a device tree blob, but no kernel to supply it to (or
57
* will sort the register banks out for us, and we've already
81
- * the kernel is supposed to be loaded by the bootloader), copy the
58
* caught all the bad-mode cases in el_from_spsr().
82
- * DTB to the base of RAM for the bootloader to pick up.
59
*/
83
- */
60
- mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
84
- info->dtb_start = info->loader_start;
61
- cpsr_write(env, spsr, mask, CPSRWriteRaw);
85
- }
62
+ cpsr_write_from_spsr_elx(env, spsr);
86
-
63
if (!arm_singlestep_active(env)) {
87
- if (info->kernel_filename) {
64
- env->uncached_cpsr &= ~PSTATE_SS;
88
- FWCfgState *fw_cfg;
65
+ env->pstate &= ~PSTATE_SS;
89
- bool try_decompressing_kernel;
66
}
90
-
67
aarch64_sync_64_to_32(env);
91
- fw_cfg = fw_cfg_find();
68
92
- try_decompressing_kernel = arm_feature(&cpu->env,
69
diff --git a/target/arm/helper.c b/target/arm/helper.c
93
- ARM_FEATURE_AARCH64);
70
index XXXXXXX..XXXXXXX 100644
94
-
71
--- a/target/arm/helper.c
95
- /*
72
+++ b/target/arm/helper.c
96
- * Expose the kernel, the command line, and the initrd in fw_cfg.
73
@@ -XXX,XX +XXX,XX @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
97
- * We don't process them here at all, it's all left to the
74
* For exceptions taken to AArch32 we must clear the SS bit in both
98
- * firmware.
75
* PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
99
- */
76
*/
100
- load_image_to_fw_cfg(fw_cfg,
77
- env->uncached_cpsr &= ~PSTATE_SS;
101
- FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
78
+ env->pstate &= ~PSTATE_SS;
102
- info->kernel_filename,
79
env->spsr = cpsr_read(env);
103
- try_decompressing_kernel);
80
/* Clear IT bits. */
104
- load_image_to_fw_cfg(fw_cfg,
81
env->condexec_bits = 0;
105
- FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
82
@@ -XXX,XX +XXX,XX @@ static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
106
- info->initrd_filename, false);
83
}
107
-
84
}
108
- if (info->kernel_cmdline) {
85
109
- fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
86
+static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
110
- strlen(info->kernel_cmdline) + 1);
87
+{
111
- fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
88
+ uint32_t ret = cpsr_read(env);
112
- info->kernel_cmdline);
89
+
113
- }
90
+ /* Move DIT to the correct location for SPSR_ELx */
114
- }
91
+ if (ret & CPSR_DIT) {
115
-
92
+ ret &= ~CPSR_DIT;
116
- /*
93
+ ret |= PSTATE_DIT;
117
- * We will start from address 0 (typically a boot ROM image) in the
94
+ }
118
- * same way as hardware.
95
+ /* Merge PSTATE.SS into SPSR_ELx */
119
- */
96
+ ret |= env->pstate & PSTATE_SS;
120
+ arm_setup_firmware_boot(cpu, info);
97
+
121
return;
98
+ return ret;
99
+}
100
+
101
/* Handle exception entry to a target EL which is using AArch64 */
102
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
103
{
104
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
105
aarch64_save_sp(env, arm_current_el(env));
106
env->elr_el[new_el] = env->pc;
122
} else {
107
} else {
123
arm_setup_direct_kernel_boot(cpu, info);
108
- old_mode = cpsr_read(env);
109
+ old_mode = cpsr_read_for_spsr_elx(env);
110
env->elr_el[new_el] = env->regs[15];
111
112
aarch64_sync_32_to_64(env);
113
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
114
target_ulong *cs_base, uint32_t *pflags)
115
{
116
uint32_t flags = env->hflags;
117
- uint32_t pstate_for_ss;
118
119
*cs_base = 0;
120
assert_hflags_rebuild_correctly(env);
121
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
122
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
123
flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
124
}
125
- pstate_for_ss = env->pstate;
126
} else {
127
*pc = env->regs[15];
128
129
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
130
131
flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
132
flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
133
- pstate_for_ss = env->uncached_cpsr;
134
}
135
136
/*
137
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
138
* SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
139
*/
140
if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
141
- (pstate_for_ss & PSTATE_SS)) {
142
+ (env->pstate & PSTATE_SS)) {
143
flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
144
}
145
146
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/target/arm/op_helper.c
149
+++ b/target/arm/op_helper.c
150
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
151
152
uint32_t HELPER(cpsr_read)(CPUARMState *env)
153
{
154
- /*
155
- * We store the ARMv8 PSTATE.SS bit in env->uncached_cpsr.
156
- * This is convenient for populating SPSR_ELx, but must be
157
- * hidden from aarch32 mode, where it is not visible.
158
- *
159
- * TODO: ARMv8.4-DIT -- need to move SS somewhere else.
160
- */
161
- return cpsr_read(env) & ~(CPSR_EXEC | PSTATE_SS);
162
+ return cpsr_read(env) & ~CPSR_EXEC;
163
}
164
165
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
124
--
166
--
125
2.20.1
167
2.20.1
126
168
127
169
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Rebecca Cran <rebecca@nuviainc.com>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Enable FEAT_DIT for the "max" AARCH64 CPU.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
5
Message-id: 20190128223118.5255-11-richard.henderson@linaro.org
5
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210208065700.19454-4-rebecca@nuviainc.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
9
---
8
target/arm/cpu64.c | 4 ++++
10
target/arm/cpu64.c | 5 +++++
9
1 file changed, 4 insertions(+)
11
1 file changed, 5 insertions(+)
10
12
11
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
13
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
12
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/cpu64.c
15
--- a/target/arm/cpu64.c
14
+++ b/target/arm/cpu64.c
16
+++ b/target/arm/cpu64.c
15
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
17
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
18
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
16
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
19
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
20
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
21
+ t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
17
cpu->isar.id_aa64pfr0 = t;
22
cpu->isar.id_aa64pfr0 = t;
18
23
19
+ t = cpu->isar.id_aa64pfr1;
24
t = cpu->isar.id_aa64pfr1;
20
+ t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
25
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
21
+ cpu->isar.id_aa64pfr1 = t;
26
u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
27
cpu->isar.id_isar6 = u;
28
29
+ u = cpu->isar.id_pfr0;
30
+ u = FIELD_DP32(u, ID_PFR0, DIT, 1);
31
+ cpu->isar.id_pfr0 = u;
22
+
32
+
23
t = cpu->isar.id_aa64mmfr1;
33
u = cpu->isar.id_mmfr3;
24
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
34
u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
25
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
35
cpu->isar.id_mmfr3 = u;
26
--
36
--
27
2.20.1
37
2.20.1
28
38
29
39
diff view generated by jsdifflib
1
From: Max Filippov <jcmvbkbc@gmail.com>
1
From: Rebecca Cran <rebecca@nuviainc.com>
2
2
3
With multiprocess extensions gdb uses 'vKill' packet instead of 'k' to
3
Enable FEAT_DIT for the "max" 32-bit CPU.
4
kill the inferior. Handle 'vKill' the same way 'k' was handled in the
5
presence of single process.
6
4
7
Fixes: 7cf48f6752e5 ("gdbstub: add multiprocess support to
5
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
8
(f|s)ThreadInfo and ThreadExtraInfo")
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
7
Message-id: 20210208065700.19454-5-rebecca@nuviainc.com
10
Cc: Luc Michel <luc.michel@greensocs.com>
11
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
12
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
13
Reviewed-by: KONRAD Frederic <frederic.konrad@adacore.com>
14
Tested-by: KONRAD Frederic <frederic.konrad@adacore.com>
15
Message-id: 20190130192403.13754-1-jcmvbkbc@gmail.com
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
9
---
18
gdbstub.c | 4 ++++
10
target/arm/cpu.c | 4 ++++
19
1 file changed, 4 insertions(+)
11
1 file changed, 4 insertions(+)
20
12
21
diff --git a/gdbstub.c b/gdbstub.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
22
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
23
--- a/gdbstub.c
15
--- a/target/arm/cpu.c
24
+++ b/gdbstub.c
16
+++ b/target/arm/cpu.c
25
@@ -XXX,XX +XXX,XX @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
17
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
26
18
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
27
put_packet(s, buf);
19
t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
28
break;
20
cpu->isar.id_mmfr4 = t;
29
+ } else if (strncmp(p, "Kill;", 5) == 0) {
21
+
30
+ /* Kill the target */
22
+ t = cpu->isar.id_pfr0;
31
+ error_report("QEMU: Terminated via GDBstub");
23
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
32
+ exit(0);
24
+ cpu->isar.id_pfr0 = t;
33
} else {
25
}
34
goto unknown_command;
26
#endif
35
}
27
}
36
--
28
--
37
2.20.1
29
2.20.1
38
30
39
31
diff view generated by jsdifflib
1
The code path for booting firmware doesn't set env->boot_info. At
1
Update infocenter.arm.com URLs for various pieces of Arm
2
first sight this looks odd, so add a comment saying why we don't.
2
documentation to the new developer.arm.com equivalents. (There is a
3
redirection in place from the old URLs, but we might as well update
4
our comments in case the redirect ever disappears in future.)
5
6
This patch covers all the URLs which are not MPS2/SSE-200/IoTKit
7
related (those are dealt with in a different patch).
3
8
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
11
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Message-id: 20190131112240.8395-5-peter.maydell@linaro.org
12
Message-id: 20210205171456.19939-1-peter.maydell@linaro.org
8
---
13
---
9
hw/arm/boot.c | 3 ++-
14
include/hw/dma/pl080.h | 7 ++++---
10
1 file changed, 2 insertions(+), 1 deletion(-)
15
include/hw/misc/arm_integrator_debug.h | 2 +-
16
include/hw/ssi/pl022.h | 5 +++--
17
hw/arm/aspeed_ast2600.c | 2 +-
18
hw/arm/musca.c | 4 ++--
19
hw/misc/arm_integrator_debug.c | 2 +-
20
hw/timer/arm_timer.c | 7 ++++---
21
7 files changed, 16 insertions(+), 13 deletions(-)
11
22
12
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
23
diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h
13
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/arm/boot.c
25
--- a/include/hw/dma/pl080.h
15
+++ b/hw/arm/boot.c
26
+++ b/include/hw/dma/pl080.h
16
@@ -XXX,XX +XXX,XX @@ static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info)
27
@@ -XXX,XX +XXX,XX @@
17
28
* (at your option) any later version.
18
/*
29
*/
19
* We will start from address 0 (typically a boot ROM image) in the
30
20
- * same way as hardware.
31
-/* This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
21
+ * same way as hardware. Leave env->boot_info NULL, so that
32
+/*
22
+ * do_cpu_reset() knows it does not need to alter the PC on reset.
33
+ * This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
23
*/
34
* The PL080 TRM is:
35
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0196g/DDI0196.pdf
36
+ * https://developer.arm.com/documentation/ddi0196/latest
37
* and the PL081 TRM is:
38
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0218e/DDI0218.pdf
39
+ * https://developer.arm.com/documentation/ddi0218/latest
40
*
41
* QEMU interface:
42
* + sysbus IRQ 0: DMACINTR combined interrupt line
43
diff --git a/include/hw/misc/arm_integrator_debug.h b/include/hw/misc/arm_integrator_debug.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/include/hw/misc/arm_integrator_debug.h
46
+++ b/include/hw/misc/arm_integrator_debug.h
47
@@ -XXX,XX +XXX,XX @@
48
*
49
* Browse the data sheet:
50
*
51
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0159b/Babbfijf.html
52
+ * https://developer.arm.com/documentation/dui0159/b/peripherals-and-interfaces/debug-leds-and-dip-switch-interface
53
*
54
* Copyright (c) 2013 Alex Bennée <alex@bennee.com>
55
*
56
diff --git a/include/hw/ssi/pl022.h b/include/hw/ssi/pl022.h
57
index XXXXXXX..XXXXXXX 100644
58
--- a/include/hw/ssi/pl022.h
59
+++ b/include/hw/ssi/pl022.h
60
@@ -XXX,XX +XXX,XX @@
61
* (at your option) any later version.
62
*/
63
64
-/* This is a model of the Arm PrimeCell PL022 synchronous serial port.
65
+/*
66
+ * This is a model of the Arm PrimeCell PL022 synchronous serial port.
67
* The PL022 TRM is:
68
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0194h/DDI0194H_ssp_pl022_trm.pdf
69
+ * https://developer.arm.com/documentation/ddi0194/latest
70
*
71
* QEMU interface:
72
* + sysbus IRQ: SSPINTR combined interrupt line
73
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/hw/arm/aspeed_ast2600.c
76
+++ b/hw/arm/aspeed_ast2600.c
77
@@ -XXX,XX +XXX,XX @@ static void aspeed_soc_ast2600_init(Object *obj)
78
/*
79
* ASPEED ast2600 has 0xf as cluster ID
80
*
81
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0388e/CIHEBGFG.html
82
+ * https://developer.arm.com/documentation/ddi0388/e/the-system-control-coprocessors/summary-of-system-control-coprocessor-registers/multiprocessor-affinity-register
83
*/
84
static uint64_t aspeed_calc_affinity(int cpu)
85
{
86
diff --git a/hw/arm/musca.c b/hw/arm/musca.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/hw/arm/musca.c
89
+++ b/hw/arm/musca.c
90
@@ -XXX,XX +XXX,XX @@
91
* https://developer.arm.com/products/system-design/development-boards/iot-test-chips-and-boards/musca-a-test-chip-board
92
* https://developer.arm.com/products/system-design/development-boards/iot-test-chips-and-boards/musca-b-test-chip-board
93
* We model the A and B1 variants of this board, as described in the TRMs:
94
- * http://infocenter.arm.com/help/topic/com.arm.doc.101107_0000_00_en/index.html
95
- * http://infocenter.arm.com/help/topic/com.arm.doc.101312_0000_00_en/index.html
96
+ * https://developer.arm.com/documentation/101107/latest/
97
+ * https://developer.arm.com/documentation/101312/latest/
98
*/
99
100
#include "qemu/osdep.h"
101
diff --git a/hw/misc/arm_integrator_debug.c b/hw/misc/arm_integrator_debug.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/hw/misc/arm_integrator_debug.c
104
+++ b/hw/misc/arm_integrator_debug.c
105
@@ -XXX,XX +XXX,XX @@
106
* to this area.
107
*
108
* The real h/w is described at:
109
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0159b/Babbfijf.html
110
+ * https://developer.arm.com/documentation/dui0159/b/peripherals-and-interfaces/debug-leds-and-dip-switch-interface
111
*
112
* Copyright (c) 2013 Alex Bennée <alex@bennee.com>
113
*
114
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/hw/timer/arm_timer.c
117
+++ b/hw/timer/arm_timer.c
118
@@ -XXX,XX +XXX,XX @@ static arm_timer_state *arm_timer_init(uint32_t freq)
119
return s;
24
}
120
}
25
121
122
-/* ARM PrimeCell SP804 dual timer module.
123
+/*
124
+ * ARM PrimeCell SP804 dual timer module.
125
* Docs at
126
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0271d/index.html
127
-*/
128
+ * https://developer.arm.com/documentation/ddi0271/latest/
129
+ */
130
131
#define TYPE_SP804 "sp804"
132
OBJECT_DECLARE_SIMPLE_TYPE(SP804State, SP804)
26
--
133
--
27
2.20.1
134
2.20.1
28
135
29
136
diff view generated by jsdifflib
1
The arm_boot_info struct has a skip_dtb_autoload flag: if this is
1
In cpu_exec() we have a longstanding workaround for compilers which
2
set to true by the board code then arm_load_kernel() will not
2
do not correctly implement the part of the sigsetjmp()/siglongjmp()
3
load the DTB itself, but will leave this for the board code to
3
spec which requires that local variables which are not changed
4
do itself later. However, the check for this is done in a
4
between the setjmp and the longjmp retain their value.
5
code path which is only executed for the case where we load
6
a kernel image file. If we're taking the "boot via firmware"
7
code path then the flag isn't honoured and the DTB is never
8
loaded.
9
5
10
We didn't notice this because the only real user of "boot
6
I recently ran across the upstream clang bug report for this; add a
11
via firmware" that cares about the DTB is the virt board
7
link to it to the comment describing the workaround, and generally
12
(for UEFI boot), and that always wants skip_dtb_autoload
8
expand the comment, so that we have a reasonable chance in future of
13
anyway. But the SBSA reference board model we're planning to
9
understanding why it's there and determining when we can remove it,
14
add will want the flag to behave correctly.
10
assuming clang eventually fixes the bug.
15
11
16
Now we've refactored the arm_load_kernel() function, the
12
Remove the /* buggy compiler */ comments on the #else and #endif:
17
fix is simple: drop the early 'return' so we fall into
13
they don't add anything to understanding and are somewhat misleading
18
the same "load the DTB" code the boot-direct-kernel path uses.
14
since they're sandwiching the code path for *non*-buggy compilers.
19
15
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
22
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
18
Message-id: 20210129130330.30820-1-peter.maydell@linaro.org
23
Message-id: 20190131112240.8395-6-peter.maydell@linaro.org
24
---
19
---
25
hw/arm/boot.c | 1 -
20
accel/tcg/cpu-exec.c | 25 +++++++++++++++++++------
26
1 file changed, 1 deletion(-)
21
1 file changed, 19 insertions(+), 6 deletions(-)
27
22
28
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
23
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
29
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/arm/boot.c
25
--- a/accel/tcg/cpu-exec.c
31
+++ b/hw/arm/boot.c
26
+++ b/accel/tcg/cpu-exec.c
32
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
27
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
33
/* Load the kernel. */
28
/* prepare setjmp context for exception handling */
34
if (!info->kernel_filename || info->firmware_loaded) {
29
if (sigsetjmp(cpu->jmp_env, 0) != 0) {
35
arm_setup_firmware_boot(cpu, info);
30
#if defined(__clang__)
36
- return;
31
- /* Some compilers wrongly smash all local variables after
37
} else {
32
- * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
38
arm_setup_direct_kernel_boot(cpu, info);
33
+ /*
39
}
34
+ * Some compilers wrongly smash all local variables after
35
+ * siglongjmp (the spec requires that only non-volatile locals
36
+ * which are changed between the sigsetjmp and siglongjmp are
37
+ * permitted to be trashed). There were bug reports for gcc
38
+ * 4.5.0 and clang. The bug is fixed in all versions of gcc
39
+ * that we support, but is still unfixed in clang:
40
+ * https://bugs.llvm.org/show_bug.cgi?id=21183
41
+ *
42
* Reload essential local variables here for those compilers.
43
- * Newer versions of gcc would complain about this code (-Wclobbered). */
44
+ * Newer versions of gcc would complain about this code (-Wclobbered),
45
+ * so we only perform the workaround for clang.
46
+ */
47
cpu = current_cpu;
48
cc = CPU_GET_CLASS(cpu);
49
-#else /* buggy compiler */
50
- /* Assert that the compiler does not smash local variables. */
51
+#else
52
+ /*
53
+ * Non-buggy compilers preserve these locals; assert that
54
+ * they have the correct value.
55
+ */
56
g_assert(cpu == current_cpu);
57
g_assert(cc == CPU_GET_CLASS(cpu));
58
-#endif /* buggy compiler */
59
+#endif
60
+
61
#ifndef CONFIG_SOFTMMU
62
tcg_debug_assert(!have_mmap_lock());
63
#endif
40
--
64
--
41
2.20.1
65
2.20.1
42
66
43
67
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Also create field definitions for id_aa64pfr1 from ARMv8.5.
3
This data can be allocated by page_alloc_target_data() and
4
released by page_set_flags(start, end, prot | PAGE_RESET).
5
6
This data will be used to hold tag memory for AArch64 MTE.
4
7
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190128223118.5255-2-richard.henderson@linaro.org
10
Message-id: 20210210000223.884088-2-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
12
---
10
target/arm/cpu.h | 10 ++++++++++
13
include/exec/cpu-all.h | 42 +++++++++++++++++++++++++++++++++------
11
1 file changed, 10 insertions(+)
14
accel/tcg/translate-all.c | 28 ++++++++++++++++++++++++++
15
linux-user/mmap.c | 4 +++-
16
linux-user/syscall.c | 4 ++--
17
4 files changed, 69 insertions(+), 9 deletions(-)
12
18
13
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
14
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/cpu.h
21
--- a/include/exec/cpu-all.h
16
+++ b/target/arm/cpu.h
22
+++ b/include/exec/cpu-all.h
17
@@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64PFR0, GIC, 24, 4)
23
@@ -XXX,XX +XXX,XX @@ extern intptr_t qemu_host_page_mask;
18
FIELD(ID_AA64PFR0, RAS, 28, 4)
24
#define PAGE_EXEC 0x0004
19
FIELD(ID_AA64PFR0, SVE, 32, 4)
25
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
20
26
#define PAGE_VALID 0x0008
21
+FIELD(ID_AA64PFR1, BT, 0, 4)
27
-/* original state of the write flag (used when tracking self-modifying
22
+FIELD(ID_AA64PFR1, SBSS, 4, 4)
28
- code */
23
+FIELD(ID_AA64PFR1, MTE, 8, 4)
29
+/*
24
+FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
30
+ * Original state of the write flag (used when tracking self-modifying code)
31
+ */
32
#define PAGE_WRITE_ORG 0x0010
33
-/* Invalidate the TLB entry immediately, helpful for s390x
34
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
35
-#define PAGE_WRITE_INV 0x0040
36
+/*
37
+ * Invalidate the TLB entry immediately, helpful for s390x
38
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
39
+ */
40
+#define PAGE_WRITE_INV 0x0020
41
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
42
+#define PAGE_RESET 0x0040
25
+
43
+
26
FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
44
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
27
FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
45
/* FIXME: Code that sets/uses this is broken and needs to go away. */
28
FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
46
-#define PAGE_RESERVED 0x0020
29
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
47
+#define PAGE_RESERVED 0x0100
30
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
48
#endif
49
/* Target-specific bits that will be used via page_get_flags(). */
50
#define PAGE_TARGET_1 0x0080
51
@@ -XXX,XX +XXX,XX @@ int walk_memory_regions(void *, walk_memory_regions_fn);
52
int page_get_flags(target_ulong address);
53
void page_set_flags(target_ulong start, target_ulong end, int flags);
54
int page_check_range(target_ulong start, target_ulong len, int flags);
55
+
56
+/**
57
+ * page_alloc_target_data(address, size)
58
+ * @address: guest virtual address
59
+ * @size: size of data to allocate
60
+ *
61
+ * Allocate @size bytes of out-of-band data to associate with the
62
+ * guest page at @address. If the page is not mapped, NULL will
63
+ * be returned. If there is existing data associated with @address,
64
+ * no new memory will be allocated.
65
+ *
66
+ * The memory will be freed when the guest page is deallocated,
67
+ * e.g. with the munmap system call.
68
+ */
69
+void *page_alloc_target_data(target_ulong address, size_t size);
70
+
71
+/**
72
+ * page_get_target_data(address)
73
+ * @address: guest virtual address
74
+ *
75
+ * Return any out-of-bound memory assocated with the guest page
76
+ * at @address, as per page_alloc_target_data.
77
+ */
78
+void *page_get_target_data(target_ulong address);
79
#endif
80
81
CPUArchState *cpu_copy(CPUArchState *env);
82
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/accel/tcg/translate-all.c
85
+++ b/accel/tcg/translate-all.c
86
@@ -XXX,XX +XXX,XX @@ typedef struct PageDesc {
87
unsigned int code_write_count;
88
#else
89
unsigned long flags;
90
+ void *target_data;
91
#endif
92
#ifndef CONFIG_USER_ONLY
93
QemuSpin lock;
94
@@ -XXX,XX +XXX,XX @@ int page_get_flags(target_ulong address)
95
void page_set_flags(target_ulong start, target_ulong end, int flags)
96
{
97
target_ulong addr, len;
98
+ bool reset_target_data;
99
100
/* This function should never be called with addresses outside the
101
guest address space. If this assert fires, it probably indicates
102
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
103
if (flags & PAGE_WRITE) {
104
flags |= PAGE_WRITE_ORG;
105
}
106
+ reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
107
+ flags &= ~PAGE_RESET;
108
109
for (addr = start, len = end - start;
110
len != 0;
111
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
112
p->first_tb) {
113
tb_invalidate_phys_page(addr, 0);
114
}
115
+ if (reset_target_data && p->target_data) {
116
+ g_free(p->target_data);
117
+ p->target_data = NULL;
118
+ }
119
p->flags = flags;
120
}
31
}
121
}
32
122
33
+static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
123
+void *page_get_target_data(target_ulong address)
34
+{
124
+{
35
+ return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
125
+ PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
126
+ return p ? p->target_data : NULL;
36
+}
127
+}
37
+
128
+
38
/*
129
+void *page_alloc_target_data(target_ulong address, size_t size)
39
* Forward to the above feature tests given an ARMCPU pointer.
130
+{
40
*/
131
+ PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
132
+ void *ret = NULL;
133
+
134
+ if (p->flags & PAGE_VALID) {
135
+ ret = p->target_data;
136
+ if (!ret) {
137
+ p->target_data = ret = g_malloc0(size);
138
+ }
139
+ }
140
+ return ret;
141
+}
142
+
143
int page_check_range(target_ulong start, target_ulong len, int flags)
144
{
145
PageDesc *p;
146
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/linux-user/mmap.c
149
+++ b/linux-user/mmap.c
150
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
151
}
152
}
153
the_end1:
154
+ page_flags |= PAGE_RESET;
155
page_set_flags(start, start + len, page_flags);
156
the_end:
157
trace_target_mmap_complete(start);
158
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
159
new_addr = h2g(host_addr);
160
prot = page_get_flags(old_addr);
161
page_set_flags(old_addr, old_addr + old_size, 0);
162
- page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
163
+ page_set_flags(new_addr, new_addr + new_size,
164
+ prot | PAGE_VALID | PAGE_RESET);
165
}
166
tb_invalidate_phys_range(new_addr, new_addr + new_size);
167
mmap_unlock();
168
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
169
index XXXXXXX..XXXXXXX 100644
170
--- a/linux-user/syscall.c
171
+++ b/linux-user/syscall.c
172
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
173
raddr=h2g((unsigned long)host_raddr);
174
175
page_set_flags(raddr, raddr + shm_info.shm_segsz,
176
- PAGE_VALID | PAGE_READ |
177
- ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
178
+ PAGE_VALID | PAGE_RESET | PAGE_READ |
179
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
180
181
for (i = 0; i < N_SHM_REGIONS; i++) {
182
if (!shm_regions[i].in_use) {
41
--
183
--
42
2.20.1
184
2.20.1
43
185
44
186
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Record whether the backing page is anonymous, or if it has file
4
backing. This will allow us to get close to the Linux AArch64
5
ABI for MTE, which allows tag memory only on ram-backed VMAs.
6
7
The real ABI allows tag memory on files, when those files are
8
on ram-backed filesystems, such as tmpfs. We will not be able
9
to implement that in QEMU linux-user.
10
11
Thankfully, anonymous memory for malloc arenas is the primary
12
consumer of this feature, so this restricted version should
13
still be of use.
14
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
Message-id: 20210210000223.884088-3-richard.henderson@linaro.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
20
include/exec/cpu-all.h | 2 ++
21
linux-user/mmap.c | 3 +++
22
2 files changed, 5 insertions(+)
23
24
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/exec/cpu-all.h
27
+++ b/include/exec/cpu-all.h
28
@@ -XXX,XX +XXX,XX @@ extern intptr_t qemu_host_page_mask;
29
#define PAGE_WRITE_INV 0x0020
30
/* For use with page_set_flags: page is being replaced; target_data cleared. */
31
#define PAGE_RESET 0x0040
32
+/* For linux-user, indicates that the page is MAP_ANON. */
33
+#define PAGE_ANON 0x0080
34
35
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
36
/* FIXME: Code that sets/uses this is broken and needs to go away. */
37
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/linux-user/mmap.c
40
+++ b/linux-user/mmap.c
41
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
42
}
43
}
44
the_end1:
45
+ if (flags & MAP_ANONYMOUS) {
46
+ page_flags |= PAGE_ANON;
47
+ }
48
page_flags |= PAGE_RESET;
49
page_set_flags(start, start + len, page_flags);
50
the_end:
51
--
52
2.20.1
53
54
diff view generated by jsdifflib
1
Fix the block comment style in arm_load_kernel() to QEMU's
1
From: Richard Henderson <richard.henderson@linaro.org>
2
current style preferences. This will allow us to do some
3
refactoring of this function without checkpatch complaining
4
about the code-motion patches.
5
2
3
This is more descriptive than 'unsigned long'.
4
No functional change, since these match on all linux+bsd hosts.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-4-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
9
Message-id: 20190131112240.8395-2-peter.maydell@linaro.org
10
---
10
---
11
hw/arm/boot.c | 30 ++++++++++++++++++++----------
11
include/exec/cpu-all.h | 2 +-
12
1 file changed, 20 insertions(+), 10 deletions(-)
12
bsd-user/main.c | 4 ++--
13
linux-user/elfload.c | 4 ++--
14
linux-user/main.c | 4 ++--
15
4 files changed, 7 insertions(+), 7 deletions(-)
13
16
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
17
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/boot.c
19
--- a/include/exec/cpu-all.h
17
+++ b/hw/arm/boot.c
20
+++ b/include/exec/cpu-all.h
18
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
21
@@ -XXX,XX +XXX,XX @@ static inline void tswap64s(uint64_t *s)
19
static const ARMInsnFixup *primary_loader;
22
/* On some host systems the guest address space is reserved on the host.
20
AddressSpace *as = arm_boot_address_space(cpu, info);
23
* This allows the guest address space to be offset to a convenient location.
21
24
*/
22
- /* CPU objects (unlike devices) are not automatically reset on system
25
-extern unsigned long guest_base;
23
+ /*
26
+extern uintptr_t guest_base;
24
+ * CPU objects (unlike devices) are not automatically reset on system
27
extern bool have_guest_base;
25
* reset, so we must always register a handler to do so. If we're
28
extern unsigned long reserved_va;
26
* actually loading a kernel, the handler is also responsible for
29
27
* arranging that we start it correctly.
30
diff --git a/bsd-user/main.c b/bsd-user/main.c
28
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
31
index XXXXXXX..XXXXXXX 100644
29
qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
32
--- a/bsd-user/main.c
33
+++ b/bsd-user/main.c
34
@@ -XXX,XX +XXX,XX @@
35
36
int singlestep;
37
unsigned long mmap_min_addr;
38
-unsigned long guest_base;
39
+uintptr_t guest_base;
40
bool have_guest_base;
41
unsigned long reserved_va;
42
43
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
44
g_free(target_environ);
45
46
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
47
- qemu_log("guest_base 0x%lx\n", guest_base);
48
+ qemu_log("guest_base %p\n", (void *)guest_base);
49
log_page_dump("binary load");
50
51
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
52
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/linux-user/elfload.c
55
+++ b/linux-user/elfload.c
56
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
57
void *addr, *test;
58
59
if (!QEMU_IS_ALIGNED(guest_base, align)) {
60
- fprintf(stderr, "Requested guest base 0x%lx does not satisfy "
61
+ fprintf(stderr, "Requested guest base %p does not satisfy "
62
"host minimum alignment (0x%lx)\n",
63
- guest_base, align);
64
+ (void *)guest_base, align);
65
exit(EXIT_FAILURE);
30
}
66
}
31
67
32
- /* The board code is not supposed to set secure_board_setup unless
68
diff --git a/linux-user/main.c b/linux-user/main.c
33
+ /*
69
index XXXXXXX..XXXXXXX 100644
34
+ * The board code is not supposed to set secure_board_setup unless
70
--- a/linux-user/main.c
35
* running its code in secure mode is actually possible, and KVM
71
+++ b/linux-user/main.c
36
* doesn't support secure.
72
@@ -XXX,XX +XXX,XX @@ static const char *cpu_model;
37
*/
73
static const char *cpu_type;
38
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
74
static const char *seed_optarg;
39
if (!info->kernel_filename || info->firmware_loaded) {
75
unsigned long mmap_min_addr;
40
76
-unsigned long guest_base;
41
if (have_dtb(info)) {
77
+uintptr_t guest_base;
42
- /* If we have a device tree blob, but no kernel to supply it to (or
78
bool have_guest_base;
43
+ /*
79
44
+ * If we have a device tree blob, but no kernel to supply it to (or
80
/*
45
* the kernel is supposed to be loaded by the bootloader), copy the
81
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
46
* DTB to the base of RAM for the bootloader to pick up.
82
g_free(target_environ);
47
*/
83
48
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
84
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
49
try_decompressing_kernel = arm_feature(&cpu->env,
85
- qemu_log("guest_base 0x%lx\n", guest_base);
50
ARM_FEATURE_AARCH64);
86
+ qemu_log("guest_base %p\n", (void *)guest_base);
51
87
log_page_dump("binary load");
52
- /* Expose the kernel, the command line, and the initrd in fw_cfg.
88
53
+ /*
89
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
54
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
55
* We don't process them here at all, it's all left to the
56
* firmware.
57
*/
58
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
59
}
60
}
61
62
- /* We will start from address 0 (typically a boot ROM image) in the
63
+ /*
64
+ * We will start from address 0 (typically a boot ROM image) in the
65
* same way as hardware.
66
*/
67
return;
68
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
69
if (info->nb_cpus == 0)
70
info->nb_cpus = 1;
71
72
- /* We want to put the initrd far enough into RAM that when the
73
+ /*
74
+ * We want to put the initrd far enough into RAM that when the
75
* kernel is uncompressed it will not clobber the initrd. However
76
* on boards without much RAM we must ensure that we still leave
77
* enough room for a decent sized initrd, and on boards with large
78
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
79
kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr,
80
&elf_high_addr, elf_machine, as);
81
if (kernel_size > 0 && have_dtb(info)) {
82
- /* If there is still some room left at the base of RAM, try and put
83
+ /*
84
+ * If there is still some room left at the base of RAM, try and put
85
* the DTB there like we do for images loaded with -bios or -pflash.
86
*/
87
if (elf_low_addr > info->loader_start
88
|| elf_high_addr < info->loader_start) {
89
- /* Set elf_low_addr as address limit for arm_load_dtb if it may be
90
+ /*
91
+ * Set elf_low_addr as address limit for arm_load_dtb if it may be
92
* pointing into RAM, otherwise pass '0' (no limit)
93
*/
94
if (elf_low_addr < info->loader_start) {
95
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
96
fixupcontext[FIXUP_BOARDID] = info->board_id;
97
fixupcontext[FIXUP_BOARD_SETUP] = info->board_setup_addr;
98
99
- /* for device tree boot, we pass the DTB directly in r2. Otherwise
100
+ /*
101
+ * for device tree boot, we pass the DTB directly in r2. Otherwise
102
* we point to the kernel args.
103
*/
104
if (have_dtb(info)) {
105
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
106
info->write_board_setup(cpu, info);
107
}
108
109
- /* Notify devices which need to fake up firmware initialization
110
+ /*
111
+ * Notify devices which need to fake up firmware initialization
112
* that we're doing a direct kernel boot.
113
*/
114
object_child_foreach_recursive(object_get_root(),
115
--
90
--
116
2.20.1
91
2.20.1
117
92
118
93
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is more descriptive than 'unsigned long'.
4
No functional change, since these match on all linux+bsd hosts.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-5-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
include/exec/cpu_ldst.h | 6 +++---
12
1 file changed, 3 insertions(+), 3 deletions(-)
13
14
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst.h
17
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@ typedef uint64_t abi_ptr;
19
#endif
20
21
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
22
-#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))
23
+#define g2h(x) ((void *)((uintptr_t)(abi_ptr)(x) + guest_base))
24
25
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
26
#define guest_addr_valid(x) (1)
27
#else
28
#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
29
#endif
30
-#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)
31
+#define h2g_valid(x) guest_addr_valid((uintptr_t)(x) - guest_base)
32
33
static inline int guest_range_valid(unsigned long start, unsigned long len)
34
{
35
@@ -XXX,XX +XXX,XX @@ static inline int guest_range_valid(unsigned long start, unsigned long len)
36
}
37
38
#define h2g_nocheck(x) ({ \
39
- unsigned long __ret = (unsigned long)(x) - guest_base; \
40
+ uintptr_t __ret = (uintptr_t)(x) - guest_base; \
41
(abi_ptr)__ret; \
42
})
43
44
--
45
2.20.1
46
47
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Return bool not int; pass abi_ulong not 'unsigned long'.
4
All callers use abi_ulong already, so the change in type
5
has no effect.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210210000223.884088-6-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
include/exec/cpu_ldst.h | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
15
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/cpu_ldst.h
18
+++ b/include/exec/cpu_ldst.h
19
@@ -XXX,XX +XXX,XX @@ typedef uint64_t abi_ptr;
20
#endif
21
#define h2g_valid(x) guest_addr_valid((uintptr_t)(x) - guest_base)
22
23
-static inline int guest_range_valid(unsigned long start, unsigned long len)
24
+static inline bool guest_range_valid(abi_ulong start, abi_ulong len)
25
{
26
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
27
}
28
--
29
2.20.1
30
31
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Verify that addr + size - 1 does not wrap around.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210210000223.884088-7-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
linux-user/qemu.h | 17 ++++++++++++-----
11
1 file changed, 12 insertions(+), 5 deletions(-)
12
13
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/linux-user/qemu.h
16
+++ b/linux-user/qemu.h
17
@@ -XXX,XX +XXX,XX @@ extern unsigned long guest_stack_size;
18
#define VERIFY_READ 0
19
#define VERIFY_WRITE 1 /* implies read access */
20
21
-static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
22
+static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
23
{
24
- return guest_addr_valid(addr) &&
25
- (size == 0 || guest_addr_valid(addr + size - 1)) &&
26
- page_check_range((target_ulong)addr, size,
27
- (type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
28
+ if (!guest_addr_valid(addr)) {
29
+ return false;
30
+ }
31
+ if (size != 0 &&
32
+ (addr + size - 1 < addr ||
33
+ !guest_addr_valid(addr + size - 1))) {
34
+ return false;
35
+ }
36
+ return page_check_range((target_ulong)addr, size,
37
+ (type == VERIFY_READ) ? PAGE_READ :
38
+ (PAGE_READ | PAGE_WRITE)) == 0;
39
}
40
41
/* NOTE __get_user and __put_user use host pointers and don't check access.
42
--
43
2.20.1
44
45
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These constants are only ever used with access_ok, and friends.
4
Rather than translating them to PAGE_* bits, let them equal
5
the PAGE_* bits to begin.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210210000223.884088-8-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
linux-user/qemu.h | 8 +++-----
13
1 file changed, 3 insertions(+), 5 deletions(-)
14
15
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/qemu.h
18
+++ b/linux-user/qemu.h
19
@@ -XXX,XX +XXX,XX @@ extern unsigned long guest_stack_size;
20
21
/* user access */
22
23
-#define VERIFY_READ 0
24
-#define VERIFY_WRITE 1 /* implies read access */
25
+#define VERIFY_READ PAGE_READ
26
+#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
27
28
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
29
{
30
@@ -XXX,XX +XXX,XX @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
31
!guest_addr_valid(addr + size - 1))) {
32
return false;
33
}
34
- return page_check_range((target_ulong)addr, size,
35
- (type == VERIFY_READ) ? PAGE_READ :
36
- (PAGE_READ | PAGE_WRITE)) == 0;
37
+ return page_check_range((target_ulong)addr, size, type) == 0;
38
}
39
40
/* NOTE __get_user and __put_user use host pointers and don't check access.
41
--
42
2.20.1
43
44
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These constants are only ever used with access_ok, and friends.
4
Rather than translating them to PAGE_* bits, let them equal
5
the PAGE_* bits to begin.
6
7
Reviewed-by: Warner Losh <imp@bsdimp.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210210000223.884088-9-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
bsd-user/qemu.h | 9 ++++-----
14
1 file changed, 4 insertions(+), 5 deletions(-)
15
16
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/bsd-user/qemu.h
19
+++ b/bsd-user/qemu.h
20
@@ -XXX,XX +XXX,XX @@ extern unsigned long x86_stack_size;
21
22
/* user access */
23
24
-#define VERIFY_READ 0
25
-#define VERIFY_WRITE 1 /* implies read access */
26
+#define VERIFY_READ PAGE_READ
27
+#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
28
29
-static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
30
+static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
31
{
32
- return page_check_range((target_ulong)addr, size,
33
- (type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
34
+ return page_check_range((target_ulong)addr, size, type) == 0;
35
}
36
37
/* NOTE __get_user and __put_user use host pointers and don't check access. */
38
--
39
2.20.1
40
41
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is the only use of guest_addr_valid that does not begin
4
with a guest address, but a host address being transformed to
5
a guest address.
6
7
We will shortly adjust guest_addr_valid to handle guest memory
8
tags, and the host address should not be subjected to that.
9
10
Move h2g_valid adjacent to the other h2g macros.
11
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210210000223.884088-10-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
include/exec/cpu_ldst.h | 5 ++++-
18
1 file changed, 4 insertions(+), 1 deletion(-)
19
20
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/cpu_ldst.h
23
+++ b/include/exec/cpu_ldst.h
24
@@ -XXX,XX +XXX,XX @@ typedef uint64_t abi_ptr;
25
#else
26
#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
27
#endif
28
-#define h2g_valid(x) guest_addr_valid((uintptr_t)(x) - guest_base)
29
30
static inline bool guest_range_valid(abi_ulong start, abi_ulong len)
31
{
32
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
33
}
34
35
+#define h2g_valid(x) \
36
+ (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
37
+ (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
38
+
39
#define h2g_nocheck(x) ({ \
40
uintptr_t __ret = (uintptr_t)(x) - guest_base; \
41
(abi_ptr)__ret; \
42
--
43
2.20.1
44
45
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
We must always use GUEST_ADDR_MAX, because even 32-bit hosts can
4
use -R <reserved_va> to restrict the memory address of the guest.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-11-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
include/exec/cpu_ldst.h | 9 ++++-----
12
1 file changed, 4 insertions(+), 5 deletions(-)
13
14
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst.h
17
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@ typedef uint64_t abi_ptr;
19
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
20
#define g2h(x) ((void *)((uintptr_t)(abi_ptr)(x) + guest_base))
21
22
-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
23
-#define guest_addr_valid(x) (1)
24
-#else
25
-#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
26
-#endif
27
+static inline bool guest_addr_valid(abi_ulong x)
28
+{
29
+ return x <= GUEST_ADDR_MAX;
30
+}
31
32
static inline bool guest_range_valid(abi_ulong start, abi_ulong len)
33
{
34
--
35
2.20.1
36
37
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Provide an identity fallback for targets that do not
4
use tagged addresses.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-12-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
include/exec/cpu_ldst.h | 7 +++++++
12
1 file changed, 7 insertions(+)
13
14
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst.h
17
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@ typedef uint64_t abi_ptr;
19
#define TARGET_ABI_FMT_ptr "%"PRIx64
20
#endif
21
22
+#ifndef TARGET_TAGGED_ADDRESSES
23
+static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
24
+{
25
+ return x;
26
+}
27
+#endif
28
+
29
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
30
#define g2h(x) ((void *)((uintptr_t)(abi_ptr)(x) + guest_base))
31
32
--
33
2.20.1
34
35
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Use g2h_untagged in contexts that have no cpu, e.g. the binary
4
loaders that operate before the primary cpu is created. As a
5
corollary, target_mmap and friends must use untagged addresses,
6
since they are used by the loaders.
7
8
Use g2h_untagged on values returned from target_mmap, as the
9
kernel never applies a tag itself.
10
11
Use g2h_untagged on all pc values. The only current user of
12
tags, aarch64, removes tags from code addresses upon branch,
13
so "pc" is always untagged.
14
15
Use g2h with the cpu context on hand wherever possible.
16
17
Use g2h_untagged in lock_user, which will be updated soon.
18
19
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
21
Message-id: 20210210000223.884088-13-richard.henderson@linaro.org
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
---
24
include/exec/cpu_ldst.h | 12 +++++-
25
include/exec/exec-all.h | 2 +-
26
linux-user/qemu.h | 6 +--
27
accel/tcg/translate-all.c | 4 +-
28
accel/tcg/user-exec.c | 48 ++++++++++++------------
29
linux-user/elfload.c | 12 +++---
30
linux-user/flatload.c | 2 +-
31
linux-user/hppa/cpu_loop.c | 31 ++++++++--------
32
linux-user/i386/cpu_loop.c | 4 +-
33
linux-user/mmap.c | 45 +++++++++++-----------
34
linux-user/ppc/signal.c | 4 +-
35
linux-user/syscall.c | 72 +++++++++++++++++++-----------------
36
target/arm/helper-a64.c | 4 +-
37
target/hppa/op_helper.c | 2 +-
38
target/i386/tcg/mem_helper.c | 2 +-
39
target/s390x/mem_helper.c | 4 +-
40
16 files changed, 135 insertions(+), 119 deletions(-)
41
42
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/include/exec/cpu_ldst.h
45
+++ b/include/exec/cpu_ldst.h
46
@@ -XXX,XX +XXX,XX @@ static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
47
#endif
48
49
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
50
-#define g2h(x) ((void *)((uintptr_t)(abi_ptr)(x) + guest_base))
51
+static inline void *g2h_untagged(abi_ptr x)
52
+{
53
+ return (void *)((uintptr_t)(x) + guest_base);
54
+}
55
+
56
+static inline void *g2h(CPUState *cs, abi_ptr x)
57
+{
58
+ return g2h_untagged(cpu_untagged_addr(cs, x));
59
+}
60
61
static inline bool guest_addr_valid(abi_ulong x)
62
{
63
@@ -XXX,XX +XXX,XX @@ static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
64
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
65
MMUAccessType access_type, int mmu_idx)
66
{
67
- return g2h(addr);
68
+ return g2h(env_cpu(env), addr);
69
}
70
#else
71
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
72
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/include/exec/exec-all.h
75
+++ b/include/exec/exec-all.h
76
@@ -XXX,XX +XXX,XX @@ static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
77
void **hostp)
78
{
79
if (hostp) {
80
- *hostp = g2h(addr);
81
+ *hostp = g2h_untagged(addr);
82
}
83
return addr;
84
}
85
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
86
index XXXXXXX..XXXXXXX 100644
87
--- a/linux-user/qemu.h
88
+++ b/linux-user/qemu.h
89
@@ -XXX,XX +XXX,XX @@ static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy
90
return addr;
91
}
92
#else
93
- return g2h(guest_addr);
94
+ return g2h_untagged(guest_addr);
95
#endif
96
}
97
98
@@ -XXX,XX +XXX,XX @@ static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
99
#ifdef DEBUG_REMAP
100
if (!host_ptr)
101
return;
102
- if (host_ptr == g2h(guest_addr))
103
+ if (host_ptr == g2h_untagged(guest_addr))
104
return;
105
if (len > 0)
106
- memcpy(g2h(guest_addr), host_ptr, len);
107
+ memcpy(g2h_untagged(guest_addr), host_ptr, len);
108
g_free(host_ptr);
109
#endif
110
}
111
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/accel/tcg/translate-all.c
114
+++ b/accel/tcg/translate-all.c
115
@@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
116
prot |= p2->flags;
117
p2->flags &= ~PAGE_WRITE;
118
}
119
- mprotect(g2h(page_addr), qemu_host_page_size,
120
+ mprotect(g2h_untagged(page_addr), qemu_host_page_size,
121
(prot & PAGE_BITS) & ~PAGE_WRITE);
122
if (DEBUG_TB_INVALIDATE_GATE) {
123
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
124
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
125
}
126
#endif
127
}
128
- mprotect((void *)g2h(host_start), qemu_host_page_size,
129
+ mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
130
prot & PAGE_BITS);
131
}
132
mmap_unlock();
133
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/accel/tcg/user-exec.c
136
+++ b/accel/tcg/user-exec.c
137
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
138
int flags;
139
140
flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
141
- *phost = flags ? NULL : g2h(addr);
142
+ *phost = flags ? NULL : g2h(env_cpu(env), addr);
143
return flags;
144
}
145
146
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
147
flags = probe_access_internal(env, addr, size, access_type, false, ra);
148
g_assert(flags == 0);
149
150
- return size ? g2h(addr) : NULL;
151
+ return size ? g2h(env_cpu(env), addr) : NULL;
152
}
153
154
#if defined(__i386__)
155
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
156
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
157
158
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
159
- ret = ldub_p(g2h(ptr));
160
+ ret = ldub_p(g2h(env_cpu(env), ptr));
161
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
162
return ret;
163
}
164
@@ -XXX,XX +XXX,XX @@ int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
165
uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
166
167
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
168
- ret = ldsb_p(g2h(ptr));
169
+ ret = ldsb_p(g2h(env_cpu(env), ptr));
170
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
171
return ret;
172
}
173
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
174
uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);
175
176
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
177
- ret = lduw_be_p(g2h(ptr));
178
+ ret = lduw_be_p(g2h(env_cpu(env), ptr));
179
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
180
return ret;
181
}
182
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
183
uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);
184
185
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
186
- ret = ldsw_be_p(g2h(ptr));
187
+ ret = ldsw_be_p(g2h(env_cpu(env), ptr));
188
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
189
return ret;
190
}
191
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
192
uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);
193
194
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
195
- ret = ldl_be_p(g2h(ptr));
196
+ ret = ldl_be_p(g2h(env_cpu(env), ptr));
197
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
198
return ret;
199
}
200
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
201
uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);
202
203
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
204
- ret = ldq_be_p(g2h(ptr));
205
+ ret = ldq_be_p(g2h(env_cpu(env), ptr));
206
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
207
return ret;
208
}
209
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
210
uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);
211
212
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
213
- ret = lduw_le_p(g2h(ptr));
214
+ ret = lduw_le_p(g2h(env_cpu(env), ptr));
215
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
216
return ret;
217
}
218
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
219
uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);
220
221
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
222
- ret = ldsw_le_p(g2h(ptr));
223
+ ret = ldsw_le_p(g2h(env_cpu(env), ptr));
224
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
225
return ret;
226
}
227
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
228
uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);
229
230
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
231
- ret = ldl_le_p(g2h(ptr));
232
+ ret = ldl_le_p(g2h(env_cpu(env), ptr));
233
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
234
return ret;
235
}
236
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
237
uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);
238
239
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
240
- ret = ldq_le_p(g2h(ptr));
241
+ ret = ldq_le_p(g2h(env_cpu(env), ptr));
242
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
243
return ret;
244
}
245
@@ -XXX,XX +XXX,XX @@ void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
246
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
247
248
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
249
- stb_p(g2h(ptr), val);
250
+ stb_p(g2h(env_cpu(env), ptr), val);
251
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
252
}
253
254
@@ -XXX,XX +XXX,XX @@ void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
255
uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);
256
257
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
258
- stw_be_p(g2h(ptr), val);
259
+ stw_be_p(g2h(env_cpu(env), ptr), val);
260
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
261
}
262
263
@@ -XXX,XX +XXX,XX @@ void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
264
uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);
265
266
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
267
- stl_be_p(g2h(ptr), val);
268
+ stl_be_p(g2h(env_cpu(env), ptr), val);
269
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
270
}
271
272
@@ -XXX,XX +XXX,XX @@ void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
273
uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);
274
275
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
276
- stq_be_p(g2h(ptr), val);
277
+ stq_be_p(g2h(env_cpu(env), ptr), val);
278
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
279
}
280
281
@@ -XXX,XX +XXX,XX @@ void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
282
uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);
283
284
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
285
- stw_le_p(g2h(ptr), val);
286
+ stw_le_p(g2h(env_cpu(env), ptr), val);
287
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
288
}
289
290
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
291
uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);
292
293
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
294
- stl_le_p(g2h(ptr), val);
295
+ stl_le_p(g2h(env_cpu(env), ptr), val);
296
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
297
}
298
299
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
300
uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);
301
302
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
303
- stq_le_p(g2h(ptr), val);
304
+ stq_le_p(g2h(env_cpu(env), ptr), val);
305
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
306
}
307
308
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
309
uint32_t ret;
310
311
set_helper_retaddr(1);
312
- ret = ldub_p(g2h(ptr));
313
+ ret = ldub_p(g2h_untagged(ptr));
314
clear_helper_retaddr();
315
return ret;
316
}
317
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
318
uint32_t ret;
319
320
set_helper_retaddr(1);
321
- ret = lduw_p(g2h(ptr));
322
+ ret = lduw_p(g2h_untagged(ptr));
323
clear_helper_retaddr();
324
return ret;
325
}
326
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
327
uint32_t ret;
328
329
set_helper_retaddr(1);
330
- ret = ldl_p(g2h(ptr));
331
+ ret = ldl_p(g2h_untagged(ptr));
332
clear_helper_retaddr();
333
return ret;
334
}
335
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
336
uint64_t ret;
337
338
set_helper_retaddr(1);
339
- ret = ldq_p(g2h(ptr));
340
+ ret = ldq_p(g2h_untagged(ptr));
341
clear_helper_retaddr();
342
return ret;
343
}
344
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
345
if (unlikely(addr & (size - 1))) {
346
cpu_loop_exit_atomic(env_cpu(env), retaddr);
347
}
348
- void *ret = g2h(addr);
349
+ void *ret = g2h(env_cpu(env), addr);
350
set_helper_retaddr(retaddr);
351
return ret;
352
}
353
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
354
index XXXXXXX..XXXXXXX 100644
355
--- a/linux-user/elfload.c
356
+++ b/linux-user/elfload.c
357
@@ -XXX,XX +XXX,XX @@ enum {
358
359
static bool init_guest_commpage(void)
360
{
361
- void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
362
+ void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
363
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
364
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
365
366
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
367
}
368
369
/* Set kernel helper versions; rest of page is 0. */
370
- __put_user(5, (uint32_t *)g2h(0xffff0ffcu));
371
+ __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
372
373
if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
374
perror("Protecting guest commpage");
375
@@ -XXX,XX +XXX,XX @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
376
here is still actually needed. For now, continue with it,
377
but merge it with the "normal" mmap that would allocate the bss. */
378
379
- host_start = (uintptr_t) g2h(elf_bss);
380
- host_end = (uintptr_t) g2h(last_bss);
381
+ host_start = (uintptr_t) g2h_untagged(elf_bss);
382
+ host_end = (uintptr_t) g2h_untagged(last_bss);
383
host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
384
385
if (host_map_start < host_end) {
386
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
387
}
388
389
/* Reserve the address space for the binary, or reserved_va. */
390
- test = g2h(guest_loaddr);
391
+ test = g2h_untagged(guest_loaddr);
392
addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
393
if (test != addr) {
394
pgb_fail_in_use(image_name);
395
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
396
397
/* Reserve the memory on the host. */
398
assert(guest_base != 0);
399
- test = g2h(0);
400
+ test = g2h_untagged(0);
401
addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
402
if (addr == MAP_FAILED || addr != test) {
403
error_report("Unable to reserve 0x%lx bytes of virtual address "
404
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
405
index XXXXXXX..XXXXXXX 100644
406
--- a/linux-user/flatload.c
407
+++ b/linux-user/flatload.c
408
@@ -XXX,XX +XXX,XX @@ static int load_flat_file(struct linux_binprm * bprm,
409
}
410
411
/* zero the BSS. */
412
- memset(g2h(datapos + data_len), 0, bss_len);
413
+ memset(g2h_untagged(datapos + data_len), 0, bss_len);
414
415
return 0;
416
}
417
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
418
index XXXXXXX..XXXXXXX 100644
419
--- a/linux-user/hppa/cpu_loop.c
420
+++ b/linux-user/hppa/cpu_loop.c
421
@@ -XXX,XX +XXX,XX @@
422
423
static abi_ulong hppa_lws(CPUHPPAState *env)
424
{
425
+ CPUState *cs = env_cpu(env);
426
uint32_t which = env->gr[20];
427
abi_ulong addr = env->gr[26];
428
abi_ulong old = env->gr[25];
429
@@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env)
430
}
431
old = tswap32(old);
432
new = tswap32(new);
433
- ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
434
+ ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
435
ret = tswap32(ret);
436
break;
437
438
@@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env)
439
can be host-endian as well. */
440
switch (size) {
441
case 0:
442
- old = *(uint8_t *)g2h(old);
443
- new = *(uint8_t *)g2h(new);
444
- ret = qatomic_cmpxchg((uint8_t *)g2h(addr), old, new);
445
+ old = *(uint8_t *)g2h(cs, old);
446
+ new = *(uint8_t *)g2h(cs, new);
447
+ ret = qatomic_cmpxchg((uint8_t *)g2h(cs, addr), old, new);
448
ret = ret != old;
449
break;
450
case 1:
451
- old = *(uint16_t *)g2h(old);
452
- new = *(uint16_t *)g2h(new);
453
- ret = qatomic_cmpxchg((uint16_t *)g2h(addr), old, new);
454
+ old = *(uint16_t *)g2h(cs, old);
455
+ new = *(uint16_t *)g2h(cs, new);
456
+ ret = qatomic_cmpxchg((uint16_t *)g2h(cs, addr), old, new);
457
ret = ret != old;
458
break;
459
case 2:
460
- old = *(uint32_t *)g2h(old);
461
- new = *(uint32_t *)g2h(new);
462
- ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
463
+ old = *(uint32_t *)g2h(cs, old);
464
+ new = *(uint32_t *)g2h(cs, new);
465
+ ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
466
ret = ret != old;
467
break;
468
case 3:
469
{
470
uint64_t o64, n64, r64;
471
- o64 = *(uint64_t *)g2h(old);
472
- n64 = *(uint64_t *)g2h(new);
473
+ o64 = *(uint64_t *)g2h(cs, old);
474
+ n64 = *(uint64_t *)g2h(cs, new);
475
#ifdef CONFIG_ATOMIC64
476
- r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(addr),
477
+ r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(cs, addr),
478
o64, n64);
479
ret = r64 != o64;
480
#else
481
start_exclusive();
482
- r64 = *(uint64_t *)g2h(addr);
483
+ r64 = *(uint64_t *)g2h(cs, addr);
484
ret = 1;
485
if (r64 == o64) {
486
- *(uint64_t *)g2h(addr) = n64;
487
+ *(uint64_t *)g2h(cs, addr) = n64;
488
ret = 0;
489
}
490
end_exclusive();
491
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
492
index XXXXXXX..XXXXXXX 100644
493
--- a/linux-user/i386/cpu_loop.c
494
+++ b/linux-user/i386/cpu_loop.c
495
@@ -XXX,XX +XXX,XX @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
496
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
497
PROT_READ|PROT_WRITE,
498
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
499
- idt_table = g2h(env->idt.base);
500
+ idt_table = g2h_untagged(env->idt.base);
501
set_idt(0, 0);
502
set_idt(1, 0);
503
set_idt(2, 0);
504
@@ -XXX,XX +XXX,XX @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
505
PROT_READ|PROT_WRITE,
506
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
507
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
508
- gdt_table = g2h(env->gdt.base);
509
+ gdt_table = g2h_untagged(env->gdt.base);
510
#ifdef TARGET_ABI32
511
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
512
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
513
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
514
index XXXXXXX..XXXXXXX 100644
515
--- a/linux-user/mmap.c
516
+++ b/linux-user/mmap.c
517
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
518
}
519
end = host_end;
520
}
521
- ret = mprotect(g2h(host_start), qemu_host_page_size,
522
+ ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
523
prot1 & PAGE_BITS);
524
if (ret != 0) {
525
goto error;
526
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
527
for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
528
prot1 |= page_get_flags(addr);
529
}
530
- ret = mprotect(g2h(host_end - qemu_host_page_size),
531
+ ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
532
qemu_host_page_size, prot1 & PAGE_BITS);
533
if (ret != 0) {
534
goto error;
535
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
536
537
/* handle the pages in the middle */
538
if (host_start < host_end) {
539
- ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
540
+ ret = mprotect(g2h_untagged(host_start),
541
+ host_end - host_start, host_prot);
542
if (ret != 0) {
543
goto error;
544
}
545
@@ -XXX,XX +XXX,XX @@ static int mmap_frag(abi_ulong real_start,
546
int prot1, prot_new;
547
548
real_end = real_start + qemu_host_page_size;
549
- host_start = g2h(real_start);
550
+ host_start = g2h_untagged(real_start);
551
552
/* get the protection of the target pages outside the mapping */
553
prot1 = 0;
554
@@ -XXX,XX +XXX,XX @@ static int mmap_frag(abi_ulong real_start,
555
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
556
557
/* read the corresponding file data */
558
- if (pread(fd, g2h(start), end - start, offset) == -1)
559
+ if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
560
return -1;
561
562
/* put final protection */
563
@@ -XXX,XX +XXX,XX @@ static int mmap_frag(abi_ulong real_start,
564
mprotect(host_start, qemu_host_page_size, prot_new);
565
}
566
if (prot_new & PROT_WRITE) {
567
- memset(g2h(start), 0, end - start);
568
+ memset(g2h_untagged(start), 0, end - start);
569
}
570
}
571
return 0;
572
@@ -XXX,XX +XXX,XX @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
573
* - mremap() with MREMAP_FIXED flag
574
* - shmat() with SHM_REMAP flag
575
*/
576
- ptr = mmap(g2h(addr), size, PROT_NONE,
577
+ ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
578
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
579
580
/* ENOMEM, if host address space has no memory */
581
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
582
/* Note: we prefer to control the mapping address. It is
583
especially important if qemu_host_page_size >
584
qemu_real_host_page_size */
585
- p = mmap(g2h(start), host_len, host_prot,
586
+ p = mmap(g2h_untagged(start), host_len, host_prot,
587
flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
588
if (p == MAP_FAILED) {
589
goto fail;
590
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
591
/* update start so that it points to the file position at 'offset' */
592
host_start = (unsigned long)p;
593
if (!(flags & MAP_ANONYMOUS)) {
594
- p = mmap(g2h(start), len, host_prot,
595
+ p = mmap(g2h_untagged(start), len, host_prot,
596
flags | MAP_FIXED, fd, host_offset);
597
if (p == MAP_FAILED) {
598
- munmap(g2h(start), host_len);
599
+ munmap(g2h_untagged(start), host_len);
600
goto fail;
601
}
602
host_start += offset - host_offset;
603
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
604
-1, 0);
605
if (retaddr == -1)
606
goto fail;
607
- if (pread(fd, g2h(start), len, offset) == -1)
608
+ if (pread(fd, g2h_untagged(start), len, offset) == -1)
609
goto fail;
610
if (!(host_prot & PROT_WRITE)) {
611
ret = target_mprotect(start, len, target_prot);
612
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
613
offset1 = 0;
614
else
615
offset1 = offset + real_start - start;
616
- p = mmap(g2h(real_start), real_end - real_start,
617
+ p = mmap(g2h_untagged(real_start), real_end - real_start,
618
host_prot, flags, fd, offset1);
619
if (p == MAP_FAILED)
620
goto fail;
621
@@ -XXX,XX +XXX,XX @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
622
real_end -= qemu_host_page_size;
623
}
624
if (real_start != real_end) {
625
- mmap(g2h(real_start), real_end - real_start, PROT_NONE,
626
+ mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
627
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
628
-1, 0);
629
}
630
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
631
if (reserved_va) {
632
mmap_reserve(real_start, real_end - real_start);
633
} else {
634
- ret = munmap(g2h(real_start), real_end - real_start);
635
+ ret = munmap(g2h_untagged(real_start), real_end - real_start);
636
}
637
}
638
639
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
640
mmap_lock();
641
642
if (flags & MREMAP_FIXED) {
643
- host_addr = mremap(g2h(old_addr), old_size, new_size,
644
- flags, g2h(new_addr));
645
+ host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
646
+ flags, g2h_untagged(new_addr));
647
648
if (reserved_va && host_addr != MAP_FAILED) {
649
/* If new and old addresses overlap then the above mremap will
650
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
651
errno = ENOMEM;
652
host_addr = MAP_FAILED;
653
} else {
654
- host_addr = mremap(g2h(old_addr), old_size, new_size,
655
- flags | MREMAP_FIXED, g2h(mmap_start));
656
+ host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
657
+ flags | MREMAP_FIXED,
658
+ g2h_untagged(mmap_start));
659
if (reserved_va) {
660
mmap_reserve(old_addr, old_size);
661
}
662
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
663
}
664
}
665
if (prot == 0) {
666
- host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
667
+ host_addr = mremap(g2h_untagged(old_addr),
668
+ old_size, new_size, flags);
669
670
if (host_addr != MAP_FAILED) {
671
/* Check if address fits target address space */
672
if (!guest_range_valid(h2g(host_addr), new_size)) {
673
/* Revert mremap() changes */
674
- host_addr = mremap(g2h(old_addr), new_size, old_size,
675
- flags);
676
+ host_addr = mremap(g2h_untagged(old_addr),
677
+ new_size, old_size, flags);
678
errno = ENOMEM;
679
host_addr = MAP_FAILED;
680
} else if (reserved_va && old_size > new_size) {
681
diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c
682
index XXXXXXX..XXXXXXX 100644
683
--- a/linux-user/ppc/signal.c
684
+++ b/linux-user/ppc/signal.c
685
@@ -XXX,XX +XXX,XX @@ static void restore_user_regs(CPUPPCState *env,
686
uint64_t v_addr;
687
/* 64-bit needs to recover the pointer to the vectors from the frame */
688
__get_user(v_addr, &frame->v_regs);
689
- v_regs = g2h(v_addr);
690
+ v_regs = g2h(env_cpu(env), v_addr);
691
#else
692
v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
693
#endif
694
@@ -XXX,XX +XXX,XX @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
695
if (get_ppc64_abi(image) < 2) {
696
/* ELFv1 PPC64 function pointers are pointers to OPD entries. */
697
struct target_func_ptr *handler =
698
- (struct target_func_ptr *)g2h(ka->_sa_handler);
699
+ (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
700
env->nip = tswapl(handler->entry);
701
env->gpr[2] = tswapl(handler->toc);
702
} else {
703
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
704
index XXXXXXX..XXXXXXX 100644
705
--- a/linux-user/syscall.c
706
+++ b/linux-user/syscall.c
707
@@ -XXX,XX +XXX,XX @@ abi_long do_brk(abi_ulong new_brk)
708
/* Heap contents are initialized to zero, as for anonymous
709
* mapped pages. */
710
if (new_brk > target_brk) {
711
- memset(g2h(target_brk), 0, new_brk - target_brk);
712
+ memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
713
}
714
    target_brk = new_brk;
715
DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
716
@@ -XXX,XX +XXX,XX @@ abi_long do_brk(abi_ulong new_brk)
717
* come from the remaining part of the previous page: it may
718
* contains garbage data due to a previous heap usage (grown
719
* then shrunken). */
720
- memset(g2h(target_brk), 0, brk_page - target_brk);
721
+ memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
722
723
target_brk = new_brk;
724
brk_page = HOST_PAGE_ALIGN(target_brk);
725
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
726
mmap_lock();
727
728
if (shmaddr)
729
- host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
730
+ host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
731
else {
732
abi_ulong mmap_start;
733
734
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
735
errno = ENOMEM;
736
host_raddr = (void *)-1;
737
} else
738
- host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
739
+ host_raddr = shmat(shmid, g2h_untagged(mmap_start),
740
+ shmflg | SHM_REMAP);
741
}
742
743
if (host_raddr == (void *)-1) {
744
@@ -XXX,XX +XXX,XX @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
745
break;
746
}
747
}
748
- rv = get_errno(shmdt(g2h(shmaddr)));
749
+ rv = get_errno(shmdt(g2h_untagged(shmaddr)));
750
751
mmap_unlock();
752
753
@@ -XXX,XX +XXX,XX @@ static abi_long write_ldt(CPUX86State *env,
754
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
755
if (env->ldt.base == -1)
756
return -TARGET_ENOMEM;
757
- memset(g2h(env->ldt.base), 0,
758
+ memset(g2h_untagged(env->ldt.base), 0,
759
TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
760
env->ldt.limit = 0xffff;
761
- ldt_table = g2h(env->ldt.base);
762
+ ldt_table = g2h_untagged(env->ldt.base);
763
}
764
765
/* NOTE: same code as Linux kernel */
766
@@ -XXX,XX +XXX,XX @@ static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
767
#if defined(TARGET_ABI32)
768
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
769
{
770
- uint64_t *gdt_table = g2h(env->gdt.base);
771
+ uint64_t *gdt_table = g2h_untagged(env->gdt.base);
772
struct target_modify_ldt_ldt_s ldt_info;
773
struct target_modify_ldt_ldt_s *target_ldt_info;
774
int seg_32bit, contents, read_exec_only, limit_in_pages;
775
@@ -XXX,XX +XXX,XX @@ install:
776
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
777
{
778
struct target_modify_ldt_ldt_s *target_ldt_info;
779
- uint64_t *gdt_table = g2h(env->gdt.base);
780
+ uint64_t *gdt_table = g2h_untagged(env->gdt.base);
781
uint32_t base_addr, limit, flags;
782
int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
783
int seg_not_present, useable, lm;
784
@@ -XXX,XX +XXX,XX @@ static int do_safe_futex(int *uaddr, int op, int val,
785
tricky. However they're probably useless because guest atomic
786
operations won't work either. */
787
#if defined(TARGET_NR_futex)
788
-static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
789
- target_ulong uaddr2, int val3)
790
+static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
791
+ target_ulong timeout, target_ulong uaddr2, int val3)
792
{
793
struct timespec ts, *pts;
794
int base_op;
795
@@ -XXX,XX +XXX,XX @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
796
} else {
797
pts = NULL;
798
}
799
- return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
800
+ return do_safe_futex(g2h(cpu, uaddr),
801
+ op, tswap32(val), pts, NULL, val3);
802
case FUTEX_WAKE:
803
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
804
+ return do_safe_futex(g2h(cpu, uaddr),
805
+ op, val, NULL, NULL, 0);
806
case FUTEX_FD:
807
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
808
+ return do_safe_futex(g2h(cpu, uaddr),
809
+ op, val, NULL, NULL, 0);
810
case FUTEX_REQUEUE:
811
case FUTEX_CMP_REQUEUE:
812
case FUTEX_WAKE_OP:
813
@@ -XXX,XX +XXX,XX @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
814
to satisfy the compiler. We do not need to tswap TIMEOUT
815
since it's not compared to guest memory. */
816
pts = (struct timespec *)(uintptr_t) timeout;
817
- return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
818
+ return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
819
(base_op == FUTEX_CMP_REQUEUE
820
- ? tswap32(val3)
821
- : val3));
822
+ ? tswap32(val3) : val3));
823
default:
824
return -TARGET_ENOSYS;
825
}
826
@@ -XXX,XX +XXX,XX @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
827
#endif
828
829
#if defined(TARGET_NR_futex_time64)
830
-static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
831
+static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
832
+ int val, target_ulong timeout,
833
target_ulong uaddr2, int val3)
834
{
835
struct timespec ts, *pts;
836
@@ -XXX,XX +XXX,XX @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
837
} else {
838
pts = NULL;
839
}
840
- return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
841
+ return do_safe_futex(g2h(cpu, uaddr), op,
842
+ tswap32(val), pts, NULL, val3);
843
case FUTEX_WAKE:
844
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
845
+ return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
846
case FUTEX_FD:
847
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
848
+ return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
849
case FUTEX_REQUEUE:
850
case FUTEX_CMP_REQUEUE:
851
case FUTEX_WAKE_OP:
852
@@ -XXX,XX +XXX,XX @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
853
to satisfy the compiler. We do not need to tswap TIMEOUT
854
since it's not compared to guest memory. */
855
pts = (struct timespec *)(uintptr_t) timeout;
856
- return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
857
+ return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
858
(base_op == FUTEX_CMP_REQUEUE
859
- ? tswap32(val3)
860
- : val3));
861
+ ? tswap32(val3) : val3));
862
default:
863
return -TARGET_ENOSYS;
864
}
865
@@ -XXX,XX +XXX,XX @@ static int open_self_maps(void *cpu_env, int fd)
866
const char *path;
867
868
max = h2g_valid(max - 1) ?
869
- max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
870
+ max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
871
872
if (page_check_range(h2g(min), max - min, flags) == -1) {
873
continue;
874
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
875
876
if (ts->child_tidptr) {
877
put_user_u32(0, ts->child_tidptr);
878
- do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
879
- NULL, NULL, 0);
880
+ do_sys_futex(g2h(cpu, ts->child_tidptr),
881
+ FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
882
}
883
thread_cpu = NULL;
884
g_free(ts);
885
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
886
if (!arg5) {
887
ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
888
} else {
889
- ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
890
+ ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
891
}
892
ret = get_errno(ret);
893
894
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
895
/* ??? msync/mlock/munlock are broken for softmmu. */
896
#ifdef TARGET_NR_msync
897
case TARGET_NR_msync:
898
- return get_errno(msync(g2h(arg1), arg2, arg3));
899
+ return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
900
#endif
901
#ifdef TARGET_NR_mlock
902
case TARGET_NR_mlock:
903
- return get_errno(mlock(g2h(arg1), arg2));
904
+ return get_errno(mlock(g2h(cpu, arg1), arg2));
905
#endif
906
#ifdef TARGET_NR_munlock
907
case TARGET_NR_munlock:
908
- return get_errno(munlock(g2h(arg1), arg2));
909
+ return get_errno(munlock(g2h(cpu, arg1), arg2));
910
#endif
911
#ifdef TARGET_NR_mlockall
912
case TARGET_NR_mlockall:
913
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
914
915
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
916
case TARGET_NR_set_tid_address:
917
- return get_errno(set_tid_address((int *)g2h(arg1)));
918
+ return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
919
#endif
920
921
case TARGET_NR_tkill:
922
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
923
#endif
924
#ifdef TARGET_NR_futex
925
case TARGET_NR_futex:
926
- return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
927
+ return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
928
#endif
929
#ifdef TARGET_NR_futex_time64
930
case TARGET_NR_futex_time64:
931
- return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
932
+ return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
933
#endif
934
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
935
case TARGET_NR_inotify_init:
936
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
937
index XXXXXXX..XXXXXXX 100644
938
--- a/target/arm/helper-a64.c
939
+++ b/target/arm/helper-a64.c
940
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
941
942
#ifdef CONFIG_USER_ONLY
943
/* ??? Enforce alignment. */
944
- uint64_t *haddr = g2h(addr);
945
+ uint64_t *haddr = g2h(env_cpu(env), addr);
946
947
set_helper_retaddr(ra);
948
o0 = ldq_le_p(haddr + 0);
949
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
950
951
#ifdef CONFIG_USER_ONLY
952
/* ??? Enforce alignment. */
953
- uint64_t *haddr = g2h(addr);
954
+ uint64_t *haddr = g2h(env_cpu(env), addr);
955
956
set_helper_retaddr(ra);
957
o1 = ldq_be_p(haddr + 0);
958
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
959
index XXXXXXX..XXXXXXX 100644
960
--- a/target/hppa/op_helper.c
961
+++ b/target/hppa/op_helper.c
962
@@ -XXX,XX +XXX,XX @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
963
#ifdef CONFIG_USER_ONLY
964
uint32_t old, new, cmp;
965
966
- uint32_t *haddr = g2h(addr - 1);
967
+ uint32_t *haddr = g2h(env_cpu(env), addr - 1);
968
old = *haddr;
969
while (1) {
970
new = (old & ~mask) | (val & mask);
971
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
972
index XXXXXXX..XXXXXXX 100644
973
--- a/target/i386/tcg/mem_helper.c
974
+++ b/target/i386/tcg/mem_helper.c
975
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
976
977
#ifdef CONFIG_USER_ONLY
978
{
979
- uint64_t *haddr = g2h(a0);
980
+ uint64_t *haddr = g2h(env_cpu(env), a0);
981
cmpv = cpu_to_le64(cmpv);
982
newv = cpu_to_le64(newv);
983
oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
984
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
985
index XXXXXXX..XXXXXXX 100644
986
--- a/target/s390x/mem_helper.c
987
+++ b/target/s390x/mem_helper.c
988
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
989
990
if (parallel) {
991
#ifdef CONFIG_USER_ONLY
992
- uint32_t *haddr = g2h(a1);
993
+ uint32_t *haddr = g2h(env_cpu(env), a1);
994
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
995
#else
996
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
997
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
998
if (parallel) {
999
#ifdef CONFIG_ATOMIC64
1000
# ifdef CONFIG_USER_ONLY
1001
- uint64_t *haddr = g2h(a1);
1002
+ uint64_t *haddr = g2h(env_cpu(env), a1);
1003
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
1004
# else
1005
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
1006
--
1007
2.20.1
1008
1009
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
We define target_mmap et al as untagged, so that they can be
4
used from the binary loaders. Explicitly call cpu_untagged_addr
5
for munmap, mprotect, mremap syscall entry points.
6
7
Add a few comments for the syscalls that are exempted by the
8
kernel's tagged-address-abi.rst.
9
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210210000223.884088-14-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
15
linux-user/syscall.c | 11 +++++++++++
16
1 file changed, 11 insertions(+)
17
18
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/linux-user/syscall.c
21
+++ b/linux-user/syscall.c
22
@@ -XXX,XX +XXX,XX @@ abi_long do_brk(abi_ulong new_brk)
23
abi_long mapped_addr;
24
abi_ulong new_alloc_size;
25
26
+ /* brk pointers are always untagged */
27
+
28
DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
29
30
if (!new_brk) {
31
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
32
int i,ret;
33
abi_ulong shmlba;
34
35
+ /* shmat pointers are always untagged */
36
+
37
/* find out the length of the shared memory segment */
38
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
39
if (is_error(ret)) {
40
@@ -XXX,XX +XXX,XX @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
41
int i;
42
abi_long rv;
43
44
+ /* shmdt pointers are always untagged */
45
+
46
mmap_lock();
47
48
for (i = 0; i < N_SHM_REGIONS; ++i) {
49
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
50
v5, v6));
51
}
52
#else
53
+ /* mmap pointers are always untagged */
54
ret = get_errno(target_mmap(arg1, arg2, arg3,
55
target_to_host_bitmask(arg4, mmap_flags_tbl),
56
arg5,
57
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
58
return get_errno(ret);
59
#endif
60
case TARGET_NR_munmap:
61
+ arg1 = cpu_untagged_addr(cpu, arg1);
62
return get_errno(target_munmap(arg1, arg2));
63
case TARGET_NR_mprotect:
64
+ arg1 = cpu_untagged_addr(cpu, arg1);
65
{
66
TaskState *ts = cpu->opaque;
67
/* Special hack to detect libc making the stack executable. */
68
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
69
return get_errno(target_mprotect(arg1, arg2, arg3));
70
#ifdef TARGET_NR_mremap
71
case TARGET_NR_mremap:
72
+ arg1 = cpu_untagged_addr(cpu, arg1);
73
+ /* mremap new_addr (arg5) is always untagged */
74
return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
75
#endif
76
/* ??? msync/mlock/munlock are broken for softmmu. */
77
--
78
2.20.1
79
80
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
We're currently open-coding the range check in access_ok;
4
use guest_range_valid when size != 0.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-15-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
linux-user/qemu.h | 9 +++------
12
1 file changed, 3 insertions(+), 6 deletions(-)
13
14
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/qemu.h
17
+++ b/linux-user/qemu.h
18
@@ -XXX,XX +XXX,XX @@ extern unsigned long guest_stack_size;
19
20
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
21
{
22
- if (!guest_addr_valid(addr)) {
23
- return false;
24
- }
25
- if (size != 0 &&
26
- (addr + size - 1 < addr ||
27
- !guest_addr_valid(addr + size - 1))) {
28
+ if (size == 0
29
+ ? !guest_addr_valid(addr)
30
+ : !guest_range_valid(addr, size)) {
31
return false;
32
}
33
return page_check_range((target_ulong)addr, size, type) == 0;
34
--
35
2.20.1
36
37
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
The places that use these are better off using untagged
4
addresses, so do not provide tagged versions. Rename
5
to make it clear about the address type.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210210000223.884088-16-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
include/exec/cpu_ldst.h | 4 ++--
13
linux-user/qemu.h | 4 ++--
14
accel/tcg/user-exec.c | 3 ++-
15
linux-user/mmap.c | 12 ++++++------
16
linux-user/syscall.c | 2 +-
17
5 files changed, 13 insertions(+), 12 deletions(-)
18
19
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/exec/cpu_ldst.h
22
+++ b/include/exec/cpu_ldst.h
23
@@ -XXX,XX +XXX,XX @@ static inline void *g2h(CPUState *cs, abi_ptr x)
24
return g2h_untagged(cpu_untagged_addr(cs, x));
25
}
26
27
-static inline bool guest_addr_valid(abi_ulong x)
28
+static inline bool guest_addr_valid_untagged(abi_ulong x)
29
{
30
return x <= GUEST_ADDR_MAX;
31
}
32
33
-static inline bool guest_range_valid(abi_ulong start, abi_ulong len)
34
+static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
35
{
36
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
37
}
38
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/linux-user/qemu.h
41
+++ b/linux-user/qemu.h
42
@@ -XXX,XX +XXX,XX @@ extern unsigned long guest_stack_size;
43
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
44
{
45
if (size == 0
46
- ? !guest_addr_valid(addr)
47
- : !guest_range_valid(addr, size)) {
48
+ ? !guest_addr_valid_untagged(addr)
49
+ : !guest_range_valid_untagged(addr, size)) {
50
return false;
51
}
52
return page_check_range((target_ulong)addr, size, type) == 0;
53
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/accel/tcg/user-exec.c
56
+++ b/accel/tcg/user-exec.c
57
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
58
g_assert_not_reached();
59
}
60
61
- if (!guest_addr_valid(addr) || page_check_range(addr, 1, flags) < 0) {
62
+ if (!guest_addr_valid_untagged(addr) ||
63
+ page_check_range(addr, 1, flags) < 0) {
64
if (nonfault) {
65
return TLB_INVALID_MASK;
66
} else {
67
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/linux-user/mmap.c
70
+++ b/linux-user/mmap.c
71
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
72
}
73
len = TARGET_PAGE_ALIGN(len);
74
end = start + len;
75
- if (!guest_range_valid(start, len)) {
76
+ if (!guest_range_valid_untagged(start, len)) {
77
return -TARGET_ENOMEM;
78
}
79
if (len == 0) {
80
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
81
* It can fail only on 64-bit host with 32-bit target.
82
* On any other target/host host mmap() handles this error correctly.
83
*/
84
- if (end < start || !guest_range_valid(start, len)) {
85
+ if (end < start || !guest_range_valid_untagged(start, len)) {
86
errno = ENOMEM;
87
goto fail;
88
}
89
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
90
if (start & ~TARGET_PAGE_MASK)
91
return -TARGET_EINVAL;
92
len = TARGET_PAGE_ALIGN(len);
93
- if (len == 0 || !guest_range_valid(start, len)) {
94
+ if (len == 0 || !guest_range_valid_untagged(start, len)) {
95
return -TARGET_EINVAL;
96
}
97
98
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
99
int prot;
100
void *host_addr;
101
102
- if (!guest_range_valid(old_addr, old_size) ||
103
+ if (!guest_range_valid_untagged(old_addr, old_size) ||
104
((flags & MREMAP_FIXED) &&
105
- !guest_range_valid(new_addr, new_size))) {
106
+ !guest_range_valid_untagged(new_addr, new_size))) {
107
errno = ENOMEM;
108
return -1;
109
}
110
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
111
112
if (host_addr != MAP_FAILED) {
113
/* Check if address fits target address space */
114
- if (!guest_range_valid(h2g(host_addr), new_size)) {
115
+ if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
116
/* Revert mremap() changes */
117
host_addr = mremap(g2h_untagged(old_addr),
118
new_size, old_size, flags);
119
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/linux-user/syscall.c
122
+++ b/linux-user/syscall.c
123
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
124
return -TARGET_EINVAL;
125
}
126
}
127
- if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
128
+ if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
129
return -TARGET_EINVAL;
130
}
131
132
--
133
2.20.1
134
135
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Provide both tagged and untagged versions of access_ok.
4
In a few places use thread_cpu, as the user is several
5
callees removed from do_syscall1.
2
6
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190128223118.5255-9-richard.henderson@linaro.org
9
Message-id: 20210210000223.884088-17-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
11
---
8
target/arm/translate-a64.c | 37 ++++++++++++++++++++++++++++++++++++-
12
linux-user/qemu.h | 11 +++++++++--
9
1 file changed, 36 insertions(+), 1 deletion(-)
13
linux-user/elfload.c | 2 +-
14
linux-user/hppa/cpu_loop.c | 8 ++++----
15
linux-user/i386/cpu_loop.c | 2 +-
16
linux-user/i386/signal.c | 5 +++--
17
linux-user/syscall.c | 9 ++++++---
18
6 files changed, 24 insertions(+), 13 deletions(-)
10
19
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
20
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
12
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
22
--- a/linux-user/qemu.h
14
+++ b/target/arm/translate-a64.c
23
+++ b/linux-user/qemu.h
15
@@ -XXX,XX +XXX,XX @@ static void reset_btype(DisasContext *s)
24
@@ -XXX,XX +XXX,XX @@ extern unsigned long guest_stack_size;
16
}
25
#define VERIFY_READ PAGE_READ
26
#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
27
28
-static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
29
+static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
30
{
31
if (size == 0
32
? !guest_addr_valid_untagged(addr)
33
@@ -XXX,XX +XXX,XX @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
34
return page_check_range((target_ulong)addr, size, type) == 0;
17
}
35
}
18
36
19
+static void set_btype(DisasContext *s, int val)
37
+static inline bool access_ok(CPUState *cpu, int type,
38
+ abi_ulong addr, abi_ulong size)
20
+{
39
+{
21
+ TCGv_i32 tcg_val;
40
+ return access_ok_untagged(type, cpu_untagged_addr(cpu, addr), size);
22
+
23
+ /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
24
+ tcg_debug_assert(val >= 1 && val <= 3);
25
+
26
+ tcg_val = tcg_const_i32(val);
27
+ tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
28
+ tcg_temp_free_i32(tcg_val);
29
+ s->btype = -1;
30
+}
41
+}
31
+
42
+
32
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
43
/* NOTE __get_user and __put_user use host pointers and don't check access.
33
fprintf_function cpu_fprintf, int flags)
44
These are usually used to access struct data members once the struct has
45
been locked - usually with lock_user_struct. */
46
@@ -XXX,XX +XXX,XX @@ abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
47
host area will have the same contents as the guest. */
48
static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
34
{
49
{
35
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
50
- if (!access_ok(type, guest_addr, len))
36
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
51
+ if (!access_ok_untagged(type, guest_addr, len)) {
52
return NULL;
53
+ }
54
#ifdef DEBUG_REMAP
55
{
56
void *addr;
57
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/linux-user/elfload.c
60
+++ b/linux-user/elfload.c
61
@@ -XXX,XX +XXX,XX @@ static int vma_get_mapping_count(const struct mm_struct *mm)
62
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
37
{
63
{
38
unsigned int opc, op2, op3, rn, op4;
64
/* if we cannot even read the first page, skip it */
39
+ unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
65
- if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
40
TCGv_i64 dst;
66
+ if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
41
TCGv_i64 modifier;
67
return (0);
42
68
43
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
69
/*
44
case 0: /* BR */
70
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
45
case 1: /* BLR */
71
index XXXXXXX..XXXXXXX 100644
46
case 2: /* RET */
72
--- a/linux-user/hppa/cpu_loop.c
47
+ btype_mod = opc;
73
+++ b/linux-user/hppa/cpu_loop.c
48
switch (op3) {
74
@@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env)
49
case 0:
75
return -TARGET_ENOSYS;
50
/* BR, BLR, RET */
76
51
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
77
case 0: /* elf32 atomic 32bit cmpxchg */
52
default:
78
- if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
53
goto do_unallocated;
79
+ if ((addr & 3) || !access_ok(cs, VERIFY_WRITE, addr, 4)) {
80
return -TARGET_EFAULT;
54
}
81
}
55
-
82
old = tswap32(old);
56
gen_a64_set_pc(s, dst);
83
@@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env)
57
/* BLR also needs to load return address */
84
return -TARGET_ENOSYS;
58
if (opc == 1) {
59
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
60
if ((op3 & ~1) != 2) {
61
goto do_unallocated;
62
}
85
}
63
+ btype_mod = opc & 1;
86
if (((addr | old | new) & ((1 << size) - 1))
64
if (s->pauth_active) {
87
- || !access_ok(VERIFY_WRITE, addr, 1 << size)
65
dst = new_tmp_a64(s);
88
- || !access_ok(VERIFY_READ, old, 1 << size)
66
modifier = cpu_reg_sp(s, op4);
89
- || !access_ok(VERIFY_READ, new, 1 << size)) {
67
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
90
+ || !access_ok(cs, VERIFY_WRITE, addr, 1 << size)
68
return;
91
+ || !access_ok(cs, VERIFY_READ, old, 1 << size)
92
+ || !access_ok(cs, VERIFY_READ, new, 1 << size)) {
93
return -TARGET_EFAULT;
94
}
95
/* Note that below we use host-endian loads so that the cmpxchg
96
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/linux-user/i386/cpu_loop.c
99
+++ b/linux-user/i386/cpu_loop.c
100
@@ -XXX,XX +XXX,XX @@ static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len)
101
* For all the vsyscalls, NULL means "don't write anything" not
102
* "write it at address 0".
103
*/
104
- if (addr == 0 || access_ok(VERIFY_WRITE, addr, len)) {
105
+ if (addr == 0 || access_ok(env_cpu(env), VERIFY_WRITE, addr, len)) {
106
return true;
69
}
107
}
70
108
71
+ switch (btype_mod) {
109
diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c
72
+ case 0: /* BR */
110
index XXXXXXX..XXXXXXX 100644
73
+ if (dc_isar_feature(aa64_bti, s)) {
111
--- a/linux-user/i386/signal.c
74
+ /* BR to {x16,x17} or !guard -> 1, else 3. */
112
+++ b/linux-user/i386/signal.c
75
+ set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
113
@@ -XXX,XX +XXX,XX @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
114
115
fpstate_addr = tswapl(sc->fpstate);
116
if (fpstate_addr != 0) {
117
- if (!access_ok(VERIFY_READ, fpstate_addr,
118
- sizeof(struct target_fpstate)))
119
+ if (!access_ok(env_cpu(env), VERIFY_READ, fpstate_addr,
120
+ sizeof(struct target_fpstate))) {
121
goto badframe;
76
+ }
122
+ }
77
+ break;
123
#ifndef TARGET_X86_64
78
+
124
cpu_x86_frstor(env, fpstate_addr, 1);
79
+ case 1: /* BLR */
125
#else
80
+ if (dc_isar_feature(aa64_bti, s)) {
126
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
81
+ /* BLR sets BTYPE to 2, regardless of source guarded page. */
127
index XXXXXXX..XXXXXXX 100644
82
+ set_btype(s, 2);
128
--- a/linux-user/syscall.c
83
+ }
129
+++ b/linux-user/syscall.c
84
+ break;
130
@@ -XXX,XX +XXX,XX @@ static abi_long do_accept4(int fd, abi_ulong target_addr,
85
+
131
return -TARGET_EINVAL;
86
+ default: /* RET or none of the above. */
132
}
87
+ /* BTYPE will be set to 0 by normal end-of-insn processing. */
133
88
+ break;
134
- if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
135
+ if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
136
return -TARGET_EFAULT;
89
+ }
137
+ }
90
+
138
91
s->base.is_jmp = DISAS_JUMP;
139
addr = alloca(addrlen);
92
}
140
141
@@ -XXX,XX +XXX,XX @@ static abi_long do_getpeername(int fd, abi_ulong target_addr,
142
return -TARGET_EINVAL;
143
}
144
145
- if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
146
+ if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
147
return -TARGET_EFAULT;
148
+ }
149
150
addr = alloca(addrlen);
151
152
@@ -XXX,XX +XXX,XX @@ static abi_long do_getsockname(int fd, abi_ulong target_addr,
153
return -TARGET_EINVAL;
154
}
155
156
- if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
157
+ if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
158
return -TARGET_EFAULT;
159
+ }
160
161
addr = alloca(addrlen);
93
162
94
--
163
--
95
2.20.1
164
2.20.1
96
165
97
166
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These functions are not small, except for unlock_user
4
without debugging enabled. Move them out of line, and
5
add missing braces on the way.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210210000223.884088-18-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
linux-user/qemu.h | 45 ++++++-------------------------------------
13
linux-user/uaccess.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
14
2 files changed, 52 insertions(+), 39 deletions(-)
15
16
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/linux-user/qemu.h
19
+++ b/linux-user/qemu.h
20
@@ -XXX,XX +XXX,XX @@ abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
21
22
/* Lock an area of guest memory into the host. If copy is true then the
23
host area will have the same contents as the guest. */
24
-static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
25
-{
26
- if (!access_ok_untagged(type, guest_addr, len)) {
27
- return NULL;
28
- }
29
-#ifdef DEBUG_REMAP
30
- {
31
- void *addr;
32
- addr = g_malloc(len);
33
- if (copy)
34
- memcpy(addr, g2h(guest_addr), len);
35
- else
36
- memset(addr, 0, len);
37
- return addr;
38
- }
39
-#else
40
- return g2h_untagged(guest_addr);
41
-#endif
42
-}
43
+void *lock_user(int type, abi_ulong guest_addr, long len, int copy);
44
45
/* Unlock an area of guest memory. The first LEN bytes must be
46
flushed back to guest memory. host_ptr = NULL is explicitly
47
allowed and does nothing. */
48
-static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
49
- long len)
50
-{
51
-
52
#ifdef DEBUG_REMAP
53
- if (!host_ptr)
54
- return;
55
- if (host_ptr == g2h_untagged(guest_addr))
56
- return;
57
- if (len > 0)
58
- memcpy(g2h_untagged(guest_addr), host_ptr, len);
59
- g_free(host_ptr);
60
+static inline void unlock_user(void *host_ptr, abi_ulong guest_addr, long len)
61
+{ }
62
+#else
63
+void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
64
#endif
65
-}
66
67
/* Return the length of a string in target memory or -TARGET_EFAULT if
68
access error. */
69
abi_long target_strlen(abi_ulong gaddr);
70
71
/* Like lock_user but for null terminated strings. */
72
-static inline void *lock_user_string(abi_ulong guest_addr)
73
-{
74
- abi_long len;
75
- len = target_strlen(guest_addr);
76
- if (len < 0)
77
- return NULL;
78
- return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
79
-}
80
+void *lock_user_string(abi_ulong guest_addr);
81
82
/* Helper macros for locking/unlocking a target struct. */
83
#define lock_user_struct(type, host_ptr, guest_addr, copy)    \
84
diff --git a/linux-user/uaccess.c b/linux-user/uaccess.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/linux-user/uaccess.c
87
+++ b/linux-user/uaccess.c
88
@@ -XXX,XX +XXX,XX @@
89
90
#include "qemu.h"
91
92
+void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
93
+{
94
+ if (!access_ok_untagged(type, guest_addr, len)) {
95
+ return NULL;
96
+ }
97
+#ifdef DEBUG_REMAP
98
+ {
99
+ void *addr;
100
+ addr = g_malloc(len);
101
+ if (copy) {
102
+ memcpy(addr, g2h(guest_addr), len);
103
+ } else {
104
+ memset(addr, 0, len);
105
+ }
106
+ return addr;
107
+ }
108
+#else
109
+ return g2h_untagged(guest_addr);
110
+#endif
111
+}
112
+
113
+#ifdef DEBUG_REMAP
114
+void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
115
+{
116
+ if (!host_ptr) {
117
+ return;
118
+ }
119
+ if (host_ptr == g2h_untagged(guest_addr)) {
120
+ return;
121
+ }
122
+ if (len > 0) {
123
+ memcpy(g2h_untagged(guest_addr), host_ptr, len);
124
+ }
125
+ g_free(host_ptr);
126
+}
127
+#endif
128
+
129
+void *lock_user_string(abi_ulong guest_addr)
130
+{
131
+ abi_long len = target_strlen(guest_addr);
132
+ if (len < 0) {
133
+ return NULL;
134
+ }
135
+ return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
136
+}
137
+
138
/* copy_from_user() and copy_to_user() are usually used to copy data
139
* buffers between the target and host. These internally perform
140
* locking/unlocking of the memory.
141
--
142
2.20.1
143
144
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Place this in its own field within ENV, as that will
3
For copy_*_user, only 0 and -TARGET_EFAULT are returned; no need
4
make it easier to reset from within TCG generated code.
4
to involve abi_long. Use size_t for lengths. Use bool for the
5
5
lock_user copy argument. Use ssize_t for target_strlen, because
6
With the change to pstate_read/write, exception entry
6
we can't overflow the host memory space.
7
and return are automatically handled.
8
7
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20190128223118.5255-3-richard.henderson@linaro.org
10
Message-id: 20210210000223.884088-19-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
12
---
14
target/arm/cpu.h | 8 ++++++--
13
linux-user/qemu.h | 14 ++++++--------
15
target/arm/translate-a64.c | 3 +++
14
linux-user/uaccess.c | 45 ++++++++++++++++++++++----------------------
16
2 files changed, 9 insertions(+), 2 deletions(-)
15
2 files changed, 29 insertions(+), 30 deletions(-)
17
16
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
19
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.h
19
--- a/linux-user/qemu.h
21
+++ b/target/arm/cpu.h
20
+++ b/linux-user/qemu.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
21
@@ -XXX,XX +XXX,XX @@
23
* semantics as for AArch32, as described in the comments on each field)
22
#include "exec/cpu_ldst.h"
24
* nRW (also known as M[4]) is kept, inverted, in env->aarch64
23
25
* DAIF (exception masks) are kept in env->daif
24
#undef DEBUG_REMAP
26
+ * BTYPE is kept in env->btype
25
-#ifdef DEBUG_REMAP
27
* all other bits are stored in their correct places in env->pstate
26
-#endif /* DEBUG_REMAP */
28
*/
27
29
uint32_t pstate;
28
#include "exec/user/abitypes.h"
30
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
29
31
uint32_t GE; /* cpsr[19:16] */
30
@@ -XXX,XX +XXX,XX @@ static inline bool access_ok(CPUState *cpu, int type,
32
uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
31
* buffers between the target and host. These internally perform
33
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
32
* locking/unlocking of the memory.
34
+ uint32_t btype; /* BTI branch type. spsr[11:10]. */
33
*/
35
uint64_t daif; /* exception masks, in the bits they are in PSTATE */
34
-abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
36
35
-abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
37
uint64_t elr_el[4]; /* AArch64 exception link regs */
36
+int copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
38
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
37
+int copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
39
#define PSTATE_I (1U << 7)
38
40
#define PSTATE_A (1U << 8)
39
/* Functions for accessing guest memory. The tget and tput functions
41
#define PSTATE_D (1U << 9)
40
read/write single values, byteswapping as necessary. The lock_user function
42
+#define PSTATE_BTYPE (3U << 10)
41
@@ -XXX,XX +XXX,XX @@ abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
43
#define PSTATE_IL (1U << 20)
42
44
#define PSTATE_SS (1U << 21)
43
/* Lock an area of guest memory into the host. If copy is true then the
45
#define PSTATE_V (1U << 28)
44
host area will have the same contents as the guest. */
46
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
45
-void *lock_user(int type, abi_ulong guest_addr, long len, int copy);
47
#define PSTATE_N (1U << 31)
46
+void *lock_user(int type, abi_ulong guest_addr, size_t len, bool copy);
48
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
47
49
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
48
/* Unlock an area of guest memory. The first LEN bytes must be
50
-#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
49
flushed back to guest memory. host_ptr = NULL is explicitly
51
+#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
50
allowed and does nothing. */
52
/* Mode values for AArch64 */
51
-#ifdef DEBUG_REMAP
53
#define PSTATE_MODE_EL3h 13
52
-static inline void unlock_user(void *host_ptr, abi_ulong guest_addr, long len)
54
#define PSTATE_MODE_EL3t 12
53
+#ifndef DEBUG_REMAP
55
@@ -XXX,XX +XXX,XX @@ static inline uint32_t pstate_read(CPUARMState *env)
54
+static inline void unlock_user(void *host_ptr, abi_ulong guest_addr, size_t len)
56
ZF = (env->ZF == 0);
55
{ }
57
return (env->NF & 0x80000000) | (ZF << 30)
56
#else
58
| (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
57
void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
59
- | env->pstate | env->daif;
58
@@ -XXX,XX +XXX,XX @@ void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
60
+ | env->pstate | env->daif | (env->btype << 10);
59
60
/* Return the length of a string in target memory or -TARGET_EFAULT if
61
access error. */
62
-abi_long target_strlen(abi_ulong gaddr);
63
+ssize_t target_strlen(abi_ulong gaddr);
64
65
/* Like lock_user but for null terminated strings. */
66
void *lock_user_string(abi_ulong guest_addr);
67
diff --git a/linux-user/uaccess.c b/linux-user/uaccess.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/linux-user/uaccess.c
70
+++ b/linux-user/uaccess.c
71
@@ -XXX,XX +XXX,XX @@
72
73
#include "qemu.h"
74
75
-void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
76
+void *lock_user(int type, abi_ulong guest_addr, size_t len, bool copy)
77
{
78
if (!access_ok_untagged(type, guest_addr, len)) {
79
return NULL;
80
@@ -XXX,XX +XXX,XX @@ void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
61
}
81
}
62
82
63
static inline void pstate_write(CPUARMState *env, uint32_t val)
83
#ifdef DEBUG_REMAP
64
@@ -XXX,XX +XXX,XX @@ static inline void pstate_write(CPUARMState *env, uint32_t val)
84
-void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
65
env->CF = (val >> 29) & 1;
85
+void unlock_user(void *host_ptr, abi_ulong guest_addr, size_t len);
66
env->VF = (val << 3) & 0x80000000;
86
{
67
env->daif = val & PSTATE_DAIF;
87
if (!host_ptr) {
68
+ env->btype = (val >> 10) & 3;
88
return;
69
env->pstate = val & ~CACHED_PSTATE_BITS;
89
@@ -XXX,XX +XXX,XX @@ void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
90
if (host_ptr == g2h_untagged(guest_addr)) {
91
return;
92
}
93
- if (len > 0) {
94
+ if (len != 0) {
95
memcpy(g2h_untagged(guest_addr), host_ptr, len);
96
}
97
g_free(host_ptr);
98
@@ -XXX,XX +XXX,XX @@ void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
99
100
void *lock_user_string(abi_ulong guest_addr)
101
{
102
- abi_long len = target_strlen(guest_addr);
103
+ ssize_t len = target_strlen(guest_addr);
104
if (len < 0) {
105
return NULL;
106
}
107
- return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
108
+ return lock_user(VERIFY_READ, guest_addr, (size_t)len + 1, 1);
70
}
109
}
71
110
72
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
111
/* copy_from_user() and copy_to_user() are usually used to copy data
73
index XXXXXXX..XXXXXXX 100644
112
* buffers between the target and host. These internally perform
74
--- a/target/arm/translate-a64.c
113
* locking/unlocking of the memory.
75
+++ b/target/arm/translate-a64.c
114
*/
76
@@ -XXX,XX +XXX,XX @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
115
-abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len)
77
el,
116
+int copy_from_user(void *hptr, abi_ulong gaddr, size_t len)
78
psr & PSTATE_SP ? 'h' : 't');
117
{
79
118
- abi_long ret = 0;
80
+ if (cpu_isar_feature(aa64_bti, cpu)) {
119
- void *ghptr;
81
+ cpu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
120
+ int ret = 0;
121
+ void *ghptr = lock_user(VERIFY_READ, gaddr, len, 1);
122
123
- if ((ghptr = lock_user(VERIFY_READ, gaddr, len, 1))) {
124
+ if (ghptr) {
125
memcpy(hptr, ghptr, len);
126
unlock_user(ghptr, gaddr, 0);
127
- } else
128
+ } else {
129
ret = -TARGET_EFAULT;
130
-
82
+ }
131
+ }
83
if (!(flags & CPU_DUMP_FPU)) {
132
return ret;
84
cpu_fprintf(f, "\n");
133
}
85
return;
134
135
-
136
-abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len)
137
+int copy_to_user(abi_ulong gaddr, void *hptr, size_t len)
138
{
139
- abi_long ret = 0;
140
- void *ghptr;
141
+ int ret = 0;
142
+ void *ghptr = lock_user(VERIFY_WRITE, gaddr, len, 0);
143
144
- if ((ghptr = lock_user(VERIFY_WRITE, gaddr, len, 0))) {
145
+ if (ghptr) {
146
memcpy(ghptr, hptr, len);
147
unlock_user(ghptr, gaddr, len);
148
- } else
149
+ } else {
150
ret = -TARGET_EFAULT;
151
+ }
152
153
return ret;
154
}
155
156
/* Return the length of a string in target memory or -TARGET_EFAULT if
157
access error */
158
-abi_long target_strlen(abi_ulong guest_addr1)
159
+ssize_t target_strlen(abi_ulong guest_addr1)
160
{
161
uint8_t *ptr;
162
abi_ulong guest_addr;
163
- int max_len, len;
164
+ size_t max_len, len;
165
166
guest_addr = guest_addr1;
167
for(;;) {
168
@@ -XXX,XX +XXX,XX @@ abi_long target_strlen(abi_ulong guest_addr1)
169
unlock_user(ptr, guest_addr, 0);
170
guest_addr += len;
171
/* we don't allow wrapping or integer overflow */
172
- if (guest_addr == 0 ||
173
- (guest_addr - guest_addr1) > 0x7fffffff)
174
+ if (guest_addr == 0 || (guest_addr - guest_addr1) > 0x7fffffff) {
175
return -TARGET_EFAULT;
176
- if (len != max_len)
177
+ }
178
+ if (len != max_len) {
179
break;
180
+ }
181
}
182
return guest_addr - guest_addr1;
183
}
86
--
184
--
87
2.20.1
185
2.20.1
88
186
89
187
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is all of the non-exception cases of DISAS_NORETURN.
3
Resolve the untagged address once, using thread_cpu.
4
Tidy the DEBUG_REMAP code using glib routines.
4
5
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Message-id: 20190128223118.5255-8-richard.henderson@linaro.org
8
Message-id: 20210210000223.884088-20-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
---
10
target/arm/translate-a64.c | 6 ++++++
11
linux-user/uaccess.c | 27 ++++++++++++++-------------
11
1 file changed, 6 insertions(+)
12
1 file changed, 14 insertions(+), 13 deletions(-)
12
13
13
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/linux-user/uaccess.c b/linux-user/uaccess.c
14
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-a64.c
16
--- a/linux-user/uaccess.c
16
+++ b/target/arm/translate-a64.c
17
+++ b/linux-user/uaccess.c
17
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
18
@@ -XXX,XX +XXX,XX @@
19
20
void *lock_user(int type, abi_ulong guest_addr, size_t len, bool copy)
21
{
22
+ void *host_addr;
23
+
24
+ guest_addr = cpu_untagged_addr(thread_cpu, guest_addr);
25
if (!access_ok_untagged(type, guest_addr, len)) {
26
return NULL;
18
}
27
}
19
28
+ host_addr = g2h_untagged(guest_addr);
20
/* B Branch / BL Branch with link */
29
#ifdef DEBUG_REMAP
21
+ reset_btype(s);
30
- {
22
gen_goto_tb(s, 0, addr);
31
- void *addr;
32
- addr = g_malloc(len);
33
- if (copy) {
34
- memcpy(addr, g2h(guest_addr), len);
35
- } else {
36
- memset(addr, 0, len);
37
- }
38
- return addr;
39
+ if (copy) {
40
+ host_addr = g_memdup(host_addr, len);
41
+ } else {
42
+ host_addr = g_malloc0(len);
43
}
44
-#else
45
- return g2h_untagged(guest_addr);
46
#endif
47
+ return host_addr;
23
}
48
}
24
49
25
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
50
#ifdef DEBUG_REMAP
26
tcg_cmp = read_cpu_reg(s, rt, sf);
51
void unlock_user(void *host_ptr, abi_ulong guest_addr, size_t len);
27
label_match = gen_new_label();
52
{
28
53
+ void *host_ptr_conv;
29
+ reset_btype(s);
30
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
31
tcg_cmp, 0, label_match);
32
33
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
34
tcg_cmp = tcg_temp_new_i64();
35
tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
36
label_match = gen_new_label();
37
+
54
+
38
+ reset_btype(s);
55
if (!host_ptr) {
39
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
40
tcg_cmp, 0, label_match);
41
tcg_temp_free_i64(tcg_cmp);
42
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
43
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
44
cond = extract32(insn, 0, 4);
45
46
+ reset_btype(s);
47
if (cond < 0x0e) {
48
/* genuinely conditional branches */
49
TCGLabel *label_match = gen_new_label();
50
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
51
* a self-modified code correctly and also to take
52
* any pending interrupts immediately.
53
*/
54
+ reset_btype(s);
55
gen_goto_tb(s, 0, s->pc);
56
return;
56
return;
57
default:
57
}
58
- if (host_ptr == g2h_untagged(guest_addr)) {
59
+ host_ptr_conv = g2h(thread_cpu, guest_addr);
60
+ if (host_ptr == host_ptr_conv) {
61
return;
62
}
63
if (len != 0) {
64
- memcpy(g2h_untagged(guest_addr), host_ptr, len);
65
+ memcpy(host_ptr_conv, host_ptr, len);
66
}
67
g_free(host_ptr);
68
}
58
--
69
--
59
2.20.1
70
2.20.1
60
71
61
72
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
This is the prctl bit that controls whether syscalls accept tagged
4
addresses. See Documentation/arm64/tagged-address-abi.rst in the
5
linux kernel.
2
6
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190201195404.30486-2-richard.henderson@linaro.org
9
Message-id: 20210210000223.884088-21-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
11
---
8
linux-user/aarch64/target_syscall.h | 7 ++++++
12
linux-user/aarch64/target_syscall.h | 4 ++++
9
linux-user/syscall.c | 36 +++++++++++++++++++++++++++++
13
target/arm/cpu-param.h | 3 +++
10
2 files changed, 43 insertions(+)
14
target/arm/cpu.h | 31 +++++++++++++++++++++++++++++
15
linux-user/syscall.c | 24 ++++++++++++++++++++++
16
4 files changed, 62 insertions(+)
11
17
12
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
18
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
13
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/aarch64/target_syscall.h
20
--- a/linux-user/aarch64/target_syscall.h
15
+++ b/linux-user/aarch64/target_syscall.h
21
+++ b/linux-user/aarch64/target_syscall.h
16
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
22
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
17
#define TARGET_PR_SVE_SET_VL 50
23
# define TARGET_PR_PAC_APDBKEY (1 << 3)
18
#define TARGET_PR_SVE_GET_VL 51
24
# define TARGET_PR_PAC_APGAKEY (1 << 4)
19
25
20
+#define TARGET_PR_PAC_RESET_KEYS 54
26
+#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
21
+# define TARGET_PR_PAC_APIAKEY (1 << 0)
27
+#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
22
+# define TARGET_PR_PAC_APIBKEY (1 << 1)
28
+# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
23
+# define TARGET_PR_PAC_APDAKEY (1 << 2)
24
+# define TARGET_PR_PAC_APDBKEY (1 << 3)
25
+# define TARGET_PR_PAC_APGAKEY (1 << 4)
26
+
29
+
27
void arm_init_pauth_key(ARMPACKey *key);
28
29
#endif /* AARCH64_TARGET_SYSCALL_H */
30
#endif /* AARCH64_TARGET_SYSCALL_H */
31
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/cpu-param.h
34
+++ b/target/arm/cpu-param.h
35
@@ -XXX,XX +XXX,XX @@
36
37
#ifdef CONFIG_USER_ONLY
38
#define TARGET_PAGE_BITS 12
39
+# ifdef TARGET_AARCH64
40
+# define TARGET_TAGGED_ADDRESSES
41
+# endif
42
#else
43
/*
44
* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
45
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/cpu.h
48
+++ b/target/arm/cpu.h
49
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
50
const struct arm_boot_info *boot_info;
51
/* Store GICv3CPUState to access from this struct */
52
void *gicv3state;
53
+
54
+#ifdef TARGET_TAGGED_ADDRESSES
55
+ /* Linux syscall tagged address support */
56
+ bool tagged_addr_enable;
57
+#endif
58
} CPUARMState;
59
60
static inline void set_feature(CPUARMState *env, int feature)
61
@@ -XXX,XX +XXX,XX @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
62
*/
63
#define PAGE_BTI PAGE_TARGET_1
64
65
+#ifdef TARGET_TAGGED_ADDRESSES
66
+/**
67
+ * cpu_untagged_addr:
68
+ * @cs: CPU context
69
+ * @x: tagged address
70
+ *
71
+ * Remove any address tag from @x. This is explicitly related to the
72
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
73
+ *
74
+ * There should be a better place to put this, but we need this in
75
+ * include/exec/cpu_ldst.h, and not some place linux-user specific.
76
+ */
77
+static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
78
+{
79
+ ARMCPU *cpu = ARM_CPU(cs);
80
+ if (cpu->env.tagged_addr_enable) {
81
+ /*
82
+ * TBI is enabled for userspace but not kernelspace addresses.
83
+ * Only clear the tag if bit 55 is clear.
84
+ */
85
+ x &= sextract64(x, 0, 56);
86
+ }
87
+ return x;
88
+}
89
+#endif
90
+
91
/*
92
* Naming convention for isar_feature functions:
93
* Functions which test 32-bit ID registers should have _aa32_ in
30
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
94
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
31
index XXXXXXX..XXXXXXX 100644
95
index XXXXXXX..XXXXXXX 100644
32
--- a/linux-user/syscall.c
96
--- a/linux-user/syscall.c
33
+++ b/linux-user/syscall.c
97
+++ b/linux-user/syscall.c
34
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
98
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
35
}
99
}
36
}
100
}
37
return ret;
101
return -TARGET_EINVAL;
38
+ case TARGET_PR_PAC_RESET_KEYS:
102
+ case TARGET_PR_SET_TAGGED_ADDR_CTRL:
39
+ {
103
+ {
104
+ abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
40
+ CPUARMState *env = cpu_env;
105
+ CPUARMState *env = cpu_env;
41
+ ARMCPU *cpu = arm_env_get_cpu(env);
42
+
106
+
43
+ if (arg3 || arg4 || arg5) {
107
+ if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
44
+ return -TARGET_EINVAL;
108
+ return -TARGET_EINVAL;
45
+ }
109
+ }
46
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
110
+ env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
47
+ int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
111
+ return 0;
48
+ TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
112
+ }
49
+ TARGET_PR_PAC_APGAKEY);
113
+ case TARGET_PR_GET_TAGGED_ADDR_CTRL:
50
+ if (arg2 == 0) {
114
+ {
51
+ arg2 = all;
115
+ abi_long ret = 0;
52
+ } else if (arg2 & ~all) {
116
+ CPUARMState *env = cpu_env;
53
+ return -TARGET_EINVAL;
117
+
54
+ }
118
+ if (arg2 || arg3 || arg4 || arg5) {
55
+ if (arg2 & TARGET_PR_PAC_APIAKEY) {
119
+ return -TARGET_EINVAL;
56
+ arm_init_pauth_key(&env->apia_key);
57
+ }
58
+ if (arg2 & TARGET_PR_PAC_APIBKEY) {
59
+ arm_init_pauth_key(&env->apib_key);
60
+ }
61
+ if (arg2 & TARGET_PR_PAC_APDAKEY) {
62
+ arm_init_pauth_key(&env->apda_key);
63
+ }
64
+ if (arg2 & TARGET_PR_PAC_APDBKEY) {
65
+ arm_init_pauth_key(&env->apdb_key);
66
+ }
67
+ if (arg2 & TARGET_PR_PAC_APGAKEY) {
68
+ arm_init_pauth_key(&env->apga_key);
69
+ }
70
+ return 0;
71
+ }
120
+ }
121
+ if (env->tagged_addr_enable) {
122
+ ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
123
+ }
124
+ return ret;
72
+ }
125
+ }
73
+ return -TARGET_EINVAL;
74
#endif /* AARCH64 */
126
#endif /* AARCH64 */
75
case PR_GET_SECCOMP:
127
case PR_GET_SECCOMP:
76
case PR_SET_SECCOMP:
128
case PR_SET_SECCOMP:
77
--
129
--
78
2.20.1
130
2.20.1
79
131
80
132
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This will allow TBI to be used in user-only mode, as well as
3
Use simple arithmetic instead of a conditional
4
avoid ping-ponging the softmmu TLB when TBI is in use. It
4
move when tbi0 != tbi1.
5
will also enable other armv8 extensions.
6
5
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190204132126.3255-3-richard.henderson@linaro.org
8
Message-id: 20210210000223.884088-22-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/translate-a64.c | 217 ++++++++++++++++++++-----------------
11
target/arm/translate-a64.c | 25 ++++++++++++++-----------
13
1 file changed, 116 insertions(+), 101 deletions(-)
12
1 file changed, 14 insertions(+), 11 deletions(-)
14
13
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
16
--- a/target/arm/translate-a64.c
18
+++ b/target/arm/translate-a64.c
17
+++ b/target/arm/translate-a64.c
19
@@ -XXX,XX +XXX,XX @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
18
@@ -XXX,XX +XXX,XX @@ static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
20
gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
19
/* Sign-extend from bit 55. */
21
}
20
tcg_gen_sextract_i64(dst, src, 0, 56);
22
21
23
+/*
22
- if (tbi != 3) {
24
+ * Return a "clean" address for ADDR according to TBID.
23
- TCGv_i64 tcg_zero = tcg_const_i64(0);
25
+ * This is always a fresh temporary, as we need to be able to
26
+ * increment this independently of a dirty write-back address.
27
+ */
28
+static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
29
+{
30
+ TCGv_i64 clean = new_tmp_a64(s);
31
+ gen_top_byte_ignore(s, clean, addr, s->tbid);
32
+ return clean;
33
+}
34
+
35
typedef struct DisasCompare64 {
36
TCGCond cond;
37
TCGv_i64 value;
38
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
39
TCGv_i64 tcg_rs = cpu_reg(s, rs);
40
TCGv_i64 tcg_rt = cpu_reg(s, rt);
41
int memidx = get_mem_index(s);
42
- TCGv_i64 addr = cpu_reg_sp(s, rn);
43
+ TCGv_i64 clean_addr;
44
45
if (rn == 31) {
46
gen_check_sp_alignment(s);
47
}
48
- tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
49
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
50
+ tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
51
size | MO_ALIGN | s->be_data);
52
}
53
54
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
55
TCGv_i64 s2 = cpu_reg(s, rs + 1);
56
TCGv_i64 t1 = cpu_reg(s, rt);
57
TCGv_i64 t2 = cpu_reg(s, rt + 1);
58
- TCGv_i64 addr = cpu_reg_sp(s, rn);
59
+ TCGv_i64 clean_addr;
60
int memidx = get_mem_index(s);
61
62
if (rn == 31) {
63
gen_check_sp_alignment(s);
64
}
65
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
66
67
if (size == 2) {
68
TCGv_i64 cmp = tcg_temp_new_i64();
69
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
70
tcg_gen_concat32_i64(cmp, s2, s1);
71
}
72
73
- tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
74
+ tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
75
MO_64 | MO_ALIGN | s->be_data);
76
tcg_temp_free_i64(val);
77
78
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
79
if (HAVE_CMPXCHG128) {
80
TCGv_i32 tcg_rs = tcg_const_i32(rs);
81
if (s->be_data == MO_LE) {
82
- gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
83
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs,
84
+ clean_addr, t1, t2);
85
} else {
86
- gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
87
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs,
88
+ clean_addr, t1, t2);
89
}
90
tcg_temp_free_i32(tcg_rs);
91
} else {
92
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
93
TCGv_i64 zero = tcg_const_i64(0);
94
95
/* Load the two words, in memory order. */
96
- tcg_gen_qemu_ld_i64(d1, addr, memidx,
97
+ tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
98
MO_64 | MO_ALIGN_16 | s->be_data);
99
- tcg_gen_addi_i64(a2, addr, 8);
100
- tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
101
+ tcg_gen_addi_i64(a2, clean_addr, 8);
102
+ tcg_gen_qemu_ld_i64(d2, clean_addr, memidx, MO_64 | s->be_data);
103
104
/* Compare the two words, also in memory order. */
105
tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
106
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
107
/* If compare equal, write back new data, else write back old data. */
108
tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
109
tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
110
- tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
111
+ tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
112
tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
113
tcg_temp_free_i64(a2);
114
tcg_temp_free_i64(c1);
115
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
116
int is_lasr = extract32(insn, 15, 1);
117
int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
118
int size = extract32(insn, 30, 2);
119
- TCGv_i64 tcg_addr;
120
+ TCGv_i64 clean_addr;
121
122
switch (o2_L_o1_o0) {
123
case 0x0: /* STXR */
124
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
125
if (is_lasr) {
126
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
127
}
128
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
129
- gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
130
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
131
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
132
return;
133
134
case 0x4: /* LDXR */
135
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
136
if (rn == 31) {
137
gen_check_sp_alignment(s);
138
}
139
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
140
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
141
s->is_ldex = true;
142
- gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
143
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
144
if (is_lasr) {
145
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
146
}
147
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
148
gen_check_sp_alignment(s);
149
}
150
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
151
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
152
- do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
153
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
154
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
155
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
156
return;
157
158
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
159
if (rn == 31) {
160
gen_check_sp_alignment(s);
161
}
162
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
163
- do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
164
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
165
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
166
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
167
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
168
return;
169
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
170
if (is_lasr) {
171
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
172
}
173
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
174
- gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
175
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
176
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
177
return;
178
}
179
if (rt2 == 31
180
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
181
if (rn == 31) {
182
gen_check_sp_alignment(s);
183
}
184
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
185
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
186
s->is_ldex = true;
187
- gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
188
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
189
if (is_lasr) {
190
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
191
}
192
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
193
int opc = extract32(insn, 30, 2);
194
bool is_signed = false;
195
int size = 2;
196
- TCGv_i64 tcg_rt, tcg_addr;
197
+ TCGv_i64 tcg_rt, clean_addr;
198
199
if (is_vector) {
200
if (opc == 3) {
201
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
202
203
tcg_rt = cpu_reg(s, rt);
204
205
- tcg_addr = tcg_const_i64((s->pc - 4) + imm);
206
+ clean_addr = tcg_const_i64((s->pc - 4) + imm);
207
if (is_vector) {
208
- do_fp_ld(s, rt, tcg_addr, size);
209
+ do_fp_ld(s, rt, clean_addr, size);
210
} else {
211
/* Only unsigned 32bit loads target 32bit registers. */
212
bool iss_sf = opc != 0;
213
214
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
215
+ do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
216
true, rt, iss_sf, false);
217
}
218
- tcg_temp_free_i64(tcg_addr);
219
+ tcg_temp_free_i64(clean_addr);
220
}
221
222
/*
223
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
224
bool postindex = false;
225
bool wback = false;
226
227
- TCGv_i64 tcg_addr; /* calculated address */
228
+ TCGv_i64 clean_addr, dirty_addr;
229
+
230
int size;
231
232
if (opc == 3) {
233
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
234
gen_check_sp_alignment(s);
235
}
236
237
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
238
-
24
-
239
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
25
- /*
240
if (!postindex) {
26
- * The two TBI bits differ.
241
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
27
- * If tbi0, then !tbi1: only use the extension if positive.
242
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
28
- * if !tbi0, then tbi1: only use the extension if negative.
243
}
29
- */
244
+ clean_addr = clean_data_tbi(s, dirty_addr);
30
- tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
245
31
- dst, dst, tcg_zero, dst, src);
246
if (is_vector) {
32
- tcg_temp_free_i64(tcg_zero);
247
if (is_load) {
33
+ switch (tbi) {
248
- do_fp_ld(s, rt, tcg_addr, size);
34
+ case 1:
249
+ do_fp_ld(s, rt, clean_addr, size);
35
+ /* tbi0 but !tbi1: only use the extension if positive */
250
} else {
36
+ tcg_gen_and_i64(dst, dst, src);
251
- do_fp_st(s, rt, tcg_addr, size);
37
+ break;
252
+ do_fp_st(s, rt, clean_addr, size);
38
+ case 2:
253
}
39
+ /* !tbi0 but tbi1: only use the extension if negative */
254
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
40
+ tcg_gen_or_i64(dst, dst, src);
255
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
41
+ break;
256
if (is_load) {
42
+ case 3:
257
- do_fp_ld(s, rt2, tcg_addr, size);
43
+ /* tbi0 and tbi1: always use the extension */
258
+ do_fp_ld(s, rt2, clean_addr, size);
44
+ break;
259
} else {
45
+ default:
260
- do_fp_st(s, rt2, tcg_addr, size);
46
+ g_assert_not_reached();
261
+ do_fp_st(s, rt2, clean_addr, size);
262
}
263
} else {
264
TCGv_i64 tcg_rt = cpu_reg(s, rt);
265
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
266
/* Do not modify tcg_rt before recognizing any exception
267
* from the second load.
268
*/
269
- do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
270
+ do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
271
false, 0, false, false);
272
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
273
- do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
274
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
275
+ do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
276
false, 0, false, false);
277
278
tcg_gen_mov_i64(tcg_rt, tmp);
279
tcg_temp_free_i64(tmp);
280
} else {
281
- do_gpr_st(s, tcg_rt, tcg_addr, size,
282
+ do_gpr_st(s, tcg_rt, clean_addr, size,
283
false, 0, false, false);
284
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
285
- do_gpr_st(s, tcg_rt2, tcg_addr, size,
286
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
287
+ do_gpr_st(s, tcg_rt2, clean_addr, size,
288
false, 0, false, false);
289
}
47
}
290
}
48
}
291
292
if (wback) {
293
if (postindex) {
294
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
295
- } else {
296
- tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
297
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
298
}
299
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
300
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
301
}
302
}
49
}
303
304
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
305
bool post_index;
306
bool writeback;
307
308
- TCGv_i64 tcg_addr;
309
+ TCGv_i64 clean_addr, dirty_addr;
310
311
if (is_vector) {
312
size |= (opc & 2) << 1;
313
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
314
if (rn == 31) {
315
gen_check_sp_alignment(s);
316
}
317
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
318
319
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
320
if (!post_index) {
321
- tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
322
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
323
}
324
+ clean_addr = clean_data_tbi(s, dirty_addr);
325
326
if (is_vector) {
327
if (is_store) {
328
- do_fp_st(s, rt, tcg_addr, size);
329
+ do_fp_st(s, rt, clean_addr, size);
330
} else {
331
- do_fp_ld(s, rt, tcg_addr, size);
332
+ do_fp_ld(s, rt, clean_addr, size);
333
}
334
} else {
335
TCGv_i64 tcg_rt = cpu_reg(s, rt);
336
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
337
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
338
339
if (is_store) {
340
- do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
341
+ do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
342
iss_valid, rt, iss_sf, false);
343
} else {
344
- do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
345
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
346
is_signed, is_extended, memidx,
347
iss_valid, rt, iss_sf, false);
348
}
349
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
350
if (writeback) {
351
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
352
if (post_index) {
353
- tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
354
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
355
}
356
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
357
+ tcg_gen_mov_i64(tcg_rn, dirty_addr);
358
}
359
}
360
361
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
362
bool is_store = false;
363
bool is_extended = false;
364
365
- TCGv_i64 tcg_rm;
366
- TCGv_i64 tcg_addr;
367
+ TCGv_i64 tcg_rm, clean_addr, dirty_addr;
368
369
if (extract32(opt, 1, 1) == 0) {
370
unallocated_encoding(s);
371
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
372
if (rn == 31) {
373
gen_check_sp_alignment(s);
374
}
375
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
376
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
377
378
tcg_rm = read_cpu_reg(s, rm, 1);
379
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
380
381
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
382
+ tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
383
+ clean_addr = clean_data_tbi(s, dirty_addr);
384
385
if (is_vector) {
386
if (is_store) {
387
- do_fp_st(s, rt, tcg_addr, size);
388
+ do_fp_st(s, rt, clean_addr, size);
389
} else {
390
- do_fp_ld(s, rt, tcg_addr, size);
391
+ do_fp_ld(s, rt, clean_addr, size);
392
}
393
} else {
394
TCGv_i64 tcg_rt = cpu_reg(s, rt);
395
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
396
if (is_store) {
397
- do_gpr_st(s, tcg_rt, tcg_addr, size,
398
+ do_gpr_st(s, tcg_rt, clean_addr, size,
399
true, rt, iss_sf, false);
400
} else {
401
- do_gpr_ld(s, tcg_rt, tcg_addr, size,
402
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
403
is_signed, is_extended,
404
true, rt, iss_sf, false);
405
}
406
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
407
unsigned int imm12 = extract32(insn, 10, 12);
408
unsigned int offset;
409
410
- TCGv_i64 tcg_addr;
411
+ TCGv_i64 clean_addr, dirty_addr;
412
413
bool is_store;
414
bool is_signed = false;
415
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
416
if (rn == 31) {
417
gen_check_sp_alignment(s);
418
}
419
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
420
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
421
offset = imm12 << size;
422
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
423
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
424
+ clean_addr = clean_data_tbi(s, dirty_addr);
425
426
if (is_vector) {
427
if (is_store) {
428
- do_fp_st(s, rt, tcg_addr, size);
429
+ do_fp_st(s, rt, clean_addr, size);
430
} else {
431
- do_fp_ld(s, rt, tcg_addr, size);
432
+ do_fp_ld(s, rt, clean_addr, size);
433
}
434
} else {
435
TCGv_i64 tcg_rt = cpu_reg(s, rt);
436
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
437
if (is_store) {
438
- do_gpr_st(s, tcg_rt, tcg_addr, size,
439
+ do_gpr_st(s, tcg_rt, clean_addr, size,
440
true, rt, iss_sf, false);
441
} else {
442
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
443
+ do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
444
true, rt, iss_sf, false);
445
}
446
}
447
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
448
int rs = extract32(insn, 16, 5);
449
int rn = extract32(insn, 5, 5);
450
int o3_opc = extract32(insn, 12, 4);
451
- TCGv_i64 tcg_rn, tcg_rs;
452
+ TCGv_i64 tcg_rs, clean_addr;
453
AtomicThreeOpFn *fn;
454
455
if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
456
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
457
if (rn == 31) {
458
gen_check_sp_alignment(s);
459
}
460
- tcg_rn = cpu_reg_sp(s, rn);
461
+ clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
462
tcg_rs = read_cpu_reg(s, rs, true);
463
464
if (o3_opc == 1) { /* LDCLR */
465
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
466
/* The tcg atomic primitives are all full barriers. Therefore we
467
* can ignore the Acquire and Release bits of this instruction.
468
*/
469
- fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
470
+ fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
471
s->be_data | size | MO_ALIGN);
472
}
473
474
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
475
bool is_wback = extract32(insn, 11, 1);
476
bool use_key_a = !extract32(insn, 23, 1);
477
int offset;
478
- TCGv_i64 tcg_addr, tcg_rt;
479
+ TCGv_i64 clean_addr, dirty_addr, tcg_rt;
480
481
if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
482
unallocated_encoding(s);
483
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
484
if (rn == 31) {
485
gen_check_sp_alignment(s);
486
}
487
- tcg_addr = read_cpu_reg_sp(s, rn, 1);
488
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
489
490
if (s->pauth_active) {
491
if (use_key_a) {
492
- gen_helper_autda(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
493
+ gen_helper_autda(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
494
} else {
495
- gen_helper_autdb(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
496
+ gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
497
}
498
}
499
500
/* Form the 10-bit signed, scaled offset. */
501
offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
502
offset = sextract32(offset << size, 0, 10 + size);
503
- tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
504
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
505
+
506
+ /* Note that "clean" and "dirty" here refer to TBI not PAC. */
507
+ clean_addr = clean_data_tbi(s, dirty_addr);
508
509
tcg_rt = cpu_reg(s, rt);
510
-
511
- do_gpr_ld(s, tcg_rt, tcg_addr, size, /* is_signed */ false,
512
+ do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
513
/* extend */ false, /* iss_valid */ !is_wback,
514
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
515
516
if (is_wback) {
517
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
518
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
519
}
520
}
521
522
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
523
bool is_store = !extract32(insn, 22, 1);
524
bool is_postidx = extract32(insn, 23, 1);
525
bool is_q = extract32(insn, 30, 1);
526
- TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
527
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
528
TCGMemOp endian = s->be_data;
529
530
int ebytes; /* bytes per element */
531
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
532
elements = (is_q ? 16 : 8) / ebytes;
533
534
tcg_rn = cpu_reg_sp(s, rn);
535
- tcg_addr = tcg_temp_new_i64();
536
- tcg_gen_mov_i64(tcg_addr, tcg_rn);
537
+ clean_addr = clean_data_tbi(s, tcg_rn);
538
tcg_ebytes = tcg_const_i64(ebytes);
539
540
for (r = 0; r < rpt; r++) {
541
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
542
for (xs = 0; xs < selem; xs++) {
543
int tt = (rt + r + xs) % 32;
544
if (is_store) {
545
- do_vec_st(s, tt, e, tcg_addr, size, endian);
546
+ do_vec_st(s, tt, e, clean_addr, size, endian);
547
} else {
548
- do_vec_ld(s, tt, e, tcg_addr, size, endian);
549
+ do_vec_ld(s, tt, e, clean_addr, size, endian);
550
}
551
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
552
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
553
}
554
}
555
}
556
+ tcg_temp_free_i64(tcg_ebytes);
557
558
if (!is_store) {
559
/* For non-quad operations, setting a slice of the low
560
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
561
562
if (is_postidx) {
563
if (rm == 31) {
564
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
565
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
566
} else {
567
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
568
}
569
}
570
- tcg_temp_free_i64(tcg_ebytes);
571
- tcg_temp_free_i64(tcg_addr);
572
}
573
574
/* AdvSIMD load/store single structure
575
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
576
bool replicate = false;
577
int index = is_q << 3 | S << 2 | size;
578
int ebytes, xs;
579
- TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
580
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
581
582
if (extract32(insn, 31, 1)) {
583
unallocated_encoding(s);
584
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
585
}
586
587
tcg_rn = cpu_reg_sp(s, rn);
588
- tcg_addr = tcg_temp_new_i64();
589
- tcg_gen_mov_i64(tcg_addr, tcg_rn);
590
+ clean_addr = clean_data_tbi(s, tcg_rn);
591
tcg_ebytes = tcg_const_i64(ebytes);
592
593
for (xs = 0; xs < selem; xs++) {
594
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
595
/* Load and replicate to all elements */
596
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
597
598
- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
599
+ tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
600
get_mem_index(s), s->be_data + scale);
601
tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
602
(is_q + 1) * 8, vec_full_reg_size(s),
603
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
604
} else {
605
/* Load/store one element per register */
606
if (is_load) {
607
- do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
608
+ do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
609
} else {
610
- do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
611
+ do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
612
}
613
}
614
- tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
615
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
616
rt = (rt + 1) % 32;
617
}
618
+ tcg_temp_free_i64(tcg_ebytes);
619
620
if (is_postidx) {
621
if (rm == 31) {
622
- tcg_gen_mov_i64(tcg_rn, tcg_addr);
623
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
624
} else {
625
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
626
}
627
}
628
- tcg_temp_free_i64(tcg_ebytes);
629
- tcg_temp_free_i64(tcg_addr);
630
}
631
632
/* Loads and stores */
633
--
50
--
634
2.20.1
51
2.20.1
635
52
636
53
diff view generated by jsdifflib
1
Enables, but does not turn on, TBI for CONFIG_USER_ONLY.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
We were fudging TBI1 enabled to speed up the generated code.
4
Now that we've improved the code generation, remove this.
5
Also, tidy the comment to reflect the current code.
6
7
The pauth test was testing a kernel address (-1) and making
8
incorrect assumptions about TBI1; stick to userland addresses.
2
9
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190204132126.3255-4-richard.henderson@linaro.org
12
Message-id: 20210210000223.884088-23-richard.henderson@linaro.org
6
[PMM: adjusted #ifdeffery to placate clang, which otherwise complains
7
about static functions that are unused in the CONFIG_USER_ONLY build]
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
14
---
10
target/arm/internals.h | 21 --------------------
15
target/arm/internals.h | 4 ++--
11
target/arm/helper.c | 45 ++++++++++++++++++++++--------------------
16
target/arm/cpu.c | 10 +++-------
12
2 files changed, 24 insertions(+), 42 deletions(-)
17
tests/tcg/aarch64/pauth-2.c | 1 -
18
3 files changed, 5 insertions(+), 10 deletions(-)
13
19
14
diff --git a/target/arm/internals.h b/target/arm/internals.h
20
diff --git a/target/arm/internals.h b/target/arm/internals.h
15
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/internals.h
22
--- a/target/arm/internals.h
17
+++ b/target/arm/internals.h
23
+++ b/target/arm/internals.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
24
@@ -XXX,XX +XXX,XX @@ static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
19
bool using64k : 1;
25
*/
20
} ARMVAParameters;
26
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
21
27
{
22
-#ifdef CONFIG_USER_ONLY
28
- /* TBI is known to be enabled. */
23
-static inline ARMVAParameters aa64_va_parameters_both(CPUARMState *env,
29
#ifdef CONFIG_USER_ONLY
24
- uint64_t va,
30
- ptr = sextract64(ptr, 0, 56);
25
- ARMMMUIdx mmu_idx)
31
+ /* TBI0 is known to be enabled, while TBI1 is disabled. */
26
-{
32
+ ptr &= sextract64(ptr, 0, 56);
27
- return (ARMVAParameters) {
28
- /* 48-bit address space */
29
- .tsz = 16,
30
- /* We can't handle tagged addresses properly in user-only mode */
31
- .tbi = false,
32
- };
33
-}
34
-
35
-static inline ARMVAParameters aa64_va_parameters(CPUARMState *env,
36
- uint64_t va,
37
- ARMMMUIdx mmu_idx, bool data)
38
-{
39
- return aa64_va_parameters_both(env, va, mmu_idx);
40
-}
41
-#else
42
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
43
ARMMMUIdx mmu_idx);
44
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
45
ARMMMUIdx mmu_idx, bool data);
46
-#endif
47
48
#endif
33
#endif
49
diff --git a/target/arm/helper.c b/target/arm/helper.c
34
return ptr;
35
}
36
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
50
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/helper.c
38
--- a/target/arm/cpu.c
52
+++ b/target/arm/helper.c
39
+++ b/target/arm/cpu.c
53
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rbit)(uint32_t x)
40
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
54
return revbit32(x);
41
env->vfp.zcr_el[1] = MIN(cpu->sve_max_vq - 1, 3);
42
}
43
/*
44
- * Enable TBI0 and TBI1. While the real kernel only enables TBI0,
45
- * turning on both here will produce smaller code and otherwise
46
- * make no difference to the user-level emulation.
47
- *
48
- * In sve_probe_page, we assume that this is set.
49
- * Do not modify this without other changes.
50
+ * Enable TBI0 but not TBI1.
51
+ * Note that this must match useronly_clean_ptr.
52
*/
53
- env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
54
+ env->cp15.tcr_el[1].raw_tcr = (1ULL << 37);
55
#else
56
/* Reset into the highest available EL */
57
if (arm_feature(env, ARM_FEATURE_EL3)) {
58
diff --git a/tests/tcg/aarch64/pauth-2.c b/tests/tcg/aarch64/pauth-2.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/tests/tcg/aarch64/pauth-2.c
61
+++ b/tests/tcg/aarch64/pauth-2.c
62
@@ -XXX,XX +XXX,XX @@ void do_test(uint64_t value)
63
int main()
64
{
65
do_test(0);
66
- do_test(-1);
67
do_test(0xda004acedeadbeefull);
68
return 0;
55
}
69
}
56
57
-#if defined(CONFIG_USER_ONLY)
58
+#ifdef CONFIG_USER_ONLY
59
60
/* These should probably raise undefined insn exceptions. */
61
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
62
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
63
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
64
}
65
}
66
+#endif /* !CONFIG_USER_ONLY */
67
68
/* Return the exception level which controls this address translation regime */
69
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
70
@@ -XXX,XX +XXX,XX @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
71
}
72
}
73
74
+#ifndef CONFIG_USER_ONLY
75
+
76
/* Return the SCTLR value which controls this address translation regime */
77
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
78
{
79
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_big_endian(CPUARMState *env,
80
return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
81
}
82
83
+/* Return the TTBR associated with this translation regime */
84
+static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
85
+ int ttbrn)
86
+{
87
+ if (mmu_idx == ARMMMUIdx_S2NS) {
88
+ return env->cp15.vttbr_el2;
89
+ }
90
+ if (ttbrn == 0) {
91
+ return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
92
+ } else {
93
+ return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
94
+ }
95
+}
96
+
97
+#endif /* !CONFIG_USER_ONLY */
98
+
99
/* Return the TCR controlling this translation regime */
100
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
101
{
102
@@ -XXX,XX +XXX,XX @@ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
103
return mmu_idx;
104
}
105
106
-/* Return the TTBR associated with this translation regime */
107
-static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
108
- int ttbrn)
109
-{
110
- if (mmu_idx == ARMMMUIdx_S2NS) {
111
- return env->cp15.vttbr_el2;
112
- }
113
- if (ttbrn == 0) {
114
- return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
115
- } else {
116
- return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
117
- }
118
-}
119
-
120
/* Return true if the translation regime is using LPAE format page tables */
121
static inline bool regime_using_lpae_format(CPUARMState *env,
122
ARMMMUIdx mmu_idx)
123
@@ -XXX,XX +XXX,XX @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
124
return regime_using_lpae_format(env, mmu_idx);
125
}
126
127
+#ifndef CONFIG_USER_ONLY
128
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
129
{
130
switch (mmu_idx) {
131
@@ -XXX,XX +XXX,XX @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
132
133
return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
134
}
135
+#endif /* !CONFIG_USER_ONLY */
136
137
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
138
ARMMMUIdx mmu_idx)
139
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
140
return ret;
141
}
142
143
+#ifndef CONFIG_USER_ONLY
144
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
145
ARMMMUIdx mmu_idx)
146
{
147
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
148
*pc = env->pc;
149
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
150
151
-#ifndef CONFIG_USER_ONLY
152
- /*
153
- * Get control bits for tagged addresses. Note that the
154
- * translator only uses this for instruction addresses.
155
- */
156
+ /* Get control bits for tagged addresses. */
157
{
158
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
159
ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
160
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
161
flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
162
flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
163
}
164
-#endif
165
166
if (cpu_isar_feature(aa64_sve, cpu)) {
167
int sve_el = sve_exception_el(env, current_el);
168
--
70
--
169
2.20.1
71
2.20.1
170
72
171
73
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These prctl fields are required for the function of MTE.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210210000223.884088-24-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
linux-user/aarch64/target_syscall.h | 9 ++++++
11
linux-user/syscall.c | 43 +++++++++++++++++++++++++++++
12
2 files changed, 52 insertions(+)
13
14
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/aarch64/target_syscall.h
17
+++ b/linux-user/aarch64/target_syscall.h
18
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
19
#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
20
#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
21
# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
22
+/* MTE tag check fault modes */
23
+# define TARGET_PR_MTE_TCF_SHIFT 1
24
+# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
25
+# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
26
+# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
27
+# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
28
+/* MTE tag inclusion mask */
29
+# define TARGET_PR_MTE_TAG_SHIFT 3
30
+# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
31
32
#endif /* AARCH64_TARGET_SYSCALL_H */
33
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/linux-user/syscall.c
36
+++ b/linux-user/syscall.c
37
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
38
{
39
abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
40
CPUARMState *env = cpu_env;
41
+ ARMCPU *cpu = env_archcpu(env);
42
+
43
+ if (cpu_isar_feature(aa64_mte, cpu)) {
44
+ valid_mask |= TARGET_PR_MTE_TCF_MASK;
45
+ valid_mask |= TARGET_PR_MTE_TAG_MASK;
46
+ }
47
48
if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
49
return -TARGET_EINVAL;
50
}
51
env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
52
+
53
+ if (cpu_isar_feature(aa64_mte, cpu)) {
54
+ switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
55
+ case TARGET_PR_MTE_TCF_NONE:
56
+ case TARGET_PR_MTE_TCF_SYNC:
57
+ case TARGET_PR_MTE_TCF_ASYNC:
58
+ break;
59
+ default:
60
+ return -EINVAL;
61
+ }
62
+
63
+ /*
64
+ * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
65
+ * Note that the syscall values are consistent with hw.
66
+ */
67
+ env->cp15.sctlr_el[1] =
68
+ deposit64(env->cp15.sctlr_el[1], 38, 2,
69
+ arg2 >> TARGET_PR_MTE_TCF_SHIFT);
70
+
71
+ /*
72
+ * Write PR_MTE_TAG to GCR_EL1[Exclude].
73
+ * Note that the syscall uses an include mask,
74
+ * and hardware uses an exclude mask -- invert.
75
+ */
76
+ env->cp15.gcr_el1 =
77
+ deposit64(env->cp15.gcr_el1, 0, 16,
78
+ ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
79
+ arm_rebuild_hflags(env);
80
+ }
81
return 0;
82
}
83
case TARGET_PR_GET_TAGGED_ADDR_CTRL:
84
{
85
abi_long ret = 0;
86
CPUARMState *env = cpu_env;
87
+ ARMCPU *cpu = env_archcpu(env);
88
89
if (arg2 || arg3 || arg4 || arg5) {
90
return -TARGET_EINVAL;
91
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
92
if (env->tagged_addr_enable) {
93
ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
94
}
95
+ if (cpu_isar_feature(aa64_mte, cpu)) {
96
+ /* See above. */
97
+ ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
98
+ << TARGET_PR_MTE_TCF_SHIFT);
99
+ ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
100
+ ~env->cp15.gcr_el1);
101
+ }
102
return ret;
103
}
104
#endif /* AARCH64 */
105
--
106
2.20.1
107
108
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Split out gen_top_byte_ignore in preparation of handling these
3
Remember the PROT_MTE bit as PAGE_MTE/PAGE_TARGET_2.
4
data accesses; the new tbflags field is not yet honored.
4
Otherwise this does not yet have effect.
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20190204132126.3255-2-richard.henderson@linaro.org
8
Message-id: 20210210000223.884088-25-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/cpu.h | 1 +
11
include/exec/cpu-all.h | 1 +
12
target/arm/translate.h | 3 +-
12
linux-user/syscall_defs.h | 1 +
13
target/arm/helper.c | 1 +
13
target/arm/cpu.h | 1 +
14
target/arm/translate-a64.c | 72 +++++++++++++++++++-------------------
14
linux-user/mmap.c | 22 ++++++++++++++--------
15
4 files changed, 40 insertions(+), 37 deletions(-)
15
4 files changed, 17 insertions(+), 8 deletions(-)
16
16
17
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-all.h
20
+++ b/include/exec/cpu-all.h
21
@@ -XXX,XX +XXX,XX @@ extern intptr_t qemu_host_page_mask;
22
#endif
23
/* Target-specific bits that will be used via page_get_flags(). */
24
#define PAGE_TARGET_1 0x0080
25
+#define PAGE_TARGET_2 0x0200
26
27
#if defined(CONFIG_USER_ONLY)
28
void page_dump(FILE *f);
29
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/linux-user/syscall_defs.h
32
+++ b/linux-user/syscall_defs.h
33
@@ -XXX,XX +XXX,XX @@ struct target_winsize {
34
35
#ifdef TARGET_AARCH64
36
#define TARGET_PROT_BTI 0x10
37
+#define TARGET_PROT_MTE 0x20
38
#endif
39
40
/* Common */
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
41
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
43
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
44
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
45
@@ -XXX,XX +XXX,XX @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
22
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
46
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
23
FIELD(TBFLAG_A64, BT, 9, 1)
47
*/
24
FIELD(TBFLAG_A64, BTYPE, 10, 2)
48
#define PAGE_BTI PAGE_TARGET_1
25
+FIELD(TBFLAG_A64, TBID, 12, 2)
49
+#define PAGE_MTE PAGE_TARGET_2
26
50
27
static inline bool bswap_code(bool sctlr_b)
51
#ifdef TARGET_TAGGED_ADDRESSES
28
{
52
/**
29
diff --git a/target/arm/translate.h b/target/arm/translate.h
53
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
30
index XXXXXXX..XXXXXXX 100644
54
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate.h
55
--- a/linux-user/mmap.c
32
+++ b/target/arm/translate.h
56
+++ b/linux-user/mmap.c
33
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
57
@@ -XXX,XX +XXX,XX @@ static int validate_prot_to_pageflags(int *host_prot, int prot)
34
int user;
58
| (prot & PROT_EXEC ? PROT_READ : 0);
59
60
#ifdef TARGET_AARCH64
61
- /*
62
- * The PROT_BTI bit is only accepted if the cpu supports the feature.
63
- * Since this is the unusual case, don't bother checking unless
64
- * the bit has been requested. If set and valid, record the bit
65
- * within QEMU's page_flags.
66
- */
67
- if (prot & TARGET_PROT_BTI) {
68
+ {
69
ARMCPU *cpu = ARM_CPU(thread_cpu);
70
- if (cpu_isar_feature(aa64_bti, cpu)) {
71
+
72
+ /*
73
+ * The PROT_BTI bit is only accepted if the cpu supports the feature.
74
+ * Since this is the unusual case, don't bother checking unless
75
+ * the bit has been requested. If set and valid, record the bit
76
+ * within QEMU's page_flags.
77
+ */
78
+ if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
79
valid |= TARGET_PROT_BTI;
80
page_flags |= PAGE_BTI;
81
}
82
+ /* Similarly for the PROT_MTE bit. */
83
+ if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
84
+ valid |= TARGET_PROT_MTE;
85
+ page_flags |= PAGE_MTE;
86
+ }
87
}
35
#endif
88
#endif
36
ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
89
37
- uint8_t tbii; /* TBI1|TBI0 for EL0/1 or TBI for EL2/3 */
38
+ uint8_t tbii; /* TBI1|TBI0 for insns */
39
+ uint8_t tbid; /* TBI1|TBI0 for data */
40
bool ns; /* Use non-secure CPREG bank on access */
41
int fp_excp_el; /* FP exception EL or 0 if enabled */
42
int sve_excp_el; /* SVE exception EL or 0 if enabled */
43
diff --git a/target/arm/helper.c b/target/arm/helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/helper.c
46
+++ b/target/arm/helper.c
47
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
48
}
49
50
flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
51
+ flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
52
}
53
#endif
54
55
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-a64.c
58
+++ b/target/arm/translate-a64.c
59
@@ -XXX,XX +XXX,XX @@ void gen_a64_set_pc_im(uint64_t val)
60
tcg_gen_movi_i64(cpu_pc, val);
61
}
62
63
-/* Load the PC from a generic TCG variable.
64
+/*
65
+ * Handle Top Byte Ignore (TBI) bits.
66
*
67
- * If address tagging is enabled via the TCR TBI bits, then loading
68
- * an address into the PC will clear out any tag in it:
69
+ * If address tagging is enabled via the TCR TBI bits:
70
* + for EL2 and EL3 there is only one TBI bit, and if it is set
71
* then the address is zero-extended, clearing bits [63:56]
72
* + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
73
@@ -XXX,XX +XXX,XX @@ void gen_a64_set_pc_im(uint64_t val)
74
* If the appropriate TBI bit is set for the address then
75
* the address is sign-extended from bit 55 into bits [63:56]
76
*
77
- * We can avoid doing this for relative-branches, because the
78
- * PC + offset can never overflow into the tag bits (assuming
79
- * that virtual addresses are less than 56 bits wide, as they
80
- * are currently), but we must handle it for branch-to-register.
81
+ * Here We have concatenated TBI{1,0} into tbi.
82
*/
83
-static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
84
+static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
85
+ TCGv_i64 src, int tbi)
86
{
87
- /* Note that TBII is TBI1:TBI0. */
88
- int tbi = s->tbii;
89
-
90
- if (s->current_el <= 1) {
91
- if (tbi != 0) {
92
- /* Sign-extend from bit 55. */
93
- tcg_gen_sextract_i64(cpu_pc, src, 0, 56);
94
-
95
- if (tbi != 3) {
96
- TCGv_i64 tcg_zero = tcg_const_i64(0);
97
-
98
- /*
99
- * The two TBI bits differ.
100
- * If tbi0, then !tbi1: only use the extension if positive.
101
- * if !tbi0, then tbi1: only use the extension if negative.
102
- */
103
- tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
104
- cpu_pc, cpu_pc, tcg_zero, cpu_pc, src);
105
- tcg_temp_free_i64(tcg_zero);
106
- }
107
- return;
108
- }
109
+ if (tbi == 0) {
110
+ /* Load unmodified address */
111
+ tcg_gen_mov_i64(dst, src);
112
+ } else if (s->current_el >= 2) {
113
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
114
+ /* Force tag byte to all zero */
115
+ tcg_gen_extract_i64(dst, src, 0, 56);
116
} else {
117
- if (tbi != 0) {
118
- /* Force tag byte to all zero */
119
- tcg_gen_extract_i64(cpu_pc, src, 0, 56);
120
- return;
121
+ /* Sign-extend from bit 55. */
122
+ tcg_gen_sextract_i64(dst, src, 0, 56);
123
+
124
+ if (tbi != 3) {
125
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
126
+
127
+ /*
128
+ * The two TBI bits differ.
129
+ * If tbi0, then !tbi1: only use the extension if positive.
130
+ * if !tbi0, then tbi1: only use the extension if negative.
131
+ */
132
+ tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
133
+ dst, dst, tcg_zero, dst, src);
134
+ tcg_temp_free_i64(tcg_zero);
135
}
136
}
137
+}
138
139
- /* Load unmodified address */
140
- tcg_gen_mov_i64(cpu_pc, src);
141
+static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
142
+{
143
+ /*
144
+ * If address tagging is enabled for instructions via the TCR TBI bits,
145
+ * then loading an address into the PC will clear out any tag.
146
+ */
147
+ gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
148
}
149
150
typedef struct DisasCompare64 {
151
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
152
core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
153
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
154
dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
155
+ dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
156
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
157
#if !defined(CONFIG_USER_ONLY)
158
dc->user = (dc->current_el == 0);
159
--
90
--
160
2.20.1
91
2.20.1
161
92
162
93
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The branch target exception for guarded pages has high priority,
3
Move everything related to syndromes to a new file,
4
and only 8 instructions are valid for that case. Perform this
4
which can be shared with linux-user.
5
check before doing any other decode.
6
7
Clear BTYPE after all insns that neither set BTYPE nor exit via
8
exception (DISAS_NORETURN).
9
10
Not yet handled are insns that exit via DISAS_NORETURN for some
11
other reason, like direct branches.
12
5
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20190128223118.5255-7-richard.henderson@linaro.org
8
Message-id: 20210210000223.884088-26-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
10
---
18
target/arm/internals.h | 6 ++
11
target/arm/internals.h | 245 +-----------------------------------
19
target/arm/translate.h | 9 ++-
12
target/arm/syndrome.h | 273 +++++++++++++++++++++++++++++++++++++++++
20
target/arm/translate-a64.c | 139 +++++++++++++++++++++++++++++++++++++
13
2 files changed, 274 insertions(+), 244 deletions(-)
21
3 files changed, 152 insertions(+), 2 deletions(-)
14
create mode 100644 target/arm/syndrome.h
22
15
23
diff --git a/target/arm/internals.h b/target/arm/internals.h
16
diff --git a/target/arm/internals.h b/target/arm/internals.h
24
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/internals.h
18
--- a/target/arm/internals.h
26
+++ b/target/arm/internals.h
19
+++ b/target/arm/internals.h
27
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
20
@@ -XXX,XX +XXX,XX @@
28
EC_FPIDTRAP = 0x08,
21
#define TARGET_ARM_INTERNALS_H
29
EC_PACTRAP = 0x09,
22
30
EC_CP14RRTTRAP = 0x0c,
23
#include "hw/registerfields.h"
24
+#include "syndrome.h"
25
26
/* register banks for CPU modes */
27
#define BANK_USRSYS 0
28
@@ -XXX,XX +XXX,XX @@ static inline bool extended_addresses_enabled(CPUARMState *env)
29
(arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
30
}
31
32
-/* Valid Syndrome Register EC field values */
33
-enum arm_exception_class {
34
- EC_UNCATEGORIZED = 0x00,
35
- EC_WFX_TRAP = 0x01,
36
- EC_CP15RTTRAP = 0x03,
37
- EC_CP15RRTTRAP = 0x04,
38
- EC_CP14RTTRAP = 0x05,
39
- EC_CP14DTTRAP = 0x06,
40
- EC_ADVSIMDFPACCESSTRAP = 0x07,
41
- EC_FPIDTRAP = 0x08,
42
- EC_PACTRAP = 0x09,
43
- EC_CP14RRTTRAP = 0x0c,
44
- EC_BTITRAP = 0x0d,
45
- EC_ILLEGALSTATE = 0x0e,
46
- EC_AA32_SVC = 0x11,
47
- EC_AA32_HVC = 0x12,
48
- EC_AA32_SMC = 0x13,
49
- EC_AA64_SVC = 0x15,
50
- EC_AA64_HVC = 0x16,
51
- EC_AA64_SMC = 0x17,
52
- EC_SYSTEMREGISTERTRAP = 0x18,
53
- EC_SVEACCESSTRAP = 0x19,
54
- EC_INSNABORT = 0x20,
55
- EC_INSNABORT_SAME_EL = 0x21,
56
- EC_PCALIGNMENT = 0x22,
57
- EC_DATAABORT = 0x24,
58
- EC_DATAABORT_SAME_EL = 0x25,
59
- EC_SPALIGNMENT = 0x26,
60
- EC_AA32_FPTRAP = 0x28,
61
- EC_AA64_FPTRAP = 0x2c,
62
- EC_SERROR = 0x2f,
63
- EC_BREAKPOINT = 0x30,
64
- EC_BREAKPOINT_SAME_EL = 0x31,
65
- EC_SOFTWARESTEP = 0x32,
66
- EC_SOFTWARESTEP_SAME_EL = 0x33,
67
- EC_WATCHPOINT = 0x34,
68
- EC_WATCHPOINT_SAME_EL = 0x35,
69
- EC_AA32_BKPT = 0x38,
70
- EC_VECTORCATCH = 0x3a,
71
- EC_AA64_BKPT = 0x3c,
72
-};
73
-
74
-#define ARM_EL_EC_SHIFT 26
75
-#define ARM_EL_IL_SHIFT 25
76
-#define ARM_EL_ISV_SHIFT 24
77
-#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
78
-#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
79
-
80
-static inline uint32_t syn_get_ec(uint32_t syn)
81
-{
82
- return syn >> ARM_EL_EC_SHIFT;
83
-}
84
-
85
-/* Utility functions for constructing various kinds of syndrome value.
86
- * Note that in general we follow the AArch64 syndrome values; in a
87
- * few cases the value in HSR for exceptions taken to AArch32 Hyp
88
- * mode differs slightly, and we fix this up when populating HSR in
89
- * arm_cpu_do_interrupt_aarch32_hyp().
90
- * The exception is FP/SIMD access traps -- these report extra information
91
- * when taking an exception to AArch32. For those we include the extra coproc
92
- * and TA fields, and mask them out when taking the exception to AArch64.
93
- */
94
-static inline uint32_t syn_uncategorized(void)
95
-{
96
- return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
97
-}
98
-
99
-static inline uint32_t syn_aa64_svc(uint32_t imm16)
100
-{
101
- return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
102
-}
103
-
104
-static inline uint32_t syn_aa64_hvc(uint32_t imm16)
105
-{
106
- return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
107
-}
108
-
109
-static inline uint32_t syn_aa64_smc(uint32_t imm16)
110
-{
111
- return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
112
-}
113
-
114
-static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
115
-{
116
- return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
117
- | (is_16bit ? 0 : ARM_EL_IL);
118
-}
119
-
120
-static inline uint32_t syn_aa32_hvc(uint32_t imm16)
121
-{
122
- return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
123
-}
124
-
125
-static inline uint32_t syn_aa32_smc(void)
126
-{
127
- return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
128
-}
129
-
130
-static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
131
-{
132
- return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
133
-}
134
-
135
-static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
136
-{
137
- return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
138
- | (is_16bit ? 0 : ARM_EL_IL);
139
-}
140
-
141
-static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
142
- int crn, int crm, int rt,
143
- int isread)
144
-{
145
- return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
146
- | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
147
- | (crm << 1) | isread;
148
-}
149
-
150
-static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
151
- int crn, int crm, int rt, int isread,
152
- bool is_16bit)
153
-{
154
- return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
155
- | (is_16bit ? 0 : ARM_EL_IL)
156
- | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
157
- | (crn << 10) | (rt << 5) | (crm << 1) | isread;
158
-}
159
-
160
-static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
161
- int crn, int crm, int rt, int isread,
162
- bool is_16bit)
163
-{
164
- return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
165
- | (is_16bit ? 0 : ARM_EL_IL)
166
- | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
167
- | (crn << 10) | (rt << 5) | (crm << 1) | isread;
168
-}
169
-
170
-static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
171
- int rt, int rt2, int isread,
172
- bool is_16bit)
173
-{
174
- return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
175
- | (is_16bit ? 0 : ARM_EL_IL)
176
- | (cv << 24) | (cond << 20) | (opc1 << 16)
177
- | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
178
-}
179
-
180
-static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
181
- int rt, int rt2, int isread,
182
- bool is_16bit)
183
-{
184
- return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
185
- | (is_16bit ? 0 : ARM_EL_IL)
186
- | (cv << 24) | (cond << 20) | (opc1 << 16)
187
- | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
188
-}
189
-
190
-static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
191
-{
192
- /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
193
- return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
194
- | (is_16bit ? 0 : ARM_EL_IL)
195
- | (cv << 24) | (cond << 20) | 0xa;
196
-}
197
-
198
-static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
199
-{
200
- /* AArch32 SIMD trap: TA == 1 coproc == 0 */
201
- return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
202
- | (is_16bit ? 0 : ARM_EL_IL)
203
- | (cv << 24) | (cond << 20) | (1 << 5);
204
-}
205
-
206
-static inline uint32_t syn_sve_access_trap(void)
207
-{
208
- return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
209
-}
210
-
211
-static inline uint32_t syn_pactrap(void)
212
-{
213
- return EC_PACTRAP << ARM_EL_EC_SHIFT;
214
-}
215
-
216
-static inline uint32_t syn_btitrap(int btype)
217
-{
218
- return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
219
-}
220
-
221
-static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
222
-{
223
- return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
224
- | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
225
-}
226
-
227
-static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
228
- int ea, int cm, int s1ptw,
229
- int wnr, int fsc)
230
-{
231
- return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
232
- | ARM_EL_IL
233
- | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
234
- | (wnr << 6) | fsc;
235
-}
236
-
237
-static inline uint32_t syn_data_abort_with_iss(int same_el,
238
- int sas, int sse, int srt,
239
- int sf, int ar,
240
- int ea, int cm, int s1ptw,
241
- int wnr, int fsc,
242
- bool is_16bit)
243
-{
244
- return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
245
- | (is_16bit ? 0 : ARM_EL_IL)
246
- | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
247
- | (sf << 15) | (ar << 14)
248
- | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
249
-}
250
-
251
-static inline uint32_t syn_swstep(int same_el, int isv, int ex)
252
-{
253
- return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
254
- | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
255
-}
256
-
257
-static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
258
-{
259
- return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
260
- | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
261
-}
262
-
263
-static inline uint32_t syn_breakpoint(int same_el)
264
-{
265
- return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
266
- | ARM_EL_IL | 0x22;
267
-}
268
-
269
-static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
270
-{
271
- return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
272
- (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
273
- (cv << 24) | (cond << 20) | ti;
274
-}
275
-
276
/* Update a QEMU watchpoint based on the information the guest has set in the
277
* DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
278
*/
279
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
280
new file mode 100644
281
index XXXXXXX..XXXXXXX
282
--- /dev/null
283
+++ b/target/arm/syndrome.h
284
@@ -XXX,XX +XXX,XX @@
285
+/*
286
+ * QEMU ARM CPU -- syndrome functions and types
287
+ *
288
+ * Copyright (c) 2014 Linaro Ltd
289
+ *
290
+ * This program is free software; you can redistribute it and/or
291
+ * modify it under the terms of the GNU General Public License
292
+ * as published by the Free Software Foundation; either version 2
293
+ * of the License, or (at your option) any later version.
294
+ *
295
+ * This program is distributed in the hope that it will be useful,
296
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
297
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
298
+ * GNU General Public License for more details.
299
+ *
300
+ * You should have received a copy of the GNU General Public License
301
+ * along with this program; if not, see
302
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
303
+ *
304
+ * This header defines functions, types, etc which need to be shared
305
+ * between different source files within target/arm/ but which are
306
+ * private to it and not required by the rest of QEMU.
307
+ */
308
+
309
+#ifndef TARGET_ARM_SYNDROME_H
310
+#define TARGET_ARM_SYNDROME_H
311
+
312
+/* Valid Syndrome Register EC field values */
313
+enum arm_exception_class {
314
+ EC_UNCATEGORIZED = 0x00,
315
+ EC_WFX_TRAP = 0x01,
316
+ EC_CP15RTTRAP = 0x03,
317
+ EC_CP15RRTTRAP = 0x04,
318
+ EC_CP14RTTRAP = 0x05,
319
+ EC_CP14DTTRAP = 0x06,
320
+ EC_ADVSIMDFPACCESSTRAP = 0x07,
321
+ EC_FPIDTRAP = 0x08,
322
+ EC_PACTRAP = 0x09,
323
+ EC_CP14RRTTRAP = 0x0c,
31
+ EC_BTITRAP = 0x0d,
324
+ EC_BTITRAP = 0x0d,
32
EC_ILLEGALSTATE = 0x0e,
325
+ EC_ILLEGALSTATE = 0x0e,
33
EC_AA32_SVC = 0x11,
326
+ EC_AA32_SVC = 0x11,
34
EC_AA32_HVC = 0x12,
327
+ EC_AA32_HVC = 0x12,
35
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_pactrap(void)
328
+ EC_AA32_SMC = 0x13,
36
return EC_PACTRAP << ARM_EL_EC_SHIFT;
329
+ EC_AA64_SVC = 0x15,
37
}
330
+ EC_AA64_HVC = 0x16,
38
331
+ EC_AA64_SMC = 0x17,
332
+ EC_SYSTEMREGISTERTRAP = 0x18,
333
+ EC_SVEACCESSTRAP = 0x19,
334
+ EC_INSNABORT = 0x20,
335
+ EC_INSNABORT_SAME_EL = 0x21,
336
+ EC_PCALIGNMENT = 0x22,
337
+ EC_DATAABORT = 0x24,
338
+ EC_DATAABORT_SAME_EL = 0x25,
339
+ EC_SPALIGNMENT = 0x26,
340
+ EC_AA32_FPTRAP = 0x28,
341
+ EC_AA64_FPTRAP = 0x2c,
342
+ EC_SERROR = 0x2f,
343
+ EC_BREAKPOINT = 0x30,
344
+ EC_BREAKPOINT_SAME_EL = 0x31,
345
+ EC_SOFTWARESTEP = 0x32,
346
+ EC_SOFTWARESTEP_SAME_EL = 0x33,
347
+ EC_WATCHPOINT = 0x34,
348
+ EC_WATCHPOINT_SAME_EL = 0x35,
349
+ EC_AA32_BKPT = 0x38,
350
+ EC_VECTORCATCH = 0x3a,
351
+ EC_AA64_BKPT = 0x3c,
352
+};
353
+
354
+#define ARM_EL_EC_SHIFT 26
355
+#define ARM_EL_IL_SHIFT 25
356
+#define ARM_EL_ISV_SHIFT 24
357
+#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
358
+#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
359
+
360
+static inline uint32_t syn_get_ec(uint32_t syn)
361
+{
362
+ return syn >> ARM_EL_EC_SHIFT;
363
+}
364
+
365
+/*
366
+ * Utility functions for constructing various kinds of syndrome value.
367
+ * Note that in general we follow the AArch64 syndrome values; in a
368
+ * few cases the value in HSR for exceptions taken to AArch32 Hyp
369
+ * mode differs slightly, and we fix this up when populating HSR in
370
+ * arm_cpu_do_interrupt_aarch32_hyp().
371
+ * The exception is FP/SIMD access traps -- these report extra information
372
+ * when taking an exception to AArch32. For those we include the extra coproc
373
+ * and TA fields, and mask them out when taking the exception to AArch64.
374
+ */
375
+static inline uint32_t syn_uncategorized(void)
376
+{
377
+ return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
378
+}
379
+
380
+static inline uint32_t syn_aa64_svc(uint32_t imm16)
381
+{
382
+ return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
383
+}
384
+
385
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
386
+{
387
+ return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
388
+}
389
+
390
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
391
+{
392
+ return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
393
+}
394
+
395
+static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
396
+{
397
+ return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
398
+ | (is_16bit ? 0 : ARM_EL_IL);
399
+}
400
+
401
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
402
+{
403
+ return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
404
+}
405
+
406
+static inline uint32_t syn_aa32_smc(void)
407
+{
408
+ return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
409
+}
410
+
411
+static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
412
+{
413
+ return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
414
+}
415
+
416
+static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
417
+{
418
+ return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
419
+ | (is_16bit ? 0 : ARM_EL_IL);
420
+}
421
+
422
+static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
423
+ int crn, int crm, int rt,
424
+ int isread)
425
+{
426
+ return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
427
+ | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
428
+ | (crm << 1) | isread;
429
+}
430
+
431
+static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
432
+ int crn, int crm, int rt, int isread,
433
+ bool is_16bit)
434
+{
435
+ return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
436
+ | (is_16bit ? 0 : ARM_EL_IL)
437
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
438
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
439
+}
440
+
441
+static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
442
+ int crn, int crm, int rt, int isread,
443
+ bool is_16bit)
444
+{
445
+ return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
446
+ | (is_16bit ? 0 : ARM_EL_IL)
447
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
448
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
449
+}
450
+
451
+static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
452
+ int rt, int rt2, int isread,
453
+ bool is_16bit)
454
+{
455
+ return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
456
+ | (is_16bit ? 0 : ARM_EL_IL)
457
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
458
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
459
+}
460
+
461
+static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
462
+ int rt, int rt2, int isread,
463
+ bool is_16bit)
464
+{
465
+ return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
466
+ | (is_16bit ? 0 : ARM_EL_IL)
467
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
468
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
469
+}
470
+
471
+static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
472
+{
473
+ /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
474
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
475
+ | (is_16bit ? 0 : ARM_EL_IL)
476
+ | (cv << 24) | (cond << 20) | 0xa;
477
+}
478
+
479
+static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
480
+{
481
+ /* AArch32 SIMD trap: TA == 1 coproc == 0 */
482
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
483
+ | (is_16bit ? 0 : ARM_EL_IL)
484
+ | (cv << 24) | (cond << 20) | (1 << 5);
485
+}
486
+
487
+static inline uint32_t syn_sve_access_trap(void)
488
+{
489
+ return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
490
+}
491
+
492
+static inline uint32_t syn_pactrap(void)
493
+{
494
+ return EC_PACTRAP << ARM_EL_EC_SHIFT;
495
+}
496
+
39
+static inline uint32_t syn_btitrap(int btype)
497
+static inline uint32_t syn_btitrap(int btype)
40
+{
498
+{
41
+ return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
499
+ return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
42
+}
500
+}
43
+
501
+
44
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
502
+static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
45
{
503
+{
46
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
504
+ return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
505
+ | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
48
index XXXXXXX..XXXXXXX 100644
506
+}
49
--- a/target/arm/translate.h
507
+
50
+++ b/target/arm/translate.h
508
+static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
509
+ int ea, int cm, int s1ptw,
52
bool pauth_active;
510
+ int wnr, int fsc)
53
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
511
+{
54
bool bt;
512
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
55
- /* A copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. */
513
+ | ARM_EL_IL
56
- uint8_t btype;
514
+ | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
57
+ /*
515
+ | (wnr << 6) | fsc;
58
+ * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
516
+}
59
+ * < 0, set by the current instruction.
517
+
60
+ */
518
+static inline uint32_t syn_data_abort_with_iss(int same_el,
61
+ int8_t btype;
519
+ int sas, int sse, int srt,
62
+ /* True if this page is guarded. */
520
+ int sf, int ar,
63
+ bool guarded_page;
521
+ int ea, int cm, int s1ptw,
64
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
522
+ int wnr, int fsc,
65
int c15_cpar;
523
+ bool is_16bit)
66
/* TCG op of the current insn_start. */
524
+{
67
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
525
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
68
index XXXXXXX..XXXXXXX 100644
526
+ | (is_16bit ? 0 : ARM_EL_IL)
69
--- a/target/arm/translate-a64.c
527
+ | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
70
+++ b/target/arm/translate-a64.c
528
+ | (sf << 15) | (ar << 14)
71
@@ -XXX,XX +XXX,XX @@ static inline int get_a64_user_mem_index(DisasContext *s)
529
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
72
return arm_to_core_mmu_idx(useridx);
530
+}
73
}
531
+
74
532
+static inline uint32_t syn_swstep(int same_el, int isv, int ex)
75
+static void reset_btype(DisasContext *s)
533
+{
76
+{
534
+ return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
77
+ if (s->btype != 0) {
535
+ | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
78
+ TCGv_i32 zero = tcg_const_i32(0);
536
+}
79
+ tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
537
+
80
+ tcg_temp_free_i32(zero);
538
+static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
81
+ s->btype = 0;
539
+{
82
+ }
540
+ return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
83
+}
541
+ | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
84
+
542
+}
85
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
543
+
86
fprintf_function cpu_fprintf, int flags)
544
+static inline uint32_t syn_breakpoint(int same_el)
87
{
545
+{
88
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
546
+ return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
89
}
547
+ | ARM_EL_IL | 0x22;
90
}
548
+}
91
549
+
92
+/**
550
+static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
93
+ * is_guarded_page:
551
+{
94
+ * @env: The cpu environment
552
+ return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
95
+ * @s: The DisasContext
553
+ (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
96
+ *
554
+ (cv << 24) | (cond << 20) | ti;
97
+ * Return true if the page is guarded.
555
+}
98
+ */
556
+
99
+static bool is_guarded_page(CPUARMState *env, DisasContext *s)
557
+#endif /* TARGET_ARM_SYNDROME_H */
100
+{
101
+#ifdef CONFIG_USER_ONLY
102
+ return false; /* FIXME */
103
+#else
104
+ uint64_t addr = s->base.pc_first;
105
+ int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
106
+ unsigned int index = tlb_index(env, mmu_idx, addr);
107
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
108
+
109
+ /*
110
+ * We test this immediately after reading an insn, which means
111
+ * that any normal page must be in the TLB. The only exception
112
+ * would be for executing from flash or device memory, which
113
+ * does not retain the TLB entry.
114
+ *
115
+ * FIXME: Assume false for those, for now. We could use
116
+ * arm_cpu_get_phys_page_attrs_debug to re-read the page
117
+ * table entry even for that case.
118
+ */
119
+ return (tlb_hit(entry->addr_code, addr) &&
120
+ env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
121
+#endif
122
+}
123
+
124
+/**
125
+ * btype_destination_ok:
126
+ * @insn: The instruction at the branch destination
127
+ * @bt: SCTLR_ELx.BT
128
+ * @btype: PSTATE.BTYPE, and is non-zero
129
+ *
130
+ * On a guarded page, there are a limited number of insns
131
+ * that may be present at the branch target:
132
+ * - branch target identifiers,
133
+ * - paciasp, pacibsp,
134
+ * - BRK insn
135
+ * - HLT insn
136
+ * Anything else causes a Branch Target Exception.
137
+ *
138
+ * Return true if the branch is compatible, false to raise BTITRAP.
139
+ */
140
+static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
141
+{
142
+ if ((insn & 0xfffff01fu) == 0xd503201fu) {
143
+ /* HINT space */
144
+ switch (extract32(insn, 5, 7)) {
145
+ case 0b011001: /* PACIASP */
146
+ case 0b011011: /* PACIBSP */
147
+ /*
148
+ * If SCTLR_ELx.BT, then PACI*SP are not compatible
149
+ * with btype == 3. Otherwise all btype are ok.
150
+ */
151
+ return !bt || btype != 3;
152
+ case 0b100000: /* BTI */
153
+ /* Not compatible with any btype. */
154
+ return false;
155
+ case 0b100010: /* BTI c */
156
+ /* Not compatible with btype == 3 */
157
+ return btype != 3;
158
+ case 0b100100: /* BTI j */
159
+ /* Not compatible with btype == 2 */
160
+ return btype != 2;
161
+ case 0b100110: /* BTI jc */
162
+ /* Compatible with any btype. */
163
+ return true;
164
+ }
165
+ } else {
166
+ switch (insn & 0xffe0001fu) {
167
+ case 0xd4200000u: /* BRK */
168
+ case 0xd4400000u: /* HLT */
169
+ /* Give priority to the breakpoint exception. */
170
+ return true;
171
+ }
172
+ }
173
+ return false;
174
+}
175
+
176
/* C3.1 A64 instruction index by encoding */
177
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
178
{
179
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
180
181
s->fp_access_checked = false;
182
183
+ if (dc_isar_feature(aa64_bti, s)) {
184
+ if (s->base.num_insns == 1) {
185
+ /*
186
+ * At the first insn of the TB, compute s->guarded_page.
187
+ * We delayed computing this until successfully reading
188
+ * the first insn of the TB, above. This (mostly) ensures
189
+ * that the softmmu tlb entry has been populated, and the
190
+ * page table GP bit is available.
191
+ *
192
+ * Note that we need to compute this even if btype == 0,
193
+ * because this value is used for BR instructions later
194
+ * where ENV is not available.
195
+ */
196
+ s->guarded_page = is_guarded_page(env, s);
197
+
198
+ /* First insn can have btype set to non-zero. */
199
+ tcg_debug_assert(s->btype >= 0);
200
+
201
+ /*
202
+ * Note that the Branch Target Exception has fairly high
203
+ * priority -- below debugging exceptions but above most
204
+ * everything else. This allows us to handle this now
205
+ * instead of waiting until the insn is otherwise decoded.
206
+ */
207
+ if (s->btype != 0
208
+ && s->guarded_page
209
+ && !btype_destination_ok(insn, s->bt, s->btype)) {
210
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
211
+ default_exception_el(s));
212
+ return;
213
+ }
214
+ } else {
215
+ /* Not the first insn: btype must be 0. */
216
+ tcg_debug_assert(s->btype == 0);
217
+ }
218
+ }
219
+
220
switch (extract32(insn, 25, 4)) {
221
case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
222
unallocated_encoding(s);
223
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
224
225
/* if we allocated any temporaries, free them here */
226
free_tmp_a64(s);
227
+
228
+ /*
229
+ * After execution of most insns, btype is reset to 0.
230
+ * Note that we set btype == -1 when the insn sets btype.
231
+ */
232
+ if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
233
+ reset_btype(s);
234
+ }
235
}
236
237
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
238
--
558
--
239
2.20.1
559
2.20.1
240
560
241
561
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
A proper syndrome is required to fill in the proper si_code.
4
Use page_get_flags to determine permission vs translation for user-only.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210210000223.884088-27-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
linux-user/aarch64/cpu_loop.c | 24 +++++++++++++++++++++---
12
target/arm/tlb_helper.c | 15 +++++++++------
13
2 files changed, 30 insertions(+), 9 deletions(-)
14
15
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/cpu_loop.c
18
+++ b/linux-user/aarch64/cpu_loop.c
19
@@ -XXX,XX +XXX,XX @@
20
#include "cpu_loop-common.h"
21
#include "qemu/guest-random.h"
22
#include "hw/semihosting/common-semi.h"
23
+#include "target/arm/syndrome.h"
24
25
#define get_user_code_u32(x, gaddr, env) \
26
({ abi_long __r = get_user_u32((x), (gaddr)); \
27
@@ -XXX,XX +XXX,XX @@
28
void cpu_loop(CPUARMState *env)
29
{
30
CPUState *cs = env_cpu(env);
31
- int trapnr;
32
+ int trapnr, ec, fsc;
33
abi_long ret;
34
target_siginfo_t info;
35
36
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
37
case EXCP_DATA_ABORT:
38
info.si_signo = TARGET_SIGSEGV;
39
info.si_errno = 0;
40
- /* XXX: check env->error_code */
41
- info.si_code = TARGET_SEGV_MAPERR;
42
info._sifields._sigfault._addr = env->exception.vaddress;
43
+
44
+ /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */
45
+ ec = syn_get_ec(env->exception.syndrome);
46
+ assert(ec == EC_DATAABORT || ec == EC_INSNABORT);
47
+
48
+ /* Both EC have the same format for FSC, or close enough. */
49
+ fsc = extract32(env->exception.syndrome, 0, 6);
50
+ switch (fsc) {
51
+ case 0x04 ... 0x07: /* Translation fault, level {0-3} */
52
+ info.si_code = TARGET_SEGV_MAPERR;
53
+ break;
54
+ case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
55
+ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
56
+ info.si_code = TARGET_SEGV_ACCERR;
57
+ break;
58
+ default:
59
+ g_assert_not_reached();
60
+ }
61
+
62
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
63
break;
64
case EXCP_DEBUG:
65
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/tlb_helper.c
68
+++ b/target/arm/tlb_helper.c
69
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
70
bool probe, uintptr_t retaddr)
71
{
72
ARMCPU *cpu = ARM_CPU(cs);
73
+ ARMMMUFaultInfo fi = {};
74
75
#ifdef CONFIG_USER_ONLY
76
- cpu->env.exception.vaddress = address;
77
- if (access_type == MMU_INST_FETCH) {
78
- cs->exception_index = EXCP_PREFETCH_ABORT;
79
+ int flags = page_get_flags(useronly_clean_ptr(address));
80
+ if (flags & PAGE_VALID) {
81
+ fi.type = ARMFault_Permission;
82
} else {
83
- cs->exception_index = EXCP_DATA_ABORT;
84
+ fi.type = ARMFault_Translation;
85
}
86
- cpu_loop_exit_restore(cs, retaddr);
87
+
88
+ /* now we have a real cpu fault */
89
+ cpu_restore_state(cs, retaddr, true);
90
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
91
#else
92
hwaddr phys_addr;
93
target_ulong page_size;
94
int prot, ret;
95
MemTxAttrs attrs = {};
96
- ARMMMUFaultInfo fi = {};
97
ARMCacheAttrs cacheattrs = {};
98
99
/*
100
--
101
2.20.1
102
103
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210210000223.884088-28-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
linux-user/aarch64/target_signal.h | 2 ++
9
linux-user/aarch64/cpu_loop.c | 3 +++
10
2 files changed, 5 insertions(+)
11
12
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/aarch64/target_signal.h
15
+++ b/linux-user/aarch64/target_signal.h
16
@@ -XXX,XX +XXX,XX @@ typedef struct target_sigaltstack {
17
18
#include "../generic/signal.h"
19
20
+#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
21
+
22
#define TARGET_ARCH_HAS_SETUP_FRAME
23
#endif /* AARCH64_TARGET_SIGNAL_H */
24
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/linux-user/aarch64/cpu_loop.c
27
+++ b/linux-user/aarch64/cpu_loop.c
28
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
29
case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
30
info.si_code = TARGET_SEGV_ACCERR;
31
break;
32
+ case 0x11: /* Synchronous Tag Check Fault */
33
+ info.si_code = TARGET_SEGV_MTESERR;
34
+ break;
35
default:
36
g_assert_not_reached();
37
}
38
--
39
2.20.1
40
41
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
The real kernel collects _TIF_MTE_ASYNC_FAULT into the current thread's
4
state on any kernel entry (interrupt, exception etc), and then delivers
5
the signal in advance of resuming the thread.
6
7
This means that while the signal won't be delivered immediately, it will
8
not be delayed forever -- at minimum it will be delivered after the next
9
clock interrupt.
10
11
We don't have a clock interrupt in linux-user, so we issue a cpu_kick
12
to signal a return to the main loop at the end of the current TB.
13
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20210210000223.884088-29-richard.henderson@linaro.org
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
19
linux-user/aarch64/target_signal.h | 1 +
20
linux-user/aarch64/cpu_loop.c | 11 +++++++++++
21
target/arm/mte_helper.c | 10 ++++++++++
22
3 files changed, 22 insertions(+)
23
24
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/linux-user/aarch64/target_signal.h
27
+++ b/linux-user/aarch64/target_signal.h
28
@@ -XXX,XX +XXX,XX @@ typedef struct target_sigaltstack {
29
30
#include "../generic/signal.h"
31
32
+#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
33
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
34
35
#define TARGET_ARCH_HAS_SETUP_FRAME
36
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/linux-user/aarch64/cpu_loop.c
39
+++ b/linux-user/aarch64/cpu_loop.c
40
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
41
EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
42
abort();
43
}
44
+
45
+ /* Check for MTE asynchronous faults */
46
+ if (unlikely(env->cp15.tfsr_el[0])) {
47
+ env->cp15.tfsr_el[0] = 0;
48
+ info.si_signo = TARGET_SIGSEGV;
49
+ info.si_errno = 0;
50
+ info._sifields._sigfault._addr = 0;
51
+ info.si_code = TARGET_SEGV_MTEAERR;
52
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
53
+ }
54
+
55
process_pending_signals(env);
56
/* Exception return on AArch64 always clears the exclusive monitor,
57
* so any return to running guest code implies this.
58
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/mte_helper.c
61
+++ b/target/arm/mte_helper.c
62
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
63
select = 0;
64
}
65
env->cp15.tfsr_el[el] |= 1 << select;
66
+#ifdef CONFIG_USER_ONLY
67
+ /*
68
+ * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
69
+ * which then sends a SIGSEGV when the thread is next scheduled.
70
+ * This cpu will return to the main loop at the end of the TB,
71
+ * which is rather sooner than "normal". But the alternative
72
+ * is waiting until the next syscall.
73
+ */
74
+ qemu_cpu_kick(env_cpu(env));
75
+#endif
76
break;
77
78
default:
79
--
80
2.20.1
81
82
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
These bits can be used to cache target-specific data in cputlb
3
Use the now-saved PAGE_ANON and PAGE_MTE bits,
4
read from the page tables.
4
and the per-page saved data.
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20210210000223.884088-30-richard.henderson@linaro.org
8
Message-id: 20190128223118.5255-5-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
include/exec/memattrs.h | 10 ++++++++++
11
target/arm/mte_helper.c | 29 +++++++++++++++++++++++++++--
12
1 file changed, 10 insertions(+)
12
1 file changed, 27 insertions(+), 2 deletions(-)
13
13
14
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
14
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/memattrs.h
16
--- a/target/arm/mte_helper.c
17
+++ b/include/exec/memattrs.h
17
+++ b/target/arm/mte_helper.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs {
18
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
19
unsigned int user:1;
19
int tag_size, uintptr_t ra)
20
/* Requester ID (for MSI for example) */
20
{
21
unsigned int requester_id:16;
21
#ifdef CONFIG_USER_ONLY
22
+ /*
22
- /* Tag storage not implemented. */
23
+ * The following are target-specific page-table bits. These are not
23
- return NULL;
24
+ * related to actual memory transactions at all. However, this structure
24
+ uint64_t clean_ptr = useronly_clean_ptr(ptr);
25
+ * is part of the tlb_fill interface, cached in the cputlb structure,
25
+ int flags = page_get_flags(clean_ptr);
26
+ * and has unused bits. These fields will be read by target-specific
26
+ uint8_t *tags;
27
+ * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
27
+ uintptr_t index;
28
+ */
28
+
29
+ unsigned int target_tlb_bit0 : 1;
29
+ if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE : PAGE_READ))) {
30
+ unsigned int target_tlb_bit1 : 1;
30
+ /* SIGSEGV */
31
+ unsigned int target_tlb_bit2 : 1;
31
+ arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access,
32
} MemTxAttrs;
32
+ ptr_mmu_idx, false, ra);
33
33
+ g_assert_not_reached();
34
/* Bus masters which don't specify any attributes will get this,
34
+ }
35
+
36
+ /* Require both MAP_ANON and PROT_MTE for the page. */
37
+ if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
38
+ return NULL;
39
+ }
40
+
41
+ tags = page_get_target_data(clean_ptr);
42
+ if (tags == NULL) {
43
+ size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
44
+ tags = page_alloc_target_data(clean_ptr, alloc_size);
45
+ assert(tags != NULL);
46
+ }
47
+
48
+ index = extract32(ptr, LOG2_TAG_GRANULE + 1,
49
+ TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
50
+ return tags + index;
51
#else
52
uintptr_t index;
53
CPUIOTLBEntry *iotlbentry;
35
--
54
--
36
2.20.1
55
2.20.1
37
56
38
57
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
This has been enabled in the linux kernel since v3.11
4
(commit d50240a5f6cea, 2013-09-03,
5
"arm64: mm: permit use of tagged pointers at EL0").
6
2
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190204132126.3255-5-richard.henderson@linaro.org
5
Message-id: 20210210000223.884088-31-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
7
---
12
target/arm/cpu.c | 6 ++++++
8
target/arm/cpu.c | 15 +++++++++++++++
13
1 file changed, 6 insertions(+)
9
1 file changed, 15 insertions(+)
14
10
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
11
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.c
13
--- a/target/arm/cpu.c
18
+++ b/target/arm/cpu.c
14
+++ b/target/arm/cpu.c
19
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
15
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
20
env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
16
* Note that this must match useronly_clean_ptr.
21
env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
17
*/
22
env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
18
env->cp15.tcr_el[1].raw_tcr = (1ULL << 37);
23
+ /*
19
+
24
+ * Enable TBI0 and TBI1. While the real kernel only enables TBI0,
20
+ /* Enable MTE */
25
+ * turning on both here will produce smaller code and otherwise
21
+ if (cpu_isar_feature(aa64_mte, cpu)) {
26
+ * make no difference to the user-level emulation.
22
+ /* Enable tag access, but leave TCF0 as No Effect (0). */
27
+ */
23
+ env->cp15.sctlr_el[1] |= SCTLR_ATA0;
28
+ env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
24
+ /*
25
+ * Exclude all tags, so that tag 0 is always used.
26
+ * This corresponds to Linux current->thread.gcr_incl = 0.
27
+ *
28
+ * Set RRND, so that helper_irg() will generate a seed later.
29
+ * Here in cpu_reset(), the crypto subsystem has not yet been
30
+ * initialized.
31
+ */
32
+ env->cp15.gcr_el1 = 0x1ffff;
33
+ }
29
#else
34
#else
30
/* Reset into the highest available EL */
35
/* Reset into the highest available EL */
31
if (arm_feature(env, ARM_FEATURE_EL3)) {
36
if (arm_feature(env, ARM_FEATURE_EL3)) {
32
--
37
--
33
2.20.1
38
2.20.1
34
39
35
40
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190201195404.30486-3-richard.henderson@linaro.org
5
Message-id: 20210210000223.884088-32-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
7
---
8
tests/tcg/aarch64/Makefile.target | 6 +++++-
8
tests/tcg/aarch64/mte.h | 60 +++++++++++++++++++++++++++++++
9
tests/tcg/aarch64/pauth-1.c | 23 +++++++++++++++++++++++
9
tests/tcg/aarch64/mte-1.c | 28 +++++++++++++++
10
2 files changed, 28 insertions(+), 1 deletion(-)
10
tests/tcg/aarch64/mte-2.c | 45 +++++++++++++++++++++++
11
create mode 100644 tests/tcg/aarch64/pauth-1.c
11
tests/tcg/aarch64/mte-3.c | 51 ++++++++++++++++++++++++++
12
tests/tcg/aarch64/mte-4.c | 45 +++++++++++++++++++++++
13
tests/tcg/aarch64/Makefile.target | 6 ++++
14
tests/tcg/configure.sh | 4 +++
15
7 files changed, 239 insertions(+)
16
create mode 100644 tests/tcg/aarch64/mte.h
17
create mode 100644 tests/tcg/aarch64/mte-1.c
18
create mode 100644 tests/tcg/aarch64/mte-2.c
19
create mode 100644 tests/tcg/aarch64/mte-3.c
20
create mode 100644 tests/tcg/aarch64/mte-4.c
12
21
22
diff --git a/tests/tcg/aarch64/mte.h b/tests/tcg/aarch64/mte.h
23
new file mode 100644
24
index XXXXXXX..XXXXXXX
25
--- /dev/null
26
+++ b/tests/tcg/aarch64/mte.h
27
@@ -XXX,XX +XXX,XX @@
28
+/*
29
+ * Linux kernel fallback API definitions for MTE and test helpers.
30
+ *
31
+ * Copyright (c) 2021 Linaro Ltd
32
+ * SPDX-License-Identifier: GPL-2.0-or-later
33
+ */
34
+
35
+#include <assert.h>
36
+#include <string.h>
37
+#include <stdlib.h>
38
+#include <stdio.h>
39
+#include <unistd.h>
40
+#include <signal.h>
41
+#include <sys/mman.h>
42
+#include <sys/prctl.h>
43
+
44
+#ifndef PR_SET_TAGGED_ADDR_CTRL
45
+# define PR_SET_TAGGED_ADDR_CTRL 55
46
+#endif
47
+#ifndef PR_TAGGED_ADDR_ENABLE
48
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
49
+#endif
50
+#ifndef PR_MTE_TCF_SHIFT
51
+# define PR_MTE_TCF_SHIFT 1
52
+# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
53
+# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
54
+# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
55
+# define PR_MTE_TAG_SHIFT 3
56
+#endif
57
+
58
+#ifndef PROT_MTE
59
+# define PROT_MTE 0x20
60
+#endif
61
+
62
+#ifndef SEGV_MTEAERR
63
+# define SEGV_MTEAERR 8
64
+# define SEGV_MTESERR 9
65
+#endif
66
+
67
+static void enable_mte(int tcf)
68
+{
69
+ int r = prctl(PR_SET_TAGGED_ADDR_CTRL,
70
+ PR_TAGGED_ADDR_ENABLE | tcf | (0xfffe << PR_MTE_TAG_SHIFT),
71
+ 0, 0, 0);
72
+ if (r < 0) {
73
+ perror("PR_SET_TAGGED_ADDR_CTRL");
74
+ exit(2);
75
+ }
76
+}
77
+
78
+static void *alloc_mte_mem(size_t size)
79
+{
80
+ void *p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_MTE,
81
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
82
+ if (p == MAP_FAILED) {
83
+ perror("mmap PROT_MTE");
84
+ exit(2);
85
+ }
86
+ return p;
87
+}
88
diff --git a/tests/tcg/aarch64/mte-1.c b/tests/tcg/aarch64/mte-1.c
89
new file mode 100644
90
index XXXXXXX..XXXXXXX
91
--- /dev/null
92
+++ b/tests/tcg/aarch64/mte-1.c
93
@@ -XXX,XX +XXX,XX @@
94
+/*
95
+ * Memory tagging, basic pass cases.
96
+ *
97
+ * Copyright (c) 2021 Linaro Ltd
98
+ * SPDX-License-Identifier: GPL-2.0-or-later
99
+ */
100
+
101
+#include "mte.h"
102
+
103
+int main(int ac, char **av)
104
+{
105
+ int *p0, *p1, *p2;
106
+ long c;
107
+
108
+ enable_mte(PR_MTE_TCF_NONE);
109
+ p0 = alloc_mte_mem(sizeof(*p0));
110
+
111
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(1));
112
+ assert(p1 != p0);
113
+ asm("subp %0,%1,%2" : "=r"(c) : "r"(p0), "r"(p1));
114
+ assert(c == 0);
115
+
116
+ asm("stg %0, [%0]" : : "r"(p1));
117
+ asm("ldg %0, [%1]" : "=r"(p2) : "r"(p0), "0"(p0));
118
+ assert(p1 == p2);
119
+
120
+ return 0;
121
+}
122
diff --git a/tests/tcg/aarch64/mte-2.c b/tests/tcg/aarch64/mte-2.c
123
new file mode 100644
124
index XXXXXXX..XXXXXXX
125
--- /dev/null
126
+++ b/tests/tcg/aarch64/mte-2.c
127
@@ -XXX,XX +XXX,XX @@
128
+/*
129
+ * Memory tagging, basic fail cases, synchronous signals.
130
+ *
131
+ * Copyright (c) 2021 Linaro Ltd
132
+ * SPDX-License-Identifier: GPL-2.0-or-later
133
+ */
134
+
135
+#include "mte.h"
136
+
137
+void pass(int sig, siginfo_t *info, void *uc)
138
+{
139
+ assert(info->si_code == SEGV_MTESERR);
140
+ exit(0);
141
+}
142
+
143
+int main(int ac, char **av)
144
+{
145
+ struct sigaction sa;
146
+ int *p0, *p1, *p2;
147
+ long excl = 1;
148
+
149
+ enable_mte(PR_MTE_TCF_SYNC);
150
+ p0 = alloc_mte_mem(sizeof(*p0));
151
+
152
+ /* Create two differently tagged pointers. */
153
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
154
+ asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
155
+ assert(excl != 1);
156
+ asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
157
+ assert(p1 != p2);
158
+
159
+ /* Store the tag from the first pointer. */
160
+ asm("stg %0, [%0]" : : "r"(p1));
161
+
162
+ *p1 = 0;
163
+
164
+ memset(&sa, 0, sizeof(sa));
165
+ sa.sa_sigaction = pass;
166
+ sa.sa_flags = SA_SIGINFO;
167
+ sigaction(SIGSEGV, &sa, NULL);
168
+
169
+ *p2 = 0;
170
+
171
+ abort();
172
+}
173
diff --git a/tests/tcg/aarch64/mte-3.c b/tests/tcg/aarch64/mte-3.c
174
new file mode 100644
175
index XXXXXXX..XXXXXXX
176
--- /dev/null
177
+++ b/tests/tcg/aarch64/mte-3.c
178
@@ -XXX,XX +XXX,XX @@
179
+/*
180
+ * Memory tagging, basic fail cases, asynchronous signals.
181
+ *
182
+ * Copyright (c) 2021 Linaro Ltd
183
+ * SPDX-License-Identifier: GPL-2.0-or-later
184
+ */
185
+
186
+#include "mte.h"
187
+
188
+void pass(int sig, siginfo_t *info, void *uc)
189
+{
190
+ assert(info->si_code == SEGV_MTEAERR);
191
+ exit(0);
192
+}
193
+
194
+int main(int ac, char **av)
195
+{
196
+ struct sigaction sa;
197
+ long *p0, *p1, *p2;
198
+ long excl = 1;
199
+
200
+ enable_mte(PR_MTE_TCF_ASYNC);
201
+ p0 = alloc_mte_mem(sizeof(*p0));
202
+
203
+ /* Create two differently tagged pointers. */
204
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
205
+ asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
206
+ assert(excl != 1);
207
+ asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
208
+ assert(p1 != p2);
209
+
210
+ /* Store the tag from the first pointer. */
211
+ asm("stg %0, [%0]" : : "r"(p1));
212
+
213
+ *p1 = 0;
214
+
215
+ memset(&sa, 0, sizeof(sa));
216
+ sa.sa_sigaction = pass;
217
+ sa.sa_flags = SA_SIGINFO;
218
+ sigaction(SIGSEGV, &sa, NULL);
219
+
220
+ /*
221
+ * Signal for async error will happen eventually.
222
+ * For a real kernel this should be after the next IRQ (e.g. timer).
223
+ * For qemu linux-user, we kick the cpu and exit at the next TB.
224
+ * In either case, loop until this happens (or killed by timeout).
225
+ * For extra sauce, yield, producing EXCP_YIELD to cpu_loop().
226
+ */
227
+ asm("str %0, [%0]; yield" : : "r"(p2));
228
+ while (1);
229
+}
230
diff --git a/tests/tcg/aarch64/mte-4.c b/tests/tcg/aarch64/mte-4.c
231
new file mode 100644
232
index XXXXXXX..XXXXXXX
233
--- /dev/null
234
+++ b/tests/tcg/aarch64/mte-4.c
235
@@ -XXX,XX +XXX,XX @@
236
+/*
237
+ * Memory tagging, re-reading tag checks.
238
+ *
239
+ * Copyright (c) 2021 Linaro Ltd
240
+ * SPDX-License-Identifier: GPL-2.0-or-later
241
+ */
242
+
243
+#include "mte.h"
244
+
245
+void __attribute__((noinline)) tagset(void *p, size_t size)
246
+{
247
+ size_t i;
248
+ for (i = 0; i < size; i += 16) {
249
+ asm("stg %0, [%0]" : : "r"(p + i));
250
+ }
251
+}
252
+
253
+void __attribute__((noinline)) tagcheck(void *p, size_t size)
254
+{
255
+ size_t i;
256
+ void *c;
257
+
258
+ for (i = 0; i < size; i += 16) {
259
+ asm("ldg %0, [%1]" : "=r"(c) : "r"(p + i), "0"(p));
260
+ assert(c == p);
261
+ }
262
+}
263
+
264
+int main(int ac, char **av)
265
+{
266
+ size_t size = getpagesize() * 4;
267
+ long excl = 1;
268
+ int *p0, *p1;
269
+
270
+ enable_mte(PR_MTE_TCF_ASYNC);
271
+ p0 = alloc_mte_mem(size);
272
+
273
+ /* Tag the pointer. */
274
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
275
+
276
+ tagset(p1, size);
277
+ tagcheck(p1, size);
278
+
279
+ return 0;
280
+}
13
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
281
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
14
index XXXXXXX..XXXXXXX 100644
282
index XXXXXXX..XXXXXXX 100644
15
--- a/tests/tcg/aarch64/Makefile.target
283
--- a/tests/tcg/aarch64/Makefile.target
16
+++ b/tests/tcg/aarch64/Makefile.target
284
+++ b/tests/tcg/aarch64/Makefile.target
17
@@ -XXX,XX +XXX,XX @@ VPATH         += $(AARCH64_SRC)
285
@@ -XXX,XX +XXX,XX @@ endif
18
# we don't build any of the ARM tests
286
# bti-2 tests PROT_BTI, so no special compiler support required.
19
AARCH64_TESTS=$(filter-out $(ARM_TESTS), $(TESTS))
287
AARCH64_TESTS += bti-2
20
AARCH64_TESTS+=fcvt
288
21
-TESTS:=$(AARCH64_TESTS)
289
+# MTE Tests
22
290
+ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
23
fcvt: LDFLAGS+=-lm
291
+AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4
24
292
+mte-%: CFLAGS += -march=armv8.5-a+memtag
25
run-fcvt: fcvt
293
+endif
26
    $(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
294
+
27
    $(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)
295
# Semihosting smoke test for linux-user
28
+
296
AARCH64_TESTS += semihosting
29
+AARCH64_TESTS += pauth-1
297
run-semihosting: semihosting
30
+run-pauth-%: QEMU += -cpu max
298
diff --git a/tests/tcg/configure.sh b/tests/tcg/configure.sh
31
+
299
index XXXXXXX..XXXXXXX 100755
32
+TESTS:=$(AARCH64_TESTS)
300
--- a/tests/tcg/configure.sh
33
diff --git a/tests/tcg/aarch64/pauth-1.c b/tests/tcg/aarch64/pauth-1.c
301
+++ b/tests/tcg/configure.sh
34
new file mode 100644
302
@@ -XXX,XX +XXX,XX @@ for target in $target_list; do
35
index XXXXXXX..XXXXXXX
303
-mbranch-protection=standard -o $TMPE $TMPC; then
36
--- /dev/null
304
echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
37
+++ b/tests/tcg/aarch64/pauth-1.c
305
fi
38
@@ -XXX,XX +XXX,XX @@
306
+ if do_compiler "$target_compiler" $target_compiler_cflags \
39
+#include <assert.h>
307
+ -march=armv8.5-a+memtag -o $TMPE $TMPC; then
40
+#include <sys/prctl.h>
308
+ echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
41
+
309
+ fi
42
+asm(".arch armv8.4-a");
310
;;
43
+
311
esac
44
+#ifndef PR_PAC_RESET_KEYS
312
45
+#define PR_PAC_RESET_KEYS 54
46
+#define PR_PAC_APDAKEY (1 << 2)
47
+#endif
48
+
49
+int main()
50
+{
51
+ int x;
52
+ void *p0 = &x, *p1, *p2;
53
+
54
+ asm volatile("pacdza %0" : "=r"(p1) : "0"(p0));
55
+ prctl(PR_PAC_RESET_KEYS, PR_PAC_APDAKEY, 0, 0, 0);
56
+ asm volatile("pacdza %0" : "=r"(p2) : "0"(p0));
57
+
58
+ assert(p1 != p0);
59
+ assert(p1 != p2);
60
+ return 0;
61
+}
62
--
313
--
63
2.20.1
314
2.20.1
64
315
65
316
diff view generated by jsdifflib
New patch
1
From: Doug Evans <dje@google.com>
1
2
3
This is a 10/100 ethernet device that has several features.
4
Only the ones needed by the Linux driver have been implemented.
5
See npcm7xx_emc.c for a list of unimplemented features.
6
7
Reviewed-by: Hao Wu <wuhaotsh@google.com>
8
Reviewed-by: Avi Fishman <avi.fishman@nuvoton.com>
9
Signed-off-by: Doug Evans <dje@google.com>
10
Message-id: 20210209015541.778833-2-dje@google.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
include/hw/net/npcm7xx_emc.h | 286 ++++++++++++
15
hw/net/npcm7xx_emc.c | 857 +++++++++++++++++++++++++++++++++++
16
hw/net/meson.build | 1 +
17
hw/net/trace-events | 17 +
18
4 files changed, 1161 insertions(+)
19
create mode 100644 include/hw/net/npcm7xx_emc.h
20
create mode 100644 hw/net/npcm7xx_emc.c
21
22
diff --git a/include/hw/net/npcm7xx_emc.h b/include/hw/net/npcm7xx_emc.h
23
new file mode 100644
24
index XXXXXXX..XXXXXXX
25
--- /dev/null
26
+++ b/include/hw/net/npcm7xx_emc.h
27
@@ -XXX,XX +XXX,XX @@
28
+/*
29
+ * Nuvoton NPCM7xx EMC Module
30
+ *
31
+ * Copyright 2020 Google LLC
32
+ *
33
+ * This program is free software; you can redistribute it and/or modify it
34
+ * under the terms of the GNU General Public License as published by the
35
+ * Free Software Foundation; either version 2 of the License, or
36
+ * (at your option) any later version.
37
+ *
38
+ * This program is distributed in the hope that it will be useful, but WITHOUT
39
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
40
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
41
+ * for more details.
42
+ */
43
+
44
+#ifndef NPCM7XX_EMC_H
45
+#define NPCM7XX_EMC_H
46
+
47
+#include "hw/irq.h"
48
+#include "hw/sysbus.h"
49
+#include "net/net.h"
50
+
51
+/* 32-bit register indices. */
52
+enum NPCM7xxPWMRegister {
53
+ /* Control registers. */
54
+ REG_CAMCMR,
55
+ REG_CAMEN,
56
+
57
+ /* There are 16 CAMn[ML] registers. */
58
+ REG_CAMM_BASE,
59
+ REG_CAML_BASE,
60
+ REG_CAMML_LAST = 0x21,
61
+
62
+ REG_TXDLSA = 0x22,
63
+ REG_RXDLSA,
64
+ REG_MCMDR,
65
+ REG_MIID,
66
+ REG_MIIDA,
67
+ REG_FFTCR,
68
+ REG_TSDR,
69
+ REG_RSDR,
70
+ REG_DMARFC,
71
+ REG_MIEN,
72
+
73
+ /* Status registers. */
74
+ REG_MISTA,
75
+ REG_MGSTA,
76
+ REG_MPCNT,
77
+ REG_MRPC,
78
+ REG_MRPCC,
79
+ REG_MREPC,
80
+ REG_DMARFS,
81
+ REG_CTXDSA,
82
+ REG_CTXBSA,
83
+ REG_CRXDSA,
84
+ REG_CRXBSA,
85
+
86
+ NPCM7XX_NUM_EMC_REGS,
87
+};
88
+
89
+/* REG_CAMCMR fields */
90
+/* Enable CAM Compare */
91
+#define REG_CAMCMR_ECMP (1 << 4)
92
+/* Complement CAM Compare */
93
+#define REG_CAMCMR_CCAM (1 << 3)
94
+/* Accept Broadcast Packet */
95
+#define REG_CAMCMR_ABP (1 << 2)
96
+/* Accept Multicast Packet */
97
+#define REG_CAMCMR_AMP (1 << 1)
98
+/* Accept Unicast Packet */
99
+#define REG_CAMCMR_AUP (1 << 0)
100
+
101
+/* REG_MCMDR fields */
102
+/* Software Reset */
103
+#define REG_MCMDR_SWR (1 << 24)
104
+/* Internal Loopback Select */
105
+#define REG_MCMDR_LBK (1 << 21)
106
+/* Operation Mode Select */
107
+#define REG_MCMDR_OPMOD (1 << 20)
108
+/* Enable MDC Clock Generation */
109
+#define REG_MCMDR_ENMDC (1 << 19)
110
+/* Full-Duplex Mode Select */
111
+#define REG_MCMDR_FDUP (1 << 18)
112
+/* Enable SQE Checking */
113
+#define REG_MCMDR_ENSEQ (1 << 17)
114
+/* Send PAUSE Frame */
115
+#define REG_MCMDR_SDPZ (1 << 16)
116
+/* No Defer */
117
+#define REG_MCMDR_NDEF (1 << 9)
118
+/* Frame Transmission On */
119
+#define REG_MCMDR_TXON (1 << 8)
120
+/* Strip CRC Checksum */
121
+#define REG_MCMDR_SPCRC (1 << 5)
122
+/* Accept CRC Error Packet */
123
+#define REG_MCMDR_AEP (1 << 4)
124
+/* Accept Control Packet */
125
+#define REG_MCMDR_ACP (1 << 3)
126
+/* Accept Runt Packet */
127
+#define REG_MCMDR_ARP (1 << 2)
128
+/* Accept Long Packet */
129
+#define REG_MCMDR_ALP (1 << 1)
130
+/* Frame Reception On */
131
+#define REG_MCMDR_RXON (1 << 0)
132
+
133
+/* REG_MIEN fields */
134
+/* Enable Transmit Descriptor Unavailable Interrupt */
135
+#define REG_MIEN_ENTDU (1 << 23)
136
+/* Enable Transmit Completion Interrupt */
137
+#define REG_MIEN_ENTXCP (1 << 18)
138
+/* Enable Transmit Interrupt */
139
+#define REG_MIEN_ENTXINTR (1 << 16)
140
+/* Enable Receive Descriptor Unavailable Interrupt */
141
+#define REG_MIEN_ENRDU (1 << 10)
142
+/* Enable Receive Good Interrupt */
143
+#define REG_MIEN_ENRXGD (1 << 4)
144
+/* Enable Receive Interrupt */
145
+#define REG_MIEN_ENRXINTR (1 << 0)
146
+
147
+/* REG_MISTA fields */
148
+/* TODO: Add error fields and support simulated errors? */
149
+/* Transmit Bus Error Interrupt */
150
+#define REG_MISTA_TXBERR (1 << 24)
151
+/* Transmit Descriptor Unavailable Interrupt */
152
+#define REG_MISTA_TDU (1 << 23)
153
+/* Transmit Completion Interrupt */
154
+#define REG_MISTA_TXCP (1 << 18)
155
+/* Transmit Interrupt */
156
+#define REG_MISTA_TXINTR (1 << 16)
157
+/* Receive Bus Error Interrupt */
158
+#define REG_MISTA_RXBERR (1 << 11)
159
+/* Receive Descriptor Unavailable Interrupt */
160
+#define REG_MISTA_RDU (1 << 10)
161
+/* DMA Early Notification Interrupt */
162
+#define REG_MISTA_DENI (1 << 9)
163
+/* Maximum Frame Length Interrupt */
164
+#define REG_MISTA_DFOI (1 << 8)
165
+/* Receive Good Interrupt */
166
+#define REG_MISTA_RXGD (1 << 4)
167
+/* Packet Too Long Interrupt */
168
+#define REG_MISTA_PTLE (1 << 3)
169
+/* Receive Interrupt */
170
+#define REG_MISTA_RXINTR (1 << 0)
171
+
172
+/* REG_MGSTA fields */
173
+/* Transmission Halted */
174
+#define REG_MGSTA_TXHA (1 << 11)
175
+/* Receive Halted */
176
+#define REG_MGSTA_RXHA (1 << 11)
177
+
178
+/* REG_DMARFC fields */
179
+/* Maximum Receive Frame Length */
180
+#define REG_DMARFC_RXMS(word) extract32((word), 0, 16)
181
+
182
+/* REG MIIDA fields */
183
+/* Busy Bit */
184
+#define REG_MIIDA_BUSY (1 << 17)
185
+
186
+/* Transmit and receive descriptors */
187
+typedef struct NPCM7xxEMCTxDesc NPCM7xxEMCTxDesc;
188
+typedef struct NPCM7xxEMCRxDesc NPCM7xxEMCRxDesc;
189
+
190
+struct NPCM7xxEMCTxDesc {
191
+ uint32_t flags;
192
+ uint32_t txbsa;
193
+ uint32_t status_and_length;
194
+ uint32_t ntxdsa;
195
+};
196
+
197
+struct NPCM7xxEMCRxDesc {
198
+ uint32_t status_and_length;
199
+ uint32_t rxbsa;
200
+ uint32_t reserved;
201
+ uint32_t nrxdsa;
202
+};
203
+
204
+/* NPCM7xxEMCTxDesc.flags values */
205
+/* Owner: 0 = cpu, 1 = emc */
206
+#define TX_DESC_FLAG_OWNER_MASK (1 << 31)
207
+/* Transmit interrupt enable */
208
+#define TX_DESC_FLAG_INTEN (1 << 2)
209
+/* CRC append */
210
+#define TX_DESC_FLAG_CRCAPP (1 << 1)
211
+/* Padding enable */
212
+#define TX_DESC_FLAG_PADEN (1 << 0)
213
+
214
+/* NPCM7xxEMCTxDesc.status_and_length values */
215
+/* Collision count */
216
+#define TX_DESC_STATUS_CCNT_SHIFT 28
217
+#define TX_DESC_STATUS_CCNT_BITSIZE 4
218
+/* SQE error */
219
+#define TX_DESC_STATUS_SQE (1 << 26)
220
+/* Transmission paused */
221
+#define TX_DESC_STATUS_PAU (1 << 25)
222
+/* P transmission halted */
223
+#define TX_DESC_STATUS_TXHA (1 << 24)
224
+/* Late collision */
225
+#define TX_DESC_STATUS_LC (1 << 23)
226
+/* Transmission abort */
227
+#define TX_DESC_STATUS_TXABT (1 << 22)
228
+/* No carrier sense */
229
+#define TX_DESC_STATUS_NCS (1 << 21)
230
+/* Defer exceed */
231
+#define TX_DESC_STATUS_EXDEF (1 << 20)
232
+/* Transmission complete */
233
+#define TX_DESC_STATUS_TXCP (1 << 19)
234
+/* Transmission deferred */
235
+#define TX_DESC_STATUS_DEF (1 << 17)
236
+/* Transmit interrupt */
237
+#define TX_DESC_STATUS_TXINTR (1 << 16)
238
+
239
+#define TX_DESC_PKT_LEN(word) extract32((word), 0, 16)
240
+
241
+/* Transmit buffer start address */
242
+#define TX_DESC_TXBSA(word) ((uint32_t) (word) & ~3u)
243
+
244
+/* Next transmit descriptor start address */
245
+#define TX_DESC_NTXDSA(word) ((uint32_t) (word) & ~3u)
246
+
247
+/* NPCM7xxEMCRxDesc.status_and_length values */
248
+/* Owner: 0b00 = cpu, 0b01 = undefined, 0b10 = emc, 0b11 = undefined */
249
+#define RX_DESC_STATUS_OWNER_SHIFT 30
250
+#define RX_DESC_STATUS_OWNER_BITSIZE 2
251
+#define RX_DESC_STATUS_OWNER_MASK (3 << RX_DESC_STATUS_OWNER_SHIFT)
252
+/* Runt packet */
253
+#define RX_DESC_STATUS_RP (1 << 22)
254
+/* Alignment error */
255
+#define RX_DESC_STATUS_ALIE (1 << 21)
256
+/* Frame reception complete */
257
+#define RX_DESC_STATUS_RXGD (1 << 20)
258
+/* Packet too long */
259
+#define RX_DESC_STATUS_PTLE (1 << 19)
260
+/* CRC error */
261
+#define RX_DESC_STATUS_CRCE (1 << 17)
262
+/* Receive interrupt */
263
+#define RX_DESC_STATUS_RXINTR (1 << 16)
264
+
265
+#define RX_DESC_PKT_LEN(word) extract32((word), 0, 16)
266
+
267
+/* Receive buffer start address */
268
+#define RX_DESC_RXBSA(word) ((uint32_t) (word) & ~3u)
269
+
270
+/* Next receive descriptor start address */
271
+#define RX_DESC_NRXDSA(word) ((uint32_t) (word) & ~3u)
272
+
273
+/* Minimum packet length, when TX_DESC_FLAG_PADEN is set. */
274
+#define MIN_PACKET_LENGTH 64
275
+
276
+struct NPCM7xxEMCState {
277
+ /*< private >*/
278
+ SysBusDevice parent;
279
+ /*< public >*/
280
+
281
+ MemoryRegion iomem;
282
+
283
+ qemu_irq tx_irq;
284
+ qemu_irq rx_irq;
285
+
286
+ NICState *nic;
287
+ NICConf conf;
288
+
289
+ /* 0 or 1, for log messages */
290
+ uint8_t emc_num;
291
+
292
+ uint32_t regs[NPCM7XX_NUM_EMC_REGS];
293
+
294
+ /*
295
+ * tx is active. Set to true by TSDR and then switches off when out of
296
+ * descriptors. If the TXON bit in REG_MCMDR is off then this is off.
297
+ */
298
+ bool tx_active;
299
+
300
+ /*
301
+ * rx is active. Set to true by RSDR and then switches off when out of
302
+ * descriptors. If the RXON bit in REG_MCMDR is off then this is off.
303
+ */
304
+ bool rx_active;
305
+};
306
+
307
+typedef struct NPCM7xxEMCState NPCM7xxEMCState;
308
+
309
+#define TYPE_NPCM7XX_EMC "npcm7xx-emc"
310
+#define NPCM7XX_EMC(obj) \
311
+ OBJECT_CHECK(NPCM7xxEMCState, (obj), TYPE_NPCM7XX_EMC)
312
+
313
+#endif /* NPCM7XX_EMC_H */
314
diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c
315
new file mode 100644
316
index XXXXXXX..XXXXXXX
317
--- /dev/null
318
+++ b/hw/net/npcm7xx_emc.c
319
@@ -XXX,XX +XXX,XX @@
320
+/*
321
+ * Nuvoton NPCM7xx EMC Module
322
+ *
323
+ * Copyright 2020 Google LLC
324
+ *
325
+ * This program is free software; you can redistribute it and/or modify it
326
+ * under the terms of the GNU General Public License as published by the
327
+ * Free Software Foundation; either version 2 of the License, or
328
+ * (at your option) any later version.
329
+ *
330
+ * This program is distributed in the hope that it will be useful, but WITHOUT
331
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
332
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
333
+ * for more details.
334
+ *
335
+ * Unsupported/unimplemented features:
336
+ * - MCMDR.FDUP (full duplex) is ignored, half duplex is not supported
337
+ * - Only CAM0 is supported, CAM[1-15] are not
338
+ * - writes to CAMEN.[1-15] are ignored, these bits always read as zeroes
339
+ * - MII is not implemented, MIIDA.BUSY and MIID always return zero
340
+ * - MCMDR.LBK is not implemented
341
+ * - MCMDR.{OPMOD,ENSQE,AEP,ARP} are not supported
342
+ * - H/W FIFOs are not supported, MCMDR.FFTCR is ignored
343
+ * - MGSTA.SQE is not supported
344
+ * - pause and control frames are not implemented
345
+ * - MGSTA.CCNT is not supported
346
+ * - MPCNT, DMARFS are not implemented
347
+ */
348
+
349
+#include "qemu/osdep.h"
350
+
351
+/* For crc32 */
352
+#include <zlib.h>
353
+
354
+#include "qemu-common.h"
355
+#include "hw/irq.h"
356
+#include "hw/qdev-clock.h"
357
+#include "hw/qdev-properties.h"
358
+#include "hw/net/npcm7xx_emc.h"
359
+#include "net/eth.h"
360
+#include "migration/vmstate.h"
361
+#include "qemu/bitops.h"
362
+#include "qemu/error-report.h"
363
+#include "qemu/log.h"
364
+#include "qemu/module.h"
365
+#include "qemu/units.h"
366
+#include "sysemu/dma.h"
367
+#include "trace.h"
368
+
369
+#define CRC_LENGTH 4
370
+
371
+/*
372
+ * The maximum size of a (layer 2) ethernet frame as defined by 802.3.
373
+ * 1518 = 6(dest macaddr) + 6(src macaddr) + 2(proto) + 4(crc) + 1500(payload)
374
+ * This does not include an additional 4 for the vlan field (802.1q).
375
+ */
376
+#define MAX_ETH_FRAME_SIZE 1518
377
+
378
+static const char *emc_reg_name(int regno)
379
+{
380
+#define REG(name) case REG_ ## name: return #name;
381
+ switch (regno) {
382
+ REG(CAMCMR)
383
+ REG(CAMEN)
384
+ REG(TXDLSA)
385
+ REG(RXDLSA)
386
+ REG(MCMDR)
387
+ REG(MIID)
388
+ REG(MIIDA)
389
+ REG(FFTCR)
390
+ REG(TSDR)
391
+ REG(RSDR)
392
+ REG(DMARFC)
393
+ REG(MIEN)
394
+ REG(MISTA)
395
+ REG(MGSTA)
396
+ REG(MPCNT)
397
+ REG(MRPC)
398
+ REG(MRPCC)
399
+ REG(MREPC)
400
+ REG(DMARFS)
401
+ REG(CTXDSA)
402
+ REG(CTXBSA)
403
+ REG(CRXDSA)
404
+ REG(CRXBSA)
405
+ case REG_CAMM_BASE + 0: return "CAM0M";
406
+ case REG_CAML_BASE + 0: return "CAM0L";
407
+ case REG_CAMM_BASE + 2 ... REG_CAMML_LAST:
408
+ /* Only CAM0 is supported, fold the others into something simple. */
409
+ if (regno & 1) {
410
+ return "CAM<n>L";
411
+ } else {
412
+ return "CAM<n>M";
413
+ }
414
+ default: return "UNKNOWN";
415
+ }
416
+#undef REG
417
+}
418
+
419
+static void emc_reset(NPCM7xxEMCState *emc)
420
+{
421
+ trace_npcm7xx_emc_reset(emc->emc_num);
422
+
423
+ memset(&emc->regs[0], 0, sizeof(emc->regs));
424
+
425
+ /* These regs have non-zero reset values. */
426
+ emc->regs[REG_TXDLSA] = 0xfffffffc;
427
+ emc->regs[REG_RXDLSA] = 0xfffffffc;
428
+ emc->regs[REG_MIIDA] = 0x00900000;
429
+ emc->regs[REG_FFTCR] = 0x0101;
430
+ emc->regs[REG_DMARFC] = 0x0800;
431
+ emc->regs[REG_MPCNT] = 0x7fff;
432
+
433
+ emc->tx_active = false;
434
+ emc->rx_active = false;
435
+}
436
+
437
+static void npcm7xx_emc_reset(DeviceState *dev)
438
+{
439
+ NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
440
+ emc_reset(emc);
441
+}
442
+
443
+static void emc_soft_reset(NPCM7xxEMCState *emc)
444
+{
445
+ /*
446
+ * The docs say at least MCMDR.{LBK,OPMOD} bits are not changed during a
447
+ * soft reset, but does not go into further detail. For now, KISS.
448
+ */
449
+ uint32_t mcmdr = emc->regs[REG_MCMDR];
450
+ emc_reset(emc);
451
+ emc->regs[REG_MCMDR] = mcmdr & (REG_MCMDR_LBK | REG_MCMDR_OPMOD);
452
+
453
+ qemu_set_irq(emc->tx_irq, 0);
454
+ qemu_set_irq(emc->rx_irq, 0);
455
+}
456
+
457
+static void emc_set_link(NetClientState *nc)
458
+{
459
+ /* Nothing to do yet. */
460
+}
461
+
462
+/* MISTA.TXINTR is the union of the individual bits with their enables. */
463
+static void emc_update_mista_txintr(NPCM7xxEMCState *emc)
464
+{
465
+ /* Only look at the bits we support. */
466
+ uint32_t mask = (REG_MISTA_TXBERR |
467
+ REG_MISTA_TDU |
468
+ REG_MISTA_TXCP);
469
+ if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
470
+ emc->regs[REG_MISTA] |= REG_MISTA_TXINTR;
471
+ } else {
472
+ emc->regs[REG_MISTA] &= ~REG_MISTA_TXINTR;
473
+ }
474
+}
475
+
476
+/* MISTA.RXINTR is the union of the individual bits with their enables. */
477
+static void emc_update_mista_rxintr(NPCM7xxEMCState *emc)
478
+{
479
+ /* Only look at the bits we support. */
480
+ uint32_t mask = (REG_MISTA_RXBERR |
481
+ REG_MISTA_RDU |
482
+ REG_MISTA_RXGD);
483
+ if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
484
+ emc->regs[REG_MISTA] |= REG_MISTA_RXINTR;
485
+ } else {
486
+ emc->regs[REG_MISTA] &= ~REG_MISTA_RXINTR;
487
+ }
488
+}
489
+
490
+/* N.B. emc_update_mista_txintr must have already been called. */
491
+static void emc_update_tx_irq(NPCM7xxEMCState *emc)
492
+{
493
+ int level = !!(emc->regs[REG_MISTA] &
494
+ emc->regs[REG_MIEN] &
495
+ REG_MISTA_TXINTR);
496
+ trace_npcm7xx_emc_update_tx_irq(level);
497
+ qemu_set_irq(emc->tx_irq, level);
498
+}
499
+
500
+/* N.B. emc_update_mista_rxintr must have already been called. */
501
+static void emc_update_rx_irq(NPCM7xxEMCState *emc)
502
+{
503
+ int level = !!(emc->regs[REG_MISTA] &
504
+ emc->regs[REG_MIEN] &
505
+ REG_MISTA_RXINTR);
506
+ trace_npcm7xx_emc_update_rx_irq(level);
507
+ qemu_set_irq(emc->rx_irq, level);
508
+}
509
+
510
+/* Update IRQ states due to changes in MIEN,MISTA. */
511
+static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc)
512
+{
513
+ emc_update_mista_txintr(emc);
514
+ emc_update_tx_irq(emc);
515
+
516
+ emc_update_mista_rxintr(emc);
517
+ emc_update_rx_irq(emc);
518
+}
519
+
520
+static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc)
521
+{
522
+ if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) {
523
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
524
+ HWADDR_PRIx "\n", __func__, addr);
525
+ return -1;
526
+ }
527
+ desc->flags = le32_to_cpu(desc->flags);
528
+ desc->txbsa = le32_to_cpu(desc->txbsa);
529
+ desc->status_and_length = le32_to_cpu(desc->status_and_length);
530
+ desc->ntxdsa = le32_to_cpu(desc->ntxdsa);
531
+ return 0;
532
+}
533
+
534
+static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
535
+{
536
+ NPCM7xxEMCTxDesc le_desc;
537
+
538
+ le_desc.flags = cpu_to_le32(desc->flags);
539
+ le_desc.txbsa = cpu_to_le32(desc->txbsa);
540
+ le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
541
+ le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa);
542
+ if (dma_memory_write(&address_space_memory, addr, &le_desc,
543
+ sizeof(le_desc))) {
544
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
545
+ HWADDR_PRIx "\n", __func__, addr);
546
+ return -1;
547
+ }
548
+ return 0;
549
+}
550
+
551
+static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc)
552
+{
553
+ if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) {
554
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
555
+ HWADDR_PRIx "\n", __func__, addr);
556
+ return -1;
557
+ }
558
+ desc->status_and_length = le32_to_cpu(desc->status_and_length);
559
+ desc->rxbsa = le32_to_cpu(desc->rxbsa);
560
+ desc->reserved = le32_to_cpu(desc->reserved);
561
+ desc->nrxdsa = le32_to_cpu(desc->nrxdsa);
562
+ return 0;
563
+}
564
+
565
+static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr)
566
+{
567
+ NPCM7xxEMCRxDesc le_desc;
568
+
569
+ le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
570
+ le_desc.rxbsa = cpu_to_le32(desc->rxbsa);
571
+ le_desc.reserved = cpu_to_le32(desc->reserved);
572
+ le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa);
573
+ if (dma_memory_write(&address_space_memory, addr, &le_desc,
574
+ sizeof(le_desc))) {
575
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
576
+ HWADDR_PRIx "\n", __func__, addr);
577
+ return -1;
578
+ }
579
+ return 0;
580
+}
581
+
582
+static void emc_set_mista(NPCM7xxEMCState *emc, uint32_t flags)
583
+{
584
+ trace_npcm7xx_emc_set_mista(flags);
585
+ emc->regs[REG_MISTA] |= flags;
586
+ if (extract32(flags, 16, 16)) {
587
+ emc_update_mista_txintr(emc);
588
+ }
589
+ if (extract32(flags, 0, 16)) {
590
+ emc_update_mista_rxintr(emc);
591
+ }
592
+}
593
+
594
+static void emc_halt_tx(NPCM7xxEMCState *emc, uint32_t mista_flag)
595
+{
596
+ emc->tx_active = false;
597
+ emc_set_mista(emc, mista_flag);
598
+}
599
+
600
+static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag)
601
+{
602
+ emc->rx_active = false;
603
+ emc_set_mista(emc, mista_flag);
604
+}
605
+
606
+static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc,
607
+ const NPCM7xxEMCTxDesc *tx_desc,
608
+ uint32_t desc_addr)
609
+{
610
+ /* Update the current descriptor, if only to reset the owner flag. */
611
+ if (emc_write_tx_desc(tx_desc, desc_addr)) {
612
+ /*
613
+ * We just read it so this shouldn't generally happen.
614
+ * Error already reported.
615
+ */
616
+ emc_set_mista(emc, REG_MISTA_TXBERR);
617
+ }
618
+ emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa);
619
+}
620
+
621
+static void emc_set_next_rx_descriptor(NPCM7xxEMCState *emc,
622
+ const NPCM7xxEMCRxDesc *rx_desc,
623
+ uint32_t desc_addr)
624
+{
625
+ /* Update the current descriptor, if only to reset the owner flag. */
626
+ if (emc_write_rx_desc(rx_desc, desc_addr)) {
627
+ /*
628
+ * We just read it so this shouldn't generally happen.
629
+ * Error already reported.
630
+ */
631
+ emc_set_mista(emc, REG_MISTA_RXBERR);
632
+ }
633
+ emc->regs[REG_CRXDSA] = RX_DESC_NRXDSA(rx_desc->nrxdsa);
634
+}
635
+
636
+static void emc_try_send_next_packet(NPCM7xxEMCState *emc)
637
+{
638
+ /* Working buffer for sending out packets. Most packets fit in this. */
639
+#define TX_BUFFER_SIZE 2048
640
+ uint8_t tx_send_buffer[TX_BUFFER_SIZE];
641
+ uint32_t desc_addr = TX_DESC_NTXDSA(emc->regs[REG_CTXDSA]);
642
+ NPCM7xxEMCTxDesc tx_desc;
643
+ uint32_t next_buf_addr, length;
644
+ uint8_t *buf;
645
+ g_autofree uint8_t *malloced_buf = NULL;
646
+
647
+ if (emc_read_tx_desc(desc_addr, &tx_desc)) {
648
+ /* Error reading descriptor, already reported. */
649
+ emc_halt_tx(emc, REG_MISTA_TXBERR);
650
+ emc_update_tx_irq(emc);
651
+ return;
652
+ }
653
+
654
+ /* Nothing we can do if we don't own the descriptor. */
655
+ if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) {
656
+ trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
657
+ emc_halt_tx(emc, REG_MISTA_TDU);
658
+ emc_update_tx_irq(emc);
659
+ return;
660
+ }
661
+
662
+ /* Give the descriptor back regardless of what happens. */
663
+ tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK;
664
+ tx_desc.status_and_length &= 0xffff;
665
+
666
+ /*
667
+ * Despite the h/w documentation saying the tx buffer is word aligned,
668
+ * the linux driver does not word align the buffer. There is value in not
669
+ * aligning the buffer: See the description of NET_IP_ALIGN in linux
670
+ * kernel sources.
671
+ */
672
+ next_buf_addr = tx_desc.txbsa;
673
+ emc->regs[REG_CTXBSA] = next_buf_addr;
674
+ length = TX_DESC_PKT_LEN(tx_desc.status_and_length);
675
+ buf = &tx_send_buffer[0];
676
+
677
+ if (length > sizeof(tx_send_buffer)) {
678
+ malloced_buf = g_malloc(length);
679
+ buf = malloced_buf;
680
+ }
681
+
682
+ if (dma_memory_read(&address_space_memory, next_buf_addr, buf, length)) {
683
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
684
+ __func__, next_buf_addr);
685
+ emc_set_mista(emc, REG_MISTA_TXBERR);
686
+ emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
687
+ emc_update_tx_irq(emc);
688
+ trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
689
+ return;
690
+ }
691
+
692
+ if ((tx_desc.flags & TX_DESC_FLAG_PADEN) && (length < MIN_PACKET_LENGTH)) {
693
+ memset(buf + length, 0, MIN_PACKET_LENGTH - length);
694
+ length = MIN_PACKET_LENGTH;
695
+ }
696
+
697
+ /* N.B. emc_receive can get called here. */
698
+ qemu_send_packet(qemu_get_queue(emc->nic), buf, length);
699
+ trace_npcm7xx_emc_sent_packet(length);
700
+
701
+ tx_desc.status_and_length |= TX_DESC_STATUS_TXCP;
702
+ if (tx_desc.flags & TX_DESC_FLAG_INTEN) {
703
+ emc_set_mista(emc, REG_MISTA_TXCP);
704
+ }
705
+ if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_TXINTR) {
706
+ tx_desc.status_and_length |= TX_DESC_STATUS_TXINTR;
707
+ }
708
+
709
+ emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
710
+ emc_update_tx_irq(emc);
711
+ trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
712
+}
713
+
714
+static bool emc_can_receive(NetClientState *nc)
715
+{
716
+ NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));
717
+
718
+ bool can_receive = emc->rx_active;
719
+ trace_npcm7xx_emc_can_receive(can_receive);
720
+ return can_receive;
721
+}
722
+
723
+/* If result is false then *fail_reason contains the reason. */
724
+static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf,
725
+ size_t len, const char **fail_reason)
726
+{
727
+ eth_pkt_types_e pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(buf));
728
+
729
+ switch (pkt_type) {
730
+ case ETH_PKT_BCAST:
731
+ if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
732
+ return true;
733
+ } else {
734
+ *fail_reason = "Broadcast packet disabled";
735
+ return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_ABP);
736
+ }
737
+ case ETH_PKT_MCAST:
738
+ if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
739
+ return true;
740
+ } else {
741
+ *fail_reason = "Multicast packet disabled";
742
+ return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_AMP);
743
+ }
744
+ case ETH_PKT_UCAST: {
745
+ bool matches;
746
+ if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) {
747
+ return true;
748
+ }
749
+ matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) &&
750
+ /* We only support one CAM register, CAM0. */
751
+ (emc->regs[REG_CAMEN] & (1 << 0)) &&
752
+ memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0);
753
+ if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
754
+ *fail_reason = "MACADDR matched, comparison complemented";
755
+ return !matches;
756
+ } else {
757
+ *fail_reason = "MACADDR didn't match";
758
+ return matches;
759
+ }
760
+ }
761
+ default:
762
+ g_assert_not_reached();
763
+ }
764
+}
765
+
766
+static bool emc_receive_filter(NPCM7xxEMCState *emc, const uint8_t *buf,
767
+ size_t len)
768
+{
769
+ const char *fail_reason = NULL;
770
+ bool ok = emc_receive_filter1(emc, buf, len, &fail_reason);
771
+ if (!ok) {
772
+ trace_npcm7xx_emc_packet_filtered_out(fail_reason);
773
+ }
774
+ return ok;
775
+}
776
+
777
+static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1)
778
+{
779
+ NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));
780
+ const uint32_t len = len1;
781
+ size_t max_frame_len;
782
+ bool long_frame;
783
+ uint32_t desc_addr;
784
+ NPCM7xxEMCRxDesc rx_desc;
785
+ uint32_t crc;
786
+ uint8_t *crc_ptr;
787
+ uint32_t buf_addr;
788
+
789
+ trace_npcm7xx_emc_receiving_packet(len);
790
+
791
+ if (!emc_can_receive(nc)) {
792
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__);
793
+ return -1;
794
+ }
795
+
796
+ if (len < ETH_HLEN ||
797
+ /* Defensive programming: drop unsupportable large packets. */
798
+ len > 0xffff - CRC_LENGTH) {
799
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Dropped frame of %u bytes\n",
800
+ __func__, len);
801
+ return len;
802
+ }
803
+
804
+ /*
805
+ * DENI is set if EMC received the Length/Type field of the incoming
806
+ * packet, so it will be set regardless of what happens next.
807
+ */
808
+ emc_set_mista(emc, REG_MISTA_DENI);
809
+
810
+ if (!emc_receive_filter(emc, buf, len)) {
811
+ emc_update_rx_irq(emc);
812
+ return len;
813
+ }
814
+
815
+ /* Huge frames (> DMARFC) are dropped. */
816
+ max_frame_len = REG_DMARFC_RXMS(emc->regs[REG_DMARFC]);
817
+ if (len + CRC_LENGTH > max_frame_len) {
818
+ trace_npcm7xx_emc_packet_dropped(len);
819
+ emc_set_mista(emc, REG_MISTA_DFOI);
820
+ emc_update_rx_irq(emc);
821
+ return len;
822
+ }
823
+
824
+ /*
825
+ * Long Frames (> MAX_ETH_FRAME_SIZE) are also dropped, unless MCMDR.ALP
826
+ * is set.
827
+ */
828
+ long_frame = false;
829
+ if (len + CRC_LENGTH > MAX_ETH_FRAME_SIZE) {
830
+ if (emc->regs[REG_MCMDR] & REG_MCMDR_ALP) {
831
+ long_frame = true;
832
+ } else {
833
+ trace_npcm7xx_emc_packet_dropped(len);
834
+ emc_set_mista(emc, REG_MISTA_PTLE);
835
+ emc_update_rx_irq(emc);
836
+ return len;
837
+ }
838
+ }
839
+
840
+ desc_addr = RX_DESC_NRXDSA(emc->regs[REG_CRXDSA]);
841
+ if (emc_read_rx_desc(desc_addr, &rx_desc)) {
842
+ /* Error reading descriptor, already reported. */
843
+ emc_halt_rx(emc, REG_MISTA_RXBERR);
844
+ emc_update_rx_irq(emc);
845
+ return len;
846
+ }
847
+
848
+ /* Nothing we can do if we don't own the descriptor. */
849
+ if (!(rx_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK)) {
850
+ trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
851
+ emc_halt_rx(emc, REG_MISTA_RDU);
852
+ emc_update_rx_irq(emc);
853
+ return len;
854
+ }
855
+
856
+ crc = 0;
857
+ crc_ptr = (uint8_t *) &crc;
858
+ if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
859
+ crc = cpu_to_be32(crc32(~0, buf, len));
860
+ }
861
+
862
+ /* Give the descriptor back regardless of what happens. */
863
+ rx_desc.status_and_length &= ~RX_DESC_STATUS_OWNER_MASK;
864
+
865
+ buf_addr = rx_desc.rxbsa;
866
+ emc->regs[REG_CRXBSA] = buf_addr;
867
+ if (dma_memory_write(&address_space_memory, buf_addr, buf, len) ||
868
+ (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) &&
869
+ dma_memory_write(&address_space_memory, buf_addr + len, crc_ptr,
870
+ 4))) {
871
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n",
872
+ __func__);
873
+ emc_set_mista(emc, REG_MISTA_RXBERR);
874
+ emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
875
+ emc_update_rx_irq(emc);
876
+ trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
877
+ return len;
878
+ }
879
+
880
+ trace_npcm7xx_emc_received_packet(len);
881
+
882
+ /* Note: We've already verified len+4 <= 0xffff. */
883
+ rx_desc.status_and_length = len;
884
+ if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
885
+ rx_desc.status_and_length += 4;
886
+ }
887
+ rx_desc.status_and_length |= RX_DESC_STATUS_RXGD;
888
+ emc_set_mista(emc, REG_MISTA_RXGD);
889
+
890
+ if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_RXINTR) {
891
+ rx_desc.status_and_length |= RX_DESC_STATUS_RXINTR;
892
+ }
893
+ if (long_frame) {
894
+ rx_desc.status_and_length |= RX_DESC_STATUS_PTLE;
895
+ }
896
+
897
+ emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
898
+ emc_update_rx_irq(emc);
899
+ trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
900
+ return len;
901
+}
902
+
903
+static void emc_try_receive_next_packet(NPCM7xxEMCState *emc)
904
+{
905
+ if (emc_can_receive(qemu_get_queue(emc->nic))) {
906
+ qemu_flush_queued_packets(qemu_get_queue(emc->nic));
907
+ }
908
+}
909
+
910
+static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size)
911
+{
912
+ NPCM7xxEMCState *emc = opaque;
913
+ uint32_t reg = offset / sizeof(uint32_t);
914
+ uint32_t result;
915
+
916
+ if (reg >= NPCM7XX_NUM_EMC_REGS) {
917
+ qemu_log_mask(LOG_GUEST_ERROR,
918
+ "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
919
+ __func__, offset);
920
+ return 0;
921
+ }
922
+
923
+ switch (reg) {
924
+ case REG_MIID:
925
+ /*
926
+ * We don't implement MII. For determinism, always return zero as
927
+ * writes record the last value written for debugging purposes.
928
+ */
929
+ qemu_log_mask(LOG_UNIMP, "%s: Read of MIID, returning 0\n", __func__);
930
+ result = 0;
931
+ break;
932
+ case REG_TSDR:
933
+ case REG_RSDR:
934
+ qemu_log_mask(LOG_GUEST_ERROR,
935
+ "%s: Read of write-only reg, %s/%d\n",
936
+ __func__, emc_reg_name(reg), reg);
937
+ return 0;
938
+ default:
939
+ result = emc->regs[reg];
940
+ break;
941
+ }
942
+
943
+ trace_npcm7xx_emc_reg_read(emc->emc_num, result, emc_reg_name(reg), reg);
944
+ return result;
945
+}
946
+
947
+static void npcm7xx_emc_write(void *opaque, hwaddr offset,
948
+ uint64_t v, unsigned size)
949
+{
950
+ NPCM7xxEMCState *emc = opaque;
951
+ uint32_t reg = offset / sizeof(uint32_t);
952
+ uint32_t value = v;
953
+
954
+ g_assert(size == sizeof(uint32_t));
955
+
956
+ if (reg >= NPCM7XX_NUM_EMC_REGS) {
957
+ qemu_log_mask(LOG_GUEST_ERROR,
958
+ "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
959
+ __func__, offset);
960
+ return;
961
+ }
962
+
963
+ trace_npcm7xx_emc_reg_write(emc->emc_num, emc_reg_name(reg), reg, value);
964
+
965
+ switch (reg) {
966
+ case REG_CAMCMR:
967
+ emc->regs[reg] = value;
968
+ break;
969
+ case REG_CAMEN:
970
+ /* Only CAM0 is supported, don't pretend otherwise. */
971
+ if (value & ~1) {
972
+ qemu_log_mask(LOG_GUEST_ERROR,
973
+ "%s: Only CAM0 is supported, cannot enable others"
974
+ ": 0x%x\n",
975
+ __func__, value);
976
+ }
977
+ emc->regs[reg] = value & 1;
978
+ break;
979
+ case REG_CAMM_BASE + 0:
980
+ emc->regs[reg] = value;
981
+ emc->conf.macaddr.a[0] = value >> 24;
982
+ emc->conf.macaddr.a[1] = value >> 16;
983
+ emc->conf.macaddr.a[2] = value >> 8;
984
+ emc->conf.macaddr.a[3] = value >> 0;
985
+ break;
986
+ case REG_CAML_BASE + 0:
987
+ emc->regs[reg] = value;
988
+ emc->conf.macaddr.a[4] = value >> 24;
989
+ emc->conf.macaddr.a[5] = value >> 16;
990
+ break;
991
+ case REG_MCMDR: {
992
+ uint32_t prev;
993
+ if (value & REG_MCMDR_SWR) {
994
+ emc_soft_reset(emc);
995
+ /* On h/w the reset happens over multiple cycles. For now KISS. */
996
+ break;
997
+ }
998
+ prev = emc->regs[reg];
999
+ emc->regs[reg] = value;
1000
+ /* Update tx state. */
1001
+ if (!(prev & REG_MCMDR_TXON) &&
1002
+ (value & REG_MCMDR_TXON)) {
1003
+ emc->regs[REG_CTXDSA] = emc->regs[REG_TXDLSA];
1004
+ /*
1005
+ * Linux kernel turns TX on with CPU still holding descriptor,
1006
+ * which suggests we should wait for a write to TSDR before trying
1007
+ * to send a packet: so we don't send one here.
1008
+ */
1009
+ } else if ((prev & REG_MCMDR_TXON) &&
1010
+ !(value & REG_MCMDR_TXON)) {
1011
+ emc->regs[REG_MGSTA] |= REG_MGSTA_TXHA;
1012
+ }
1013
+ if (!(value & REG_MCMDR_TXON)) {
1014
+ emc_halt_tx(emc, 0);
1015
+ }
1016
+ /* Update rx state. */
1017
+ if (!(prev & REG_MCMDR_RXON) &&
1018
+ (value & REG_MCMDR_RXON)) {
1019
+ emc->regs[REG_CRXDSA] = emc->regs[REG_RXDLSA];
1020
+ } else if ((prev & REG_MCMDR_RXON) &&
1021
+ !(value & REG_MCMDR_RXON)) {
1022
+ emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA;
1023
+ }
1024
+ if (!(value & REG_MCMDR_RXON)) {
1025
+ emc_halt_rx(emc, 0);
1026
+ }
1027
+ break;
1028
+ }
1029
+ case REG_TXDLSA:
1030
+ case REG_RXDLSA:
1031
+ case REG_DMARFC:
1032
+ case REG_MIID:
1033
+ emc->regs[reg] = value;
1034
+ break;
1035
+ case REG_MIEN:
1036
+ emc->regs[reg] = value;
1037
+ emc_update_irq_from_reg_change(emc);
1038
+ break;
1039
+ case REG_MISTA:
1040
+ /* Clear the bits that have 1 in "value". */
1041
+ emc->regs[reg] &= ~value;
1042
+ emc_update_irq_from_reg_change(emc);
1043
+ break;
1044
+ case REG_MGSTA:
1045
+ /* Clear the bits that have 1 in "value". */
1046
+ emc->regs[reg] &= ~value;
1047
+ break;
1048
+ case REG_TSDR:
1049
+ if (emc->regs[REG_MCMDR] & REG_MCMDR_TXON) {
1050
+ emc->tx_active = true;
1051
+ /* Keep trying to send packets until we run out. */
1052
+ while (emc->tx_active) {
1053
+ emc_try_send_next_packet(emc);
1054
+ }
1055
+ }
1056
+ break;
1057
+ case REG_RSDR:
1058
+ if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) {
1059
+ emc->rx_active = true;
1060
+ emc_try_receive_next_packet(emc);
1061
+ }
1062
+ break;
1063
+ case REG_MIIDA:
1064
+ emc->regs[reg] = value & ~REG_MIIDA_BUSY;
1065
+ break;
1066
+ case REG_MRPC:
1067
+ case REG_MRPCC:
1068
+ case REG_MREPC:
1069
+ case REG_CTXDSA:
1070
+ case REG_CTXBSA:
1071
+ case REG_CRXDSA:
1072
+ case REG_CRXBSA:
1073
+ qemu_log_mask(LOG_GUEST_ERROR,
1074
+ "%s: Write to read-only reg %s/%d\n",
1075
+ __func__, emc_reg_name(reg), reg);
1076
+ break;
1077
+ default:
1078
+ qemu_log_mask(LOG_UNIMP, "%s: Write to unimplemented reg %s/%d\n",
1079
+ __func__, emc_reg_name(reg), reg);
1080
+ break;
1081
+ }
1082
+}
1083
+
1084
+static const struct MemoryRegionOps npcm7xx_emc_ops = {
1085
+ .read = npcm7xx_emc_read,
1086
+ .write = npcm7xx_emc_write,
1087
+ .endianness = DEVICE_LITTLE_ENDIAN,
1088
+ .valid = {
1089
+ .min_access_size = 4,
1090
+ .max_access_size = 4,
1091
+ .unaligned = false,
1092
+ },
1093
+};
1094
+
1095
+static void emc_cleanup(NetClientState *nc)
1096
+{
1097
+ /* Nothing to do yet. */
1098
+}
1099
+
1100
+static NetClientInfo net_npcm7xx_emc_info = {
1101
+ .type = NET_CLIENT_DRIVER_NIC,
1102
+ .size = sizeof(NICState),
1103
+ .can_receive = emc_can_receive,
1104
+ .receive = emc_receive,
1105
+ .cleanup = emc_cleanup,
1106
+ .link_status_changed = emc_set_link,
1107
+};
1108
+
1109
+static void npcm7xx_emc_realize(DeviceState *dev, Error **errp)
1110
+{
1111
+ NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
1112
+ SysBusDevice *sbd = SYS_BUS_DEVICE(emc);
1113
+
1114
+ memory_region_init_io(&emc->iomem, OBJECT(emc), &npcm7xx_emc_ops, emc,
1115
+ TYPE_NPCM7XX_EMC, 4 * KiB);
1116
+ sysbus_init_mmio(sbd, &emc->iomem);
1117
+ sysbus_init_irq(sbd, &emc->tx_irq);
1118
+ sysbus_init_irq(sbd, &emc->rx_irq);
1119
+
1120
+ qemu_macaddr_default_if_unset(&emc->conf.macaddr);
1121
+ emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf,
1122
+ object_get_typename(OBJECT(dev)), dev->id, emc);
1123
+ qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a);
1124
+}
1125
+
1126
+static void npcm7xx_emc_unrealize(DeviceState *dev)
1127
+{
1128
+ NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
1129
+
1130
+ qemu_del_nic(emc->nic);
1131
+}
1132
+
1133
+static const VMStateDescription vmstate_npcm7xx_emc = {
1134
+ .name = TYPE_NPCM7XX_EMC,
1135
+ .version_id = 0,
1136
+ .minimum_version_id = 0,
1137
+ .fields = (VMStateField[]) {
1138
+ VMSTATE_UINT8(emc_num, NPCM7xxEMCState),
1139
+ VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS),
1140
+ VMSTATE_BOOL(tx_active, NPCM7xxEMCState),
1141
+ VMSTATE_BOOL(rx_active, NPCM7xxEMCState),
1142
+ VMSTATE_END_OF_LIST(),
1143
+ },
1144
+};
1145
+
1146
+static Property npcm7xx_emc_properties[] = {
1147
+ DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf),
1148
+ DEFINE_PROP_END_OF_LIST(),
1149
+};
1150
+
1151
+static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
1152
+{
1153
+ DeviceClass *dc = DEVICE_CLASS(klass);
1154
+
1155
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1156
+ dc->desc = "NPCM7xx EMC Controller";
1157
+ dc->realize = npcm7xx_emc_realize;
1158
+ dc->unrealize = npcm7xx_emc_unrealize;
1159
+ dc->reset = npcm7xx_emc_reset;
1160
+ dc->vmsd = &vmstate_npcm7xx_emc;
1161
+ device_class_set_props(dc, npcm7xx_emc_properties);
1162
+}
1163
+
1164
+static const TypeInfo npcm7xx_emc_info = {
1165
+ .name = TYPE_NPCM7XX_EMC,
1166
+ .parent = TYPE_SYS_BUS_DEVICE,
1167
+ .instance_size = sizeof(NPCM7xxEMCState),
1168
+ .class_init = npcm7xx_emc_class_init,
1169
+};
1170
+
1171
+static void npcm7xx_emc_register_type(void)
1172
+{
1173
+ type_register_static(&npcm7xx_emc_info);
1174
+}
1175
+
1176
+type_init(npcm7xx_emc_register_type)
1177
diff --git a/hw/net/meson.build b/hw/net/meson.build
1178
index XXXXXXX..XXXXXXX 100644
1179
--- a/hw/net/meson.build
1180
+++ b/hw/net/meson.build
1181
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_I82596_COMMON', if_true: files('i82596.c'))
1182
softmmu_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c'))
1183
softmmu_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c'))
1184
softmmu_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c'))
1185
+softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c'))
1186
1187
softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c'))
1188
softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c'))
1189
diff --git a/hw/net/trace-events b/hw/net/trace-events
1190
index XXXXXXX..XXXXXXX 100644
1191
--- a/hw/net/trace-events
1192
+++ b/hw/net/trace-events
1193
@@ -XXX,XX +XXX,XX @@ imx_fec_receive_last(int last) "rx frame flags 0x%04x"
1194
imx_enet_receive(size_t size) "len %zu"
1195
imx_enet_receive_len(uint64_t addr, int len) "rx_bd 0x%"PRIx64" length %d"
1196
imx_enet_receive_last(int last) "rx frame flags 0x%04x"
1197
+
1198
+# npcm7xx_emc.c
1199
+npcm7xx_emc_reset(int emc_num) "Resetting emc%d"
1200
+npcm7xx_emc_update_tx_irq(int level) "Setting tx irq to %d"
1201
+npcm7xx_emc_update_rx_irq(int level) "Setting rx irq to %d"
1202
+npcm7xx_emc_set_mista(uint32_t flags) "ORing 0x%x into MISTA"
1203
+npcm7xx_emc_cpu_owned_desc(uint32_t addr) "Can't process cpu-owned descriptor @0x%x"
1204
+npcm7xx_emc_sent_packet(uint32_t len) "Sent %u byte packet"
1205
+npcm7xx_emc_tx_done(uint32_t ctxdsa) "TX done, CTXDSA=0x%x"
1206
+npcm7xx_emc_can_receive(int can_receive) "Can receive: %d"
1207
+npcm7xx_emc_packet_filtered_out(const char* fail_reason) "Packet filtered out: %s"
1208
+npcm7xx_emc_packet_dropped(uint32_t len) "%u byte packet dropped"
1209
+npcm7xx_emc_receiving_packet(uint32_t len) "Receiving %u byte packet"
1210
+npcm7xx_emc_received_packet(uint32_t len) "Received %u byte packet"
1211
+npcm7xx_emc_rx_done(uint32_t crxdsa) "RX done, CRXDSA=0x%x"
1212
+npcm7xx_emc_reg_read(int emc_num, uint32_t result, const char *name, int regno) "emc%d: 0x%x = reg[%s/%d]"
1213
+npcm7xx_emc_reg_write(int emc_num, const char *name, int regno, uint32_t value) "emc%d: reg[%s/%d] = 0x%x"
1214
--
1215
2.20.1
1216
1217
diff view generated by jsdifflib
1
Factor out the "direct kernel boot" code path from arm_load_kernel()
1
From: Doug Evans <dje@google.com>
2
into its own function; this function is getting long enough that
3
the code flow is a bit confusing.
4
2
5
This commit only moves code around; no semantic changes.
3
This is a 10/100 ethernet device that has several features.
4
Only the ones needed by the Linux driver have been implemented.
5
See npcm7xx_emc.c for a list of unimplemented features.
6
6
7
We leave the "load the dtb" code in arm_load_kernel() -- this
7
Reviewed-by: Hao Wu <wuhaotsh@google.com>
8
is currently only used by the "direct kernel boot" path, but
8
Reviewed-by: Avi Fishman <avi.fishman@nuvoton.com>
9
this is a bug which we will fix shortly.
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Doug Evans <dje@google.com>
11
Message-id: 20210209015541.778833-3-dje@google.com
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
docs/system/arm/nuvoton.rst | 3 ++-
15
include/hw/arm/npcm7xx.h | 2 ++
16
hw/arm/npcm7xx.c | 50 +++++++++++++++++++++++++++++++++++--
17
3 files changed, 52 insertions(+), 3 deletions(-)
10
18
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
14
Message-id: 20190131112240.8395-3-peter.maydell@linaro.org
15
---
16
hw/arm/boot.c | 150 +++++++++++++++++++++++++++-----------------------
17
1 file changed, 80 insertions(+), 70 deletions(-)
18
19
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
20
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/boot.c
21
--- a/docs/system/arm/nuvoton.rst
22
+++ b/hw/arm/boot.c
22
+++ b/docs/system/arm/nuvoton.rst
23
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
23
@@ -XXX,XX +XXX,XX @@ Supported devices
24
return size;
24
* GPIO controller
25
* Analog to Digital Converter (ADC)
26
* Pulse Width Modulation (PWM)
27
+ * Ethernet controller (EMC)
28
29
Missing devices
30
---------------
31
@@ -XXX,XX +XXX,XX @@ Missing devices
32
* Shared memory (SHM)
33
* eSPI slave interface
34
35
- * Ethernet controllers (GMAC and EMC)
36
+ * Ethernet controller (GMAC)
37
* USB device (USBD)
38
* SMBus controller (SMBF)
39
* Peripheral SPI controller (PSPI)
40
diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/include/hw/arm/npcm7xx.h
43
+++ b/include/hw/arm/npcm7xx.h
44
@@ -XXX,XX +XXX,XX @@
45
#include "hw/misc/npcm7xx_gcr.h"
46
#include "hw/misc/npcm7xx_pwm.h"
47
#include "hw/misc/npcm7xx_rng.h"
48
+#include "hw/net/npcm7xx_emc.h"
49
#include "hw/nvram/npcm7xx_otp.h"
50
#include "hw/timer/npcm7xx_timer.h"
51
#include "hw/ssi/npcm7xx_fiu.h"
52
@@ -XXX,XX +XXX,XX @@ typedef struct NPCM7xxState {
53
EHCISysBusState ehci;
54
OHCISysBusState ohci;
55
NPCM7xxFIUState fiu[2];
56
+ NPCM7xxEMCState emc[2];
57
} NPCM7xxState;
58
59
#define TYPE_NPCM7XX "npcm7xx"
60
diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/hw/arm/npcm7xx.c
63
+++ b/hw/arm/npcm7xx.c
64
@@ -XXX,XX +XXX,XX @@ enum NPCM7xxInterrupt {
65
NPCM7XX_UART1_IRQ,
66
NPCM7XX_UART2_IRQ,
67
NPCM7XX_UART3_IRQ,
68
+ NPCM7XX_EMC1RX_IRQ = 15,
69
+ NPCM7XX_EMC1TX_IRQ,
70
NPCM7XX_TIMER0_IRQ = 32, /* Timer Module 0 */
71
NPCM7XX_TIMER1_IRQ,
72
NPCM7XX_TIMER2_IRQ,
73
@@ -XXX,XX +XXX,XX @@ enum NPCM7xxInterrupt {
74
NPCM7XX_OHCI_IRQ = 62,
75
NPCM7XX_PWM0_IRQ = 93, /* PWM module 0 */
76
NPCM7XX_PWM1_IRQ, /* PWM module 1 */
77
+ NPCM7XX_EMC2RX_IRQ = 114,
78
+ NPCM7XX_EMC2TX_IRQ,
79
NPCM7XX_GPIO0_IRQ = 116,
80
NPCM7XX_GPIO1_IRQ,
81
NPCM7XX_GPIO2_IRQ,
82
@@ -XXX,XX +XXX,XX @@ static const hwaddr npcm7xx_pwm_addr[] = {
83
0xf0104000,
84
};
85
86
+/* Register base address for each EMC Module */
87
+static const hwaddr npcm7xx_emc_addr[] = {
88
+ 0xf0825000,
89
+ 0xf0826000,
90
+};
91
+
92
static const struct {
93
hwaddr regs_addr;
94
uint32_t unconnected_pins;
95
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_init(Object *obj)
96
for (i = 0; i < ARRAY_SIZE(s->pwm); i++) {
97
object_initialize_child(obj, "pwm[*]", &s->pwm[i], TYPE_NPCM7XX_PWM);
98
}
99
+
100
+ for (i = 0; i < ARRAY_SIZE(s->emc); i++) {
101
+ object_initialize_child(obj, "emc[*]", &s->emc[i], TYPE_NPCM7XX_EMC);
102
+ }
25
}
103
}
26
104
27
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
105
static void npcm7xx_realize(DeviceState *dev, Error **errp)
28
+static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
106
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
29
+ struct arm_boot_info *info)
107
sysbus_connect_irq(sbd, i, npcm7xx_irq(s, NPCM7XX_PWM0_IRQ + i));
30
{
31
+ /* Set up for a direct boot of a kernel image file. */
32
CPUState *cs;
33
+ AddressSpace *as = arm_boot_address_space(cpu, info);
34
int kernel_size;
35
int initrd_size;
36
int is_linux = 0;
37
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
38
int elf_machine;
39
hwaddr entry;
40
static const ARMInsnFixup *primary_loader;
41
- AddressSpace *as = arm_boot_address_space(cpu, info);
42
-
43
- /*
44
- * CPU objects (unlike devices) are not automatically reset on system
45
- * reset, so we must always register a handler to do so. If we're
46
- * actually loading a kernel, the handler is also responsible for
47
- * arranging that we start it correctly.
48
- */
49
- for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
50
- qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
51
- }
52
-
53
- /*
54
- * The board code is not supposed to set secure_board_setup unless
55
- * running its code in secure mode is actually possible, and KVM
56
- * doesn't support secure.
57
- */
58
- assert(!(info->secure_board_setup && kvm_enabled()));
59
-
60
- info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
61
- info->dtb_limit = 0;
62
-
63
- /* Load the kernel. */
64
- if (!info->kernel_filename || info->firmware_loaded) {
65
-
66
- if (have_dtb(info)) {
67
- /*
68
- * If we have a device tree blob, but no kernel to supply it to (or
69
- * the kernel is supposed to be loaded by the bootloader), copy the
70
- * DTB to the base of RAM for the bootloader to pick up.
71
- */
72
- info->dtb_start = info->loader_start;
73
- }
74
-
75
- if (info->kernel_filename) {
76
- FWCfgState *fw_cfg;
77
- bool try_decompressing_kernel;
78
-
79
- fw_cfg = fw_cfg_find();
80
- try_decompressing_kernel = arm_feature(&cpu->env,
81
- ARM_FEATURE_AARCH64);
82
-
83
- /*
84
- * Expose the kernel, the command line, and the initrd in fw_cfg.
85
- * We don't process them here at all, it's all left to the
86
- * firmware.
87
- */
88
- load_image_to_fw_cfg(fw_cfg,
89
- FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
90
- info->kernel_filename,
91
- try_decompressing_kernel);
92
- load_image_to_fw_cfg(fw_cfg,
93
- FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
94
- info->initrd_filename, false);
95
-
96
- if (info->kernel_cmdline) {
97
- fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
98
- strlen(info->kernel_cmdline) + 1);
99
- fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
100
- info->kernel_cmdline);
101
- }
102
- }
103
-
104
- /*
105
- * We will start from address 0 (typically a boot ROM image) in the
106
- * same way as hardware.
107
- */
108
- return;
109
- }
110
111
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
112
primary_loader = bootloader_aarch64;
113
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
114
for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
115
ARM_CPU(cs)->env.boot_info = info;
116
}
108
}
117
+}
109
118
+
119
+void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
120
+{
121
+ CPUState *cs;
122
+ AddressSpace *as = arm_boot_address_space(cpu, info);
123
+
124
+ /*
110
+ /*
125
+ * CPU objects (unlike devices) are not automatically reset on system
111
+ * EMC Modules. Cannot fail.
126
+ * reset, so we must always register a handler to do so. If we're
112
+ * The mapping of the device to its netdev backend works as follows:
127
+ * actually loading a kernel, the handler is also responsible for
113
+ * emc[i] = nd_table[i]
128
+ * arranging that we start it correctly.
114
+ * This works around the inability to specify the netdev property for the
115
+ * emc device: it's not pluggable and thus the -device option can't be
116
+ * used.
129
+ */
117
+ */
130
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
118
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_emc_addr) != ARRAY_SIZE(s->emc));
131
+ qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
119
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->emc) != 2);
120
+ for (i = 0; i < ARRAY_SIZE(s->emc); i++) {
121
+ s->emc[i].emc_num = i;
122
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->emc[i]);
123
+ if (nd_table[i].used) {
124
+ qemu_check_nic_model(&nd_table[i], TYPE_NPCM7XX_EMC);
125
+ qdev_set_nic_properties(DEVICE(sbd), &nd_table[i]);
126
+ }
127
+ /*
128
+ * The device exists regardless of whether it's connected to a QEMU
129
+ * netdev backend. So always instantiate it even if there is no
130
+ * backend.
131
+ */
132
+ sysbus_realize(sbd, &error_abort);
133
+ sysbus_mmio_map(sbd, 0, npcm7xx_emc_addr[i]);
134
+ int tx_irq = i == 0 ? NPCM7XX_EMC1TX_IRQ : NPCM7XX_EMC2TX_IRQ;
135
+ int rx_irq = i == 0 ? NPCM7XX_EMC1RX_IRQ : NPCM7XX_EMC2RX_IRQ;
136
+ /*
137
+ * N.B. The values for the second argument sysbus_connect_irq are
138
+ * chosen to match the registration order in npcm7xx_emc_realize.
139
+ */
140
+ sysbus_connect_irq(sbd, 0, npcm7xx_irq(s, tx_irq));
141
+ sysbus_connect_irq(sbd, 1, npcm7xx_irq(s, rx_irq));
132
+ }
142
+ }
133
+
143
+
134
+ /*
144
/*
135
+ * The board code is not supposed to set secure_board_setup unless
145
* Flash Interface Unit (FIU). Can fail if incorrect number of chip selects
136
+ * running its code in secure mode is actually possible, and KVM
146
* specified, but this is a programming error.
137
+ * doesn't support secure.
147
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
138
+ */
148
create_unimplemented_device("npcm7xx.vcd", 0xf0810000, 64 * KiB);
139
+ assert(!(info->secure_board_setup && kvm_enabled()));
149
create_unimplemented_device("npcm7xx.ece", 0xf0820000, 8 * KiB);
140
+
150
create_unimplemented_device("npcm7xx.vdma", 0xf0822000, 8 * KiB);
141
+ info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
151
- create_unimplemented_device("npcm7xx.emc1", 0xf0825000, 4 * KiB);
142
+ info->dtb_limit = 0;
152
- create_unimplemented_device("npcm7xx.emc2", 0xf0826000, 4 * KiB);
143
+
153
create_unimplemented_device("npcm7xx.usbd[0]", 0xf0830000, 4 * KiB);
144
+ /* Load the kernel. */
154
create_unimplemented_device("npcm7xx.usbd[1]", 0xf0831000, 4 * KiB);
145
+ if (!info->kernel_filename || info->firmware_loaded) {
155
create_unimplemented_device("npcm7xx.usbd[2]", 0xf0832000, 4 * KiB);
146
+
147
+ if (have_dtb(info)) {
148
+ /*
149
+ * If we have a device tree blob, but no kernel to supply it to (or
150
+ * the kernel is supposed to be loaded by the bootloader), copy the
151
+ * DTB to the base of RAM for the bootloader to pick up.
152
+ */
153
+ info->dtb_start = info->loader_start;
154
+ }
155
+
156
+ if (info->kernel_filename) {
157
+ FWCfgState *fw_cfg;
158
+ bool try_decompressing_kernel;
159
+
160
+ fw_cfg = fw_cfg_find();
161
+ try_decompressing_kernel = arm_feature(&cpu->env,
162
+ ARM_FEATURE_AARCH64);
163
+
164
+ /*
165
+ * Expose the kernel, the command line, and the initrd in fw_cfg.
166
+ * We don't process them here at all, it's all left to the
167
+ * firmware.
168
+ */
169
+ load_image_to_fw_cfg(fw_cfg,
170
+ FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
171
+ info->kernel_filename,
172
+ try_decompressing_kernel);
173
+ load_image_to_fw_cfg(fw_cfg,
174
+ FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
175
+ info->initrd_filename, false);
176
+
177
+ if (info->kernel_cmdline) {
178
+ fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
179
+ strlen(info->kernel_cmdline) + 1);
180
+ fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA,
181
+ info->kernel_cmdline);
182
+ }
183
+ }
184
+
185
+ /*
186
+ * We will start from address 0 (typically a boot ROM image) in the
187
+ * same way as hardware.
188
+ */
189
+ return;
190
+ } else {
191
+ arm_setup_direct_kernel_boot(cpu, info);
192
+ }
193
194
if (!info->skip_dtb_autoload && have_dtb(info)) {
195
if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
196
--
156
--
197
2.20.1
157
2.20.1
198
158
199
159
diff view generated by jsdifflib
New patch
1
From: Doug Evans <dje@google.com>
1
2
3
Reviewed-by: Hao Wu <wuhaotsh@google.com>
4
Reviewed-by: Avi Fishman <avi.fishman@nuvoton.com>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Doug Evans <dje@google.com>
7
Message-id: 20210209015541.778833-4-dje@google.com
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
tests/qtest/npcm7xx_emc-test.c | 812 +++++++++++++++++++++++++++++++++
11
tests/qtest/meson.build | 1 +
12
2 files changed, 813 insertions(+)
13
create mode 100644 tests/qtest/npcm7xx_emc-test.c
14
15
diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c
16
new file mode 100644
17
index XXXXXXX..XXXXXXX
18
--- /dev/null
19
+++ b/tests/qtest/npcm7xx_emc-test.c
20
@@ -XXX,XX +XXX,XX @@
21
+/*
22
+ * QTests for Nuvoton NPCM7xx EMC Modules.
23
+ *
24
+ * Copyright 2020 Google LLC
25
+ *
26
+ * This program is free software; you can redistribute it and/or modify it
27
+ * under the terms of the GNU General Public License as published by the
28
+ * Free Software Foundation; either version 2 of the License, or
29
+ * (at your option) any later version.
30
+ *
31
+ * This program is distributed in the hope that it will be useful, but WITHOUT
32
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
33
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
34
+ * for more details.
35
+ */
36
+
37
+#include "qemu/osdep.h"
38
+#include "qemu-common.h"
39
+#include "libqos/libqos.h"
40
+#include "qapi/qmp/qdict.h"
41
+#include "qapi/qmp/qnum.h"
42
+#include "qemu/bitops.h"
43
+#include "qemu/iov.h"
44
+
45
+/* Name of the emc device. */
46
+#define TYPE_NPCM7XX_EMC "npcm7xx-emc"
47
+
48
+/* Timeout for various operations, in seconds. */
49
+#define TIMEOUT_SECONDS 10
50
+
51
+/* Address in memory of the descriptor. */
52
+#define DESC_ADDR (1 << 20) /* 1 MiB */
53
+
54
+/* Address in memory of the data packet. */
55
+#define DATA_ADDR (DESC_ADDR + 4096)
56
+
57
+#define CRC_LENGTH 4
58
+
59
+#define NUM_TX_DESCRIPTORS 3
60
+#define NUM_RX_DESCRIPTORS 2
61
+
62
+/* Size of tx,rx test buffers. */
63
+#define TX_DATA_LEN 64
64
+#define RX_DATA_LEN 64
65
+
66
+#define TX_STEP_COUNT 10000
67
+#define RX_STEP_COUNT 10000
68
+
69
+/* 32-bit register indices. */
70
+typedef enum NPCM7xxPWMRegister {
71
+ /* Control registers. */
72
+ REG_CAMCMR,
73
+ REG_CAMEN,
74
+
75
+ /* There are 16 CAMn[ML] registers. */
76
+ REG_CAMM_BASE,
77
+ REG_CAML_BASE,
78
+
79
+ REG_TXDLSA = 0x22,
80
+ REG_RXDLSA,
81
+ REG_MCMDR,
82
+ REG_MIID,
83
+ REG_MIIDA,
84
+ REG_FFTCR,
85
+ REG_TSDR,
86
+ REG_RSDR,
87
+ REG_DMARFC,
88
+ REG_MIEN,
89
+
90
+ /* Status registers. */
91
+ REG_MISTA,
92
+ REG_MGSTA,
93
+ REG_MPCNT,
94
+ REG_MRPC,
95
+ REG_MRPCC,
96
+ REG_MREPC,
97
+ REG_DMARFS,
98
+ REG_CTXDSA,
99
+ REG_CTXBSA,
100
+ REG_CRXDSA,
101
+ REG_CRXBSA,
102
+
103
+ NPCM7XX_NUM_EMC_REGS,
104
+} NPCM7xxPWMRegister;
105
+
106
+enum { NUM_CAMML_REGS = 16 };
107
+
108
+/* REG_CAMCMR fields */
109
+/* Enable CAM Compare */
110
+#define REG_CAMCMR_ECMP (1 << 4)
111
+/* Accept Unicast Packet */
112
+#define REG_CAMCMR_AUP (1 << 0)
113
+
114
+/* REG_MCMDR fields */
115
+/* Software Reset */
116
+#define REG_MCMDR_SWR (1 << 24)
117
+/* Frame Transmission On */
118
+#define REG_MCMDR_TXON (1 << 8)
119
+/* Accept Long Packet */
120
+#define REG_MCMDR_ALP (1 << 1)
121
+/* Frame Reception On */
122
+#define REG_MCMDR_RXON (1 << 0)
123
+
124
+/* REG_MIEN fields */
125
+/* Enable Transmit Completion Interrupt */
126
+#define REG_MIEN_ENTXCP (1 << 18)
127
+/* Enable Transmit Interrupt */
128
+#define REG_MIEN_ENTXINTR (1 << 16)
129
+/* Enable Receive Good Interrupt */
130
+#define REG_MIEN_ENRXGD (1 << 4)
131
+/* ENable Receive Interrupt */
132
+#define REG_MIEN_ENRXINTR (1 << 0)
133
+
134
+/* REG_MISTA fields */
135
+/* Transmit Bus Error Interrupt */
136
+#define REG_MISTA_TXBERR (1 << 24)
137
+/* Transmit Descriptor Unavailable Interrupt */
138
+#define REG_MISTA_TDU (1 << 23)
139
+/* Transmit Completion Interrupt */
140
+#define REG_MISTA_TXCP (1 << 18)
141
+/* Transmit Interrupt */
142
+#define REG_MISTA_TXINTR (1 << 16)
143
+/* Receive Bus Error Interrupt */
144
+#define REG_MISTA_RXBERR (1 << 11)
145
+/* Receive Descriptor Unavailable Interrupt */
146
+#define REG_MISTA_RDU (1 << 10)
147
+/* DMA Early Notification Interrupt */
148
+#define REG_MISTA_DENI (1 << 9)
149
+/* Maximum Frame Length Interrupt */
150
+#define REG_MISTA_DFOI (1 << 8)
151
+/* Receive Good Interrupt */
152
+#define REG_MISTA_RXGD (1 << 4)
153
+/* Packet Too Long Interrupt */
154
+#define REG_MISTA_PTLE (1 << 3)
155
+/* Receive Interrupt */
156
+#define REG_MISTA_RXINTR (1 << 0)
157
+
158
+typedef struct NPCM7xxEMCTxDesc NPCM7xxEMCTxDesc;
159
+typedef struct NPCM7xxEMCRxDesc NPCM7xxEMCRxDesc;
160
+
161
+struct NPCM7xxEMCTxDesc {
162
+ uint32_t flags;
163
+ uint32_t txbsa;
164
+ uint32_t status_and_length;
165
+ uint32_t ntxdsa;
166
+};
167
+
168
+struct NPCM7xxEMCRxDesc {
169
+ uint32_t status_and_length;
170
+ uint32_t rxbsa;
171
+ uint32_t reserved;
172
+ uint32_t nrxdsa;
173
+};
174
+
175
+/* NPCM7xxEMCTxDesc.flags values */
176
+/* Owner: 0 = cpu, 1 = emc */
177
+#define TX_DESC_FLAG_OWNER_MASK (1 << 31)
178
+/* Transmit interrupt enable */
179
+#define TX_DESC_FLAG_INTEN (1 << 2)
180
+
181
+/* NPCM7xxEMCTxDesc.status_and_length values */
182
+/* Transmission complete */
183
+#define TX_DESC_STATUS_TXCP (1 << 19)
184
+/* Transmit interrupt */
185
+#define TX_DESC_STATUS_TXINTR (1 << 16)
186
+
187
+/* NPCM7xxEMCRxDesc.status_and_length values */
188
+/* Owner: 0b00 = cpu, 0b10 = emc */
189
+#define RX_DESC_STATUS_OWNER_SHIFT 30
190
+#define RX_DESC_STATUS_OWNER_MASK 0xc0000000
191
+/* Frame Reception Complete */
192
+#define RX_DESC_STATUS_RXGD (1 << 20)
193
+/* Packet too long */
194
+#define RX_DESC_STATUS_PTLE (1 << 19)
195
+/* Receive Interrupt */
196
+#define RX_DESC_STATUS_RXINTR (1 << 16)
197
+
198
+#define RX_DESC_PKT_LEN(word) ((uint32_t) (word) & 0xffff)
199
+
200
+typedef struct EMCModule {
201
+ int rx_irq;
202
+ int tx_irq;
203
+ uint64_t base_addr;
204
+} EMCModule;
205
+
206
+typedef struct TestData {
207
+ const EMCModule *module;
208
+} TestData;
209
+
210
+static const EMCModule emc_module_list[] = {
211
+ {
212
+ .rx_irq = 15,
213
+ .tx_irq = 16,
214
+ .base_addr = 0xf0825000
215
+ },
216
+ {
217
+ .rx_irq = 114,
218
+ .tx_irq = 115,
219
+ .base_addr = 0xf0826000
220
+ }
221
+};
222
+
223
+/* Returns the index of the EMC module. */
224
+static int emc_module_index(const EMCModule *mod)
225
+{
226
+ ptrdiff_t diff = mod - emc_module_list;
227
+
228
+ g_assert_true(diff >= 0 && diff < ARRAY_SIZE(emc_module_list));
229
+
230
+ return diff;
231
+}
232
+
233
+static void packet_test_clear(void *sockets)
234
+{
235
+ int *test_sockets = sockets;
236
+
237
+ close(test_sockets[0]);
238
+ g_free(test_sockets);
239
+}
240
+
241
+static int *packet_test_init(int module_num, GString *cmd_line)
242
+{
243
+ int *test_sockets = g_new(int, 2);
244
+ int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, test_sockets);
245
+ g_assert_cmpint(ret, != , -1);
246
+
247
+ /*
248
+ * KISS and use -nic. We specify two nics (both emc{0,1}) because there's
249
+ * currently no way to specify only emc1: The driver implicitly relies on
250
+ * emc[i] == nd_table[i].
251
+ */
252
+ if (module_num == 0) {
253
+ g_string_append_printf(cmd_line,
254
+ " -nic socket,fd=%d,model=" TYPE_NPCM7XX_EMC " "
255
+ " -nic user,model=" TYPE_NPCM7XX_EMC " ",
256
+ test_sockets[1]);
257
+ } else {
258
+ g_string_append_printf(cmd_line,
259
+ " -nic user,model=" TYPE_NPCM7XX_EMC " "
260
+ " -nic socket,fd=%d,model=" TYPE_NPCM7XX_EMC " ",
261
+ test_sockets[1]);
262
+ }
263
+
264
+ g_test_queue_destroy(packet_test_clear, test_sockets);
265
+ return test_sockets;
266
+}
267
+
268
+static uint32_t emc_read(QTestState *qts, const EMCModule *mod,
269
+ NPCM7xxPWMRegister regno)
270
+{
271
+ return qtest_readl(qts, mod->base_addr + regno * sizeof(uint32_t));
272
+}
273
+
274
+static void emc_write(QTestState *qts, const EMCModule *mod,
275
+ NPCM7xxPWMRegister regno, uint32_t value)
276
+{
277
+ qtest_writel(qts, mod->base_addr + regno * sizeof(uint32_t), value);
278
+}
279
+
280
+/*
281
+ * Reset the EMC module.
282
+ * The module must be reset before, e.g., TXDLSA,RXDLSA are changed.
283
+ */
284
+static bool emc_soft_reset(QTestState *qts, const EMCModule *mod)
285
+{
286
+ uint32_t val;
287
+ uint64_t end_time;
288
+
289
+ emc_write(qts, mod, REG_MCMDR, REG_MCMDR_SWR);
290
+
291
+ /*
292
+ * Wait for device to reset as the linux driver does.
293
+ * During reset the AHB reads 0 for all registers. So first wait for
294
+ * something that resets to non-zero, and then wait for SWR becoming 0.
295
+ */
296
+ end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
297
+
298
+ do {
299
+ qtest_clock_step(qts, 100);
300
+ val = emc_read(qts, mod, REG_FFTCR);
301
+ } while (val == 0 && g_get_monotonic_time() < end_time);
302
+ if (val != 0) {
303
+ do {
304
+ qtest_clock_step(qts, 100);
305
+ val = emc_read(qts, mod, REG_MCMDR);
306
+ if ((val & REG_MCMDR_SWR) == 0) {
307
+ /*
308
+ * N.B. The CAMs have been reset here, so macaddr matching of
309
+ * incoming packets will not work.
310
+ */
311
+ return true;
312
+ }
313
+ } while (g_get_monotonic_time() < end_time);
314
+ }
315
+
316
+ g_message("%s: Timeout expired", __func__);
317
+ return false;
318
+}
319
+
320
+/* Check emc registers are reset to default value. */
321
+static void test_init(gconstpointer test_data)
322
+{
323
+ const TestData *td = test_data;
324
+ const EMCModule *mod = td->module;
325
+ QTestState *qts = qtest_init("-machine quanta-gsj");
326
+ int i;
327
+
328
+#define CHECK_REG(regno, value) \
329
+ do { \
330
+ g_assert_cmphex(emc_read(qts, mod, (regno)), ==, (value)); \
331
+ } while (0)
332
+
333
+ CHECK_REG(REG_CAMCMR, 0);
334
+ CHECK_REG(REG_CAMEN, 0);
335
+ CHECK_REG(REG_TXDLSA, 0xfffffffc);
336
+ CHECK_REG(REG_RXDLSA, 0xfffffffc);
337
+ CHECK_REG(REG_MCMDR, 0);
338
+ CHECK_REG(REG_MIID, 0);
339
+ CHECK_REG(REG_MIIDA, 0x00900000);
340
+ CHECK_REG(REG_FFTCR, 0x0101);
341
+ CHECK_REG(REG_DMARFC, 0x0800);
342
+ CHECK_REG(REG_MIEN, 0);
343
+ CHECK_REG(REG_MISTA, 0);
344
+ CHECK_REG(REG_MGSTA, 0);
345
+ CHECK_REG(REG_MPCNT, 0x7fff);
346
+ CHECK_REG(REG_MRPC, 0);
347
+ CHECK_REG(REG_MRPCC, 0);
348
+ CHECK_REG(REG_MREPC, 0);
349
+ CHECK_REG(REG_DMARFS, 0);
350
+ CHECK_REG(REG_CTXDSA, 0);
351
+ CHECK_REG(REG_CTXBSA, 0);
352
+ CHECK_REG(REG_CRXDSA, 0);
353
+ CHECK_REG(REG_CRXBSA, 0);
354
+
355
+#undef CHECK_REG
356
+
357
+ for (i = 0; i < NUM_CAMML_REGS; ++i) {
358
+ g_assert_cmpuint(emc_read(qts, mod, REG_CAMM_BASE + i * 2), ==,
359
+ 0);
360
+ g_assert_cmpuint(emc_read(qts, mod, REG_CAML_BASE + i * 2), ==,
361
+ 0);
362
+ }
363
+
364
+ qtest_quit(qts);
365
+}
366
+
367
+static bool emc_wait_irq(QTestState *qts, const EMCModule *mod, int step,
368
+ bool is_tx)
369
+{
370
+ uint64_t end_time =
371
+ g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
372
+
373
+ do {
374
+ if (qtest_get_irq(qts, is_tx ? mod->tx_irq : mod->rx_irq)) {
375
+ return true;
376
+ }
377
+ qtest_clock_step(qts, step);
378
+ } while (g_get_monotonic_time() < end_time);
379
+
380
+ g_message("%s: Timeout expired", __func__);
381
+ return false;
382
+}
383
+
384
+static bool emc_wait_mista(QTestState *qts, const EMCModule *mod, int step,
385
+ uint32_t flag)
386
+{
387
+ uint64_t end_time =
388
+ g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
389
+
390
+ do {
391
+ uint32_t mista = emc_read(qts, mod, REG_MISTA);
392
+ if (mista & flag) {
393
+ return true;
394
+ }
395
+ qtest_clock_step(qts, step);
396
+ } while (g_get_monotonic_time() < end_time);
397
+
398
+ g_message("%s: Timeout expired", __func__);
399
+ return false;
400
+}
401
+
402
+static bool wait_socket_readable(int fd)
403
+{
404
+ fd_set read_fds;
405
+ struct timeval tv;
406
+ int rv;
407
+
408
+ FD_ZERO(&read_fds);
409
+ FD_SET(fd, &read_fds);
410
+ tv.tv_sec = TIMEOUT_SECONDS;
411
+ tv.tv_usec = 0;
412
+ rv = select(fd + 1, &read_fds, NULL, NULL, &tv);
413
+ if (rv == -1) {
414
+ perror("select");
415
+ } else if (rv == 0) {
416
+ g_message("%s: Timeout expired", __func__);
417
+ }
418
+ return rv == 1;
419
+}
420
+
421
+static void init_tx_desc(NPCM7xxEMCTxDesc *desc, size_t count,
422
+ uint32_t desc_addr)
423
+{
424
+ g_assert(count >= 2);
425
+ memset(&desc[0], 0, sizeof(*desc) * count);
426
+ /* Leave the last one alone, owned by the cpu -> stops transmission. */
427
+ for (size_t i = 0; i < count - 1; ++i) {
428
+ desc[i].flags =
429
+ cpu_to_le32(TX_DESC_FLAG_OWNER_MASK | /* owner = 1: emc */
430
+ TX_DESC_FLAG_INTEN |
431
+ 0 | /* crc append = 0 */
432
+ 0 /* padding enable = 0 */);
433
+ desc[i].status_and_length =
434
+ cpu_to_le32(0 | /* collision count = 0 */
435
+ 0 | /* SQE = 0 */
436
+ 0 | /* PAU = 0 */
437
+ 0 | /* TXHA = 0 */
438
+ 0 | /* LC = 0 */
439
+ 0 | /* TXABT = 0 */
440
+ 0 | /* NCS = 0 */
441
+ 0 | /* EXDEF = 0 */
442
+ 0 | /* TXCP = 0 */
443
+ 0 | /* DEF = 0 */
444
+ 0 | /* TXINTR = 0 */
445
+ 0 /* length filled in later */);
446
+ desc[i].ntxdsa = cpu_to_le32(desc_addr + (i + 1) * sizeof(*desc));
447
+ }
448
+}
449
+
450
+static void enable_tx(QTestState *qts, const EMCModule *mod,
451
+ const NPCM7xxEMCTxDesc *desc, size_t count,
452
+ uint32_t desc_addr, uint32_t mien_flags)
453
+{
454
+ /* Write the descriptors to guest memory. */
455
+ qtest_memwrite(qts, desc_addr, desc, sizeof(*desc) * count);
456
+
457
+ /* Trigger sending the packet. */
458
+ /* The module must be reset before changing TXDLSA. */
459
+ g_assert(emc_soft_reset(qts, mod));
460
+ emc_write(qts, mod, REG_TXDLSA, desc_addr);
461
+ emc_write(qts, mod, REG_CTXDSA, ~0);
462
+ emc_write(qts, mod, REG_MIEN, REG_MIEN_ENTXCP | mien_flags);
463
+ {
464
+ uint32_t mcmdr = emc_read(qts, mod, REG_MCMDR);
465
+ mcmdr |= REG_MCMDR_TXON;
466
+ emc_write(qts, mod, REG_MCMDR, mcmdr);
467
+ }
468
+
469
+ /* Prod the device to send the packet. */
470
+ emc_write(qts, mod, REG_TSDR, 1);
471
+}
472
+
473
+static void emc_send_verify1(QTestState *qts, const EMCModule *mod, int fd,
474
+ bool with_irq, uint32_t desc_addr,
475
+ uint32_t next_desc_addr,
476
+ const char *test_data, int test_size)
477
+{
478
+ NPCM7xxEMCTxDesc result_desc;
479
+ uint32_t expected_mask, expected_value, recv_len;
480
+ int ret;
481
+ char buffer[TX_DATA_LEN];
482
+
483
+ g_assert(wait_socket_readable(fd));
484
+
485
+ /* Read the descriptor back. */
486
+ qtest_memread(qts, desc_addr, &result_desc, sizeof(result_desc));
487
+ /* Descriptor should be owned by cpu now. */
488
+ g_assert((result_desc.flags & TX_DESC_FLAG_OWNER_MASK) == 0);
489
+ /* Test the status bits, ignoring the length field. */
490
+ expected_mask = 0xffff << 16;
491
+ expected_value = TX_DESC_STATUS_TXCP;
492
+ if (with_irq) {
493
+ expected_value |= TX_DESC_STATUS_TXINTR;
494
+ }
495
+ g_assert_cmphex((result_desc.status_and_length & expected_mask), ==,
496
+ expected_value);
497
+
498
+ /* Check data sent to the backend. */
499
+ recv_len = ~0;
500
+ ret = qemu_recv(fd, &recv_len, sizeof(recv_len), MSG_DONTWAIT);
501
+ g_assert_cmpint(ret, == , sizeof(recv_len));
502
+
503
+ g_assert(wait_socket_readable(fd));
504
+ memset(buffer, 0xff, sizeof(buffer));
505
+ ret = qemu_recv(fd, buffer, test_size, MSG_DONTWAIT);
506
+ g_assert_cmpmem(buffer, ret, test_data, test_size);
507
+}
508
+
509
+static void emc_send_verify(QTestState *qts, const EMCModule *mod, int fd,
510
+ bool with_irq)
511
+{
512
+ NPCM7xxEMCTxDesc desc[NUM_TX_DESCRIPTORS];
513
+ uint32_t desc_addr = DESC_ADDR;
514
+ static const char test1_data[] = "TEST1";
515
+ static const char test2_data[] = "Testing 1 2 3 ...";
516
+ uint32_t data1_addr = DATA_ADDR;
517
+ uint32_t data2_addr = data1_addr + sizeof(test1_data);
518
+ bool got_tdu;
519
+ uint32_t end_desc_addr;
520
+
521
+ /* Prepare test data buffer. */
522
+ qtest_memwrite(qts, data1_addr, test1_data, sizeof(test1_data));
523
+ qtest_memwrite(qts, data2_addr, test2_data, sizeof(test2_data));
524
+
525
+ init_tx_desc(&desc[0], NUM_TX_DESCRIPTORS, desc_addr);
526
+ desc[0].txbsa = cpu_to_le32(data1_addr);
527
+ desc[0].status_and_length |= sizeof(test1_data);
528
+ desc[1].txbsa = cpu_to_le32(data2_addr);
529
+ desc[1].status_and_length |= sizeof(test2_data);
530
+
531
+ enable_tx(qts, mod, &desc[0], NUM_TX_DESCRIPTORS, desc_addr,
532
+ with_irq ? REG_MIEN_ENTXINTR : 0);
533
+
534
+ /*
535
+ * It's problematic to observe the interrupt for each packet.
536
+ * Instead just wait until all the packets go out.
537
+ */
538
+ got_tdu = false;
539
+ while (!got_tdu) {
540
+ if (with_irq) {
541
+ g_assert_true(emc_wait_irq(qts, mod, TX_STEP_COUNT,
542
+ /*is_tx=*/true));
543
+ } else {
544
+ g_assert_true(emc_wait_mista(qts, mod, TX_STEP_COUNT,
545
+ REG_MISTA_TXINTR));
546
+ }
547
+ got_tdu = !!(emc_read(qts, mod, REG_MISTA) & REG_MISTA_TDU);
548
+ /* If we don't have TDU yet, reset the interrupt. */
549
+ if (!got_tdu) {
550
+ emc_write(qts, mod, REG_MISTA,
551
+ emc_read(qts, mod, REG_MISTA) & 0xffff0000);
552
+ }
553
+ }
554
+
555
+ end_desc_addr = desc_addr + 2 * sizeof(desc[0]);
556
+ g_assert_cmphex(emc_read(qts, mod, REG_CTXDSA), ==, end_desc_addr);
557
+ g_assert_cmphex(emc_read(qts, mod, REG_MISTA), ==,
558
+ REG_MISTA_TXCP | REG_MISTA_TXINTR | REG_MISTA_TDU);
559
+
560
+ emc_send_verify1(qts, mod, fd, with_irq,
561
+ desc_addr, end_desc_addr,
562
+ test1_data, sizeof(test1_data));
563
+ emc_send_verify1(qts, mod, fd, with_irq,
564
+ desc_addr + sizeof(desc[0]), end_desc_addr,
565
+ test2_data, sizeof(test2_data));
566
+}
567
+
568
+static void init_rx_desc(NPCM7xxEMCRxDesc *desc, size_t count,
569
+ uint32_t desc_addr, uint32_t data_addr)
570
+{
571
+ g_assert_true(count >= 2);
572
+ memset(desc, 0, sizeof(*desc) * count);
573
+ desc[0].rxbsa = cpu_to_le32(data_addr);
574
+ desc[0].status_and_length =
575
+ cpu_to_le32(0b10 << RX_DESC_STATUS_OWNER_SHIFT | /* owner = 10: emc */
576
+ 0 | /* RP = 0 */
577
+ 0 | /* ALIE = 0 */
578
+ 0 | /* RXGD = 0 */
579
+ 0 | /* PTLE = 0 */
580
+ 0 | /* CRCE = 0 */
581
+ 0 | /* RXINTR = 0 */
582
+ 0 /* length (filled in later) */);
583
+ /* Leave the last one alone, owned by the cpu -> stops transmission. */
584
+ desc[0].nrxdsa = cpu_to_le32(desc_addr + sizeof(*desc));
585
+}
586
+
587
+static void enable_rx(QTestState *qts, const EMCModule *mod,
588
+ const NPCM7xxEMCRxDesc *desc, size_t count,
589
+ uint32_t desc_addr, uint32_t mien_flags,
590
+ uint32_t mcmdr_flags)
591
+{
592
+ /*
593
+ * Write the descriptor to guest memory.
594
+ * FWIW, IWBN if the docs said the buffer needs to be at least DMARFC
595
+ * bytes.
596
+ */
597
+ qtest_memwrite(qts, desc_addr, desc, sizeof(*desc) * count);
598
+
599
+ /* Trigger receiving the packet. */
600
+ /* The module must be reset before changing RXDLSA. */
601
+ g_assert(emc_soft_reset(qts, mod));
602
+ emc_write(qts, mod, REG_RXDLSA, desc_addr);
603
+ emc_write(qts, mod, REG_MIEN, REG_MIEN_ENRXGD | mien_flags);
604
+
605
+ /*
606
+ * We don't know what the device's macaddr is, so just accept all
607
+ * unicast packets (AUP).
608
+ */
609
+ emc_write(qts, mod, REG_CAMCMR, REG_CAMCMR_AUP);
610
+ emc_write(qts, mod, REG_CAMEN, 1 << 0);
611
+ {
612
+ uint32_t mcmdr = emc_read(qts, mod, REG_MCMDR);
613
+ mcmdr |= REG_MCMDR_RXON | mcmdr_flags;
614
+ emc_write(qts, mod, REG_MCMDR, mcmdr);
615
+ }
616
+
617
+ /* Prod the device to accept a packet. */
618
+ emc_write(qts, mod, REG_RSDR, 1);
619
+}
620
+
621
+static void emc_recv_verify(QTestState *qts, const EMCModule *mod, int fd,
622
+ bool with_irq)
623
+{
624
+ NPCM7xxEMCRxDesc desc[NUM_RX_DESCRIPTORS];
625
+ uint32_t desc_addr = DESC_ADDR;
626
+ uint32_t data_addr = DATA_ADDR;
627
+ int ret;
628
+ uint32_t expected_mask, expected_value;
629
+ NPCM7xxEMCRxDesc result_desc;
630
+
631
+ /* Prepare test data buffer. */
632
+ const char test[RX_DATA_LEN] = "TEST";
633
+ int len = htonl(sizeof(test));
634
+ const struct iovec iov[] = {
635
+ {
636
+ .iov_base = &len,
637
+ .iov_len = sizeof(len),
638
+ },{
639
+ .iov_base = (char *) test,
640
+ .iov_len = sizeof(test),
641
+ },
642
+ };
643
+
644
+ /*
645
+ * Reset the device BEFORE sending a test packet, otherwise the packet
646
+ * may get swallowed by an active device of an earlier test.
647
+ */
648
+ init_rx_desc(&desc[0], NUM_RX_DESCRIPTORS, desc_addr, data_addr);
649
+ enable_rx(qts, mod, &desc[0], NUM_RX_DESCRIPTORS, desc_addr,
650
+ with_irq ? REG_MIEN_ENRXINTR : 0, 0);
651
+
652
+ /* Send test packet to device's socket. */
653
+ ret = iov_send(fd, iov, 2, 0, sizeof(len) + sizeof(test));
654
+ g_assert_cmpint(ret, == , sizeof(test) + sizeof(len));
655
+
656
+ /* Wait for RX interrupt. */
657
+ if (with_irq) {
658
+ g_assert_true(emc_wait_irq(qts, mod, RX_STEP_COUNT, /*is_tx=*/false));
659
+ } else {
660
+ g_assert_true(emc_wait_mista(qts, mod, RX_STEP_COUNT, REG_MISTA_RXGD));
661
+ }
662
+
663
+ g_assert_cmphex(emc_read(qts, mod, REG_CRXDSA), ==,
664
+ desc_addr + sizeof(desc[0]));
665
+
666
+ expected_mask = 0xffff;
667
+ expected_value = (REG_MISTA_DENI |
668
+ REG_MISTA_RXGD |
669
+ REG_MISTA_RXINTR);
670
+ g_assert_cmphex((emc_read(qts, mod, REG_MISTA) & expected_mask),
671
+ ==, expected_value);
672
+
673
+ /* Read the descriptor back. */
674
+ qtest_memread(qts, desc_addr, &result_desc, sizeof(result_desc));
675
+ /* Descriptor should be owned by cpu now. */
676
+ g_assert((result_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK) == 0);
677
+ /* Test the status bits, ignoring the length field. */
678
+ expected_mask = 0xffff << 16;
679
+ expected_value = RX_DESC_STATUS_RXGD;
680
+ if (with_irq) {
681
+ expected_value |= RX_DESC_STATUS_RXINTR;
682
+ }
683
+ g_assert_cmphex((result_desc.status_and_length & expected_mask), ==,
684
+ expected_value);
685
+ g_assert_cmpint(RX_DESC_PKT_LEN(result_desc.status_and_length), ==,
686
+ RX_DATA_LEN + CRC_LENGTH);
687
+
688
+ {
689
+ char buffer[RX_DATA_LEN];
690
+ qtest_memread(qts, data_addr, buffer, sizeof(buffer));
691
+ g_assert_cmpstr(buffer, == , "TEST");
692
+ }
693
+}
694
+
695
+static void emc_test_ptle(QTestState *qts, const EMCModule *mod, int fd)
696
+{
697
+ NPCM7xxEMCRxDesc desc[NUM_RX_DESCRIPTORS];
698
+ uint32_t desc_addr = DESC_ADDR;
699
+ uint32_t data_addr = DATA_ADDR;
700
+ int ret;
701
+ NPCM7xxEMCRxDesc result_desc;
702
+ uint32_t expected_mask, expected_value;
703
+
704
+ /* Prepare test data buffer. */
705
+#define PTLE_DATA_LEN 1600
706
+ char test_data[PTLE_DATA_LEN];
707
+ int len = htonl(sizeof(test_data));
708
+ const struct iovec iov[] = {
709
+ {
710
+ .iov_base = &len,
711
+ .iov_len = sizeof(len),
712
+ },{
713
+ .iov_base = (char *) test_data,
714
+ .iov_len = sizeof(test_data),
715
+ },
716
+ };
717
+ memset(test_data, 42, sizeof(test_data));
718
+
719
+ /*
720
+ * Reset the device BEFORE sending a test packet, otherwise the packet
721
+ * may get swallowed by an active device of an earlier test.
722
+ */
723
+ init_rx_desc(&desc[0], NUM_RX_DESCRIPTORS, desc_addr, data_addr);
724
+ enable_rx(qts, mod, &desc[0], NUM_RX_DESCRIPTORS, desc_addr,
725
+ REG_MIEN_ENRXINTR, REG_MCMDR_ALP);
726
+
727
+ /* Send test packet to device's socket. */
728
+ ret = iov_send(fd, iov, 2, 0, sizeof(len) + sizeof(test_data));
729
+ g_assert_cmpint(ret, == , sizeof(test_data) + sizeof(len));
730
+
731
+ /* Wait for RX interrupt. */
732
+ g_assert_true(emc_wait_irq(qts, mod, RX_STEP_COUNT, /*is_tx=*/false));
733
+
734
+ /* Read the descriptor back. */
735
+ qtest_memread(qts, desc_addr, &result_desc, sizeof(result_desc));
736
+ /* Descriptor should be owned by cpu now. */
737
+ g_assert((result_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK) == 0);
738
+ /* Test the status bits, ignoring the length field. */
739
+ expected_mask = 0xffff << 16;
740
+ expected_value = (RX_DESC_STATUS_RXGD |
741
+ RX_DESC_STATUS_PTLE |
742
+ RX_DESC_STATUS_RXINTR);
743
+ g_assert_cmphex((result_desc.status_and_length & expected_mask), ==,
744
+ expected_value);
745
+ g_assert_cmpint(RX_DESC_PKT_LEN(result_desc.status_and_length), ==,
746
+ PTLE_DATA_LEN + CRC_LENGTH);
747
+
748
+ {
749
+ char buffer[PTLE_DATA_LEN];
750
+ qtest_memread(qts, data_addr, buffer, sizeof(buffer));
751
+ g_assert(memcmp(buffer, test_data, PTLE_DATA_LEN) == 0);
752
+ }
753
+}
754
+
755
+static void test_tx(gconstpointer test_data)
756
+{
757
+ const TestData *td = test_data;
758
+ GString *cmd_line = g_string_new("-machine quanta-gsj");
759
+ int *test_sockets = packet_test_init(emc_module_index(td->module),
760
+ cmd_line);
761
+ QTestState *qts = qtest_init(cmd_line->str);
762
+
763
+ /*
764
+ * TODO: For pedantic correctness test_sockets[0] should be closed after
765
+ * the fork and before the exec, but that will require some harness
766
+ * improvements.
767
+ */
768
+ close(test_sockets[1]);
769
+ /* Defensive programming */
770
+ test_sockets[1] = -1;
771
+
772
+ qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic");
773
+
774
+ emc_send_verify(qts, td->module, test_sockets[0], /*with_irq=*/false);
775
+ emc_send_verify(qts, td->module, test_sockets[0], /*with_irq=*/true);
776
+
777
+ qtest_quit(qts);
778
+}
779
+
780
+static void test_rx(gconstpointer test_data)
781
+{
782
+ const TestData *td = test_data;
783
+ GString *cmd_line = g_string_new("-machine quanta-gsj");
784
+ int *test_sockets = packet_test_init(emc_module_index(td->module),
785
+ cmd_line);
786
+ QTestState *qts = qtest_init(cmd_line->str);
787
+
788
+ /*
789
+ * TODO: For pedantic correctness test_sockets[0] should be closed after
790
+ * the fork and before the exec, but that will require some harness
791
+ * improvements.
792
+ */
793
+ close(test_sockets[1]);
794
+ /* Defensive programming */
795
+ test_sockets[1] = -1;
796
+
797
+ qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic");
798
+
799
+ emc_recv_verify(qts, td->module, test_sockets[0], /*with_irq=*/false);
800
+ emc_recv_verify(qts, td->module, test_sockets[0], /*with_irq=*/true);
801
+ emc_test_ptle(qts, td->module, test_sockets[0]);
802
+
803
+ qtest_quit(qts);
804
+}
805
+
806
+static void emc_add_test(const char *name, const TestData* td,
807
+ GTestDataFunc fn)
808
+{
809
+ g_autofree char *full_name = g_strdup_printf(
810
+ "npcm7xx_emc/emc[%d]/%s", emc_module_index(td->module), name);
811
+ qtest_add_data_func(full_name, td, fn);
812
+}
813
+#define add_test(name, td) emc_add_test(#name, td, test_##name)
814
+
815
+int main(int argc, char **argv)
816
+{
817
+ TestData test_data_list[ARRAY_SIZE(emc_module_list)];
818
+
819
+ g_test_init(&argc, &argv, NULL);
820
+
821
+ for (int i = 0; i < ARRAY_SIZE(emc_module_list); ++i) {
822
+ TestData *td = &test_data_list[i];
823
+
824
+ td->module = &emc_module_list[i];
825
+
826
+ add_test(init, td);
827
+ add_test(tx, td);
828
+ add_test(rx, td);
829
+ }
830
+
831
+ return g_test_run();
832
+}
833
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
834
index XXXXXXX..XXXXXXX 100644
835
--- a/tests/qtest/meson.build
836
+++ b/tests/qtest/meson.build
837
@@ -XXX,XX +XXX,XX @@ qtests_sparc64 = \
838
839
qtests_npcm7xx = \
840
['npcm7xx_adc-test',
841
+ 'npcm7xx_emc-test',
842
'npcm7xx_gpio-test',
843
'npcm7xx_pwm-test',
844
'npcm7xx_rng-test',
845
--
846
2.20.1
847
848
diff view generated by jsdifflib
New patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
2
3
Use nr_apu_cpus in favor of hard coding 2.
4
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Luc Michel <luc@lmichel.fr>
8
Message-id: 20210210142048.3125878-2-edgar.iglesias@gmail.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
hw/arm/xlnx-versal.c | 4 ++--
12
1 file changed, 2 insertions(+), 2 deletions(-)
13
14
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/xlnx-versal.c
17
+++ b/hw/arm/xlnx-versal.c
18
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_gic(Versal *s, qemu_irq *pic)
19
gicbusdev = SYS_BUS_DEVICE(&s->fpd.apu.gic);
20
gicdev = DEVICE(&s->fpd.apu.gic);
21
qdev_prop_set_uint32(gicdev, "revision", 3);
22
- qdev_prop_set_uint32(gicdev, "num-cpu", 2);
23
+ qdev_prop_set_uint32(gicdev, "num-cpu", nr_apu_cpus);
24
qdev_prop_set_uint32(gicdev, "num-irq", XLNX_VERSAL_NR_IRQS + 32);
25
qdev_prop_set_uint32(gicdev, "len-redist-region-count", 1);
26
- qdev_prop_set_uint32(gicdev, "redist-region-count[0]", 2);
27
+ qdev_prop_set_uint32(gicdev, "redist-region-count[0]", nr_apu_cpus);
28
qdev_prop_set_bit(gicdev, "has-security-extensions", true);
29
30
sysbus_realize(SYS_BUS_DEVICE(&s->fpd.apu.gic), &error_fatal);
31
--
32
2.20.1
33
34
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Daniel Müller <muellerd@fb.com>
2
2
3
Caching the bit means that we will not have to re-walk the
3
When working with performance monitoring counters, we look at
4
page tables to look up the bit during translation.
4
MDCR_EL2.HPMN as part of the check whether a counter is enabled. This
5
check fails, because MDCR_EL2.HPMN is reset to 0, meaning that no
6
counters are "enabled" for < EL2.
7
That's in violation of the Arm specification, which states that
5
8
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
> On a Warm reset, this field [MDCR_EL2.HPMN] resets to the value in
10
> PMCR_EL0.N
11
12
That's also what a comment in the code acknowledges, but the necessary
13
adjustment seems to have been forgotten when support for more counters
14
was added.
15
This change fixes the issue by setting the reset value to PMCR.N, which
16
is four.
17
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20190128223118.5255-6-richard.henderson@linaro.org
9
[PMM: no need to OR in guarded bit status]
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
20
---
12
target/arm/helper.c | 6 ++++++
21
target/arm/helper.c | 9 ++++-----
13
1 file changed, 6 insertions(+)
22
1 file changed, 4 insertions(+), 5 deletions(-)
14
23
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
24
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
26
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
27
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
28
@@ -XXX,XX +XXX,XX @@
20
bool ttbr1_valid;
29
#endif
21
uint64_t descaddrmask;
30
22
bool aarch64 = arm_el_is_aa64(env, el);
31
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
23
+ bool guarded = false;
32
+#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
24
33
25
/* TODO:
34
#ifndef CONFIG_USER_ONLY
26
* This code does not handle the different format TCR for VTCR_EL2.
35
27
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
36
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
28
}
37
.writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
29
/* Merge in attributes from table descriptors */
38
#endif
30
attrs |= nstable << 3; /* NS */
39
/* The only field of MDCR_EL2 that has a defined architectural reset value
31
+ guarded = extract64(descriptor, 50, 1); /* GP */
40
- * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
32
if (param.hpd) {
41
- * don't implement any PMU event counters, so using zero as a reset
33
/* HPD disables all the table attributes except NSTable. */
42
- * value for MDCR_EL2 is okay
34
break;
43
+ * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
35
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
44
*/
36
*/
45
{ .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
37
txattrs->secure = false;
46
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
38
}
47
- .access = PL2_RW, .resetvalue = 0,
39
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
48
+ .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
40
+ if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
49
.fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
41
+ txattrs->target_tlb_bit0 = true;
50
{ .name = "HPFAR", .state = ARM_CP_STATE_AA32,
42
+ }
51
.cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
43
52
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
44
if (cacheattrs != NULL) {
53
* field as main ID register, and we implement four counters in
45
if (mmu_idx == ARMMMUIdx_S2NS) {
54
* addition to the cycle count register.
55
*/
56
- unsigned int i, pmcrn = 4;
57
+ unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
58
ARMCPRegInfo pmcr = {
59
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
60
.access = PL0_RW,
46
--
61
--
47
2.20.1
62
2.20.1
48
63
49
64
diff view generated by jsdifflib