Hi; this is the latest target-arm queue; most of this is a refactoring
patchset from RTH for the arm page-table-walk emulation.

thanks
-- PMM

The following changes since commit f1d33f55c47dfdaf8daacd618588ad3ae4c452d1:

  Merge tag 'pull-testing-gdbstub-plugins-gitdm-061022-3' of https://github.com/stsquad/qemu into staging (2022-10-06 07:11:56 -0400)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20221010

for you to fetch changes up to 915f62844cf62e428c7c178149b5ff1cbe129b07:

  docs/system/arm/emulation.rst: Report FEAT_GTG support (2022-10-10 14:52:25 +0100)

----------------------------------------------------------------
target-arm queue:
 * Retry KVM_CREATE_VM call if it fails EINTR
 * allow setting SCR_EL3.EnTP2 when FEAT_SME is implemented
 * docs/nuvoton: Update URL for images
 * refactoring of page table walk code
 * hw/arm/boot: set CPTR_EL3.ESM and SCR_EL3.EnTP2 when booting Linux with EL3
 * Don't allow guest to use unimplemented granule sizes
 * Report FEAT_GTG support

----------------------------------------------------------------
Jerome Forissier (2):
      target/arm: allow setting SCR_EL3.EnTP2 when FEAT_SME is implemented
      hw/arm/boot: set CPTR_EL3.ESM and SCR_EL3.EnTP2 when booting Linux with EL3

Joel Stanley (1):
      docs/nuvoton: Update URL for images

Peter Maydell (4):
      target/arm/kvm: Retry KVM_CREATE_VM call if it fails EINTR
      target/arm: Don't allow guest to use unimplemented granule sizes
      target/arm: Use ARMGranuleSize in ARMVAParameters
      docs/system/arm/emulation.rst: Report FEAT_GTG support

Richard Henderson (21):
      target/arm: Split s2walk_secure from ipa_secure in get_phys_addr
      target/arm: Make the final stage1+2 write to secure be unconditional
      target/arm: Add is_secure parameter to get_phys_addr_lpae
      target/arm: Fix S2 disabled check in S1_ptw_translate
      target/arm: Add is_secure parameter to regime_translation_disabled
      target/arm: Split out get_phys_addr_with_secure
      target/arm: Add is_secure parameter to v7m_read_half_insn
      target/arm: Add TBFLAG_M32.SECURE
      target/arm: Merge regime_is_secure into get_phys_addr
      target/arm: Add is_secure parameter to do_ats_write
      target/arm: Fold secure and non-secure a-profile mmu indexes
      target/arm: Reorg regime_translation_disabled
      target/arm: Drop secure check for HCR.TGE vs SCTLR_EL1.M
      target/arm: Introduce arm_hcr_el2_eff_secstate
      target/arm: Hoist read of *is_secure in S1_ptw_translate
      target/arm: Remove env argument from combined_attrs_fwb
      target/arm: Pass HCR to attribute subroutines.
      target/arm: Fix ATS12NSO* from S PL1
      target/arm: Split out get_phys_addr_disabled
      target/arm: Fix cacheattr in get_phys_addr_disabled
      target/arm: Use tlb_set_page_full

 docs/system/arm/emulation.rst |   1 +
 docs/system/arm/nuvoton.rst   |   4 +-
 target/arm/cpu-param.h        |   2 +-
 target/arm/cpu.h              | 181 ++++++++------
 target/arm/internals.h        | 150 ++++++-----
 hw/arm/boot.c                 |   4 +
 target/arm/helper.c           | 332 ++++++++++++++----------
 target/arm/kvm.c              |   4 +-
 target/arm/m_helper.c         |  29 ++-
 target/arm/ptw.c              | 570 ++++++++++++++++++++++--------------------
 target/arm/tlb_helper.c       |   9 +-
 target/arm/translate-a64.c    |   8 -
 target/arm/translate.c        |   9 +-
 13 files changed, 717 insertions(+), 586 deletions(-)

Occasionally the KVM_CREATE_VM ioctl can return EINTR, even though
there is no pending signal to be taken. In commit 94ccff13382055
we added a retry-on-EINTR loop to the KVM_CREATE_VM call in the
generic KVM code. Adopt the same approach for the use of the
ioctl in the Arm-specific KVM code (where we use it to create a
scratch VM for probing for various things).

For more information, see the mailing list thread:
https://lore.kernel.org/qemu-devel/8735e0s1zw.wl-maz@kernel.org/

Reported-by: Vitaly Chikunov <vt@altlinux.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Vitaly Chikunov <vt@altlinux.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Message-id: 20220930113824.1933293-1-peter.maydell@linaro.org
---
 target/arm/kvm.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
     if (max_vm_pa_size < 0) {
         max_vm_pa_size = 0;
     }
-    vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
+    do {
+        vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
+    } while (vmfd == -1 && errno == EINTR);
     if (vmfd < 0) {
         goto err;
     }
-- 
2.25.1

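The change above follows the same retry idiom the generic KVM code uses for
KVM_CREATE_VM: simply repeat the ioctl while it fails with EINTR. Written out
as a stand-alone sketch (the wrapper name below is illustrative only, not part
of the patch or of QEMU's API):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Illustration only: retry an ioctl while it keeps failing with EINTR. */
    static int ioctl_retry_eintr(int fd, unsigned long request, unsigned long arg)
    {
        int ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret == -1 && errno == EINTR);

        return ret;
    }
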
From: Jerome Forissier <jerome.forissier@linaro.org>

Updates write_scr() to allow setting SCR_EL3.EnTP2 when FEAT_SME is
implemented. SCR_EL3 being a 64-bit register, valid_mask is changed
to uint64_t and the SCR_* constants in target/arm/cpu.h are extended
to 64-bit so that masking and bitwise not (~) behave as expected.

This enables booting Linux with Trusted Firmware-A at EL3 with
"-M virt,secure=on -cpu max".

Cc: qemu-stable@nongnu.org
Fixes: 78cb9776662a ("target/arm: Enable SME for -cpu max")
Signed-off-by: Jerome Forissier <jerome.forissier@linaro.org>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221004072354.27037-1-jerome.forissier@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    | 54 ++++++++++++++++++++++-----------------------
 target/arm/helper.c |  5 ++++-
 2 files changed, 31 insertions(+), 28 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 
 #define HPFAR_NS (1ULL << 63)
 
-#define SCR_NS (1U << 0)
-#define SCR_IRQ (1U << 1)
-#define SCR_FIQ (1U << 2)
-#define SCR_EA (1U << 3)
-#define SCR_FW (1U << 4)
-#define SCR_AW (1U << 5)
-#define SCR_NET (1U << 6)
-#define SCR_SMD (1U << 7)
-#define SCR_HCE (1U << 8)
-#define SCR_SIF (1U << 9)
-#define SCR_RW (1U << 10)
-#define SCR_ST (1U << 11)
-#define SCR_TWI (1U << 12)
-#define SCR_TWE (1U << 13)
-#define SCR_TLOR (1U << 14)
-#define SCR_TERR (1U << 15)
-#define SCR_APK (1U << 16)
-#define SCR_API (1U << 17)
-#define SCR_EEL2 (1U << 18)
-#define SCR_EASE (1U << 19)
-#define SCR_NMEA (1U << 20)
-#define SCR_FIEN (1U << 21)
-#define SCR_ENSCXT (1U << 25)
-#define SCR_ATA (1U << 26)
-#define SCR_FGTEN (1U << 27)
-#define SCR_ECVEN (1U << 28)
-#define SCR_TWEDEN (1U << 29)
+#define SCR_NS (1ULL << 0)
+#define SCR_IRQ (1ULL << 1)
+#define SCR_FIQ (1ULL << 2)
+#define SCR_EA (1ULL << 3)
+#define SCR_FW (1ULL << 4)
+#define SCR_AW (1ULL << 5)
+#define SCR_NET (1ULL << 6)
+#define SCR_SMD (1ULL << 7)
+#define SCR_HCE (1ULL << 8)
+#define SCR_SIF (1ULL << 9)
+#define SCR_RW (1ULL << 10)
+#define SCR_ST (1ULL << 11)
+#define SCR_TWI (1ULL << 12)
+#define SCR_TWE (1ULL << 13)
+#define SCR_TLOR (1ULL << 14)
+#define SCR_TERR (1ULL << 15)
+#define SCR_APK (1ULL << 16)
+#define SCR_API (1ULL << 17)
+#define SCR_EEL2 (1ULL << 18)
+#define SCR_EASE (1ULL << 19)
+#define SCR_NMEA (1ULL << 20)
+#define SCR_FIEN (1ULL << 21)
+#define SCR_ENSCXT (1ULL << 25)
+#define SCR_ATA (1ULL << 26)
+#define SCR_FGTEN (1ULL << 27)
+#define SCR_ECVEN (1ULL << 28)
+#define SCR_TWEDEN (1ULL << 29)
 #define SCR_TWEDEL MAKE_64BIT_MASK(30, 4)
 #define SCR_TME (1ULL << 34)
 #define SCR_AMVOFFEN (1ULL << 35)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     /* Begin with base v8.0 state. */
-    uint32_t valid_mask = 0x3fff;
+    uint64_t valid_mask = 0x3fff;
     ARMCPU *cpu = env_archcpu(env);
 
     /*
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         if (cpu_isar_feature(aa64_doublefault, cpu)) {
             valid_mask |= SCR_EASE | SCR_NMEA;
         }
+        if (cpu_isar_feature(aa64_sme, cpu)) {
+            valid_mask |= SCR_ENTP2;
+        }
     } else {
         valid_mask &= ~(SCR_RW | SCR_ST);
         if (cpu_isar_feature(aa32_ras, cpu)) {
-- 
2.25.1

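The reason the SCR_* constants have to move from 1U to 1ULL is the behaviour of
~ on a 32-bit constant when the mask is 64 bits wide: the complement is
computed in 32 bits and then zero-extended, silently clearing the upper half of
valid_mask. A minimal stand-alone illustration of that pitfall (the SCR_BIT_*
names are stand-ins invented for this sketch, not the real cpu.h constants):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCR_BIT_32 (1U << 0)    /* old style: 32-bit constant */
    #define SCR_BIT_64 (1ULL << 0)  /* new style: 64-bit constant */

    int main(void)
    {
        uint64_t valid_mask = UINT64_MAX;

        /* ~(1U << 0) is the 32-bit value 0xfffffffe; zero-extension then
         * clears bits 32..63 of the 64-bit mask as a side effect. */
        printf("32-bit constant: %016" PRIx64 "\n", valid_mask & ~SCR_BIT_32);

        /* ~(1ULL << 0) keeps the high bits set, so only bit 0 is cleared. */
        printf("64-bit constant: %016" PRIx64 "\n", valid_mask & ~SCR_BIT_64);
        return 0;
    }
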
From: Joel Stanley <joel@jms.id.au>

openpower.xyz was retired some time ago. The OpenBMC Jenkins is where
images can be found these days.

Signed-off-by: Joel Stanley <joel@jms.id.au>
Reviewed-by: Hao Wu <wuhaotsh@google.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20221004050042.22681-1-joel@jms.id.au
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/nuvoton.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -XXX,XX +XXX,XX @@ Boot options
 
 The Nuvoton machines can boot from an OpenBMC firmware image, or directly into
 a kernel using the ``-kernel`` option. OpenBMC images for ``quanta-gsj`` and
-possibly others can be downloaded from the OpenPOWER jenkins :
+possibly others can be downloaded from the OpenBMC jenkins :
 
- https://openpower.xyz/
+ https://jenkins.openbmc.org/
 
 The firmware image should be attached as an MTD drive. Example :
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

The starting security state comes with the translation regime,
not the current state of arm_is_secure_below_el3().

Create a new local variable, s2walk_secure, which does not need
to be written back to result->attrs.secure -- we compute that
value later, after the S2 walk is complete.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221001162318.153420-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         hwaddr ipa;
         int s1_prot;
         int ret;
-        bool ipa_secure;
+        bool ipa_secure, s2walk_secure;
         ARMCacheAttrs cacheattrs1;
         ARMMMUIdx s2_mmu_idx;
         bool is_el0;
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 
         ipa = result->phys;
         ipa_secure = result->attrs.secure;
-        if (arm_is_secure_below_el3(env)) {
-            if (ipa_secure) {
-                result->attrs.secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
-            } else {
-                result->attrs.secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
-            }
+        if (is_secure) {
+            /* Select TCR based on the NS bit from the S1 walk. */
+            s2walk_secure = !(ipa_secure
+                              ? env->cp15.vstcr_el2 & VSTCR_SW
+                              : env->cp15.vtcr_el2 & VTCR_NSW);
         } else {
             assert(!ipa_secure);
+            s2walk_secure = false;
         }
 
-        s2_mmu_idx = (result->attrs.secure
+        s2_mmu_idx = (s2walk_secure
                       ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
         is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
 
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                                                 result->cacheattrs);
 
         /* Check if IPA translates to secure or non-secure PA space. */
-        if (arm_is_secure_below_el3(env)) {
+        if (is_secure) {
             if (ipa_secure) {
                 result->attrs.secure =
                     !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

While the stage2 call to get_phys_addr_lpae should never set
attrs.secure when given a non-secure input, it's just as easy
to make the final update to attrs.secure be unconditional and
false in the case of non-secure input.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221007152159.1414065-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
                                                 result->cacheattrs);
 
-        /* Check if IPA translates to secure or non-secure PA space. */
-        if (is_secure) {
-            if (ipa_secure) {
-                result->attrs.secure =
-                    !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
-            } else {
-                result->attrs.secure =
-                    !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
-                    || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
-            }
-        }
+        /*
+         * Check if IPA translates to secure or non-secure PA space.
+         * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
+         */
+        result->attrs.secure =
+            (is_secure
+             && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+             && (ipa_secure
+                 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+
         return 0;
     } else {
         /*
-- 
2.25.1

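Since only four independent conditions are involved, the equivalence of the old
nested-if and the new single expression can be checked exhaustively. A small
sanity-check sketch (the sw/nsw flags are simplified stand-ins for the
VSTCR_SA|VSTCR_SW and VTCR_NSA|VTCR_NSW tests; this is not QEMU code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i < 16; i++) {
            bool is_secure  = i & 1;
            bool ipa_secure = i & 2;
            bool sw         = i & 4;  /* stands in for VSTCR_SA | VSTCR_SW */
            bool nsw        = i & 8;  /* stands in for VTCR_NSA | VTCR_NSW */

            /* Old form: attrs.secure only written when is_secure,
             * otherwise it stays false for a non-secure input. */
            bool old_secure = false;
            if (is_secure) {
                old_secure = ipa_secure ? !sw : !(nsw || sw);
            }

            /* New form: one unconditional assignment. */
            bool new_secure = is_secure && !sw && (ipa_secure || !nsw);

            assert(old_secure == new_secure);
        }
        printf("old and new attrs.secure computations agree\n");
        return 0;
    }
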
From: Richard Henderson <richard.henderson@linaro.org>

Remove the use of regime_is_secure from get_phys_addr_lpae,
using the new parameter instead.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 
 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0, GetPhysAddrResult *result,
-                               ARMMMUFaultInfo *fi)
+                               bool is_secure, bool s1_is_el0,
+                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
         GetPhysAddrResult s2 = {};
         int ret;
 
-        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
-                                 &s2, fi);
+        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
+                                 *is_secure, false, &s2, fi);
         if (ret) {
             assert(fi->type != ARMFault_None);
             fi->s2addr = addr;
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
  */
 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0, GetPhysAddrResult *result,
-                               ARMMMUFaultInfo *fi)
+                               bool is_secure, bool s1_is_el0,
+                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     /* Read an LPAE long-descriptor translation table. */
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
      * remain non-secure. We implement this by just ORing in the NSTable/NS
      * bits at each step.
      */
-    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
+    tableattrs = is_secure ? 0 : (1 << 4);
     for (;;) {
         uint64_t descriptor;
         bool nstable;
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
             memset(result, 0, sizeof(*result));
 
             ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
-                                     is_el0, result, fi);
+                                     s2walk_secure, is_el0, result, fi);
             fi->s2addr = ipa;
 
             /* Combine the S1 and S2 perms. */
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
-                                  result, fi);
+        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
+                                  is_secure, false, result, fi);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                 is_secure, result, fi);
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Pass the correct stage2 mmu_idx to regime_translation_disabled,
which we computed afterward.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221001162318.153420-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                                hwaddr addr, bool *is_secure,
                                ARMMMUFaultInfo *fi)
 {
+    ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+
     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
-        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
-        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
-                                          : ARMMMUIdx_Stage2;
+        !regime_translation_disabled(env, s2_mmu_idx)) {
         GetPhysAddrResult s2 = {};
         int ret;
 
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Remove the use of regime_is_secure from regime_translation_disabled,
using the new parameter instead.

This fixes a bug in S1_ptw_translate and get_phys_addr where we had
passed ARMMMUIdx_Stage2 and not ARMMMUIdx_Stage2_S to determine if
Stage2 is disabled, affecting FEAT_SEL2.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
 }
 
 /* Return true if the specified stage of address translation is disabled */
-static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
+static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
+                                        bool is_secure)
 {
     uint64_t hcr_el2;
 
     if (arm_feature(env, ARM_FEATURE_M)) {
-        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
+        switch (env->v7m.mpu_ctrl[is_secure] &
                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
         case R_V7M_MPU_CTRL_ENABLE_MASK:
             /* Enabled, but not for HardFault and NMI */
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
 
     if (hcr_el2 & HCR_TGE) {
         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
-        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
+        if (!is_secure && regime_el(env, mmu_idx) == 1) {
             return true;
         }
     }
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
     ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
 
     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
-        !regime_translation_disabled(env, s2_mmu_idx)) {
+        !regime_translation_disabled(env, s2_mmu_idx, *is_secure)) {
         GetPhysAddrResult s2 = {};
         int ret;
 
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
     uint32_t base;
     bool is_user = regime_is_user(env, mmu_idx);
 
-    if (regime_translation_disabled(env, mmu_idx)) {
+    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
         /* MPU disabled. */
         result->phys = address;
         result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
     result->page_size = TARGET_PAGE_SIZE;
     result->prot = 0;
 
-    if (regime_translation_disabled(env, mmu_idx) ||
+    if (regime_translation_disabled(env, mmu_idx, secure) ||
         m_is_ppb_region(env, address)) {
         /*
          * MPU disabled or M profile PPB access: use default memory map.
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
      * are done in arm_v7m_load_vector(), which always does a direct
      * read using address_space_ldl(), rather than going via this function.
      */
-    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+    if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
         hit = true;
     } else if (m_is_ppb_region(env, address)) {
         hit = true;
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                                 result, fi);
 
             /* If S1 fails or S2 is disabled, return early. */
-            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
+                                                   is_secure)) {
                 return ret;
             }
 
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 
     /* Definitely a real MMU, not an MPU */
 
-    if (regime_translation_disabled(env, mmu_idx)) {
+    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
         uint64_t hcr;
         uint8_t memattr;
 
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Retain the existing get_phys_addr interface using the security
state derived from mmu_idx. Move the kerneldoc comments to the
header file where they belong.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 40 ++++++++++++++++++++++++++++++++++++++
 target/arm/ptw.c       | 44 ++++++++++++++----------------------------
 2 files changed, 55 insertions(+), 29 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ typedef struct GetPhysAddrResult {
     ARMCacheAttrs cacheattrs;
 } GetPhysAddrResult;
 
+/**
+ * get_phys_addr_with_secure: get the physical address for a virtual address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_secure: security state for the access
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PSMAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ */
+bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
+                               MMUAccessType access_type,
+                               ARMMMUIdx mmu_idx, bool is_secure,
+                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
+
+/**
+ * get_phys_addr: get the physical address for a virtual address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Similarly, but use the security regime of @mmu_idx.
+ */
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
     return ret;
 }
 
-/**
- * get_phys_addr - get the physical address for this virtual address
- *
- * Find the physical address corresponding to the given virtual address,
- * by doing a translation table walk on MMU based systems or using the
- * MPU state on MPU based systems.
- *
- * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
- * prot and page_size may not be filled in, and the populated fsr value provides
- * information on why the translation aborted, in the format of a
- * DFSR/IFSR fault register, with the following caveats:
- *  * we honour the short vs long DFSR format differences.
- *  * the WnR bit is never set (the caller must do this).
- *  * for PSMAv5 based systems we don't bother to return a full FSR format
- *    value.
- *
- * @env: CPUARMState
- * @address: virtual address to get physical address for
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index indicating required translation regime
- * @result: set on translation success.
- * @fi: set to fault info if the translation fails
- */
-bool get_phys_addr(CPUARMState *env, target_ulong address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool is_secure, GetPhysAddrResult *result,
+                               ARMMMUFaultInfo *fi)
 {
     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
-    bool is_secure = regime_is_secure(env, mmu_idx);
 
     if (mmu_idx != s1_mmu_idx) {
         /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
             ARMMMUIdx s2_mmu_idx;
             bool is_el0;
 
-            ret = get_phys_addr(env, address, access_type, s1_mmu_idx,
-                                result, fi);
+            ret = get_phys_addr_with_secure(env, address, access_type,
+                                            s1_mmu_idx, is_secure, result, fi);
 
             /* If S1 fails or S2 is disabled, return early. */
             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     }
 }
 
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+{
+    return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
+                                     regime_is_secure(env, mmu_idx),
+                                     result, fi);
+}
+
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                          MemTxAttrs *attrs)
 {
-- 
2.25.1

1
The MVE extension to v8.1M includes some new shift instructions which
1
From: Richard Henderson <richard.henderson@linaro.org>
2
sit entirely within the non-coprocessor part of the encoding space
3
and which operate only on general-purpose registers. They take up
4
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
5
with Rm == 13 or 15.
6
2
7
Implement the long shifts by immediate, which perform shifts on a
3
Remove the use of regime_is_secure from v7m_read_half_insn, using
8
pair of general-purpose registers treated as a 64-bit quantity, with
4
the new parameter instead.
9
an immediate shift count between 1 and 32.
10
5
11
Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
6
As it happens, both callers pass true, propagated from the argument
12
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
7
to arm_v7m_mmu_idx_for_secstate which created the mmu_idx argument,
13
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
8
but that is a detail of v7m_handle_execute_nsc we need not expose
14
is too difficult, because the functions that generate the code are
9
to the callee.
15
shared between a dozen different kinds of arithmetic or logical
16
instruction for all A32, T16 and T32 encodings, and for some insns
17
and some encodings Rm==13,15 are valid.)
18
10
19
We make the helper functions we need for UQSHLL and SQSHLL take
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
20
a 32-bit value which the helper casts to int8_t because we'll need
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
21
these helpers also for the shift-by-register insns, where the shift
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
count might be < 0 or > 32.
14
Message-id: 20221001162318.153420-7-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
target/arm/m_helper.c | 9 ++++-----
18
1 file changed, 4 insertions(+), 5 deletions(-)
23
19
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
27
---
28
target/arm/helper-mve.h | 3 ++
29
target/arm/translate.h | 1 +
30
target/arm/t32.decode | 28 +++++++++++++
31
target/arm/mve_helper.c | 10 +++++
32
target/arm/translate.c | 90 +++++++++++++++++++++++++++++++++++++++++
33
5 files changed, 132 insertions(+)
34
35
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
36
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/helper-mve.h
22
--- a/target/arm/m_helper.c
38
+++ b/target/arm/helper-mve.h
23
+++ b/target/arm/m_helper.c
39
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
40
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
42
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
43
+
44
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
45
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
46
diff --git a/target/arm/translate.h b/target/arm/translate.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/translate.h
49
+++ b/target/arm/translate.h
50
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
51
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
52
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
53
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
54
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
55
56
/**
57
* arm_tbflags_from_tb:
58
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/t32.decode
61
+++ b/target/arm/t32.decode
62
@@ -XXX,XX +XXX,XX @@
63
&mcr !extern cp opc1 crn crm opc2 rt
64
&mcrr !extern cp opc1 crm rt rt2
65
66
+&mve_shl_ri rdalo rdahi shim
67
+
68
+# rdahi: bits [3:1] from insn, bit 0 is 1
69
+# rdalo: bits [3:1] from insn, bit 0 is 0
70
+%rdahi_9 9:3 !function=times_2_plus_1
71
+%rdalo_17 17:3 !function=times_2
72
+
73
# Data-processing (register)
74
75
%imm5_12_6 12:3 6:2
76
@@ -XXX,XX +XXX,XX @@
77
@S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
78
&s_rrr_shi shim=%imm5_12_6 s=1 rd=0
79
80
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
81
+ &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
82
+
83
{
84
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
85
AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
86
}
87
BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
88
{
89
+ # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
90
+ # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
91
+ # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
92
+ # they explicitly call unallocated_encoding() for cases that must UNDEF
93
+ # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
94
+ # the rest fall through (where ORR_rrri and MOV_rxri will end up
95
+ # handling them as r13 and r15 accesses with the same semantics as A32).
96
+ [
97
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
98
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
99
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
100
+
101
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
102
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
103
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
104
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
105
+ ]
106
+
107
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
108
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
109
}
110
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/target/arm/mve_helper.c
113
+++ b/target/arm/mve_helper.c
114
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
115
mve_advance_vpt(env);
116
return rdm;
117
}
118
+
119
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
120
+{
121
+ return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
122
+}
123
+
124
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
125
+{
126
+ return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
127
+}
128
diff --git a/target/arm/translate.c b/target/arm/translate.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/target/arm/translate.c
131
+++ b/target/arm/translate.c
132
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
133
return true;
25
return true;
134
}
26
}
135
27
136
+/*
28
-static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
137
+ * v8.1M MVE wide-shifts
29
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
138
+ */
30
uint32_t addr, uint16_t *insn)
139
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
31
{
140
+ WideShiftImmFn *fn)
32
/*
141
+{
33
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
142
+ TCGv_i64 rda;
34
ARMMMUFaultInfo fi = {};
143
+ TCGv_i32 rdalo, rdahi;
35
MemTxResult txres;
144
+
36
145
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
37
- v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx,
146
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
38
- regime_is_secure(env, mmu_idx), &sattrs);
147
+ return false;
39
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
148
+ }
40
if (!sattrs.nsc || sattrs.ns) {
149
+ if (a->rdahi == 15) {
41
/*
150
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
42
* This must be the second half of the insn, and it straddles a
151
+ return false;
43
@@ -XXX,XX +XXX,XX @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
152
+ }
44
/* We want to do the MPU lookup as secure; work out what mmu_idx that is */
153
+ if (!dc_isar_feature(aa32_mve, s) ||
45
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
154
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
46
155
+ a->rdahi == 13) {
47
- if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
156
+ /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
48
+ if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
157
+ unallocated_encoding(s);
49
return false;
158
+ return true;
50
}
159
+ }
51
160
+
52
@@ -XXX,XX +XXX,XX @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
161
+ if (a->shim == 0) {
53
goto gen_invep;
162
+ a->shim = 32;
54
}
163
+ }
55
164
+
56
- if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
165
+ rda = tcg_temp_new_i64();
57
+ if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
166
+ rdalo = load_reg(s, a->rdalo);
58
return false;
167
+ rdahi = load_reg(s, a->rdahi);
59
}
168
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
60
169
+
170
+ fn(rda, rda, a->shim);
171
+
172
+ tcg_gen_extrl_i64_i32(rdalo, rda);
173
+ tcg_gen_extrh_i64_i32(rdahi, rda);
174
+ store_reg(s, a->rdalo, rdalo);
175
+ store_reg(s, a->rdahi, rdahi);
176
+ tcg_temp_free_i64(rda);
177
+
178
+ return true;
179
+}
180
+
181
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
182
+{
183
+ return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
184
+}
185
+
186
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
187
+{
188
+ return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
189
+}
190
+
191
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
192
+{
193
+ return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
194
+}
195
+
196
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
197
+{
198
+ gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
199
+}
200
+
201
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
202
+{
203
+ return do_mve_shl_ri(s, a, gen_mve_sqshll);
204
+}
205
+
206
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
207
+{
208
+ gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
209
+}
210
+
211
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
212
+{
213
+ return do_mve_shl_ri(s, a, gen_mve_uqshll);
214
+}
215
+
216
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
217
+{
218
+ return do_mve_shl_ri(s, a, gen_srshr64_i64);
219
+}
220
+
221
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
222
+{
223
+ return do_mve_shl_ri(s, a, gen_urshr64_i64);
224
+}
225
+
226
/*
227
* Multiply and multiply accumulate
228
*/
229
--
61
--
230
2.20.1
62
2.25.1
231
63
232
64
diff view generated by jsdifflib
1
Implement the MVE shifts by register, which perform
1
From: Richard Henderson <richard.henderson@linaro.org>
2
shifts on a single general-purpose register.
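For these register-count shifts, the saturating/rounding behaviour can be sketched as a rough C model of UQRSHL on a single 32-bit register. uqrshl_model is an illustrative name; the patch itself defers to the existing do_uqrshl_bhs() helper, so treat the exact saturation corner cases here as an approximation to check against that code:

#include <stdint.h>
#include <stdbool.h>

/* Rough model of UQRSHL on one 32-bit register: the count is the
 * sign-extended low byte of Rm, positive counts shift left with unsigned
 * saturation (setting the Q flag), negative counts shift right with
 * rounding. */
static uint32_t uqrshl_model(uint32_t rda, uint32_t rm, bool *qf)
{
    int8_t sh = (int8_t)rm;                 /* low 8 bits, sign-extended */

    if (sh == 0) {
        return rda;
    } else if (sh >= 32) {
        if (rda != 0) {
            *qf = true;
            return UINT32_MAX;
        }
        return 0;
    } else if (sh > 0) {
        uint64_t r = (uint64_t)rda << sh;
        if (r > UINT32_MAX) {
            *qf = true;
            return UINT32_MAX;
        }
        return (uint32_t)r;
    } else if (sh >= -32) {
        /* rounding right shift: add half the weight of the last bit out */
        return (uint32_t)(((uint64_t)rda + (1ull << (-sh - 1))) >> -sh);
    }
    return 0;
}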
3
2
3
Remove the use of regime_is_secure from arm_tr_init_disas_context.
4
Instead, provide the value of v8m_secure directly from tb_flags.
5
Rather than use regime_is_secure, use env->v7m.secure directly,
6
as per arm_mmu_idx_el.
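The shape of the v8m_secure change, condensed from the hunks below: the translator may only look at tb_flags, so the secure state is cached into a new TBFLAG_M32 bit when the hflags are rebuilt and read back at translate time.

    /* rebuild_hflags_m32(): runs with access to env */
    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    /* arm_tr_init_disas_context(): only tb_flags is available */
    dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);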
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20221001162318.153420-8-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
7
---
12
---
8
target/arm/helper-mve.h | 2 ++
13
target/arm/cpu.h | 2 ++
9
target/arm/translate.h | 1 +
14
target/arm/helper.c | 4 ++++
10
target/arm/t32.decode | 18 ++++++++++++++----
15
target/arm/translate.c | 3 +--
11
target/arm/mve_helper.c | 10 ++++++++++
16
3 files changed, 7 insertions(+), 2 deletions(-)
12
target/arm/translate.c | 30 ++++++++++++++++++++++++++++++
13
5 files changed, 57 insertions(+), 4 deletions(-)
14
17
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
20
--- a/target/arm/cpu.h
18
+++ b/target/arm/helper-mve.h
21
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
22
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
20
23
FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */
21
DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
24
/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
22
DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
25
FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */
23
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
26
+/* Set if in secure mode */
24
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
27
+FIELD(TBFLAG_M32, SECURE, 6, 1)
25
diff --git a/target/arm/translate.h b/target/arm/translate.h
28
29
/*
30
* Bit usage when in AArch64 state
31
diff --git a/target/arm/helper.c b/target/arm/helper.c
26
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate.h
33
--- a/target/arm/helper.c
28
+++ b/target/arm/translate.h
34
+++ b/target/arm/helper.c
29
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
35
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
30
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
36
DP_TBFLAG_M32(flags, STACKCHECK, 1);
31
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
32
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
33
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
34
35
/**
36
* arm_tbflags_from_tb:
37
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/t32.decode
40
+++ b/target/arm/t32.decode
41
@@ -XXX,XX +XXX,XX @@
42
&mve_shl_ri rdalo rdahi shim
43
&mve_shl_rr rdalo rdahi rm
44
&mve_sh_ri rda shim
45
+&mve_sh_rr rda rm
46
47
# rdahi: bits [3:1] from insn, bit 0 is 1
48
# rdalo: bits [3:1] from insn, bit 0 is 0
49
@@ -XXX,XX +XXX,XX @@
50
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
51
@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
52
&mve_sh_ri shim=%imm5_12_6
53
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr
54
55
{
56
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
57
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
58
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
59
}
37
}
60
38
61
- LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
39
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
62
- ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
40
+ DP_TBFLAG_M32(flags, SECURE, 1);
63
- UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
64
- SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
65
+ {
66
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
67
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
68
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
69
+ }
41
+ }
70
+
42
+
71
+ {
43
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
72
+ SQRSHR_rr 1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
73
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
74
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
75
+ }
76
+
77
UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
78
SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
79
]
80
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/mve_helper.c
83
+++ b/target/arm/mve_helper.c
84
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
85
{
86
return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
87
}
44
}
88
+
45
89
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
90
+{
91
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
92
+}
93
+
94
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
95
+{
96
+ return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
97
+}
98
diff --git a/target/arm/translate.c b/target/arm/translate.c
46
diff --git a/target/arm/translate.c b/target/arm/translate.c
99
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate.c
48
--- a/target/arm/translate.c
101
+++ b/target/arm/translate.c
49
+++ b/target/arm/translate.c
102
@@ -XXX,XX +XXX,XX @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
50
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
103
return do_mve_sh_ri(s, a, gen_mve_uqshl);
51
dc->vfp_enabled = 1;
104
}
52
dc->be_data = MO_TE;
105
53
dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
106
+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
54
- dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
107
+{
55
- regime_is_secure(env, dc->mmu_idx);
108
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
56
+ dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
109
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
57
dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
110
+ return false;
58
dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
111
+ }
59
dc->v7m_new_fp_ctxt_needed =
112
+ if (!dc_isar_feature(aa32_mve, s) ||
113
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
114
+ a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
115
+ a->rm == a->rda) {
116
+ /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
117
+ unallocated_encoding(s);
118
+ return true;
119
+ }
120
+
121
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
122
+ fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
123
+ return true;
124
+}
125
+
126
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
127
+{
128
+ return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
129
+}
130
+
131
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
132
+{
133
+ return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
134
+}
135
+
136
/*
137
* Multiply and multiply accumulate
138
*/
139
--
60
--
140
2.20.1
61
2.25.1
141
142
diff view generated by jsdifflib
1
Implement the MVE vector shift right by immediate insns VSHRI and
1
From: Richard Henderson <richard.henderson@linaro.org>
2
VRSHRI. As with Neon, we implement these by using helper functions
3
which perform left shifts but allow negative shift counts to indicate
4
right shifts.
5
2
3
This is the last use of regime_is_secure; remove it
4
entirely before changing the layout of ARMMMUIdx.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20221001162318.153420-9-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-9-peter.maydell@linaro.org
9
---
10
---
10
target/arm/helper-mve.h | 12 ++++++++++++
11
target/arm/internals.h | 42 ----------------------------------------
11
target/arm/translate.h | 20 ++++++++++++++++++++
12
target/arm/ptw.c | 44 ++++++++++++++++++++++++++++++++++++++++--
12
target/arm/mve.decode | 28 ++++++++++++++++++++++++++++
13
2 files changed, 42 insertions(+), 44 deletions(-)
13
target/arm/mve_helper.c | 7 +++++++
14
target/arm/translate-mve.c | 5 +++++
15
target/arm/translate-neon.c | 18 ------------------
16
6 files changed, 72 insertions(+), 18 deletions(-)
17
14
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/target/arm/internals.h b/target/arm/internals.h
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
17
--- a/target/arm/internals.h
21
+++ b/target/arm/helper-mve.h
18
+++ b/target/arm/internals.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
19
@@ -XXX,XX +XXX,XX @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
23
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
20
}
24
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
25
26
+DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+
30
DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
34
DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
35
DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+
38
+DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
+
42
+DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
43
+DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
44
+DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
45
diff --git a/target/arm/translate.h b/target/arm/translate.h
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/translate.h
48
+++ b/target/arm/translate.h
49
@@ -XXX,XX +XXX,XX @@ static inline int times_2_plus_1(DisasContext *s, int x)
50
return x * 2 + 1;
51
}
21
}
52
22
53
+static inline int rsub_64(DisasContext *s, int x)
23
-/* Return true if this address translation regime is secure */
54
+{
24
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
55
+ return 64 - x;
56
+}
57
+
58
+static inline int rsub_32(DisasContext *s, int x)
59
+{
60
+ return 32 - x;
61
+}
62
+
63
+static inline int rsub_16(DisasContext *s, int x)
64
+{
65
+ return 16 - x;
66
+}
67
+
68
+static inline int rsub_8(DisasContext *s, int x)
69
+{
70
+ return 8 - x;
71
+}
72
+
73
static inline int arm_dc_feature(DisasContext *dc, int feature)
74
{
75
return (dc->features & (1ULL << feature)) != 0;
76
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/arm/mve.decode
79
+++ b/target/arm/mve.decode
80
@@ -XXX,XX +XXX,XX @@
81
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
82
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
83
84
+# Right shifts are encoded as N - shift, where N is the element size in bits.
85
+%rshift_i5 16:5 !function=rsub_32
86
+%rshift_i4 16:4 !function=rsub_16
87
+%rshift_i3 16:3 !function=rsub_8
88
+
89
+@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
90
+ size=0 shift=%rshift_i3
91
+@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
92
+ size=1 shift=%rshift_i4
93
+@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
94
+ size=2 shift=%rshift_i5
95
+
96
# Vector loads and stores
97
98
# Widening loads and narrowing stores:
99
@@ -XXX,XX +XXX,XX @@ VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
100
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
101
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
102
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
103
+
104
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
105
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
106
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
107
+
108
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
109
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
110
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
111
+
112
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
113
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
114
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
115
+
116
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
117
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
118
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
119
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/target/arm/mve_helper.c
122
+++ b/target/arm/mve_helper.c
123
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
124
DO_2SHIFT(OP##b, 1, uint8_t, FN) \
125
DO_2SHIFT(OP##h, 2, uint16_t, FN) \
126
DO_2SHIFT(OP##w, 4, uint32_t, FN)
127
+#define DO_2SHIFT_S(OP, FN) \
128
+ DO_2SHIFT(OP##b, 1, int8_t, FN) \
129
+ DO_2SHIFT(OP##h, 2, int16_t, FN) \
130
+ DO_2SHIFT(OP##w, 4, int32_t, FN)
131
132
#define DO_2SHIFT_SAT_U(OP, FN) \
133
DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
134
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
135
DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
136
137
DO_2SHIFT_U(vshli_u, DO_VSHLU)
138
+DO_2SHIFT_S(vshli_s, DO_VSHLS)
139
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
140
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
141
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
142
+DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
143
+DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
144
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/target/arm/translate-mve.c
147
+++ b/target/arm/translate-mve.c
148
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHLI, vshli_u, false)
149
DO_2SHIFT(VQSHLI_S, vqshli_s, false)
150
DO_2SHIFT(VQSHLI_U, vqshli_u, false)
151
DO_2SHIFT(VQSHLUI, vqshlui_s, false)
152
+/* These right shifts use a left-shift helper with negated shift count */
153
+DO_2SHIFT(VSHRI_S, vshli_s, true)
154
+DO_2SHIFT(VSHRI_U, vshli_u, true)
155
+DO_2SHIFT(VRSHRI_S, vrshli_s, true)
156
+DO_2SHIFT(VRSHRI_U, vrshli_u, true)
157
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/target/arm/translate-neon.c
160
+++ b/target/arm/translate-neon.c
161
@@ -XXX,XX +XXX,XX @@ static inline int plus1(DisasContext *s, int x)
162
return x + 1;
163
}
164
165
-static inline int rsub_64(DisasContext *s, int x)
166
-{
25
-{
167
- return 64 - x;
26
- switch (mmu_idx) {
27
- case ARMMMUIdx_E10_0:
28
- case ARMMMUIdx_E10_1:
29
- case ARMMMUIdx_E10_1_PAN:
30
- case ARMMMUIdx_E20_0:
31
- case ARMMMUIdx_E20_2:
32
- case ARMMMUIdx_E20_2_PAN:
33
- case ARMMMUIdx_Stage1_E0:
34
- case ARMMMUIdx_Stage1_E1:
35
- case ARMMMUIdx_Stage1_E1_PAN:
36
- case ARMMMUIdx_E2:
37
- case ARMMMUIdx_Stage2:
38
- case ARMMMUIdx_MPrivNegPri:
39
- case ARMMMUIdx_MUserNegPri:
40
- case ARMMMUIdx_MPriv:
41
- case ARMMMUIdx_MUser:
42
- return false;
43
- case ARMMMUIdx_SE3:
44
- case ARMMMUIdx_SE10_0:
45
- case ARMMMUIdx_SE10_1:
46
- case ARMMMUIdx_SE10_1_PAN:
47
- case ARMMMUIdx_SE20_0:
48
- case ARMMMUIdx_SE20_2:
49
- case ARMMMUIdx_SE20_2_PAN:
50
- case ARMMMUIdx_Stage1_SE0:
51
- case ARMMMUIdx_Stage1_SE1:
52
- case ARMMMUIdx_Stage1_SE1_PAN:
53
- case ARMMMUIdx_SE2:
54
- case ARMMMUIdx_Stage2_S:
55
- case ARMMMUIdx_MSPrivNegPri:
56
- case ARMMMUIdx_MSUserNegPri:
57
- case ARMMMUIdx_MSPriv:
58
- case ARMMMUIdx_MSUser:
59
- return true;
60
- default:
61
- g_assert_not_reached();
62
- }
168
-}
63
-}
169
-
64
-
170
-static inline int rsub_32(DisasContext *s, int x)
65
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
171
-{
172
- return 32 - x;
173
-}
174
-static inline int rsub_16(DisasContext *s, int x)
175
-{
176
- return 16 - x;
177
-}
178
-static inline int rsub_8(DisasContext *s, int x)
179
-{
180
- return 8 - x;
181
-}
182
-
183
static inline int neon_3same_fp_size(DisasContext *s, int x)
184
{
66
{
185
/* Convert 0==fp32, 1==fp16 into a MO_* value */
67
switch (mmu_idx) {
68
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/ptw.c
71
+++ b/target/arm/ptw.c
72
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
73
MMUAccessType access_type, ARMMMUIdx mmu_idx,
74
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
75
{
76
+ bool is_secure;
77
+
78
+ switch (mmu_idx) {
79
+ case ARMMMUIdx_E10_0:
80
+ case ARMMMUIdx_E10_1:
81
+ case ARMMMUIdx_E10_1_PAN:
82
+ case ARMMMUIdx_E20_0:
83
+ case ARMMMUIdx_E20_2:
84
+ case ARMMMUIdx_E20_2_PAN:
85
+ case ARMMMUIdx_Stage1_E0:
86
+ case ARMMMUIdx_Stage1_E1:
87
+ case ARMMMUIdx_Stage1_E1_PAN:
88
+ case ARMMMUIdx_E2:
89
+ case ARMMMUIdx_Stage2:
90
+ case ARMMMUIdx_MPrivNegPri:
91
+ case ARMMMUIdx_MUserNegPri:
92
+ case ARMMMUIdx_MPriv:
93
+ case ARMMMUIdx_MUser:
94
+ is_secure = false;
95
+ break;
96
+ case ARMMMUIdx_SE3:
97
+ case ARMMMUIdx_SE10_0:
98
+ case ARMMMUIdx_SE10_1:
99
+ case ARMMMUIdx_SE10_1_PAN:
100
+ case ARMMMUIdx_SE20_0:
101
+ case ARMMMUIdx_SE20_2:
102
+ case ARMMMUIdx_SE20_2_PAN:
103
+ case ARMMMUIdx_Stage1_SE0:
104
+ case ARMMMUIdx_Stage1_SE1:
105
+ case ARMMMUIdx_Stage1_SE1_PAN:
106
+ case ARMMMUIdx_SE2:
107
+ case ARMMMUIdx_Stage2_S:
108
+ case ARMMMUIdx_MSPrivNegPri:
109
+ case ARMMMUIdx_MSUserNegPri:
110
+ case ARMMMUIdx_MSPriv:
111
+ case ARMMMUIdx_MSUser:
112
+ is_secure = true;
113
+ break;
114
+ default:
115
+ g_assert_not_reached();
116
+ }
117
return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
118
- regime_is_secure(env, mmu_idx),
119
- result, fi);
120
+ is_secure, result, fi);
121
}
122
123
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
186
--
124
--
187
2.20.1
125
2.25.1
188
189
diff view generated by jsdifflib
1
Implement the MVE VSHLC insn, which performs a shift left of the
1
From: Richard Henderson <richard.henderson@linaro.org>
2
entire vector with carry in bits provided from a general purpose
3
register and carry out bits written back to that register.
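For VSHLC, the data movement (ignoring predication) can be sketched as a toy C model; vshlc_model is an illustrative name, and the real helper below additionally honours the VPT mask via mergemask():

#include <stdint.h>

/* Toy model of VSHLC on a 128-bit vector held as four uint32_t elements:
 * each element shifts left by 'shift' (1..32; the encoded imm of 0 means
 * 32), low bits come in from *rdm, the bits shifted out of each element
 * carry into the next, and the final carry is written back to *rdm. */
static void vshlc_model(uint32_t q[4], uint32_t *rdm, unsigned shift)
{
    for (int e = 0; e < 4; e++) {
        uint32_t carry_in  = (shift == 32) ? *rdm : (*rdm & ((1u << shift) - 1));
        uint32_t carry_out = (shift == 32) ? q[e] : (q[e] >> (32 - shift));

        q[e] = (shift == 32) ? carry_in : ((q[e] << shift) | carry_in);
        *rdm = carry_out;
    }
}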
4
2
3
Use get_phys_addr_with_secure directly. For a-profile, this is the
4
one place where the value of is_secure may not equal arm_is_secure(env).
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20221001162318.153420-10-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
8
---
10
---
9
target/arm/helper-mve.h | 2 ++
11
target/arm/helper.c | 19 ++++++++++++++-----
10
target/arm/mve.decode | 2 ++
12
1 file changed, 14 insertions(+), 5 deletions(-)
11
target/arm/mve_helper.c | 38 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
13
4 files changed, 72 insertions(+)
14
13
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
16
--- a/target/arm/helper.c
18
+++ b/target/arm/helper-mve.h
17
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
20
DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
21
DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
#ifdef CONFIG_TCG
22
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
23
+
22
- MMUAccessType access_type, ARMMMUIdx mmu_idx)
24
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
23
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
25
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
24
+ bool is_secure)
26
index XXXXXXX..XXXXXXX 100644
25
{
27
--- a/target/arm/mve.decode
26
bool ret;
28
+++ b/target/arm/mve.decode
27
uint64_t par64;
29
@@ -XXX,XX +XXX,XX @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
28
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
30
VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
29
ARMMMUFaultInfo fi = {};
31
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
30
GetPhysAddrResult res = {};
32
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
31
33
+
32
- ret = get_phys_addr(env, value, access_type, mmu_idx, &res, &fi);
34
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
33
+ ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
35
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
34
+ is_secure, &res, &fi);
36
index XXXXXXX..XXXXXXX 100644
35
37
--- a/target/arm/mve_helper.c
36
/*
38
+++ b/target/arm/mve_helper.c
37
* ATS operations only do S1 or S1+S2 translations, so we never
39
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
38
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
40
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
39
switch (el) {
41
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
40
case 3:
42
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
41
mmu_idx = ARMMMUIdx_SE3;
43
+
42
+ secure = true;
44
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
43
break;
45
+ uint32_t shift)
44
case 2:
46
+{
45
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
47
+ uint32_t *d = vd;
46
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
48
+ uint16_t mask = mve_element_mask(env);
47
switch (el) {
49
+ unsigned e;
48
case 3:
50
+ uint32_t r;
49
mmu_idx = ARMMMUIdx_SE10_0;
51
+
50
+ secure = true;
52
+ /*
51
break;
53
+ * For each 32-bit element, we shift it left, bringing in the
52
case 2:
54
+ * low 'shift' bits of rdm at the bottom. Bits shifted out at
53
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
55
+ * the top become the new rdm, if the predicate mask permits.
54
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
56
+ * The final rdm value is returned to update the register.
55
case 4:
57
+ * shift == 0 here means "shift by 32 bits".
56
/* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
58
+ */
57
mmu_idx = ARMMMUIdx_E10_1;
59
+ if (shift == 0) {
58
+ secure = false;
60
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
59
break;
61
+ r = rdm;
60
case 6:
62
+ if (mask & 1) {
61
/* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
63
+ rdm = d[H4(e)];
62
mmu_idx = ARMMMUIdx_E10_0;
64
+ }
63
+ secure = false;
65
+ mergemask(&d[H4(e)], r, mask);
64
break;
66
+ }
65
default:
67
+ } else {
66
g_assert_not_reached();
68
+ uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
67
}
69
+
68
70
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
69
- par64 = do_ats_write(env, value, access_type, mmu_idx);
71
+ r = (d[H4(e)] << shift) | (rdm & shiftmask);
70
+ par64 = do_ats_write(env, value, access_type, mmu_idx, secure);
72
+ if (mask & 1) {
71
73
+ rdm = d[H4(e)] >> (32 - shift);
72
A32_BANKED_CURRENT_REG_SET(env, par, par64);
74
+ }
73
#else
75
+ mergemask(&d[H4(e)], r, mask);
74
@@ -XXX,XX +XXX,XX @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
76
+ }
75
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
77
+ }
76
uint64_t par64;
78
+ mve_advance_vpt(env);
77
79
+ return rdm;
78
- par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
80
+}
79
+ /* There is no SecureEL2 for AArch32. */
81
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
80
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);
82
index XXXXXXX..XXXXXXX 100644
81
83
--- a/target/arm/translate-mve.c
82
A32_BANKED_CURRENT_REG_SET(env, par, par64);
84
+++ b/target/arm/translate-mve.c
83
#else
85
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
84
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
86
DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
85
break;
87
DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
86
case 6: /* AT S1E3R, AT S1E3W */
88
DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
87
mmu_idx = ARMMMUIdx_SE3;
89
+
88
+ secure = true;
90
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
89
break;
91
+{
90
default:
92
+ /*
91
g_assert_not_reached();
93
+ * Whole Vector Left Shift with Carry. The carry is taken
92
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
94
+ * from a general purpose register and written back there.
93
g_assert_not_reached();
95
+ * An imm of 0 means "shift by 32".
94
}
96
+ */
95
97
+ TCGv_ptr qd;
96
- env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
98
+ TCGv_i32 rdm;
97
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type,
99
+
98
+ mmu_idx, secure);
100
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
99
#else
101
+ return false;
100
/* Handled by hardware accelerator. */
102
+ }
101
g_assert_not_reached();
103
+ if (a->rdm == 13 || a->rdm == 15) {
104
+ /* CONSTRAINED UNPREDICTABLE: we UNDEF */
105
+ return false;
106
+ }
107
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
108
+ return true;
109
+ }
110
+
111
+ qd = mve_qreg_ptr(a->qd);
112
+ rdm = load_reg(s, a->rdm);
113
+ gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
114
+ store_reg(s, a->rdm, rdm);
115
+ tcg_temp_free_ptr(qd);
116
+ mve_update_eci(s);
117
+ return true;
118
+}
119
--
102
--
120
2.20.1
103
2.25.1
121
122
diff view generated by jsdifflib
1
The A64 AdvSIMD modified-immediate grouping uses almost the same
1
From: Richard Henderson <richard.henderson@linaro.org>
2
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
3
which we add the AArch64-specific case for cmode 15 op 1) instead of
4
reimplementing it all.
5
2
3
For a-profile aarch64, which does not bank system registers, it takes
4
quite a lot of code to switch between security states. In the process,
5
registers such as TCR_EL{1,2} must be swapped, which in itself requires
6
the flushing of softmmu tlbs. Therefore it doesn't buy us anything to
7
separate tlbs by security state.
8
9
Retain the distinction between Stage2 and Stage2_S.
10
11
This will be important as we implement FEAT_RME, and do not wish to
12
add a third set of mmu indexes for Realm state.
13
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20221001162318.153420-11-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
9
---
18
---
10
target/arm/translate.h | 3 +-
19
target/arm/cpu-param.h | 2 +-
11
target/arm/translate-a64.c | 86 ++++----------------------------------
20
target/arm/cpu.h | 72 +++++++------------
12
target/arm/translate.c | 17 +++++++-
21
target/arm/internals.h | 31 +-------
13
3 files changed, 24 insertions(+), 82 deletions(-)
22
target/arm/helper.c | 144 +++++++++++++------------------------
23
target/arm/ptw.c | 25 ++-----
24
target/arm/translate-a64.c | 8 ---
25
target/arm/translate.c | 6 +-
26
7 files changed, 85 insertions(+), 203 deletions(-)
14
27
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
28
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
16
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
30
--- a/target/arm/cpu-param.h
18
+++ b/target/arm/translate.h
31
+++ b/target/arm/cpu-param.h
19
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
32
@@ -XXX,XX +XXX,XX @@
20
* VMVN and VBIC (when cmode < 14 && op == 1).
33
# define TARGET_PAGE_BITS_MIN 10
34
#endif
35
36
-#define NB_MMU_MODES 15
37
+#define NB_MMU_MODES 8
38
39
#endif
40
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/cpu.h
43
+++ b/target/arm/cpu.h
44
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
45
* table over and over.
46
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
47
* Never (PAN) bit within PSTATE.
48
+ * 7. we fold together the secure and non-secure regimes for A-profile,
49
+ * because there are no banked system registers for aarch64, so the
50
+ * process of switching between secure and non-secure is
51
+ * already heavyweight.
21
*
52
*
22
* The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
53
* This gives us the following list of cases:
23
- * callers must catch this.
24
+ * callers must catch this; we return the 64-bit constant value defined
25
+ * for AArch64.
26
*
54
*
27
* cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
55
- * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
28
* is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
56
- * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
57
- * NS EL1 EL1&0 stage 1+2 +PAN
58
- * NS EL0 EL2&0
59
- * NS EL2 EL2&0
60
- * NS EL2 EL2&0 +PAN
61
- * NS EL2 (aka NS PL2)
62
- * S EL0 EL1&0 (aka S PL0)
63
- * S EL1 EL1&0 (not used if EL3 is 32 bit)
64
- * S EL1 EL1&0 +PAN
65
- * S EL3 (aka S PL1)
66
+ * EL0 EL1&0 stage 1+2 (aka NS PL0)
67
+ * EL1 EL1&0 stage 1+2 (aka NS PL1)
68
+ * EL1 EL1&0 stage 1+2 +PAN
69
+ * EL0 EL2&0
70
+ * EL2 EL2&0
71
+ * EL2 EL2&0 +PAN
72
+ * EL2 (aka NS PL2)
73
+ * EL3 (aka S PL1)
74
*
75
- * for a total of 11 different mmu_idx.
76
+ * for a total of 8 different mmu_idx.
77
*
78
* R profile CPUs have an MPU, but can use the same set of MMU indexes
79
- * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
80
- * NS EL2 if we ever model a Cortex-R52).
81
+ * as A profile. They only need to distinguish EL0 and EL1 (and
82
+ * EL2 if we ever model a Cortex-R52).
83
*
84
* M profile CPUs are rather different as they do not have a true MMU.
85
* They have the following different MMU indexes:
86
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
87
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
88
#define ARM_MMU_IDX_M 0x40 /* M profile */
89
90
-/* Meanings of the bits for A profile mmu idx values */
91
-#define ARM_MMU_IDX_A_NS 0x8
92
-
93
/* Meanings of the bits for M profile mmu idx values */
94
#define ARM_MMU_IDX_M_PRIV 0x1
95
#define ARM_MMU_IDX_M_NEGPRI 0x2
96
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
97
/*
98
* A-profile.
99
*/
100
- ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
101
- ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
102
- ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
103
- ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
104
- ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
105
- ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
106
- ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
107
- ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
108
-
109
- ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
110
- ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
111
- ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
112
- ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
113
- ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
114
- ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
115
- ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
116
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
117
+ ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
118
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
119
+ ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
120
+ ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
121
+ ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
122
+ ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
123
+ ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
124
125
/*
126
* These are not allocated TLBs and are used only for AT system
127
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
128
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
129
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
130
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
131
- ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
132
- ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
133
- ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
134
/*
135
* Not allocated a TLB: used only for second stage of an S12 page
136
* table walk, or for descriptor loads during first stage of an S1
137
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
138
* then various TLB flush insns which currently are no-ops or flush
139
* only stage 1 MMU indexes will need to change to flush stage 2.
140
*/
141
- ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
142
- ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
143
+ ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
144
+ ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,
145
146
/*
147
* M-profile.
148
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdxBit {
149
TO_CORE_BIT(E2),
150
TO_CORE_BIT(E20_2),
151
TO_CORE_BIT(E20_2_PAN),
152
- TO_CORE_BIT(SE10_0),
153
- TO_CORE_BIT(SE20_0),
154
- TO_CORE_BIT(SE10_1),
155
- TO_CORE_BIT(SE20_2),
156
- TO_CORE_BIT(SE10_1_PAN),
157
- TO_CORE_BIT(SE20_2_PAN),
158
- TO_CORE_BIT(SE2),
159
- TO_CORE_BIT(SE3),
160
+ TO_CORE_BIT(E3),
161
162
TO_CORE_BIT(MUser),
163
TO_CORE_BIT(MPriv),
164
diff --git a/target/arm/internals.h b/target/arm/internals.h
165
index XXXXXXX..XXXXXXX 100644
166
--- a/target/arm/internals.h
167
+++ b/target/arm/internals.h
168
@@ -XXX,XX +XXX,XX @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
169
case ARMMMUIdx_Stage1_E0:
170
case ARMMMUIdx_Stage1_E1:
171
case ARMMMUIdx_Stage1_E1_PAN:
172
- case ARMMMUIdx_Stage1_SE0:
173
- case ARMMMUIdx_Stage1_SE1:
174
- case ARMMMUIdx_Stage1_SE1_PAN:
175
case ARMMMUIdx_E10_0:
176
case ARMMMUIdx_E10_1:
177
case ARMMMUIdx_E10_1_PAN:
178
case ARMMMUIdx_E20_0:
179
case ARMMMUIdx_E20_2:
180
case ARMMMUIdx_E20_2_PAN:
181
- case ARMMMUIdx_SE10_0:
182
- case ARMMMUIdx_SE10_1:
183
- case ARMMMUIdx_SE10_1_PAN:
184
- case ARMMMUIdx_SE20_0:
185
- case ARMMMUIdx_SE20_2:
186
- case ARMMMUIdx_SE20_2_PAN:
187
return true;
188
default:
189
return false;
190
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
191
{
192
switch (mmu_idx) {
193
case ARMMMUIdx_Stage1_E1_PAN:
194
- case ARMMMUIdx_Stage1_SE1_PAN:
195
case ARMMMUIdx_E10_1_PAN:
196
case ARMMMUIdx_E20_2_PAN:
197
- case ARMMMUIdx_SE10_1_PAN:
198
- case ARMMMUIdx_SE20_2_PAN:
199
return true;
200
default:
201
return false;
202
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
203
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
204
{
205
switch (mmu_idx) {
206
- case ARMMMUIdx_SE20_0:
207
- case ARMMMUIdx_SE20_2:
208
- case ARMMMUIdx_SE20_2_PAN:
209
case ARMMMUIdx_E20_0:
210
case ARMMMUIdx_E20_2:
211
case ARMMMUIdx_E20_2_PAN:
212
case ARMMMUIdx_Stage2:
213
case ARMMMUIdx_Stage2_S:
214
- case ARMMMUIdx_SE2:
215
case ARMMMUIdx_E2:
216
return 2;
217
- case ARMMMUIdx_SE3:
218
+ case ARMMMUIdx_E3:
219
return 3;
220
- case ARMMMUIdx_SE10_0:
221
- case ARMMMUIdx_Stage1_SE0:
222
- return arm_el_is_aa64(env, 3) ? 1 : 3;
223
- case ARMMMUIdx_SE10_1:
224
- case ARMMMUIdx_SE10_1_PAN:
225
+ case ARMMMUIdx_E10_0:
226
case ARMMMUIdx_Stage1_E0:
227
+ return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
228
case ARMMMUIdx_Stage1_E1:
229
case ARMMMUIdx_Stage1_E1_PAN:
230
- case ARMMMUIdx_Stage1_SE1:
231
- case ARMMMUIdx_Stage1_SE1_PAN:
232
- case ARMMMUIdx_E10_0:
233
case ARMMMUIdx_E10_1:
234
case ARMMMUIdx_E10_1_PAN:
235
case ARMMMUIdx_MPrivNegPri:
236
@@ -XXX,XX +XXX,XX @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
237
case ARMMMUIdx_Stage1_E0:
238
case ARMMMUIdx_Stage1_E1:
239
case ARMMMUIdx_Stage1_E1_PAN:
240
- case ARMMMUIdx_Stage1_SE0:
241
- case ARMMMUIdx_Stage1_SE1:
242
- case ARMMMUIdx_Stage1_SE1_PAN:
243
return true;
244
default:
245
return false;
246
diff --git a/target/arm/helper.c b/target/arm/helper.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/target/arm/helper.c
249
+++ b/target/arm/helper.c
250
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
251
/* Begin with base v8.0 state. */
252
uint64_t valid_mask = 0x3fff;
253
ARMCPU *cpu = env_archcpu(env);
254
+ uint64_t changed;
255
256
/*
257
* Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
258
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
259
260
/* Clear all-context RES0 bits. */
261
value &= valid_mask;
262
- raw_write(env, ri, value);
263
+ changed = env->cp15.scr_el3 ^ value;
264
+ env->cp15.scr_el3 = value;
265
+
266
+ /*
267
+ * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
268
+ * we must invalidate all TLBs below EL3.
269
+ */
270
+ if (changed & SCR_NS) {
271
+ tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
272
+ ARMMMUIdxBit_E20_0 |
273
+ ARMMMUIdxBit_E10_1 |
274
+ ARMMMUIdxBit_E20_2 |
275
+ ARMMMUIdxBit_E10_1_PAN |
276
+ ARMMMUIdxBit_E20_2_PAN |
277
+ ARMMMUIdxBit_E2));
278
+ }
279
}
280
281
static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
282
@@ -XXX,XX +XXX,XX @@ static int gt_phys_redir_timeridx(CPUARMState *env)
283
case ARMMMUIdx_E20_0:
284
case ARMMMUIdx_E20_2:
285
case ARMMMUIdx_E20_2_PAN:
286
- case ARMMMUIdx_SE20_0:
287
- case ARMMMUIdx_SE20_2:
288
- case ARMMMUIdx_SE20_2_PAN:
289
return GTIMER_HYP;
290
default:
291
return GTIMER_PHYS;
292
@@ -XXX,XX +XXX,XX @@ static int gt_virt_redir_timeridx(CPUARMState *env)
293
case ARMMMUIdx_E20_0:
294
case ARMMMUIdx_E20_2:
295
case ARMMMUIdx_E20_2_PAN:
296
- case ARMMMUIdx_SE20_0:
297
- case ARMMMUIdx_SE20_2:
298
- case ARMMMUIdx_SE20_2_PAN:
299
return GTIMER_HYPVIRT;
300
default:
301
return GTIMER_VIRT;
302
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
303
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
304
switch (el) {
305
case 3:
306
- mmu_idx = ARMMMUIdx_SE3;
307
+ mmu_idx = ARMMMUIdx_E3;
308
secure = true;
309
break;
310
case 2:
311
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
312
/* fall through */
313
case 1:
314
if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
315
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
316
- : ARMMMUIdx_Stage1_E1_PAN);
317
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
318
} else {
319
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
320
+ mmu_idx = ARMMMUIdx_Stage1_E1;
321
}
322
break;
323
default:
324
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
325
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
326
switch (el) {
327
case 3:
328
- mmu_idx = ARMMMUIdx_SE10_0;
329
+ mmu_idx = ARMMMUIdx_E10_0;
330
secure = true;
331
break;
332
case 2:
333
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
334
mmu_idx = ARMMMUIdx_Stage1_E0;
335
break;
336
case 1:
337
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
338
+ mmu_idx = ARMMMUIdx_Stage1_E0;
339
break;
340
default:
341
g_assert_not_reached();
342
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
343
switch (ri->opc1) {
344
case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
345
if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
346
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
347
- : ARMMMUIdx_Stage1_E1_PAN);
348
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
349
} else {
350
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
351
+ mmu_idx = ARMMMUIdx_Stage1_E1;
352
}
353
break;
354
case 4: /* AT S1E2R, AT S1E2W */
355
- mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
356
+ mmu_idx = ARMMMUIdx_E2;
357
break;
358
case 6: /* AT S1E3R, AT S1E3W */
359
- mmu_idx = ARMMMUIdx_SE3;
360
+ mmu_idx = ARMMMUIdx_E3;
361
secure = true;
362
break;
363
default:
364
@@ -XXX,XX +XXX,XX @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
365
}
366
break;
367
case 2: /* AT S1E0R, AT S1E0W */
368
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
369
+ mmu_idx = ARMMMUIdx_Stage1_E0;
370
break;
371
case 4: /* AT S12E1R, AT S12E1W */
372
- mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
373
+ mmu_idx = ARMMMUIdx_E10_1;
374
break;
375
case 6: /* AT S12E0R, AT S12E0W */
376
- mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
377
+ mmu_idx = ARMMMUIdx_E10_0;
378
break;
379
default:
380
g_assert_not_reached();
381
@@ -XXX,XX +XXX,XX @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
382
uint16_t mask = ARMMMUIdxBit_E20_2 |
383
ARMMMUIdxBit_E20_2_PAN |
384
ARMMMUIdxBit_E20_0;
385
-
386
- if (arm_is_secure_below_el3(env)) {
387
- mask >>= ARM_MMU_IDX_A_NS;
388
- }
389
-
390
tlb_flush_by_mmuidx(env_cpu(env), mask);
391
}
392
raw_write(env, ri, value);
393
@@ -XXX,XX +XXX,XX @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
394
uint16_t mask = ARMMMUIdxBit_E10_1 |
395
ARMMMUIdxBit_E10_1_PAN |
396
ARMMMUIdxBit_E10_0;
397
-
398
- if (arm_is_secure_below_el3(env)) {
399
- mask >>= ARM_MMU_IDX_A_NS;
400
- }
401
-
402
tlb_flush_by_mmuidx(cs, mask);
403
raw_write(env, ri, value);
404
}
405
@@ -XXX,XX +XXX,XX @@ static int vae1_tlbmask(CPUARMState *env)
406
ARMMMUIdxBit_E10_1_PAN |
407
ARMMMUIdxBit_E10_0;
408
}
409
-
410
- if (arm_is_secure_below_el3(env)) {
411
- mask >>= ARM_MMU_IDX_A_NS;
412
- }
413
-
414
return mask;
415
}
416
417
@@ -XXX,XX +XXX,XX @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
418
mmu_idx = ARMMMUIdx_E10_0;
419
}
420
421
- if (arm_is_secure_below_el3(env)) {
422
- mmu_idx &= ~ARM_MMU_IDX_A_NS;
423
- }
424
-
425
return tlbbits_for_regime(env, mmu_idx, addr);
426
}
427
428
@@ -XXX,XX +XXX,XX @@ static int alle1_tlbmask(CPUARMState *env)
429
* stage 2 translations, whereas most other scopes only invalidate
430
* stage 1 translations.
431
*/
432
- if (arm_is_secure_below_el3(env)) {
433
- return ARMMMUIdxBit_SE10_1 |
434
- ARMMMUIdxBit_SE10_1_PAN |
435
- ARMMMUIdxBit_SE10_0;
436
- } else {
437
- return ARMMMUIdxBit_E10_1 |
438
- ARMMMUIdxBit_E10_1_PAN |
439
- ARMMMUIdxBit_E10_0;
440
- }
441
+ return (ARMMMUIdxBit_E10_1 |
442
+ ARMMMUIdxBit_E10_1_PAN |
443
+ ARMMMUIdxBit_E10_0);
444
}
445
446
static int e2_tlbmask(CPUARMState *env)
447
{
448
- if (arm_is_secure_below_el3(env)) {
449
- return ARMMMUIdxBit_SE20_0 |
450
- ARMMMUIdxBit_SE20_2 |
451
- ARMMMUIdxBit_SE20_2_PAN |
452
- ARMMMUIdxBit_SE2;
453
- } else {
454
- return ARMMMUIdxBit_E20_0 |
455
- ARMMMUIdxBit_E20_2 |
456
- ARMMMUIdxBit_E20_2_PAN |
457
- ARMMMUIdxBit_E2;
458
- }
459
+ return (ARMMMUIdxBit_E20_0 |
460
+ ARMMMUIdxBit_E20_2 |
461
+ ARMMMUIdxBit_E20_2_PAN |
462
+ ARMMMUIdxBit_E2);
463
}
464
465
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
466
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
467
ARMCPU *cpu = env_archcpu(env);
468
CPUState *cs = CPU(cpu);
469
470
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
471
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
472
}
473
474
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
475
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
476
{
477
CPUState *cs = env_cpu(env);
478
479
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
480
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
481
}
482
483
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
484
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
485
CPUState *cs = CPU(cpu);
486
uint64_t pageaddr = sextract64(value << 12, 0, 56);
487
488
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
489
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
490
}
491
492
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
493
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
494
{
495
CPUState *cs = env_cpu(env);
496
uint64_t pageaddr = sextract64(value << 12, 0, 56);
497
- bool secure = arm_is_secure_below_el3(env);
498
- int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
499
- int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
500
- pageaddr);
501
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
502
503
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
504
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
505
+ ARMMMUIdxBit_E2, bits);
506
}
507
508
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
509
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
510
{
511
CPUState *cs = env_cpu(env);
512
uint64_t pageaddr = sextract64(value << 12, 0, 56);
513
- int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
514
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
515
516
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
517
- ARMMMUIdxBit_SE3, bits);
518
+ ARMMMUIdxBit_E3, bits);
519
}
520
521
#ifdef TARGET_AARCH64
522
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,
523
524
static int vae2_tlbmask(CPUARMState *env)
525
{
526
- return (arm_is_secure_below_el3(env)
527
- ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
528
+ return ARMMMUIdxBit_E2;
529
}
530
531
static void tlbi_aa64_rvae2_write(CPUARMState *env,
532
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
533
* flush-last-level-only.
534
*/
535
536
- do_rvae_write(env, value, ARMMMUIdxBit_SE3,
537
- tlb_force_broadcast(env));
538
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
539
}
540
541
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
542
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
543
* flush-last-level-only or inner/outer specific flushes.
544
*/
545
546
- do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
547
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
548
}
549
#endif
550
551
@@ -XXX,XX +XXX,XX @@ uint64_t arm_sctlr(CPUARMState *env, int el)
552
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
553
if (el == 0) {
554
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
555
- el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
556
- ? 2 : 1;
557
+ el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
558
}
559
return env->cp15.sctlr_el[el];
560
}
561
@@ -XXX,XX +XXX,XX @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
562
switch (mmu_idx) {
563
case ARMMMUIdx_E10_0:
564
case ARMMMUIdx_E20_0:
565
- case ARMMMUIdx_SE10_0:
566
- case ARMMMUIdx_SE20_0:
567
return 0;
568
case ARMMMUIdx_E10_1:
569
case ARMMMUIdx_E10_1_PAN:
570
- case ARMMMUIdx_SE10_1:
571
- case ARMMMUIdx_SE10_1_PAN:
572
return 1;
573
case ARMMMUIdx_E2:
574
case ARMMMUIdx_E20_2:
575
case ARMMMUIdx_E20_2_PAN:
576
- case ARMMMUIdx_SE2:
577
- case ARMMMUIdx_SE20_2:
578
- case ARMMMUIdx_SE20_2_PAN:
579
return 2;
580
- case ARMMMUIdx_SE3:
581
+ case ARMMMUIdx_E3:
582
return 3;
583
default:
584
g_assert_not_reached();
585
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
586
}
587
break;
588
case 3:
589
- return ARMMMUIdx_SE3;
590
+ return ARMMMUIdx_E3;
591
default:
592
g_assert_not_reached();
593
}
594
595
- if (arm_is_secure_below_el3(env)) {
596
- idx &= ~ARM_MMU_IDX_A_NS;
597
- }
598
-
599
return idx;
600
}
601
602
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
603
switch (mmu_idx) {
604
case ARMMMUIdx_E10_1:
605
case ARMMMUIdx_E10_1_PAN:
606
- case ARMMMUIdx_SE10_1:
607
- case ARMMMUIdx_SE10_1_PAN:
608
/* TODO: ARMv8.3-NV */
609
DP_TBFLAG_A64(flags, UNPRIV, 1);
610
break;
611
case ARMMMUIdx_E20_2:
612
case ARMMMUIdx_E20_2_PAN:
613
- case ARMMMUIdx_SE20_2:
614
- case ARMMMUIdx_SE20_2_PAN:
615
/*
616
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
617
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
618
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
619
index XXXXXXX..XXXXXXX 100644
620
--- a/target/arm/ptw.c
621
+++ b/target/arm/ptw.c
622
@@ -XXX,XX +XXX,XX @@ unsigned int arm_pamax(ARMCPU *cpu)
623
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
624
{
625
switch (mmu_idx) {
626
- case ARMMMUIdx_SE10_0:
627
- return ARMMMUIdx_Stage1_SE0;
628
- case ARMMMUIdx_SE10_1:
629
- return ARMMMUIdx_Stage1_SE1;
630
- case ARMMMUIdx_SE10_1_PAN:
631
- return ARMMMUIdx_Stage1_SE1_PAN;
632
case ARMMMUIdx_E10_0:
633
return ARMMMUIdx_Stage1_E0;
634
case ARMMMUIdx_E10_1:
635
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
636
static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
637
{
638
switch (mmu_idx) {
639
- case ARMMMUIdx_SE10_0:
640
case ARMMMUIdx_E20_0:
641
- case ARMMMUIdx_SE20_0:
642
case ARMMMUIdx_Stage1_E0:
643
- case ARMMMUIdx_Stage1_SE0:
644
case ARMMMUIdx_MUser:
645
case ARMMMUIdx_MSUser:
646
case ARMMMUIdx_MUserNegPri:
647
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
648
649
s2_mmu_idx = (s2walk_secure
650
? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
651
- is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
652
+ is_el0 = mmu_idx == ARMMMUIdx_E10_0;
653
654
/*
655
* S1 is done, now do S2 translation.
656
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
657
case ARMMMUIdx_Stage1_E1:
658
case ARMMMUIdx_Stage1_E1_PAN:
659
case ARMMMUIdx_E2:
660
+ is_secure = arm_is_secure_below_el3(env);
661
+ break;
662
case ARMMMUIdx_Stage2:
663
case ARMMMUIdx_MPrivNegPri:
664
case ARMMMUIdx_MUserNegPri:
665
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
666
case ARMMMUIdx_MUser:
667
is_secure = false;
668
break;
669
- case ARMMMUIdx_SE3:
670
- case ARMMMUIdx_SE10_0:
671
- case ARMMMUIdx_SE10_1:
672
- case ARMMMUIdx_SE10_1_PAN:
673
- case ARMMMUIdx_SE20_0:
674
- case ARMMMUIdx_SE20_2:
675
- case ARMMMUIdx_SE20_2_PAN:
676
- case ARMMMUIdx_Stage1_SE0:
677
- case ARMMMUIdx_Stage1_SE1:
678
- case ARMMMUIdx_Stage1_SE1_PAN:
679
- case ARMMMUIdx_SE2:
680
+ case ARMMMUIdx_E3:
681
case ARMMMUIdx_Stage2_S:
682
case ARMMMUIdx_MSPrivNegPri:
683
case ARMMMUIdx_MSUserNegPri:
29
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
684
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
30
index XXXXXXX..XXXXXXX 100644
685
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-a64.c
686
--- a/target/arm/translate-a64.c
32
+++ b/target/arm/translate-a64.c
687
+++ b/target/arm/translate-a64.c
33
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
688
@@ -XXX,XX +XXX,XX @@ static int get_a64_user_mem_index(DisasContext *s)
34
{
689
case ARMMMUIdx_E20_2_PAN:
35
int rd = extract32(insn, 0, 5);
690
useridx = ARMMMUIdx_E20_0;
36
int cmode = extract32(insn, 12, 4);
691
break;
37
- int cmode_3_1 = extract32(cmode, 1, 3);
692
- case ARMMMUIdx_SE10_1:
38
- int cmode_0 = extract32(cmode, 0, 1);
693
- case ARMMMUIdx_SE10_1_PAN:
39
int o2 = extract32(insn, 11, 1);
694
- useridx = ARMMMUIdx_SE10_0;
40
uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
695
- break;
41
bool is_neg = extract32(insn, 29, 1);
696
- case ARMMMUIdx_SE20_2:
42
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
697
- case ARMMMUIdx_SE20_2_PAN:
43
return;
698
- useridx = ARMMMUIdx_SE20_0;
44
}
699
- break;
45
700
default:
46
- /* See AdvSIMDExpandImm() in ARM ARM */
701
g_assert_not_reached();
47
- switch (cmode_3_1) {
702
}
48
- case 0: /* Replicate(Zeros(24):imm8, 2) */
49
- case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
50
- case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
51
- case 3: /* Replicate(imm8:Zeros(24), 2) */
52
- {
53
- int shift = cmode_3_1 * 8;
54
- imm = bitfield_replicate(abcdefgh << shift, 32);
55
- break;
56
- }
57
- case 4: /* Replicate(Zeros(8):imm8, 4) */
58
- case 5: /* Replicate(imm8:Zeros(8), 4) */
59
- {
60
- int shift = (cmode_3_1 & 0x1) * 8;
61
- imm = bitfield_replicate(abcdefgh << shift, 16);
62
- break;
63
- }
64
- case 6:
65
- if (cmode_0) {
66
- /* Replicate(Zeros(8):imm8:Ones(16), 2) */
67
- imm = (abcdefgh << 16) | 0xffff;
68
- } else {
69
- /* Replicate(Zeros(16):imm8:Ones(8), 2) */
70
- imm = (abcdefgh << 8) | 0xff;
71
- }
72
- imm = bitfield_replicate(imm, 32);
73
- break;
74
- case 7:
75
- if (!cmode_0 && !is_neg) {
76
- imm = bitfield_replicate(abcdefgh, 8);
77
- } else if (!cmode_0 && is_neg) {
78
- int i;
79
- imm = 0;
80
- for (i = 0; i < 8; i++) {
81
- if ((abcdefgh) & (1 << i)) {
82
- imm |= 0xffULL << (i * 8);
83
- }
84
- }
85
- } else if (cmode_0) {
86
- if (is_neg) {
87
- imm = (abcdefgh & 0x3f) << 48;
88
- if (abcdefgh & 0x80) {
89
- imm |= 0x8000000000000000ULL;
90
- }
91
- if (abcdefgh & 0x40) {
92
- imm |= 0x3fc0000000000000ULL;
93
- } else {
94
- imm |= 0x4000000000000000ULL;
95
- }
96
- } else {
97
- if (o2) {
98
- /* FMOV (vector, immediate) - half-precision */
99
- imm = vfp_expand_imm(MO_16, abcdefgh);
100
- /* now duplicate across the lanes */
101
- imm = bitfield_replicate(imm, 16);
102
- } else {
103
- imm = (abcdefgh & 0x3f) << 19;
104
- if (abcdefgh & 0x80) {
105
- imm |= 0x80000000;
106
- }
107
- if (abcdefgh & 0x40) {
108
- imm |= 0x3e000000;
109
- } else {
110
- imm |= 0x40000000;
111
- }
112
- imm |= (imm << 32);
113
- }
114
- }
115
- }
116
- break;
117
- default:
118
- g_assert_not_reached();
119
- }
120
-
121
- if (cmode_3_1 != 7 && is_neg) {
122
- imm = ~imm;
123
+ if (cmode == 15 && o2 && !is_neg) {
124
+ /* FMOV (vector, immediate) - half-precision */
125
+ imm = vfp_expand_imm(MO_16, abcdefgh);
126
+ /* now duplicate across the lanes */
127
+ imm = bitfield_replicate(imm, 16);
128
+ } else {
129
+ imm = asimd_imm_const(abcdefgh, cmode, is_neg);
130
}
131
132
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
133
diff --git a/target/arm/translate.c b/target/arm/translate.c
703
diff --git a/target/arm/translate.c b/target/arm/translate.c
134
index XXXXXXX..XXXXXXX 100644
704
index XXXXXXX..XXXXXXX 100644
135
--- a/target/arm/translate.c
705
--- a/target/arm/translate.c
136
+++ b/target/arm/translate.c
706
+++ b/target/arm/translate.c
137
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
707
@@ -XXX,XX +XXX,XX @@ static inline int get_a32_user_mem_index(DisasContext *s)
138
case 14:
708
* otherwise, access as if at PL0.
139
if (op) {
709
*/
140
/*
710
switch (s->mmu_idx) {
141
- * This is the only case where the top and bottom 32 bits
711
+ case ARMMMUIdx_E3:
142
- * of the encoded constant differ.
712
case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
143
+ * This and cmode == 15 op == 1 are the only cases where
713
case ARMMMUIdx_E10_0:
144
+ * the top and bottom 32 bits of the encoded constant differ.
714
case ARMMMUIdx_E10_1:
145
*/
715
case ARMMMUIdx_E10_1_PAN:
146
uint64_t imm64 = 0;
716
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
147
int n;
717
- case ARMMMUIdx_SE3:
148
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
718
- case ARMMMUIdx_SE10_0:
149
imm |= (imm << 8) | (imm << 16) | (imm << 24);
719
- case ARMMMUIdx_SE10_1:
150
break;
720
- case ARMMMUIdx_SE10_1_PAN:
151
case 15:
721
- return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
152
+ if (op) {
722
case ARMMMUIdx_MUser:
153
+ /* Reserved encoding for AArch32; valid for AArch64 */
723
case ARMMMUIdx_MPriv:
154
+ uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
724
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
155
+ if (imm & 0x80) {
156
+ imm64 |= 0x8000000000000000ULL;
157
+ }
158
+ if (imm & 0x40) {
159
+ imm64 |= 0x3fc0000000000000ULL;
160
+ } else {
161
+ imm64 |= 0x4000000000000000ULL;
162
+ }
163
+ return imm64;
164
+ }
165
imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
166
| ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
167
break;
168
--
725
--
169
2.20.1
726
2.25.1
170
171
1
From: Nolan Leake <nolan@sigbus.net>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is just enough to make reboot and poweroff work. It works for
3
Use a switch on mmu_idx for the a-profile indexes, instead of
4
Linux, U-Boot, and the Arm Trusted Firmware. Not tested, but it should
4
three different ifs against regime_el and arm_mmu_idx_is_stage1_of_2.
5
work for Plan 9 and bare-metal/hobby OSes, since they seem to generally
6
do what Linux does for reset.
7
5
8
The watchdog timer functionality is not yet implemented.
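
As a rough illustration of how a guest is expected to drive this block (based on the register layout added below: a 0x5a000000 password in the top byte, RSTC at 0x1c, RSTS at 0x20, and the Linux "partition 63" halt convention), a bare-metal sequence might look like this. PM_BASE and the helper names are invented for the sketch, and the watchdog is ignored:

    /* Hypothetical guest-side sketch; PM_BASE and function names are invented. */
    #include <stdint.h>

    #define PM_BASE        0x3f100000u   /* raspi2/3 PM block, for illustration only */
    #define PM_PASSWORD    0x5a000000u
    #define PM_RSTC        0x1c
    #define PM_RSTS        0x20
    #define PM_RSTC_RESET  0x20
    #define PM_RSTS_HALT   0x555         /* "partition 63": the poweroff marker */

    static inline void pm_write(uint32_t off, uint32_t val)
    {
        *(volatile uint32_t *)(PM_BASE + off) = PM_PASSWORD | val;
    }

    void pm_reboot(void)
    {
        pm_write(PM_RSTC, PM_RSTC_RESET);    /* plain reset */
    }

    void pm_poweroff(void)
    {
        pm_write(PM_RSTS, PM_RSTS_HALT);     /* mark "halt" in RSTS... */
        pm_write(PM_RSTC, PM_RSTC_RESET);    /* ...then trigger the reset */
    }
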
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
8
Message-id: 20221001162318.153420-12-richard.henderson@linaro.org
11
Signed-off-by: Nolan Leake <nolan@sigbus.net>
12
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
14
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
15
[PMM: tweaked commit title; fixed region size to 0x200;
16
moved header file to include/]
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
10
---
19
include/hw/arm/bcm2835_peripherals.h | 3 +-
11
target/arm/ptw.c | 32 +++++++++++++++++++++++++-------
20
include/hw/misc/bcm2835_powermgt.h | 29 +++++
12
1 file changed, 25 insertions(+), 7 deletions(-)
21
hw/arm/bcm2835_peripherals.c | 13 ++-
22
hw/misc/bcm2835_powermgt.c | 160 +++++++++++++++++++++++++++
23
hw/misc/meson.build | 1 +
24
5 files changed, 204 insertions(+), 2 deletions(-)
25
create mode 100644 include/hw/misc/bcm2835_powermgt.h
26
create mode 100644 hw/misc/bcm2835_powermgt.c
27
13
28
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
29
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
30
--- a/include/hw/arm/bcm2835_peripherals.h
16
--- a/target/arm/ptw.c
31
+++ b/include/hw/arm/bcm2835_peripherals.h
17
+++ b/target/arm/ptw.c
32
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
33
#include "hw/misc/bcm2835_mphi.h"
19
34
#include "hw/misc/bcm2835_thermal.h"
20
hcr_el2 = arm_hcr_el2_eff(env);
35
#include "hw/misc/bcm2835_cprman.h"
21
36
+#include "hw/misc/bcm2835_powermgt.h"
22
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
37
#include "hw/sd/sdhci.h"
23
+ switch (mmu_idx) {
38
#include "hw/sd/bcm2835_sdhost.h"
24
+ case ARMMMUIdx_Stage2:
39
#include "hw/gpio/bcm2835_gpio.h"
25
+ case ARMMMUIdx_Stage2_S:
40
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
26
/* HCR.DC means HCR.VM behaves as 1 */
41
BCM2835MphiState mphi;
27
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
42
UnimplementedDeviceState txp;
28
- }
43
UnimplementedDeviceState armtmr;
29
44
- UnimplementedDeviceState powermgt;
30
- if (hcr_el2 & HCR_TGE) {
45
+ BCM2835PowerMgtState powermgt;
31
+ case ARMMMUIdx_E10_0:
46
BCM2835CprmanState cprman;
32
+ case ARMMMUIdx_E10_1:
47
PL011State uart0;
33
+ case ARMMMUIdx_E10_1_PAN:
48
BCM2835AuxState aux;
34
/* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
49
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
35
- if (!is_secure && regime_el(env, mmu_idx) == 1) {
50
new file mode 100644
36
+ if (!is_secure && (hcr_el2 & HCR_TGE)) {
51
index XXXXXXX..XXXXXXX
37
return true;
52
--- /dev/null
38
}
53
+++ b/include/hw/misc/bcm2835_powermgt.h
39
- }
54
@@ -XXX,XX +XXX,XX @@
40
+ break;
55
+/*
41
56
+ * BCM2835 Power Management emulation
42
- if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
57
+ *
43
+ case ARMMMUIdx_Stage1_E0:
58
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
44
+ case ARMMMUIdx_Stage1_E1:
59
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
45
+ case ARMMMUIdx_Stage1_E1_PAN:
60
+ *
46
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
61
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
47
- return true;
62
+ * See the COPYING file in the top-level directory.
48
+ if (hcr_el2 & HCR_DC) {
63
+ */
49
+ return true;
50
+ }
51
+ break;
64
+
52
+
65
+#ifndef BCM2835_POWERMGT_H
53
+ case ARMMMUIdx_E20_0:
66
+#define BCM2835_POWERMGT_H
54
+ case ARMMMUIdx_E20_2:
67
+
55
+ case ARMMMUIdx_E20_2_PAN:
68
+#include "hw/sysbus.h"
56
+ case ARMMMUIdx_E2:
69
+#include "qom/object.h"
57
+ case ARMMMUIdx_E3:
70
+
71
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
72
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
73
+
74
+struct BCM2835PowerMgtState {
75
+ SysBusDevice busdev;
76
+ MemoryRegion iomem;
77
+
78
+ uint32_t rstc;
79
+ uint32_t rsts;
80
+ uint32_t wdog;
81
+};
82
+
83
+#endif
84
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/hw/arm/bcm2835_peripherals.c
87
+++ b/hw/arm/bcm2835_peripherals.c
88
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
89
90
object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
91
OBJECT(&s->gpu_bus_mr));
92
+
93
+ /* Power Management */
94
+ object_initialize_child(obj, "powermgt", &s->powermgt,
95
+ TYPE_BCM2835_POWERMGT);
96
}
97
98
static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
99
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
100
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
101
INTERRUPT_USB));
102
103
+ /* Power Management */
104
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
105
+ return;
106
+ }
107
+
108
+ memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
109
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
110
+
111
create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
112
create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
113
- create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
114
create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
115
create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
116
create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
117
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
118
new file mode 100644
119
index XXXXXXX..XXXXXXX
120
--- /dev/null
121
+++ b/hw/misc/bcm2835_powermgt.c
122
@@ -XXX,XX +XXX,XX @@
123
+/*
124
+ * BCM2835 Power Management emulation
125
+ *
126
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
127
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
128
+ *
129
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
130
+ * See the COPYING file in the top-level directory.
131
+ */
132
+
133
+#include "qemu/osdep.h"
134
+#include "qemu/log.h"
135
+#include "qemu/module.h"
136
+#include "hw/misc/bcm2835_powermgt.h"
137
+#include "migration/vmstate.h"
138
+#include "sysemu/runstate.h"
139
+
140
+#define PASSWORD 0x5a000000
141
+#define PASSWORD_MASK 0xff000000
142
+
143
+#define R_RSTC 0x1c
144
+#define V_RSTC_RESET 0x20
145
+#define R_RSTS 0x20
146
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
147
+#define R_WDOG 0x24
148
+
149
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
150
+ unsigned size)
151
+{
152
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
153
+ uint32_t res = 0;
154
+
155
+ switch (offset) {
156
+ case R_RSTC:
157
+ res = s->rstc;
158
+ break;
159
+ case R_RSTS:
160
+ res = s->rsts;
161
+ break;
162
+ case R_WDOG:
163
+ res = s->wdog;
164
+ break;
58
+ break;
165
+
59
+
166
+ default:
60
+ default:
167
+ qemu_log_mask(LOG_UNIMP,
61
+ g_assert_not_reached();
168
+ "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
62
}
169
+ "\n", offset);
63
170
+ res = 0;
64
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
171
+ break;
172
+ }
173
+
174
+ return res;
175
+}
176
+
177
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
178
+ uint64_t value, unsigned size)
179
+{
180
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
181
+
182
+ if ((value & PASSWORD_MASK) != PASSWORD) {
183
+ qemu_log_mask(LOG_GUEST_ERROR,
184
+ "bcm2835_powermgt_write: Bad password 0x%"PRIx64
185
+ " at offset 0x%08"HWADDR_PRIx"\n",
186
+ value, offset);
187
+ return;
188
+ }
189
+
190
+ value = value & ~PASSWORD_MASK;
191
+
192
+ switch (offset) {
193
+ case R_RSTC:
194
+ s->rstc = value;
195
+ if (value & V_RSTC_RESET) {
196
+ if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
197
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
198
+ } else {
199
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
200
+ }
201
+ }
202
+ break;
203
+ case R_RSTS:
204
+ qemu_log_mask(LOG_UNIMP,
205
+ "bcm2835_powermgt_write: RSTS\n");
206
+ s->rsts = value;
207
+ break;
208
+ case R_WDOG:
209
+ qemu_log_mask(LOG_UNIMP,
210
+ "bcm2835_powermgt_write: WDOG\n");
211
+ s->wdog = value;
212
+ break;
213
+
214
+ default:
215
+ qemu_log_mask(LOG_UNIMP,
216
+ "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
217
+ "\n", offset);
218
+ break;
219
+ }
220
+}
221
+
222
+static const MemoryRegionOps bcm2835_powermgt_ops = {
223
+ .read = bcm2835_powermgt_read,
224
+ .write = bcm2835_powermgt_write,
225
+ .endianness = DEVICE_NATIVE_ENDIAN,
226
+ .impl.min_access_size = 4,
227
+ .impl.max_access_size = 4,
228
+};
229
+
230
+static const VMStateDescription vmstate_bcm2835_powermgt = {
231
+ .name = TYPE_BCM2835_POWERMGT,
232
+ .version_id = 1,
233
+ .minimum_version_id = 1,
234
+ .fields = (VMStateField[]) {
235
+ VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
236
+ VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
237
+ VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
238
+ VMSTATE_END_OF_LIST()
239
+ }
240
+};
241
+
242
+static void bcm2835_powermgt_init(Object *obj)
243
+{
244
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
245
+
246
+ memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
247
+ TYPE_BCM2835_POWERMGT, 0x200);
248
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
249
+}
250
+
251
+static void bcm2835_powermgt_reset(DeviceState *dev)
252
+{
253
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
254
+
255
+ /* https://elinux.org/BCM2835_registers#PM */
256
+ s->rstc = 0x00000102;
257
+ s->rsts = 0x00001000;
258
+ s->wdog = 0x00000000;
259
+}
260
+
261
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
262
+{
263
+ DeviceClass *dc = DEVICE_CLASS(klass);
264
+
265
+ dc->reset = bcm2835_powermgt_reset;
266
+ dc->vmsd = &vmstate_bcm2835_powermgt;
267
+}
268
+
269
+static TypeInfo bcm2835_powermgt_info = {
270
+ .name = TYPE_BCM2835_POWERMGT,
271
+ .parent = TYPE_SYS_BUS_DEVICE,
272
+ .instance_size = sizeof(BCM2835PowerMgtState),
273
+ .class_init = bcm2835_powermgt_class_init,
274
+ .instance_init = bcm2835_powermgt_init,
275
+};
276
+
277
+static void bcm2835_powermgt_register_types(void)
278
+{
279
+ type_register_static(&bcm2835_powermgt_info);
280
+}
281
+
282
+type_init(bcm2835_powermgt_register_types)
283
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
284
index XXXXXXX..XXXXXXX 100644
285
--- a/hw/misc/meson.build
286
+++ b/hw/misc/meson.build
287
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
288
'bcm2835_rng.c',
289
'bcm2835_thermal.c',
290
'bcm2835_cprman.c',
291
+ 'bcm2835_powermgt.c',
292
))
293
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
294
softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
295
--
65
--
296
2.20.1
66
2.25.1
297
298
1
Implement the MVE shift-right-and-narrow insns VSHRN and VRSHRN.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
do_urshr() is borrowed from sve_helper.c.
3
The effect of TGE does not only apply to non-secure state,
4
now that Secure EL2 exists.
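
To make the rounding in do_urshr() concrete, here is a standalone copy of the same arithmetic with a couple of worked cases (urshr is just an illustrative wrapper, not the helper itself):

    /* Unsigned rounding right shift, mirroring the do_urshr() helper added below. */
    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    static uint64_t urshr(uint64_t x, unsigned sh)
    {
        /* sh is expected to be 1..64, as for the narrowing-shift encodings */
        if (sh == 0) {
            return x;
        } else if (sh < 64) {
            return (x >> sh) + ((x >> (sh - 1)) & 1);   /* add back the last bit shifted out */
        } else if (sh == 64) {
            return x >> 63;                             /* only the rounding bit survives */
        }
        return 0;
    }

    int main(void)
    {
        assert(urshr(23, 3) == 3);   /* 23/8 = 2.875 -> rounds up to 3 */
        assert(urshr(16, 3) == 2);   /* 16/8 = 2.0   -> stays 2 */
        printf("ok\n");
        return 0;
    }
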
4
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20221001162318.153420-13-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
8
---
10
---
9
target/arm/helper-mve.h | 10 ++++++++++
11
target/arm/ptw.c | 4 ++--
10
target/arm/mve.decode | 11 +++++++++++
12
1 file changed, 2 insertions(+), 2 deletions(-)
11
target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 15 ++++++++++++++
13
4 files changed, 76 insertions(+)
14
13
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
16
--- a/target/arm/ptw.c
18
+++ b/target/arm/helper-mve.h
17
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
20
DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
case ARMMMUIdx_E10_0:
21
DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
case ARMMMUIdx_E10_1:
22
DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
case ARMMMUIdx_E10_1_PAN:
23
+
22
- /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
24
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
- if (!is_secure && (hcr_el2 & HCR_TGE)) {
25
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+ /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
26
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
+ if (hcr_el2 & HCR_TGE) {
27
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
return true;
28
+
27
}
29
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
break;
30
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/mve.decode
36
+++ b/target/arm/mve.decode
37
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
38
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
39
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
40
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
41
+
42
+# Narrowing shifts (which only support b and h sizes)
43
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
44
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
45
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
46
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
47
+
48
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
49
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
50
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
51
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
52
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/mve_helper.c
55
+++ b/target/arm/mve_helper.c
56
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
57
58
DO_VSHLL_ALL(vshllb, false)
59
DO_VSHLL_ALL(vshllt, true)
60
+
61
+/*
62
+ * Narrowing right shifts, taking a double sized input, shifting it
63
+ * and putting the result in either the top or bottom half of the output.
64
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
65
+ */
66
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
67
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
68
+ void *vm, uint32_t shift) \
69
+ { \
70
+ LTYPE *m = vm; \
71
+ TYPE *d = vd; \
72
+ uint16_t mask = mve_element_mask(env); \
73
+ unsigned le; \
74
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
75
+ TYPE r = FN(m[H##LESIZE(le)], shift); \
76
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
77
+ } \
78
+ mve_advance_vpt(env); \
79
+ }
80
+
81
+#define DO_VSHRN_ALL(OP, FN) \
82
+ DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
83
+ DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
84
+ DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
85
+ DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
86
+
87
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
88
+{
89
+ if (likely(sh < 64)) {
90
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
91
+ } else if (sh == 64) {
92
+ return x >> 63;
93
+ } else {
94
+ return 0;
95
+ }
96
+}
97
+
98
+DO_VSHRN_ALL(vshrn, DO_SHR)
99
+DO_VSHRN_ALL(vrshrn, do_urshr)
100
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/target/arm/translate-mve.c
103
+++ b/target/arm/translate-mve.c
104
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
105
DO_VSHLL(VSHLL_BU, vshllbu)
106
DO_VSHLL(VSHLL_TS, vshllts)
107
DO_VSHLL(VSHLL_TU, vshlltu)
108
+
109
+#define DO_2SHIFT_N(INSN, FN) \
110
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
111
+ { \
112
+ static MVEGenTwoOpShiftFn * const fns[] = { \
113
+ gen_helper_mve_##FN##b, \
114
+ gen_helper_mve_##FN##h, \
115
+ }; \
116
+ return do_2shift(s, a, fns[a->size], false); \
117
+ }
118
+
119
+DO_2SHIFT_N(VSHRNB, vshrnb)
120
+DO_2SHIFT_N(VSHRNT, vshrnt)
121
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
122
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
123
--
29
--
124
2.20.1
30
2.25.1
125
126
1
Implement the MVE long shifts by register, which perform shifts on a
1
From: Richard Henderson <richard.henderson@linaro.org>
2
pair of general-purpose registers treated as a 64-bit quantity, with
3
the shift count in another general-purpose register, which might be
4
either positive or negative.
5
2
6
Like the long-shifts-by-immediate, these encodings sit in the space
3
For page walking, we may require HCR for a security state
7
that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15.
4
that is not "current".
8
Because LSLL_rr and ASRL_rr overlap with both MOV_rxri/ORR_rrri and
9
also with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases),
10
we have to move the CSEL pattern into the same decodetree group.
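
Ignoring the saturating and rounding variants and MVE predication, the basic operation on the register pair can be modelled as below; lsll here is an invented stand-in for illustration, not the QEMU helper:

    /* Illustrative model of LSLL (Rda, Rm): the pair is one 64-bit value,
     * and the shift count is the signed low byte of Rm. */
    #include <stdint.h>
    #include <stdio.h>

    static void lsll(uint32_t *rdalo, uint32_t *rdahi, uint32_t rm)
    {
        uint64_t rda = ((uint64_t)*rdahi << 32) | *rdalo;
        int8_t sh = (int8_t)rm;                  /* negative counts shift the other way */

        if (sh >= 0) {
            rda = (sh < 64) ? rda << sh : 0;     /* positive: shift left */
        } else {
            rda = (-sh < 64) ? rda >> -sh : 0;   /* negative: logical shift right */
        }
        *rdalo = (uint32_t)rda;
        *rdahi = (uint32_t)(rda >> 32);
    }

    int main(void)
    {
        uint32_t lo = 0x80000000u, hi = 0;
        lsll(&lo, &hi, 4);                 /* shift the 64-bit pair left by 4 */
        printf("%08x:%08x\n", hi, lo);     /* prints 00000008:00000000 */
        return 0;
    }
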
11
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20221001162318.153420-14-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
15
---
10
---
16
target/arm/helper-mve.h | 6 +++
11
target/arm/cpu.h | 20 +++++++++++++-------
17
target/arm/translate.h | 1 +
12
target/arm/helper.c | 11 ++++++++---
18
target/arm/t32.decode | 16 +++++--
13
2 files changed, 21 insertions(+), 10 deletions(-)
19
target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++
20
target/arm/translate.c | 69 ++++++++++++++++++++++++++++++
21
5 files changed, 182 insertions(+), 3 deletions(-)
22
14
23
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
24
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/helper-mve.h
17
--- a/target/arm/cpu.h
26
+++ b/target/arm/helper-mve.h
18
+++ b/target/arm/cpu.h
27
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_secure(CPUARMState *env)
28
20
* Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
29
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
21
* This corresponds to the pseudocode EL2Enabled()
30
22
*/
31
+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
+static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
32
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
33
DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
34
DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
35
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
36
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
37
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
38
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
39
diff --git a/target/arm/translate.h b/target/arm/translate.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/translate.h
42
+++ b/target/arm/translate.h
43
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
44
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
45
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
46
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
47
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
48
49
/**
50
* arm_tbflags_from_tb:
51
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/t32.decode
54
+++ b/target/arm/t32.decode
55
@@ -XXX,XX +XXX,XX @@
56
&mcrr !extern cp opc1 crm rt rt2
57
58
&mve_shl_ri rdalo rdahi shim
59
+&mve_shl_rr rdalo rdahi rm
60
61
# rdahi: bits [3:1] from insn, bit 0 is 1
62
# rdalo: bits [3:1] from insn, bit 0 is 0
63
@@ -XXX,XX +XXX,XX @@
64
65
@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
66
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
67
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
68
+ &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
69
70
{
71
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
72
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
73
URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
74
SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
75
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
76
+
77
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
78
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
79
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
80
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
81
+ UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
82
+ SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
83
]
84
85
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
86
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
87
+
88
+ # v8.1M CSEL and friends
89
+ CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
90
}
91
{
92
MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
93
@@ -XXX,XX +XXX,XX @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
94
}
95
RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi
96
97
-# v8.1M CSEL and friends
98
-CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
99
-
100
# Data-processing (register-shifted register)
101
102
MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
103
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
104
index XXXXXXX..XXXXXXX 100644
105
--- a/target/arm/mve_helper.c
106
+++ b/target/arm/mve_helper.c
107
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
108
return rdm;
109
}
110
111
+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
112
+{
24
+{
113
+ return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
25
+ return arm_feature(env, ARM_FEATURE_EL2)
26
+ && (!secure || (env->cp15.scr_el3 & SCR_EEL2));
114
+}
27
+}
115
+
28
+
116
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
29
static inline bool arm_is_el2_enabled(CPUARMState *env)
30
{
31
- if (arm_feature(env, ARM_FEATURE_EL2)) {
32
- if (arm_is_secure_below_el3(env)) {
33
- return (env->cp15.scr_el3 & SCR_EEL2) != 0;
34
- }
35
- return true;
36
- }
37
- return false;
38
+ return arm_is_el2_enabled_secstate(env, arm_is_secure_below_el3(env));
39
}
40
41
#else
42
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_secure(CPUARMState *env)
43
return false;
44
}
45
46
+static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
117
+{
47
+{
118
+ return do_uqrshl_d(n, (int8_t)shift, false, NULL);
48
+ return false;
119
+}
49
+}
120
+
50
+
121
uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
51
static inline bool arm_is_el2_enabled(CPUARMState *env)
122
{
52
{
123
return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
53
return false;
124
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
54
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
55
* "for all purposes other than a direct read or write access of HCR_EL2."
56
* Not included here is HCR_RW.
57
*/
58
+uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure);
59
uint64_t arm_hcr_el2_eff(CPUARMState *env);
60
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
61
62
diff --git a/target/arm/helper.c b/target/arm/helper.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/arm/helper.c
65
+++ b/target/arm/helper.c
66
@@ -XXX,XX +XXX,XX @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
67
}
68
69
/*
70
- * Return the effective value of HCR_EL2.
71
+ * Return the effective value of HCR_EL2, at the given security state.
72
* Bits that are not included here:
73
* RW (read from SCR_EL3.RW as needed)
74
*/
75
-uint64_t arm_hcr_el2_eff(CPUARMState *env)
76
+uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
125
{
77
{
126
return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
78
uint64_t ret = env->cp15.hcr_el2;
79
80
- if (!arm_is_el2_enabled(env)) {
81
+ if (!arm_is_el2_enabled_secstate(env, secure)) {
82
/*
83
* "This register has no effect if EL2 is not enabled in the
84
* current Security state". This is ARMv8.4-SecEL2 speak for
85
@@ -XXX,XX +XXX,XX @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
86
return ret;
127
}
87
}
128
+
88
129
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
89
+uint64_t arm_hcr_el2_eff(CPUARMState *env)
130
+{
90
+{
131
+ return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
91
+ return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
132
+}
133
+
134
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
135
+{
136
+ return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
137
+}
138
+
139
+/* Operate on 64-bit values, but saturate at 48 bits */
140
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
141
+ bool round, uint32_t *sat)
142
+{
143
+ if (shift <= -48) {
144
+ /* Rounding the sign bit always produces 0. */
145
+ if (round) {
146
+ return 0;
147
+ }
148
+ return src >> 63;
149
+ } else if (shift < 0) {
150
+ if (round) {
151
+ src >>= -shift - 1;
152
+ return (src >> 1) + (src & 1);
153
+ }
154
+ return src >> -shift;
155
+ } else if (shift < 48) {
156
+ int64_t val = src << shift;
157
+ int64_t extval = sextract64(val, 0, 48);
158
+ if (!sat || val == extval) {
159
+ return extval;
160
+ }
161
+ } else if (!sat || src == 0) {
162
+ return 0;
163
+ }
164
+
165
+ *sat = 1;
166
+ return (1ULL << 47) - (src >= 0);
167
+}
168
+
169
+/* Operate on 64-bit values, but saturate at 48 bits */
170
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
171
+ bool round, uint32_t *sat)
172
+{
173
+ uint64_t val, extval;
174
+
175
+ if (shift <= -(48 + round)) {
176
+ return 0;
177
+ } else if (shift < 0) {
178
+ if (round) {
179
+ val = src >> (-shift - 1);
180
+ val = (val >> 1) + (val & 1);
181
+ } else {
182
+ val = src >> -shift;
183
+ }
184
+ extval = extract64(val, 0, 48);
185
+ if (!sat || val == extval) {
186
+ return extval;
187
+ }
188
+ } else if (shift < 48) {
189
+ uint64_t val = src << shift;
190
+ uint64_t extval = extract64(val, 0, 48);
191
+ if (!sat || val == extval) {
192
+ return extval;
193
+ }
194
+ } else if (!sat || src == 0) {
195
+ return 0;
196
+ }
197
+
198
+ *sat = 1;
199
+ return MAKE_64BIT_MASK(0, 48);
200
+}
201
+
202
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
203
+{
204
+ return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
205
+}
206
+
207
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
208
+{
209
+ return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
210
+}
211
diff --git a/target/arm/translate.c b/target/arm/translate.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/target/arm/translate.c
214
+++ b/target/arm/translate.c
215
@@ -XXX,XX +XXX,XX @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
216
return do_mve_shl_ri(s, a, gen_urshr64_i64);
217
}
218
219
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
220
+{
221
+ TCGv_i64 rda;
222
+ TCGv_i32 rdalo, rdahi;
223
+
224
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
225
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
226
+ return false;
227
+ }
228
+ if (a->rdahi == 15) {
229
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
230
+ return false;
231
+ }
232
+ if (!dc_isar_feature(aa32_mve, s) ||
233
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
234
+ a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
235
+ a->rm == a->rdahi || a->rm == a->rdalo) {
236
+ /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
237
+ unallocated_encoding(s);
238
+ return true;
239
+ }
240
+
241
+ rda = tcg_temp_new_i64();
242
+ rdalo = load_reg(s, a->rdalo);
243
+ rdahi = load_reg(s, a->rdahi);
244
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
245
+
246
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
247
+ fn(rda, cpu_env, rda, cpu_R[a->rm]);
248
+
249
+ tcg_gen_extrl_i64_i32(rdalo, rda);
250
+ tcg_gen_extrh_i64_i32(rdahi, rda);
251
+ store_reg(s, a->rdalo, rdalo);
252
+ store_reg(s, a->rdahi, rdahi);
253
+ tcg_temp_free_i64(rda);
254
+
255
+ return true;
256
+}
257
+
258
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
259
+{
260
+ return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
261
+}
262
+
263
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
264
+{
265
+ return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
266
+}
267
+
268
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
269
+{
270
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
271
+}
272
+
273
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
274
+{
275
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
276
+}
277
+
278
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
279
+{
280
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
281
+}
282
+
283
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
284
+{
285
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
286
+}
92
+}
287
+
93
+
288
/*
94
/*
289
* Multiply and multiply accumulate
95
* Corresponds to ARM pseudocode function ELIsInHost().
290
*/
96
*/
291
--
97
--
292
2.20.1
98
2.25.1
293
294
1
Implement the MVE VSRI and VSLI insns, which perform a
1
From: Richard Henderson <richard.henderson@linaro.org>
2
shift-and-insert operation.
3
2
3
Rename the argument to is_secure_ptr, and introduce a
4
local variable is_secure with the value. We only write
5
back to the pointer toward the end of the function.
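
Separately, for the shift-and-insert operation that VSRI and VSLI perform, the per-element behaviour (shown for a single byte lane, without the MVE predicate mask; vsri8/vsli8 are made-up names) is roughly:

    /* One-lane model of VSRI/VSLI: shift the source, insert it under a mask. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t vsri8(uint8_t d, uint8_t m, unsigned shift)   /* 1 <= shift <= 8 */
    {
        uint8_t mask = (uint8_t)(0xffu >> shift);  /* bits written by the shifted source */
        return (shift == 8) ? d : (uint8_t)(((m >> shift) & mask) | (d & ~mask));
    }

    static uint8_t vsli8(uint8_t d, uint8_t m, unsigned shift)   /* 0 <= shift <= 7 */
    {
        uint8_t mask = (uint8_t)(0xffu << shift);
        return (uint8_t)(((m << shift) & mask) | (d & ~mask));
    }

    int main(void)
    {
        printf("%02x\n", vsri8(0xab, 0xff, 4));   /* 0xaf: low nibble comes from m>>4 */
        printf("%02x\n", vsli8(0xab, 0x0f, 4));   /* 0xfb: high nibble comes from m<<4 */
        return 0;
    }
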
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20221001162318.153420-15-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
7
---
11
---
8
target/arm/helper-mve.h | 8 ++++++++
12
target/arm/ptw.c | 22 ++++++++++++----------
9
target/arm/mve.decode | 9 ++++++++
13
1 file changed, 12 insertions(+), 10 deletions(-)
10
target/arm/mve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-mve.c | 3 +++
12
4 files changed, 62 insertions(+)
13
14
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
--- a/target/arm/ptw.c
17
+++ b/target/arm/helper-mve.h
18
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
@@ -XXX,XX +XXX,XX @@ static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
19
DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
20
DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
/* Translate a S1 pagetable walk through S2 if needed. */
21
DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
22
+
23
- hwaddr addr, bool *is_secure,
23
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+ hwaddr addr, bool *is_secure_ptr,
24
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
ARMMMUFaultInfo *fi)
25
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
{
26
+
27
- ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
27
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+ bool is_secure = *is_secure_ptr;
28
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+ ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
29
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
30
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
31
if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
31
index XXXXXXX..XXXXXXX 100644
32
- !regime_translation_disabled(env, s2_mmu_idx, *is_secure)) {
32
--- a/target/arm/mve.decode
33
+ !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
33
+++ b/target/arm/mve.decode
34
GetPhysAddrResult s2 = {};
34
@@ -XXX,XX +XXX,XX @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
35
int ret;
35
36
36
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
37
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
37
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
38
- *is_secure, false, &s2, fi);
38
+
39
+ is_secure, false, &s2, fi);
39
+# Shift-and-insert
40
if (ret) {
40
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
41
assert(fi->type != ARMFault_None);
41
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
42
fi->s2addr = addr;
42
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
43
fi->stage2 = true;
43
+
44
fi->s1ptw = true;
44
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
45
- fi->s1ns = !*is_secure;
45
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
46
+ fi->s1ns = !is_secure;
46
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
47
return ~0;
47
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
48
}
48
index XXXXXXX..XXXXXXX 100644
49
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
49
--- a/target/arm/mve_helper.c
50
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
50
+++ b/target/arm/mve_helper.c
51
fi->s2addr = addr;
51
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
52
fi->stage2 = true;
52
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
53
fi->s1ptw = true;
53
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
54
- fi->s1ns = !*is_secure;
54
55
+ fi->s1ns = !is_secure;
55
+/* Shift-and-insert; we always work with 64 bits at a time */
56
return ~0;
56
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
57
}
57
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
58
58
+ void *vm, uint32_t shift) \
59
if (arm_is_secure_below_el3(env)) {
59
+ { \
60
/* Check if page table walk is to secure or non-secure PA space. */
60
+ uint64_t *d = vd, *m = vm; \
61
- if (*is_secure) {
61
+ uint16_t mask; \
62
- *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
62
+ uint64_t shiftmask; \
63
+ if (is_secure) {
63
+ unsigned e; \
64
+ is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
64
+ if (shift == 0 || shift == ESIZE * 8) { \
65
} else {
65
+ /* \
66
- *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
66
+ * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
67
+ is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
67
+ * The generic logic would give the right answer for 0 but \
68
}
68
+ * fails for <dt>. \
69
+ *is_secure_ptr = is_secure;
69
+ */ \
70
} else {
70
+ goto done; \
71
- assert(!*is_secure);
71
+ } \
72
+ assert(!is_secure);
72
+ assert(shift < ESIZE * 8); \
73
}
73
+ mask = mve_element_mask(env); \
74
74
+ /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
75
addr = s2.phys;
75
+ shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
76
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
77
+ uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
78
+ (d[H8(e)] & ~shiftmask); \
79
+ mergemask(&d[H8(e)], r, mask); \
80
+ } \
81
+done: \
82
+ mve_advance_vpt(env); \
83
+ }
84
+
85
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
86
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
87
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
88
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
89
+
90
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
91
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
92
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
93
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
94
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
95
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
96
+
97
/*
98
* Long shifts taking half-sized inputs from top or bottom of the input
99
* vector and producing a double-width result. ESIZE, TYPE are for
100
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/target/arm/translate-mve.c
103
+++ b/target/arm/translate-mve.c
104
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
105
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
106
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
107
108
+DO_2SHIFT(VSRI, vsri, false)
109
+DO_2SHIFT(VSLI, vsli, false)
110
+
111
#define DO_VSHLL(INSN, FN) \
112
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
113
{ \
114
--
76
--
115
2.20.1
77
2.25.1
116
117
1
Use dup_const() instead of bitfield_replicate() in
1
From: Richard Henderson <richard.henderson@linaro.org>
2
disas_simd_mod_imm().
3
2
4
(We can't replace the other use of bitfield_replicate() in this file,
3
This value is unused.
5
in logic_imm_decode_wmask(), because that location needs to handle 2
6
and 4 bit elements, which dup_const() cannot.)
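
For reference, the replication dup_const() performs for the sizes it can handle looks like this when sketched standalone (dup16 is an invented name, not QEMU's implementation):

    /* Replicate a 16-bit value across a 64-bit constant, as dup_const(MO_16, x) does. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dup16(uint16_t x)
    {
        return (uint64_t)x * 0x0001000100010001ull;   /* four copies of x */
    }

    int main(void)
    {
        printf("%016llx\n", (unsigned long long)dup16(0x3c00));
        /* prints 3c003c003c003c00, i.e. an FP16 1.0 in every lane */
        return 0;
    }
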
7
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Message-id: 20221001162318.153420-16-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
11
---
9
---
12
target/arm/translate-a64.c | 2 +-
10
target/arm/ptw.c | 5 ++---
13
1 file changed, 1 insertion(+), 1 deletion(-)
11
1 file changed, 2 insertions(+), 3 deletions(-)
14
12
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
13
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
15
--- a/target/arm/ptw.c
18
+++ b/target/arm/translate-a64.c
16
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
17
@@ -XXX,XX +XXX,XX @@ static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
20
/* FMOV (vector, immediate) - half-precision */
18
* s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
21
imm = vfp_expand_imm(MO_16, abcdefgh);
19
* combined attributes in MAIR_EL1 format.
22
/* now duplicate across the lanes */
20
*/
23
- imm = bitfield_replicate(imm, 16);
21
-static uint8_t combined_attrs_fwb(CPUARMState *env,
24
+ imm = dup_const(MO_16, imm);
22
- ARMCacheAttrs s1, ARMCacheAttrs s2)
23
+static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
24
{
25
switch (s2.attrs) {
26
case 7:
27
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
28
29
/* Combine memory type and cacheability attributes */
30
if (arm_hcr_el2_eff(env) & HCR_FWB) {
31
- ret.attrs = combined_attrs_fwb(env, s1, s2);
32
+ ret.attrs = combined_attrs_fwb(s1, s2);
25
} else {
33
} else {
26
imm = asimd_imm_const(abcdefgh, cmode, is_neg);
34
ret.attrs = combined_attrs_nofwb(env, s1, s2);
27
}
35
}
28
--
36
--
29
2.20.1
37
2.25.1
30
31
1
Implement the MVE VSHLL (vector shift left long) insn. This has two
1
From: Richard Henderson <richard.henderson@linaro.org>
2
encodings: the T1 encoding is the usual shift-by-immediate format,
3
and the T2 encoding is a special case where the shift count is always
4
equal to the element size.
5
2
3
These subroutines did not need ENV for anything except
4
retrieving the effective value of HCR anyway.
5
6
We have computed the effective value of HCR in the callers,
7
and this will be especially important for interpreting HCR
8
in a non-current security state.
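
Reduced to a toy, the shape of that change is: the caller looks up the effective HCR once for the security state being walked, and the helpers only consume the value they are handed. All names below are stand-ins for illustration, not QEMU APIs:

    /* Toy illustration of passing a precomputed control value down the call chain. */
    #include <stdint.h>
    #include <stdio.h>

    #define HCR_FWB (1ull << 46)   /* FEAT_S2FWB enable bit, used only as an example */

    /* Helper takes hcr as a parameter instead of re-deriving it from env. */
    static uint8_t combine_attrs(uint64_t hcr, uint8_t s1, uint8_t s2)
    {
        return (hcr & HCR_FWB) ? s2 : (uint8_t)(s1 & s2);   /* made-up combine rule */
    }

    /* Stand-in for looking up the effective HCR for a given security state. */
    static uint64_t effective_hcr(int is_secure)
    {
        return is_secure ? 0 : HCR_FWB;
    }

    int main(void)
    {
        int walk_secure = 0;                       /* security state of this walk */
        uint64_t hcr = effective_hcr(walk_secure); /* computed once, in the caller */
        printf("%#x\n", (unsigned)combine_attrs(hcr, 0x0f, 0x44));
        return 0;
    }
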
9
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20221001162318.153420-17-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-10-peter.maydell@linaro.org
9
---
14
---
10
target/arm/helper-mve.h | 9 +++++++
15
target/arm/ptw.c | 30 +++++++++++++++++-------------
11
target/arm/mve.decode | 53 +++++++++++++++++++++++++++++++++++---
16
1 file changed, 17 insertions(+), 13 deletions(-)
12
target/arm/mve_helper.c | 32 +++++++++++++++++++++++
13
target/arm/translate-mve.c | 15 +++++++++++
14
4 files changed, 105 insertions(+), 4 deletions(-)
15
17
16
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
18
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper-mve.h
20
--- a/target/arm/ptw.c
19
+++ b/target/arm/helper-mve.h
21
+++ b/target/arm/ptw.c
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
21
DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
22
DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
}
23
DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
26
-static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
27
+static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
28
{
29
/*
30
* For an S1 page table walk, the stage 1 attributes are always
31
@@ -XXX,XX +XXX,XX @@ static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
32
* when cacheattrs.attrs bit [2] is 0.
33
*/
34
assert(cacheattrs.is_s2_format);
35
- if (arm_hcr_el2_eff(env) & HCR_FWB) {
36
+ if (hcr & HCR_FWB) {
37
return (cacheattrs.attrs & 0x4) == 0;
38
} else {
39
return (cacheattrs.attrs & 0xc) == 0;
40
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
41
if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
42
!regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
43
GetPhysAddrResult s2 = {};
44
+ uint64_t hcr;
45
int ret;
46
47
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
48
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
49
fi->s1ns = !is_secure;
50
return ~0;
51
}
52
- if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
53
- ptw_attrs_are_device(env, s2.cacheattrs)) {
24
+
54
+
25
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
55
+ hcr = arm_hcr_el2_eff(env);
26
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
56
+ if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
27
             /*
              * PTW set and S1 walk touched S2 Device memory:
              * generate Permission fault.
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
  * ref: shared/translation/attrs/S2AttrDecode()
  * .../S2ConvertAttrsHints()
  */
-static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
 {
     uint8_t hiattr = extract32(s2attrs, 2, 2);
     uint8_t loattr = extract32(s2attrs, 0, 2);
     uint8_t hihint = 0, lohint = 0;
 
     if (hiattr != 0) { /* normal memory */
-        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
+        if (hcr & HCR_CD) { /* cache disabled */
             hiattr = loattr = 1; /* non-cacheable */
         } else {
             if (hiattr != 1) { /* Write-through or write-back */
@@ -XXX,XX +XXX,XX @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
  * combined attributes in MAIR_EL1 format.
  */
-static uint8_t combined_attrs_nofwb(CPUARMState *env,
+static uint8_t combined_attrs_nofwb(uint64_t hcr,
                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
 {
     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
 
-    s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);
+    s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
 
     s1lo = extract32(s1.attrs, 0, 4);
     s2lo = extract32(s2_mair_attrs, 0, 4);
@@ -XXX,XX +XXX,XX @@ static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
  * @s1: Attributes from stage 1 walk
  * @s2: Attributes from stage 2 walk
  */
-static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
+static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
 {
     ARMCacheAttrs ret;
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
     }
 
     /* Combine memory type and cacheability attributes */
-    if (arm_hcr_el2_eff(env) & HCR_FWB) {
+    if (hcr & HCR_FWB) {
         ret.attrs = combined_attrs_fwb(s1, s2);
     } else {
-        ret.attrs = combined_attrs_nofwb(env, s1, s2);
+        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
     }
 
     /*
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         ARMCacheAttrs cacheattrs1;
         ARMMMUIdx s2_mmu_idx;
         bool is_el0;
+        uint64_t hcr;
 
         ret = get_phys_addr_with_secure(env, address, access_type,
                                         s1_mmu_idx, is_secure, result, fi);
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         }
 
         /* Combine the S1 and S2 cache attributes. */
-        if (arm_hcr_el2_eff(env) & HCR_DC) {
+        hcr = arm_hcr_el2_eff(env);
+        if (hcr & HCR_DC) {
             /*
              * HCR.DC forces the first stage attributes to
              * Normal Non-Shareable,
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
             }
             cacheattrs1.shareability = 0;
         }
-        result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
+        result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                                 result->cacheattrs);
 
         /*
-- 
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Use arm_hcr_el2_eff_secstate instead of arm_hcr_el2_eff, so
that we use is_secure instead of the current security state.
These AT* operations have been broken since arm_hcr_el2_eff
gained a check for "el2 enabled" for Secure EL2.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
         }
     }
 
-    hcr_el2 = arm_hcr_el2_eff(env);
+    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
 
     switch (mmu_idx) {
     case ARMMMUIdx_Stage2:
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             return ~0;
         }
 
-        hcr = arm_hcr_el2_eff(env);
+        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
         if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
             /*
              * PTW set and S1 walk touched S2 Device memory:
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         }
 
         /* Combine the S1 and S2 cache attributes. */
-        hcr = arm_hcr_el2_eff(env);
+        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
         if (hcr & HCR_DC) {
             /*
              * HCR.DC forces the first stage attributes to
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         result->page_size = TARGET_PAGE_SIZE;
 
         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
-        hcr = arm_hcr_el2_eff(env);
+        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
         result->cacheattrs.shareability = 0;
         result->cacheattrs.is_s2_format = false;
         if (hcr & HCR_DC) {
-- 
2.25.1
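As an aside on the fix above: the difference between keying the effective
HCR_EL2 value off the CPU's current security state and off the regime actually
being walked can be seen in a toy model like the following. This is a
self-contained sketch for illustration only; the structure, the field names and
the single "Secure EL2 enabled" flag are assumptions, not the real
arm_hcr_el2_eff()/arm_hcr_el2_eff_secstate() implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy machine state: one HCR_EL2 value plus per-security-state EL2 enable. */
typedef struct {
    uint64_t hcr_el2;
    bool current_secure;   /* security state the CPU is executing in */
    bool sel2_enabled;     /* whether Secure EL2 is enabled */
} ToyCPU;

/* Effective HCR_EL2 for an explicitly given security state. */
static uint64_t hcr_eff_secstate(const ToyCPU *cpu, bool secure)
{
    /* If EL2 is not enabled for that security state, its bits have no effect. */
    if (secure && !cpu->sel2_enabled) {
        return 0;
    }
    return cpu->hcr_el2;
}

/* The buggy pattern: always keying off the *current* security state. */
static uint64_t hcr_eff_current(const ToyCPU *cpu)
{
    return hcr_eff_secstate(cpu, cpu->current_secure);
}

int main(void)
{
    /* CPU runs in Secure state but the walk is for the Non-secure regime. */
    ToyCPU cpu = { .hcr_el2 = 1ull << 12 /* pretend this is HCR_DC */,
                   .current_secure = true, .sel2_enabled = false };
    printf("current-state view: %#llx (DC lost)\n",
           (unsigned long long)hcr_eff_current(&cpu));
    printf("regime view (NS):   %#llx (DC honoured)\n",
           (unsigned long long)hcr_eff_secstate(&cpu, false));
    return 0;
}

In other words, an AT-style lookup can be asked to walk a regime whose
security state is not the one the CPU is currently in, which is why the
effective HCR_EL2 has to be computed from the regime's is_secure.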
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 138 +++++++++++++++++++++++++----------------------
 1 file changed, 74 insertions(+), 64 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
     return ret;
 }
 
+/*
+ * MMU disabled.  S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.S1DisabledOutput().
+ */
+static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
+                                   MMUAccessType access_type,
+                                   ARMMMUIdx mmu_idx, bool is_secure,
+                                   GetPhysAddrResult *result,
+                                   ARMMMUFaultInfo *fi)
+{
+    uint64_t hcr;
+    uint8_t memattr;
+
+    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+        int r_el = regime_el(env, mmu_idx);
+        if (arm_el_is_aa64(env, r_el)) {
+            int pamax = arm_pamax(env_archcpu(env));
+            uint64_t tcr = env->cp15.tcr_el[r_el];
+            int addrtop, tbi;
+
+            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+            if (access_type == MMU_INST_FETCH) {
+                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+            }
+            tbi = (tbi >> extract64(address, 55, 1)) & 1;
+            addrtop = (tbi ? 55 : 63);
+
+            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+                fi->type = ARMFault_AddressSize;
+                fi->level = 0;
+                fi->stage2 = false;
+                return 1;
+            }
+
+            /*
+             * When TBI is disabled, we've just validated that all of the
+             * bits above PAMax are zero, so logically we only need to
+             * clear the top byte for TBI.  But it's clearer to follow
+             * the pseudocode set of addrdesc.paddress.
+             */
+            address = extract64(address, 0, 52);
+        }
+    }
+
+    result->phys = address;
+    result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    result->page_size = TARGET_PAGE_SIZE;
+
+    /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    result->cacheattrs.shareability = 0;
+    result->cacheattrs.is_s2_format = false;
+    if (hcr & HCR_DC) {
+        if (hcr & HCR_DCT) {
+            memattr = 0xf0;    /* Tagged, Normal, WB, RWA */
+        } else {
+            memattr = 0xff;    /* Normal, WB, RWA */
+        }
+    } else if (access_type == MMU_INST_FETCH) {
+        if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+            memattr = 0xee;    /* Normal, WT, RA, NT */
+        } else {
+            memattr = 0x44;    /* Normal, NC, No */
+        }
+        result->cacheattrs.shareability = 2; /* outer sharable */
+    } else {
+        memattr = 0x00;        /* Device, nGnRnE */
+    }
+    result->cacheattrs.attrs = memattr;
+    return 0;
+}
+
 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                bool is_secure, GetPhysAddrResult *result,
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
     /* Definitely a real MMU, not an MPU */
 
     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
-        uint64_t hcr;
-        uint8_t memattr;
-
-        /*
-         * MMU disabled.  S1 addresses within aa64 translation regimes are
-         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
-         */
-        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
-            int r_el = regime_el(env, mmu_idx);
-            if (arm_el_is_aa64(env, r_el)) {
-                int pamax = arm_pamax(env_archcpu(env));
-                uint64_t tcr = env->cp15.tcr_el[r_el];
-                int addrtop, tbi;
-
-                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
-                if (access_type == MMU_INST_FETCH) {
-                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
-                }
-                tbi = (tbi >> extract64(address, 55, 1)) & 1;
-                addrtop = (tbi ? 55 : 63);
-
-                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
-                    fi->type = ARMFault_AddressSize;
-                    fi->level = 0;
-                    fi->stage2 = false;
-                    return 1;
-                }
-
-                /*
-                 * When TBI is disabled, we've just validated that all of the
-                 * bits above PAMax are zero, so logically we only need to
-                 * clear the top byte for TBI.  But it's clearer to follow
-                 * the pseudocode set of addrdesc.paddress.
-                 */
-                address = extract64(address, 0, 52);
-            }
-        }
-        result->phys = address;
-        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        result->page_size = TARGET_PAGE_SIZE;
-
-        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
-        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-        result->cacheattrs.shareability = 0;
-        result->cacheattrs.is_s2_format = false;
-        if (hcr & HCR_DC) {
-            if (hcr & HCR_DCT) {
-                memattr = 0xf0;    /* Tagged, Normal, WB, RWA */
-            } else {
-                memattr = 0xff;    /* Normal, WB, RWA */
-            }
-        } else if (access_type == MMU_INST_FETCH) {
-            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
-                memattr = 0xee;    /* Normal, WT, RA, NT */
-            } else {
-                memattr = 0x44;    /* Normal, NC, No */
-            }
-            result->cacheattrs.shareability = 2; /* outer sharable */
-        } else {
-            memattr = 0x00;        /* Device, nGnRnE */
-        }
-        result->cacheattrs.attrs = memattr;
-        return 0;
+        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
+                                      is_secure, result, fi);
     }
-
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                   is_secure, false, result, fi);
-- 
2.25.1
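For readers following the bounds check that moved into get_phys_addr_disabled()
above: with the MMU disabled, an AArch64 S1 address must still have every bit
between PAMax and the "address top" (bit 55 when TBI applies, bit 63 otherwise)
clear, or an Address Size fault is reported. A minimal standalone sketch of
that check follows; the hard-coded 48-bit PAMax and the local extract64() are
assumptions for the example, not values or helpers taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same contract as QEMU's extract64(): take 'length' bits starting at 'start'. */
static uint64_t extract64(uint64_t value, int start, int length)
{
    uint64_t mask = (length == 64) ? ~0ULL : ((1ULL << length) - 1);
    return (value >> start) & mask;
}

/* True if any bit in [pamax, addrtop] is set, i.e. the address is faulted. */
static bool out_of_range(uint64_t address, int pamax, bool tbi)
{
    int addrtop = tbi ? 55 : 63;
    return extract64(address, pamax, addrtop - pamax + 1) != 0;
}

int main(void)
{
    const int pamax = 48;                    /* illustrative PA size */
    uint64_t ok   = 0x0000123456789000ULL;   /* bits above 47 clear: always fine */
    uint64_t tag  = 0xAB00123456789000ULL;   /* tag in the top byte only */
    uint64_t junk = 0x0080123456789000ULL;   /* bit 55 set: bad with or without TBI */

    printf("ok  : tbi=0 %d, tbi=1 %d\n",
           out_of_range(ok, pamax, false), out_of_range(ok, pamax, true));
    printf("tag : tbi=0 %d, tbi=1 %d\n",
           out_of_range(tag, pamax, false), out_of_range(tag, pamax, true));
    printf("junk: tbi=0 %d, tbi=1 %d\n",
           out_of_range(junk, pamax, false), out_of_range(junk, pamax, true));
    return 0;
}

The tagged address passes only when TBI is in effect, because the checked
range then stops at bit 55 and the top byte is ignored.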
From: Richard Henderson <richard.henderson@linaro.org>

Do not apply memattr or shareability for Stage2 translations.
Make sure to apply HCR_{DC,DCT} only to Regime_EL10, per the
pseudocode in AArch64.S1DisabledOutput.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221001162318.153420-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 48 +++++++++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
 {
-    uint64_t hcr;
-    uint8_t memattr;
+    uint8_t memattr = 0x00;    /* Device nGnRnE */
+    uint8_t shareability = 0;  /* non-sharable */
 
     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
         int r_el = regime_el(env, mmu_idx);
+
         if (arm_el_is_aa64(env, r_el)) {
             int pamax = arm_pamax(env_archcpu(env));
             uint64_t tcr = env->cp15.tcr_el[r_el];
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
              */
             address = extract64(address, 0, 52);
         }
+
+        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+        if (r_el == 1) {
+            uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+            if (hcr & HCR_DC) {
+                if (hcr & HCR_DCT) {
+                    memattr = 0xf0;    /* Tagged, Normal, WB, RWA */
+                } else {
+                    memattr = 0xff;    /* Normal, WB, RWA */
+                }
+            }
+        }
+        if (memattr == 0 && access_type == MMU_INST_FETCH) {
+            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+                memattr = 0xee;    /* Normal, WT, RA, NT */
+            } else {
+                memattr = 0x44;    /* Normal, NC, No */
+            }
+            shareability = 2;      /* outer sharable */
+        }
+        result->cacheattrs.is_s2_format = false;
     }
 
     result->phys = address;
     result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
     result->page_size = TARGET_PAGE_SIZE;
-
-    /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
-    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-    result->cacheattrs.shareability = 0;
-    result->cacheattrs.is_s2_format = false;
-    if (hcr & HCR_DC) {
-        if (hcr & HCR_DCT) {
-            memattr = 0xf0;    /* Tagged, Normal, WB, RWA */
-        } else {
-            memattr = 0xff;    /* Normal, WB, RWA */
-        }
-    } else if (access_type == MMU_INST_FETCH) {
-        if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
-            memattr = 0xee;    /* Normal, WT, RA, NT */
-        } else {
-            memattr = 0x44;    /* Normal, NC, No */
-        }
-        result->cacheattrs.shareability = 2; /* outer sharable */
-    } else {
-        memattr = 0x00;        /* Device, nGnRnE */
-    }
+    result->cacheattrs.shareability = shareability;
     result->cacheattrs.attrs = memattr;
     return 0;
 }
-- 
2.25.1
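The memattr byte values used above are MAIR-format attribute encodings; the
lookup below simply restates the comments from the patch, for reference only
(it is not a general MAIR_ELx decoder).

#include <stdint.h>
#include <stdio.h>

/* Meanings of the specific attribute bytes used in the patch above. */
static const char *describe_memattr(uint8_t attr)
{
    switch (attr) {
    case 0x00: return "Device, nGnRnE";
    case 0x44: return "Normal, Non-cacheable";
    case 0xee: return "Normal, Write-through, Read-allocate, Non-transient";
    case 0xf0: return "Tagged Normal, Write-back, Read/Write-allocate";
    case 0xff: return "Normal, Write-back, Read/Write-allocate";
    default:   return "other";
    }
}

int main(void)
{
    const uint8_t attrs[] = { 0x00, 0x44, 0xee, 0xf0, 0xff };
    for (unsigned i = 0; i < sizeof(attrs); i++) {
        printf("0x%02x -> %s\n", attrs[i], describe_memattr(attrs[i]));
    }
    return 0;
}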
From: Richard Henderson <richard.henderson@linaro.org>

Adjust GetPhysAddrResult to fill in CPUTLBEntryFull,
so that it may be passed directly to tlb_set_page_full.

The change is large, but mostly mechanical.  The major
non-mechanical change is page_size -> lg_page_size.
Most of the time this is obvious, and is related to
TARGET_PAGE_BITS.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221001162318.153420-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h  |   5 +-
 target/arm/helper.c     |  12 +--
 target/arm/m_helper.c   |  20 ++---
 target/arm/ptw.c        | 179 ++++++++++++++++++++--------------------
 target/arm/tlb_helper.c |   9 +-
 5 files changed, 111 insertions(+), 114 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMCacheAttrs {
 
 /* Fields that are valid upon success. */
 typedef struct GetPhysAddrResult {
-    hwaddr phys;
-    target_ulong page_size;
-    int prot;
-    MemTxAttrs attrs;
+    CPUTLBEntryFull f;
     ARMCacheAttrs cacheattrs;
 } GetPhysAddrResult;
 
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         /* Create a 64-bit PAR */
         par64 = (1 << 11); /* LPAE bit always set */
         if (!ret) {
-            par64 |= res.phys & ~0xfffULL;
-            if (!res.attrs.secure) {
+            par64 |= res.f.phys_addr & ~0xfffULL;
+            if (!res.f.attrs.secure) {
                 par64 |= (1 << 9); /* NS */
             }
             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
          */
         if (!ret) {
             /* We do not set any attribute bits in the PAR */
-            if (res.page_size == (1 << 24)
+            if (res.f.lg_page_size == 24
                 && arm_feature(env, ARM_FEATURE_V7)) {
-                par64 = (res.phys & 0xff000000) | (1 << 1);
+                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
             } else {
-                par64 = res.phys & 0xfffff000;
+                par64 = res.f.phys_addr & 0xfffff000;
             }
-            if (!res.attrs.secure) {
+            if (!res.f.attrs.secure) {
                 par64 |= (1 << 9); /* NS */
             }
         } else {
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
         }
         goto pend_fault;
     }
-    address_space_stl_le(arm_addressspace(cs, res.attrs), res.phys, value,
-                         res.attrs, &txres);
+    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
+                         value, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to write the data */
         if (mode == STACK_LAZYFP) {
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
         goto pend_fault;
     }
 
-    value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
-                              res.attrs, &txres);
+    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
+                              res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to read the data */
         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
         return false;
     }
-    *insn = address_space_lduw_le(arm_addressspace(cs, res.attrs), res.phys,
-                                  res.attrs, &txres);
+    *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
+                                  res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
         }
         return false;
     }
-    value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
-                              res.attrs, &txres);
+    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
+                              res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to read the data */
         qemu_log_mask(CPU_LOG_INT,
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
         } else {
             mrvalid = true;
         }
-        r = res.prot & PAGE_READ;
-        rw = res.prot & PAGE_WRITE;
+        r = res.f.prot & PAGE_READ;
+        rw = res.f.prot & PAGE_WRITE;
     } else {
         r = false;
         rw = false;
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             assert(!is_secure);
         }
 
-        addr = s2.phys;
+        addr = s2.f.phys_addr;
     }
     return addr;
 }
675
+ result->f.prot & PAGE_WRITE ? 'w' : '-',
676
+ result->f.prot & PAGE_EXEC ? 'x' : '-');
677
678
return ret;
679
}
680
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
681
bool ret;
682
683
ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
684
- *attrs = res.attrs;
685
+ *attrs = res.f.attrs;
686
687
if (ret) {
688
return -1;
689
}
690
- return res.phys;
691
+ return res.f.phys_addr;
692
}
693
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
694
index XXXXXXX..XXXXXXX 100644
695
--- a/target/arm/tlb_helper.c
696
+++ b/target/arm/tlb_helper.c
697
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
698
* target page size are handled specially, so for those we
699
* pass in the exact addresses.
700
*/
701
- if (res.page_size >= TARGET_PAGE_SIZE) {
702
- res.phys &= TARGET_PAGE_MASK;
703
+ if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
704
+ res.f.phys_addr &= TARGET_PAGE_MASK;
705
address &= TARGET_PAGE_MASK;
706
}
707
/* Notice and record tagged memory. */
708
if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
709
- arm_tlb_mte_tagged(&res.attrs) = true;
710
+ arm_tlb_mte_tagged(&res.f.attrs) = true;
711
}
712
713
- tlb_set_page_with_attrs(cs, address, res.phys, res.attrs,
714
- res.prot, mmu_idx, res.page_size);
715
+ tlb_set_page_full(cs, mmu_idx, address, &res.f);
716
return true;
717
} else if (probe) {
718
return false;
--
2.25.1
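The tlb_helper.c hunk above switches arm_cpu_tlb_fill() from tlb_set_page_with_attrs() to tlb_set_page_full(), handing the core TLB code the CPUTLBEntryFull that the walk result now carries. As a rough sketch of the new calling convention (illustration only; the wrapper function and the attribute/protection values are invented, and only the structure fields and tlb_set_page_full() itself come from the hunk; it assumes the usual QEMU-internal headers that a target's tlb_fill hook already has in scope):

/*
 * Sketch: populate a CPUTLBEntryFull and install it with one call,
 * instead of passing phys/attrs/prot/page_size as separate arguments.
 */
static void example_fill(CPUState *cs, vaddr address, int mmu_idx)
{
    CPUTLBEntryFull full = {
        .phys_addr    = address & TARGET_PAGE_MASK,   /* translated PA */
        .attrs        = MEMTXATTRS_UNSPECIFIED,       /* transaction attributes */
        .prot         = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        .lg_page_size = TARGET_PAGE_BITS,             /* log2 of the page size */
    };

    tlb_set_page_full(cs, mmu_idx, address, &full);
}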
From: Jerome Forissier <jerome.forissier@linaro.org>

According to the Linux kernel booting.rst [1], CPTR_EL3.ESM and
SCR_EL3.EnTP2 must be initialized to 1 when EL3 is present and FEAT_SME
is advertised. This has to be taken care of when QEMU boots directly
into the kernel (i.e., "-M virt,secure=on -cpu max -kernel Image").

Cc: qemu-stable@nongnu.org
Fixes: 78cb9776662a ("target/arm: Enable SME for -cpu max")
Link: [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/arm64/booting.rst?h=v6.0#n321
Signed-off-by: Jerome Forissier <jerome.forissier@linaro.org>
Message-id: 20221003145641.1921467-1-jerome.forissier@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/boot.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
         if (cpu_isar_feature(aa64_sve, cpu)) {
             env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
         }
+        if (cpu_isar_feature(aa64_sme, cpu)) {
+            env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
+            env->cp15.scr_el3 |= SCR_ENTP2;
+        }
         /* AArch64 kernels never boot in secure mode */
         assert(!info->secure_boot);
         /* This hook is only supported for AArch32 currently:
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
36
float32 nan = a;
37
if (float32_is_signaling_nan(a, fpst)) {
38
float_raise(float_flag_invalid, fpst);
39
- nan = float32_silence_nan(a, fpst);
40
+ if (!fpst->default_nan_mode) {
41
+ nan = float32_silence_nan(a, fpst);
42
+ }
43
}
44
if (fpst->default_nan_mode) {
45
nan = float32_default_nan(fpst);
46
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
47
float64 nan = a;
48
if (float64_is_signaling_nan(a, fpst)) {
49
float_raise(float_flag_invalid, fpst);
50
- nan = float64_silence_nan(a, fpst);
51
+ if (!fpst->default_nan_mode) {
52
+ nan = float64_silence_nan(a, fpst);
53
+ }
54
}
55
if (fpst->default_nan_mode) {
56
nan = float64_default_nan(fpst);
57
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/vfp_helper.c
60
+++ b/target/arm/vfp_helper.c
61
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
62
float16 nan = f16;
63
if (float16_is_signaling_nan(f16, fpst)) {
64
float_raise(float_flag_invalid, fpst);
65
- nan = float16_silence_nan(f16, fpst);
66
+ if (!fpst->default_nan_mode) {
67
+ nan = float16_silence_nan(f16, fpst);
68
+ }
69
}
70
if (fpst->default_nan_mode) {
71
nan = float16_default_nan(fpst);
72
@@ -XXX,XX +XXX,XX @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
73
float32 nan = f32;
74
if (float32_is_signaling_nan(f32, fpst)) {
75
float_raise(float_flag_invalid, fpst);
76
- nan = float32_silence_nan(f32, fpst);
77
+ if (!fpst->default_nan_mode) {
78
+ nan = float32_silence_nan(f32, fpst);
79
+ }
80
}
81
if (fpst->default_nan_mode) {
82
nan = float32_default_nan(fpst);
83
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
84
float64 nan = f64;
85
if (float64_is_signaling_nan(f64, fpst)) {
86
float_raise(float_flag_invalid, fpst);
87
- nan = float64_silence_nan(f64, fpst);
88
+ if (!fpst->default_nan_mode) {
89
+ nan = float64_silence_nan(f64, fpst);
90
+ }
91
}
92
if (fpst->default_nan_mode) {
93
nan = float64_default_nan(fpst);
94
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
95
float16 nan = f16;
96
if (float16_is_signaling_nan(f16, s)) {
97
float_raise(float_flag_invalid, s);
98
- nan = float16_silence_nan(f16, s);
99
+ if (!s->default_nan_mode) {
100
+ nan = float16_silence_nan(f16, fpstp);
101
+ }
102
}
103
if (s->default_nan_mode) {
104
nan = float16_default_nan(s);
105
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
106
float32 nan = f32;
107
if (float32_is_signaling_nan(f32, s)) {
108
float_raise(float_flag_invalid, s);
109
- nan = float32_silence_nan(f32, s);
110
+ if (!s->default_nan_mode) {
111
+ nan = float32_silence_nan(f32, fpstp);
112
+ }
113
}
114
if (s->default_nan_mode) {
115
nan = float32_default_nan(s);
116
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
117
float64 nan = f64;
118
if (float64_is_signaling_nan(f64, s)) {
119
float_raise(float_flag_invalid, s);
120
- nan = float64_silence_nan(f64, s);
121
+ if (!s->default_nan_mode) {
122
+ nan = float64_silence_nan(f64, fpstp);
123
+ }
124
}
125
if (s->default_nan_mode) {
126
nan = float64_default_nan(s);
127
--
34
--
128
2.20.1
35
2.25.1
129
130
diff view generated by jsdifflib
1
In do_ldst(), the calculation of the offset needs to be based on the
1
Arm CPUs support some subset of the granule (page) sizes 4K, 16K and
2
size of the memory access, not the size of the elements in the
2
64K. The guest selects the one it wants using bits in the TCR_ELx
3
vector. This meant we were getting it wrong for the widening and
3
registers. If it tries to program these registers with a value that
4
narrowing variants of the various VLDR and VSTR insns.
4
is either reserved or which requests a size that the CPU does not
5
5
implement, the architecture requires that the CPU behaves as if the
6
field was programmed to some size that has been implemented.
7
Currently we don't implement this, and instead let the guest use any
8
granule size, even if the CPU ID register fields say it isn't
9
present.
10
11
Make aa64_va_parameters() check against the supported granule size
12
and force use of a different one if it is not implemented.
13
14
(A subsequent commit will make ARMVAParameters use the new enum
15
rather than the current pair of using16k/using64k bools.)
16
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 20221003162315.2833797-2-peter.maydell@linaro.org
8
Message-id: 20210628135835.6690-2-peter.maydell@linaro.org
9
---
20
---
10
target/arm/translate-mve.c | 17 +++++++++--------
21
target/arm/cpu.h | 33 +++++++++++++
11
1 file changed, 9 insertions(+), 8 deletions(-)
22
target/arm/internals.h | 9 ++++
12
23
target/arm/helper.c | 102 +++++++++++++++++++++++++++++++++++++----
13
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
24
3 files changed, 136 insertions(+), 8 deletions(-)
25
26
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
14
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-mve.c
28
--- a/target/arm/cpu.h
16
+++ b/target/arm/translate-mve.c
29
+++ b/target/arm/cpu.h
17
@@ -XXX,XX +XXX,XX @@ static bool mve_skip_first_beat(DisasContext *s)
30
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
31
return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
32
}
33
34
+static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
35
+{
36
+ return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
37
+}
38
+
39
+static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
40
+{
41
+ return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
42
+}
43
+
44
+static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
45
+{
46
+ return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
47
+}
48
+
49
+static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
50
+{
51
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
52
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
53
+}
54
+
55
+static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
56
+{
57
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
58
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
59
+}
60
+
61
+static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
62
+{
63
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
64
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
65
+}
66
+
67
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
68
{
69
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
70
diff --git a/target/arm/internals.h b/target/arm/internals.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/arm/internals.h
73
+++ b/target/arm/internals.h
74
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
75
return valid;
76
}
77
78
+/* Granule size (i.e. page size) */
79
+typedef enum ARMGranuleSize {
80
+ /* Same order as TG0 encoding */
81
+ Gran4K,
82
+ Gran64K,
83
+ Gran16K,
84
+ GranInvalid,
85
+} ARMGranuleSize;
86
+
87
/*
88
* Parameters of a given virtual address, as extracted from the
89
* translation control register (TCR) for a given regime.
90
diff --git a/target/arm/helper.c b/target/arm/helper.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/helper.c
93
+++ b/target/arm/helper.c
94
@@ -XXX,XX +XXX,XX @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
18
}
95
}
19
}
96
}
20
97
21
-static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
98
+static ARMGranuleSize tg0_to_gran_size(int tg)
22
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
99
+{
23
+ unsigned msize)
100
+ switch (tg) {
101
+ case 0:
102
+ return Gran4K;
103
+ case 1:
104
+ return Gran64K;
105
+ case 2:
106
+ return Gran16K;
107
+ default:
108
+ return GranInvalid;
109
+ }
110
+}
111
+
112
+static ARMGranuleSize tg1_to_gran_size(int tg)
113
+{
114
+ switch (tg) {
115
+ case 1:
116
+ return Gran16K;
117
+ case 2:
118
+ return Gran4K;
119
+ case 3:
120
+ return Gran64K;
121
+ default:
122
+ return GranInvalid;
123
+ }
124
+}
125
+
126
+static inline bool have4k(ARMCPU *cpu, bool stage2)
127
+{
128
+ return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
129
+ : cpu_isar_feature(aa64_tgran4, cpu);
130
+}
131
+
132
+static inline bool have16k(ARMCPU *cpu, bool stage2)
133
+{
134
+ return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
135
+ : cpu_isar_feature(aa64_tgran16, cpu);
136
+}
137
+
138
+static inline bool have64k(ARMCPU *cpu, bool stage2)
139
+{
140
+ return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
141
+ : cpu_isar_feature(aa64_tgran64, cpu);
142
+}
143
+
144
+static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
145
+ bool stage2)
146
+{
147
+ switch (gran) {
148
+ case Gran4K:
149
+ if (have4k(cpu, stage2)) {
150
+ return gran;
151
+ }
152
+ break;
153
+ case Gran16K:
154
+ if (have16k(cpu, stage2)) {
155
+ return gran;
156
+ }
157
+ break;
158
+ case Gran64K:
159
+ if (have64k(cpu, stage2)) {
160
+ return gran;
161
+ }
162
+ break;
163
+ case GranInvalid:
164
+ break;
165
+ }
166
+ /*
167
+ * If the guest selects a granule size that isn't implemented,
168
+ * the architecture requires that we behave as if it selected one
169
+ * that is (with an IMPDEF choice of which one to pick). We choose
170
+ * to implement the smallest supported granule size.
171
+ */
172
+ if (have4k(cpu, stage2)) {
173
+ return Gran4K;
174
+ }
175
+ if (have16k(cpu, stage2)) {
176
+ return Gran16K;
177
+ }
178
+ assert(have64k(cpu, stage2));
179
+ return Gran64K;
180
+}
181
+
182
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
183
ARMMMUIdx mmu_idx, bool data)
24
{
184
{
25
TCGv_i32 addr;
185
uint64_t tcr = regime_tcr(env, mmu_idx);
26
uint32_t offset;
186
bool epd, hpd, using16k, using64k, tsz_oob, ds;
27
@@ -XXX,XX +XXX,XX @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
187
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
28
return true;
188
+ ARMGranuleSize gran;
189
ARMCPU *cpu = env_archcpu(env);
190
+ bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
191
192
if (!regime_has_2_ranges(mmu_idx)) {
193
select = 0;
194
tsz = extract32(tcr, 0, 6);
195
- using64k = extract32(tcr, 14, 1);
196
- using16k = extract32(tcr, 15, 1);
197
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
198
+ gran = tg0_to_gran_size(extract32(tcr, 14, 2));
199
+ if (stage2) {
200
/* VTCR_EL2 */
201
hpd = false;
202
} else {
203
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
204
select = extract64(va, 55, 1);
205
if (!select) {
206
tsz = extract32(tcr, 0, 6);
207
+ gran = tg0_to_gran_size(extract32(tcr, 14, 2));
208
epd = extract32(tcr, 7, 1);
209
sh = extract32(tcr, 12, 2);
210
- using64k = extract32(tcr, 14, 1);
211
- using16k = extract32(tcr, 15, 1);
212
hpd = extract64(tcr, 41, 1);
213
} else {
214
- int tg = extract32(tcr, 30, 2);
215
- using16k = tg == 1;
216
- using64k = tg == 3;
217
tsz = extract32(tcr, 16, 6);
218
+ gran = tg1_to_gran_size(extract32(tcr, 30, 2));
219
epd = extract32(tcr, 23, 1);
220
sh = extract32(tcr, 28, 2);
221
hpd = extract64(tcr, 42, 1);
222
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
223
ds = extract64(tcr, 59, 1);
29
}
224
}
30
225
31
- offset = a->imm << a->size;
226
+ gran = sanitize_gran_size(cpu, gran, stage2);
32
+ offset = a->imm << msize;
227
+ using64k = gran == Gran64K;
33
if (!a->a) {
228
+ using16k = gran == Gran16K;
34
offset = -offset;
229
+
35
}
230
if (cpu_isar_feature(aa64_st, cpu)) {
36
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
231
max_tsz = 48 - using64k;
37
{ gen_helper_mve_vstrw, gen_helper_mve_vldrw },
232
} else {
38
{ NULL, NULL }
39
};
40
- return do_ldst(s, a, ldstfns[a->size][a->l]);
41
+ return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
42
}
43
44
-#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \
45
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE) \
46
static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
47
{ \
48
static MVEGenLdStFn * const ldstfns[2][2] = { \
49
{ gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
50
{ NULL, gen_helper_mve_##ULD }, \
51
}; \
52
- return do_ldst(s, a, ldstfns[a->u][a->l]); \
53
+ return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE); \
54
}
55
56
-DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
57
-DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
58
-DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
59
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
60
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
61
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)
62
63
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
64
{
65
--
233
--
66
2.20.1
234
2.25.1
67
68
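A standalone sketch of the rule the patch above implements (not QEMU code; the helpers below mirror tg0_to_gran_size() and sanitize_gran_size() from the patch, and the ID_AA64MMFR0_EL1 field offsets used are the architectural ones: TGRAN16 [23:20], TGRAN64 [27:24], TGRAN4 [31:28]). The subtle point is that TGRAN4 and TGRAN64 are signed fields, so a value of 0xf reads back as -1 and means "not implemented", which is why the new isar_feature tests compare with >= 0:

#include <stdbool.h>
#include <stdint.h>

typedef enum { Gran4K, Gran64K, Gran16K, GranInvalid } GranuleSize;

/* Signed field extract, in the spirit of FIELD_SEX64(). */
static int64_t sextract64_field(uint64_t v, int start, int len)
{
    uint64_t field = (v >> start) & ((1ULL << len) - 1);
    uint64_t sign = 1ULL << (len - 1);
    return (int64_t)((field ^ sign) - sign);   /* sign-extend the field */
}

static bool have4k(uint64_t mmfr0)  { return sextract64_field(mmfr0, 28, 4) >= 0; }
static bool have64k(uint64_t mmfr0) { return sextract64_field(mmfr0, 24, 4) >= 0; }
static bool have16k(uint64_t mmfr0) { return ((mmfr0 >> 20) & 0xf) >= 1; }

/* TCR_ELx.TG0 encoding: 0 selects 4K, 1 selects 64K, 2 selects 16K. */
static GranuleSize decode_tg0(int tg0)
{
    static const GranuleSize map[4] = { Gran4K, Gran64K, Gran16K, GranInvalid };
    return map[tg0 & 3];
}

/*
 * If the selected size is absent, behave as if an implemented one had
 * been selected; the IMPDEF choice here (as in the patch) is the smallest.
 */
static GranuleSize sanitize(GranuleSize g, uint64_t mmfr0)
{
    if ((g == Gran4K && have4k(mmfr0)) ||
        (g == Gran16K && have16k(mmfr0)) ||
        (g == Gran64K && have64k(mmfr0))) {
        return g;
    }
    return have4k(mmfr0) ? Gran4K : have16k(mmfr0) ? Gran16K : Gran64K;
}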
1
Implement the MVE saturating shift-right-and-narrow insns
1
Now we have an enum for the granule size, use it in the
2
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.
2
ARMVAParameters struct instead of the using16k/using64k bools.
3
4
do_srshr() is borrowed from sve_helper.c.
5
3
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
6
Message-id: 20221003162315.2833797-3-peter.maydell@linaro.org
9
---
7
---
10
target/arm/helper-mve.h | 30 +++++++++++
8
target/arm/internals.h | 23 +++++++++++++++++++++--
11
target/arm/mve.decode | 28 ++++++++++
9
target/arm/helper.c | 39 ++++++++++++++++++++++++++++-----------
12
target/arm/mve_helper.c | 104 +++++++++++++++++++++++++++++++++++++
10
target/arm/ptw.c | 8 +-------
13
target/arm/translate-mve.c | 12 +++++
11
3 files changed, 50 insertions(+), 20 deletions(-)
14
4 files changed, 174 insertions(+)
15
12
16
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
13
diff --git a/target/arm/internals.h b/target/arm/internals.h
17
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper-mve.h
15
--- a/target/arm/internals.h
19
+++ b/target/arm/helper-mve.h
16
+++ b/target/arm/internals.h
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
17
@@ -XXX,XX +XXX,XX @@ typedef enum ARMGranuleSize {
21
DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
GranInvalid,
22
DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
} ARMGranuleSize;
23
DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
24
+
21
+/**
25
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
+ * arm_granule_bits: Return address size of the granule in bits
26
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
+ *
27
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+ * Return the address size of the granule in bits. This corresponds
28
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
+ * to the pseudocode TGxGranuleBits().
29
+
26
+ */
30
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+static inline int arm_granule_bits(ARMGranuleSize gran)
31
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
34
+
35
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+
40
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
42
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
43
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
44
+
45
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
46
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
47
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
48
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
49
+
50
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
52
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
53
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
54
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/mve.decode
57
+++ b/target/arm/mve.decode
58
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
59
VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
60
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
61
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
62
+
63
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
64
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
65
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
66
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
67
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
68
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
69
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
70
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
71
+
72
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
73
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
74
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
75
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
76
+
77
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
78
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
79
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
80
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
81
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
82
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
83
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
84
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
85
+
86
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
87
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
88
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
89
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
90
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/mve_helper.c
93
+++ b/target/arm/mve_helper.c
94
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
95
}
96
}
97
98
+static inline int64_t do_srshr(int64_t x, unsigned sh)
99
+{
28
+{
100
+ if (likely(sh < 64)) {
29
+ switch (gran) {
101
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
30
+ case Gran64K:
102
+ } else {
31
+ return 16;
103
+ /* Rounding the sign bit always produces 0. */
32
+ case Gran16K:
104
+ return 0;
33
+ return 14;
34
+ case Gran4K:
35
+ return 12;
36
+ default:
37
+ g_assert_not_reached();
105
+ }
38
+ }
106
+}
39
+}
107
+
40
+
108
DO_VSHRN_ALL(vshrn, DO_SHR)
41
/*
109
DO_VSHRN_ALL(vrshrn, do_urshr)
42
* Parameters of a given virtual address, as extracted from the
110
+
43
* translation control register (TCR) for a given regime.
111
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
44
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
112
+ bool *satp)
45
bool tbi : 1;
46
bool epd : 1;
47
bool hpd : 1;
48
- bool using16k : 1;
49
- bool using64k : 1;
50
bool tsz_oob : 1; /* tsz has been clamped to legal range */
51
bool ds : 1;
52
+ ARMGranuleSize gran : 2;
53
} ARMVAParameters;
54
55
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
56
diff --git a/target/arm/helper.c b/target/arm/helper.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/helper.c
59
+++ b/target/arm/helper.c
60
@@ -XXX,XX +XXX,XX @@ typedef struct {
61
uint64_t length;
62
} TLBIRange;
63
64
+static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
113
+{
65
+{
114
+ if (val > max) {
66
+ /*
115
+ *satp = true;
67
+ * Note that the TLBI range TG field encoding differs from both
116
+ return max;
68
+ * TG0 and TG1 encodings.
117
+ } else if (val < min) {
69
+ */
118
+ *satp = true;
70
+ switch (tg) {
119
+ return min;
71
+ case 1:
120
+ } else {
72
+ return Gran4K;
121
+ return val;
73
+ case 2:
74
+ return Gran16K;
75
+ case 3:
76
+ return Gran64K;
77
+ default:
78
+ return GranInvalid;
122
+ }
79
+ }
123
+}
80
+}
124
+
81
+
125
+/* Saturating narrowing right shifts */
82
static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
126
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
83
uint64_t value)
127
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
84
{
128
+ void *vm, uint32_t shift) \
85
@@ -XXX,XX +XXX,XX @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
129
+ { \
86
uint64_t select = sextract64(value, 36, 1);
130
+ LTYPE *m = vm; \
87
ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
131
+ TYPE *d = vd; \
88
TLBIRange ret = { };
132
+ uint16_t mask = mve_element_mask(env); \
89
+ ARMGranuleSize gran;
133
+ bool qc = false; \
90
134
+ unsigned le; \
91
page_size_granule = extract64(value, 46, 2);
135
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
92
+ gran = tlbi_range_tg_to_gran_size(page_size_granule);
136
+ bool sat = false; \
93
137
+ TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
94
/* The granule encoded in value must match the granule in use. */
138
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
95
- if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
139
+ qc |= sat && (mask & 1 << (TOP * ESIZE)); \
96
+ if (gran != param.gran) {
140
+ } \
97
qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
141
+ if (qc) { \
98
page_size_granule);
142
+ env->vfp.qc[0] = qc; \
99
return ret;
143
+ } \
100
}
144
+ mve_advance_vpt(env); \
101
145
+ }
102
- page_shift = (page_size_granule - 1) * 2 + 12;
146
+
103
+ page_shift = arm_granule_bits(gran);
147
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
104
num = extract64(value, 39, 5);
148
+ DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
105
scale = extract64(value, 44, 2);
149
+ DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
106
exponent = (5 * scale) + 1;
150
+
107
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
151
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
108
ARMMMUIdx mmu_idx, bool data)
152
+ DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
109
{
153
+ DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
110
uint64_t tcr = regime_tcr(env, mmu_idx);
154
+
111
- bool epd, hpd, using16k, using64k, tsz_oob, ds;
155
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
112
+ bool epd, hpd, tsz_oob, ds;
156
+ DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
113
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
157
+ DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
114
ARMGranuleSize gran;
158
+
115
ARMCPU *cpu = env_archcpu(env);
159
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
116
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
160
+ DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
117
}
161
+ DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
118
162
+
119
gran = sanitize_gran_size(cpu, gran, stage2);
163
+#define DO_SHRN_SB(N, M, SATP) \
120
- using64k = gran == Gran64K;
164
+ do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
121
- using16k = gran == Gran16K;
165
+#define DO_SHRN_UB(N, M, SATP) \
122
166
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
123
if (cpu_isar_feature(aa64_st, cpu)) {
167
+#define DO_SHRUN_B(N, M, SATP) \
124
- max_tsz = 48 - using64k;
168
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
125
+ max_tsz = 48 - (gran == Gran64K);
169
+
126
} else {
170
+#define DO_SHRN_SH(N, M, SATP) \
127
max_tsz = 39;
171
+ do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
128
}
172
+#define DO_SHRN_UH(N, M, SATP) \
129
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
173
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
130
* adjust the effective value of DS, as documented.
174
+#define DO_SHRUN_H(N, M, SATP) \
131
*/
175
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
132
min_tsz = 16;
176
+
133
- if (using64k) {
177
+#define DO_RSHRN_SB(N, M, SATP) \
134
+ if (gran == Gran64K) {
178
+ do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
135
if (cpu_isar_feature(aa64_lva, cpu)) {
179
+#define DO_RSHRN_UB(N, M, SATP) \
136
min_tsz = 12;
180
+ do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
137
}
181
+#define DO_RSHRUN_B(N, M, SATP) \
138
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
182
+ do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
139
switch (mmu_idx) {
183
+
140
case ARMMMUIdx_Stage2:
184
+#define DO_RSHRN_SH(N, M, SATP) \
141
case ARMMMUIdx_Stage2_S:
185
+ do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
142
- if (using16k) {
186
+#define DO_RSHRN_UH(N, M, SATP) \
143
+ if (gran == Gran16K) {
187
+ do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
144
ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
188
+#define DO_RSHRUN_H(N, M, SATP) \
145
} else {
189
+ do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
146
ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
190
+
147
}
191
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
148
break;
192
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
149
default:
193
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
150
- if (using16k) {
194
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
151
+ if (gran == Gran16K) {
195
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
152
ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
196
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
153
} else {
197
+
154
ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
198
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
155
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
199
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
156
.tbi = tbi,
200
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
157
.epd = epd,
201
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
158
.hpd = hpd,
202
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
159
- .using16k = using16k,
203
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
160
- .using64k = using64k,
204
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
161
.tsz_oob = tsz_oob,
162
.ds = ds,
163
+ .gran = gran,
164
};
165
}
166
167
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
205
index XXXXXXX..XXXXXXX 100644
168
index XXXXXXX..XXXXXXX 100644
206
--- a/target/arm/translate-mve.c
169
--- a/target/arm/ptw.c
207
+++ b/target/arm/translate-mve.c
170
+++ b/target/arm/ptw.c
208
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
171
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
209
DO_2SHIFT_N(VSHRNT, vshrnt)
172
}
210
DO_2SHIFT_N(VRSHRNB, vrshrnb)
173
}
211
DO_2SHIFT_N(VRSHRNT, vrshrnt)
174
212
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
175
- if (param.using64k) {
213
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
176
- stride = 13;
214
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
177
- } else if (param.using16k) {
215
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
178
- stride = 11;
216
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
179
- } else {
217
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
180
- stride = 9;
218
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
181
- }
219
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
182
+ stride = arm_granule_bits(param.gran) - 3;
220
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
183
221
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
184
/*
222
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
185
* Note that QEMU ignores shareability and cacheability attributes,
223
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
224
--
186
--
225
2.20.1
187
2.25.1
226
227
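A quick worked example of the arithmetic behind the ptw.c change in the patch above, where the old hard-coded 9/11/13 values become stride = arm_granule_bits(param.gran) - 3 (illustration only): each translation table descriptor is 8 bytes, i.e. 2^3, so a granule-sized table holds 2^(granule_bits - 3) descriptors and each lookup level resolves granule_bits - 3 bits of the input address.

#include <stdio.h>

int main(void)
{
    /* Same mapping as arm_granule_bits(): Gran4K -> 12, Gran16K -> 14, Gran64K -> 16. */
    const struct { const char *name; int bits; } grans[] = {
        { "4K", 12 }, { "16K", 14 }, { "64K", 16 },
    };

    for (int i = 0; i < 3; i++) {
        int stride = grans[i].bits - 3;
        printf("%-3s granule: %d entries per table, stride %d bits per level\n",
               grans[i].name, 1 << stride, stride);
    }
    return 0;
}

For a 4K granule this prints 512 entries and a stride of 9, matching the constant the patch removes.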
FEAT_GTG is a change to the ID register ID_AA64MMFR0_EL1 so that it
can report a different set of supported granule (page) sizes for
stage 1 and stage 2 translation tables. As of commit c20281b2a5048
we already report the granule sizes that way for '-cpu max', and now
we also correctly make attempts to use unimplemented granule sizes
fail, so we can report the support of the feature in the
documentation.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221003162315.2833797-4-peter.maydell@linaro.org
---
 docs/system/arm/emulation.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
 - FEAT_FRINTTS (Floating-point to integer instructions)
 - FEAT_FlagM (Flag manipulation instructions v2)
 - FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_GTG (Guest translation granule size)
 - FEAT_HCX (Support for the HCRX_EL2 register)
 - FEAT_HPDS (Hierarchical permission disables)
 - FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
--
2.25.1
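One detail behind FEAT_GTG that the series relies on: ID_AA64MMFR0_EL1 gains separate TGRAN4_2/TGRAN16_2/TGRAN64_2 fields for stage-2 translation, where 0 means "use the stage-1 field", 1 means "not implemented at stage 2", and 2 or greater means "implemented". A small sketch of that rule (not QEMU code; it mirrors the isar_feature_aa64_tgran*_2 helpers added earlier in this series):

#include <stdbool.h>

static bool stage2_gran_supported(unsigned tgranX_2, bool stage1_supported)
{
    if (tgranX_2 == 0) {
        return stage1_supported;   /* field says: same as stage 1 */
    }
    return tgranX_2 >= 2;          /* 1 = absent at stage 2, >= 2 = present */
}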