Hi; here's a target-arm pullreq. Mostly this is RTH's FEAT_RME
series; there are also a handful of bug fixes including some
which aren't arm-specific but which it's convenient to include
here.

thanks
-- PMM

The following changes since commit b455ce4c2f300c8ba47cba7232dd03261368a4cb:

  Merge tag 'q800-for-8.1-pull-request' of https://github.com/vivier/qemu-m68k into staging (2023-06-22 10:18:32 +0200)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230623

for you to fetch changes up to 497fad38979c16b6412388927401e577eba43d26:

  pc-bios/keymaps: Use the official xkb name for Arabic layout, not the legacy synonym (2023-06-23 11:46:02 +0100)

----------------------------------------------------------------
target-arm queue:
 * Add (experimental) support for FEAT_RME
 * host-utils: Avoid using __builtin_subcll on buggy versions of Apple Clang
 * target/arm: Restructure has_vfp_d32 test
 * hw/arm/sbsa-ref: add ITS support in SBSA GIC
 * target/arm: Fix sve predicate store, 8 <= VQ <= 15
 * pc-bios/keymaps: Use the official xkb name for Arabic layout, not the legacy synonym

----------------------------------------------------------------
Peter Maydell (2):
      host-utils: Avoid using __builtin_subcll on buggy versions of Apple Clang
      pc-bios/keymaps: Use the official xkb name for Arabic layout, not the legacy synonym

Richard Henderson (23):
      target/arm: Add isar_feature_aa64_rme
      target/arm: Update SCR and HCR for RME
      target/arm: SCR_EL3.NS may be RES1
      target/arm: Add RME cpregs
      target/arm: Introduce ARMSecuritySpace
      include/exec/memattrs: Add two bits of space to MemTxAttrs
      target/arm: Adjust the order of Phys and Stage2 ARMMMUIdx
      target/arm: Introduce ARMMMUIdx_Phys_{Realm,Root}
      target/arm: Remove __attribute__((nonnull)) from ptw.c
      target/arm: Pipe ARMSecuritySpace through ptw.c
      target/arm: NSTable is RES0 for the RME EL3 regime
      target/arm: Handle Block and Page bits for security space
      target/arm: Handle no-execute for Realm and Root regimes
      target/arm: Use get_phys_addr_with_struct in S1_ptw_translate
      target/arm: Move s1_is_el0 into S1Translate
      target/arm: Use get_phys_addr_with_struct for stage2
      target/arm: Add GPC syndrome
      target/arm: Implement GPC exceptions
      target/arm: Implement the granule protection check
      target/arm: Add cpu properties for enabling FEAT_RME
      docs/system/arm: Document FEAT_RME
      target/arm: Restructure has_vfp_d32 test
      target/arm: Fix sve predicate store, 8 <= VQ <= 15

Shashi Mallela (1):
      hw/arm/sbsa-ref: add ITS support in SBSA GIC

 docs/system/arm/cpu-features.rst |  23 ++
 docs/system/arm/emulation.rst    |   1 +
 docs/system/arm/sbsa.rst         |  14 +
 include/exec/memattrs.h          |   9 +-
 include/qemu/compiler.h          |  13 +
 include/qemu/host-utils.h        |   2 +-
 target/arm/cpu.h                 | 151 ++++++++---
 target/arm/internals.h           |  27 ++
 target/arm/syndrome.h            |  10 +
 hw/arm/sbsa-ref.c                |  33 ++-
 target/arm/cpu.c                 |  32 ++-
 target/arm/helper.c              | 162 ++++++++++-
 target/arm/ptw.c                 | 570 +++++++++++++++++++++++++++++++--------
 target/arm/tcg/cpu64.c           |  53 ++++
 target/arm/tcg/tlb_helper.c      |  96 ++++++-
 target/arm/tcg/translate-sve.c   |   2 +-
 pc-bios/keymaps/meson.build      |   2 +-
 17 files changed, 1034 insertions(+), 166 deletions(-)
From: Richard Henderson <richard.henderson@linaro.org>

Add the missing field for ID_AA64PFR0, and the predicate.
Disable it if EL3 is forced off by the board or command-line.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 6 ++++++
 target/arm/cpu.c | 4 ++++
 2 files changed, 10 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64PFR0, SEL2, 36, 4)
FIELD(ID_AA64PFR0, MPAM, 40, 4)
FIELD(ID_AA64PFR0, AMU, 44, 4)
FIELD(ID_AA64PFR0, DIT, 48, 4)
+FIELD(ID_AA64PFR0, RME, 52, 4)
FIELD(ID_AA64PFR0, CSV2, 56, 4)
FIELD(ID_AA64PFR0, CSV3, 60, 4)

@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
}

+static inline bool isar_feature_aa64_rme(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0;
+}
+
static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
ID_AA64PFR0, EL3, 0);
+
+ /* Disable the realm management extension, which requires EL3. */
+ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
+ ID_AA64PFR0, RME, 0);
}

if (!cpu->has_el2) {
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Define the missing SCR and HCR bits, allow SCR_NSE and {SCR,HCR}_GPF
to be set, and invalidate TLBs when NSE changes.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    |  5 +++--
 target/arm/helper.c | 10 ++++++++--
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_TERR (1ULL << 36)
#define HCR_TEA (1ULL << 37)
#define HCR_MIOCNCE (1ULL << 38)
-/* RES0 bit 39 */
+#define HCR_TME (1ULL << 39)
#define HCR_APK (1ULL << 40)
#define HCR_API (1ULL << 41)
#define HCR_NV (1ULL << 42)
@@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_NV2 (1ULL << 45)
#define HCR_FWB (1ULL << 46)
#define HCR_FIEN (1ULL << 47)
-/* RES0 bit 48 */
+#define HCR_GPF (1ULL << 48)
#define HCR_TID4 (1ULL << 49)
#define HCR_TICAB (1ULL << 50)
#define HCR_AMVOFFEN (1ULL << 51)
@@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define SCR_TRNDR (1ULL << 40)
#define SCR_ENTP2 (1ULL << 41)
#define SCR_GPF (1ULL << 48)
+#define SCR_NSE (1ULL << 62)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_fgt, cpu)) {
valid_mask |= SCR_FGTEN;
}
+ if (cpu_isar_feature(aa64_rme, cpu)) {
+ valid_mask |= SCR_NSE | SCR_GPF;
+ }
} else {
valid_mask &= ~(SCR_RW | SCR_ST);
if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
env->cp15.scr_el3 = value;

/*
- * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
+ * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
* we must invalidate all TLBs below EL3.
*/
- if (changed & SCR_NS) {
+ if (changed & (SCR_NS | SCR_NSE)) {
tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
ARMMMUIdxBit_E20_0 |
ARMMMUIdxBit_E10_1 |
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
if (cpu_isar_feature(aa64_fwb, cpu)) {
valid_mask |= HCR_FWB;
}
+ if (cpu_isar_feature(aa64_rme, cpu)) {
+ valid_mask |= HCR_GPF;
+ }
}

if (cpu_isar_feature(any_evt, cpu)) {
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

With RME, SEL2 must also be present to support secure state.
The NS bit is RES1 if SEL2 is not present.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
}

if (cpu_isar_feature(aa64_sel2, cpu)) {
valid_mask |= SCR_EEL2;
+ } else if (cpu_isar_feature(aa64_rme, cpu)) {
+ /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
+ value |= SCR_NS;
}
if (cpu_isar_feature(aa64_mte, cpu)) {
valid_mask |= SCR_ATA;
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

This includes GPCCR, GPTBR, MFAR, the TLB flush insns PAALL, PAALLOS,
RPALOS, RPAOS, and the cache flush insns CIPAPA and CIGDPAPA.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    | 19 ++++++++++
 target/arm/helper.c | 84 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
uint64_t fgt_read[2]; /* HFGRTR, HDFGRTR */
uint64_t fgt_write[2]; /* HFGWTR, HDFGWTR */
uint64_t fgt_exec[1]; /* HFGITR */
+
+ /* RME registers */
+ uint64_t gpccr_el3;
+ uint64_t gptbr_el3;
+ uint64_t mfar_el3;
} cp15;

struct {
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
uint64_t reset_cbar;
uint32_t reset_auxcr;
bool reset_hivecs;
+ uint8_t reset_l0gptsz;

/*
* Intermediate values used during property parsing.
@@ -XXX,XX +XXX,XX @@ FIELD(MVFR1, SIMDFMAC, 28, 4)
FIELD(MVFR2, SIMDMISC, 0, 4)
FIELD(MVFR2, FPMISC, 4, 4)

+FIELD(GPCCR, PPS, 0, 3)
+FIELD(GPCCR, IRGN, 8, 2)
+FIELD(GPCCR, ORGN, 10, 2)
+FIELD(GPCCR, SH, 12, 2)
+FIELD(GPCCR, PGS, 14, 2)
+FIELD(GPCCR, GPC, 16, 1)
+FIELD(GPCCR, GPCP, 17, 1)
+FIELD(GPCCR, L0GPTSZ, 20, 4)
+
+FIELD(MFAR, FPA, 12, 40)
+FIELD(MFAR, NSE, 62, 1)
+FIELD(MFAR, NS, 63, 1)
+
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);

/* If adding a feature bit which corresponds to a Linux ELF
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo sme_reginfo[] = {
.access = PL2_RW, .accessfn = access_esm,
.type = ARM_CP_CONST, .resetvalue = 0 },
};
+
+static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush(cs);
+}
+
+static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* L0GPTSZ is RO; other bits not mentioned are RES0. */
+ uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
+ R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
+ R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
+
+ env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
+}
+
+static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
+ env_archcpu(env)->reset_l0gptsz);
+}
+
+static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static const ARMCPRegInfo rme_reginfo[] = {
+ { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
+ .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
+ .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
+ { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
+ { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
+ { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paall_write },
+ { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+ /*
+ * QEMU does not have a way to invalidate by physical address, thus
+ * invalidating a range of physical addresses is accomplished by
+ * flushing all tlb entries in the outer sharable domain,
+ * just like PAALLOS.
+ */
+ { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+ { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+ { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NOP },
+};
+
+static const ARMCPRegInfo rme_mte_reginfo[] = {
+ { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NOP },
+};
#endif /* TARGET_AARCH64 */

static void define_pmu_regs(ARMCPU *cpu)
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_fgt, cpu)) {
define_arm_cp_regs(cpu, fgt_reginfo);
}
+
+ if (cpu_isar_feature(aa64_rme, cpu)) {
+ define_arm_cp_regs(cpu, rme_reginfo);
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ define_arm_cp_regs(cpu, rme_mte_reginfo);
+ }
+ }
#endif

if (cpu_isar_feature(any_predinv, cpu)) {
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Introduce both the enumeration and functions to retrieve
the current state, and state outside of EL3.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    | 89 ++++++++++++++++++++++++++++++++++-----------
 target/arm/helper.c | 60 ++++++++++++++++++++++++++++++
 2 files changed, 127 insertions(+), 22 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int arm_feature(CPUARMState *env, int feature)

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);

-#if !defined(CONFIG_USER_ONLY)
/*
+ * ARM v9 security states.
+ * The ordering of the enumeration corresponds to the low 2 bits
+ * of the GPI value, and (except for Root) the concat of NSE:NS.
+ */
+
+typedef enum ARMSecuritySpace {
+ ARMSS_Secure = 0,
+ ARMSS_NonSecure = 1,
+ ARMSS_Root = 2,
+ ARMSS_Realm = 3,
+} ARMSecuritySpace;
+
+/* Return true if @space is secure, in the pre-v9 sense. */
+static inline bool arm_space_is_secure(ARMSecuritySpace space)
+{
+ return space == ARMSS_Secure || space == ARMSS_Root;
+}
+
+/* Return the ARMSecuritySpace for @secure, assuming !RME or EL[0-2]. */
+static inline ARMSecuritySpace arm_secure_to_space(bool secure)
+{
+ return secure ? ARMSS_Secure : ARMSS_NonSecure;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/**
+ * arm_security_space_below_el3:
+ * @env: cpu context
+ *
+ * Return the security space of exception levels below EL3, following
+ * an exception return to those levels. Unlike arm_security_space,
+ * this doesn't care about the current EL.
+ */
+ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env);
+
+/**
+ * arm_is_secure_below_el3:
+ * @env: cpu context
+ *
* Return true if exception levels below EL3 are in secure state,
- * or would be following an exception return to that level.
- * Unlike arm_is_secure() (which is always a question about the
- * _current_ state of the CPU) this doesn't care about the current
- * EL or mode.
+ * or would be following an exception return to those levels.
*/
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
- assert(!arm_feature(env, ARM_FEATURE_M));
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- return !(env->cp15.scr_el3 & SCR_NS);
- } else {
- /* If EL3 is not supported then the secure state is implementation
- * defined, in which case QEMU defaults to non-secure.
- */
- return false;
- }
+ ARMSecuritySpace ss = arm_security_space_below_el3(env);
+ return ss == ARMSS_Secure;
}

/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_el3_or_mon(CPUARMState *env)
return false;
}

-/* Return true if the processor is in secure state */
+/**
+ * arm_security_space:
+ * @env: cpu context
+ *
+ * Return the current security space of the cpu.
+ */
+ARMSecuritySpace arm_security_space(CPUARMState *env);
+
+/**
+ * arm_is_secure:
+ * @env: cpu context
+ *
+ * Return true if the processor is in secure state.
+ */
static inline bool arm_is_secure(CPUARMState *env)
{
- if (arm_feature(env, ARM_FEATURE_M)) {
- return env->v7m.secure;
- }
- if (arm_is_el3_or_mon(env)) {
- return true;
- }
- return arm_is_secure_below_el3(env);
+ return arm_space_is_secure(arm_security_space(env));
}

/*
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
}

#else
+static inline ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
+{
+ return ARMSS_NonSecure;
+}
+
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
return false;
}

+static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
+{
+ return ARMSS_NonSecure;
+}
+
static inline bool arm_is_secure(CPUARMState *env)
{
return false;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
}
}
#endif
+
+#ifndef CONFIG_USER_ONLY
+ARMSecuritySpace arm_security_space(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return arm_secure_to_space(env->v7m.secure);
+ }
+
+ /*
+ * If EL3 is not supported then the secure state is implementation
+ * defined, in which case QEMU defaults to non-secure.
+ */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ return ARMSS_NonSecure;
+ }
+
+ /* Check for AArch64 EL3 or AArch32 Mon. */
+ if (is_a64(env)) {
+ if (extract32(env->pstate, 2, 2) == 3) {
+ if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
+ return ARMSS_Root;
+ } else {
+ return ARMSS_Secure;
+ }
+ }
+ } else {
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ return ARMSS_Secure;
+ }
+ }
+
+ return arm_security_space_below_el3(env);
+}
+
+ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
+{
+ assert(!arm_feature(env, ARM_FEATURE_M));
+
+ /*
+ * If EL3 is not supported then the secure state is implementation
+ * defined, in which case QEMU defaults to non-secure.
+ */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ return ARMSS_NonSecure;
+ }
+
+ /*
+ * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
+ * Ignoring NSE when !NS retains consistency without having to
+ * modify other predicates.
+ */
+ if (!(env->cp15.scr_el3 & SCR_NS)) {
+ return ARMSS_Secure;
+ } else if (env->cp15.scr_el3 & SCR_NSE) {
+ return ARMSS_Realm;
+ } else {
+ return ARMSS_NonSecure;
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

We will need 2 bits to represent ARMSecurityState.

Do not attempt to replace or widen secure, even though it
logically overlaps the new field -- there are uses within
e.g. hw/block/pflash_cfi01.c, which don't know anything
specific about ARM.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/exec/memattrs.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs {
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
- /* ARM/AMBA: TrustZone Secure access
+ /*
+ * ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
+ /*
+ * ARM: ArmSecuritySpace. This partially overlaps secure, but it is
+ * easier to have both fields to assist code that does not understand
+ * ARMv9 RME, or no specific knowledge of ARM at all (e.g. pflash).
+ */
+ unsigned int space:2;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
/*
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

It will be helpful to have ARMMMUIdx_Phys_* to be in the same
relative order as ARMSecuritySpace enumerators. This requires
the adjustment to the nstable check. While there, check for being
in secure state rather than rely on clearing the low bit making
no change to non-secure state.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230620124418.805717-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 12 ++++++------
 target/arm/ptw.c | 12 +++++-------
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,

- /* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_NS = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_S = 9 | ARM_MMU_IDX_A,
-
/*
* Used for second stage of an S12 page table walk, or for descriptor
* loads during first stage of an S1 page table walk. Note that both
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2 = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2_S = 11 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
+
+ /* TLBs with 1-1 mapping to the physical address spaces. */
+ ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,

/*
* These are not allocated TLBs and are used only for AT system
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1);
- if (nstable) {
+ if (nstable && ptw->in_secure) {
/*
* Stage2_S -> Stage2 or Phys_S -> Phys_NS
- * Assert that the non-secure idx are even, and relative order.
+ * Assert the relative order of the secure/non-secure indexes.
*/
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
- ptw->in_ptw_idx &= ~1;
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
59
+ dev = DEVICE(&s->lpd.iou.uart[i]);
68
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
60
qdev_prop_set_chr(dev, "chardev", serial_hd(i));
69
+ ptw->in_ptw_idx += 1;
61
- object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
70
ptw->in_secure = false;
62
qdev_init_nofail(dev);
63
64
- mr = sysbus_mmio_get_region(s->lpd.iou.uart[i], 0);
65
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
66
memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
67
68
- sysbus_connect_irq(s->lpd.iou.uart[i], 0, pic[irqs[i]]);
69
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
70
g_free(name);
71
}
71
}
72
}
72
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
73
--
73
--
74
2.20.1
74
2.34.1
75
76
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add support for SD.
3
With FEAT_RME, there are four physical address spaces.
4
For now, just define the symbols, and mention them in
5
the same spots as the other Phys indexes in ptw.c.
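As an illustration (not part of the patch itself): because ARMSS_Secure is 0 and the Phys_* indexes are declared in the same order as the ARMSecuritySpace enumerators, the helpers added in the hunk below reduce to simple offset arithmetic:

    ARMMMUIdx idx = arm_space_to_phys(ARMSS_Realm);  /* ARMMMUIdx_Phys_S + 3 == ARMMMUIdx_Phys_Realm */
    ARMSecuritySpace ss = arm_phys_to_space(idx);    /* back to ARMSS_Realm */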
4
6
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200427181649.26851-11-edgar.iglesias@gmail.com
10
Message-id: 20230620124418.805717-9-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
hw/arm/xlnx-versal-virt.c | 46 +++++++++++++++++++++++++++++++++++++++
13
target/arm/cpu.h | 23 +++++++++++++++++++++--
12
1 file changed, 46 insertions(+)
14
target/arm/ptw.c | 10 ++++++++--
15
2 files changed, 29 insertions(+), 4 deletions(-)
13
16
14
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/xlnx-versal-virt.c
19
--- a/target/arm/cpu.h
17
+++ b/hw/arm/xlnx-versal-virt.c
20
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ typedef enum ARMMMUIdx {
19
#include "hw/arm/sysbus-fdt.h"
22
ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
20
#include "hw/arm/fdt.h"
23
21
#include "cpu.h"
24
/* TLBs with 1-1 mapping to the physical address spaces. */
22
+#include "hw/qdev-properties.h"
25
- ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
23
#include "hw/arm/xlnx-versal.h"
26
- ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
24
27
+ ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
25
#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt")
28
+ ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
26
@@ -XXX,XX +XXX,XX @@ static void fdt_add_zdma_nodes(VersalVirt *s)
29
+ ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A,
27
}
30
+ ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,
28
}
31
29
32
/*
30
+static void fdt_add_sd_nodes(VersalVirt *s)
33
* These are not allocated TLBs and are used only for AT system
34
@@ -XXX,XX +XXX,XX @@ typedef enum ARMASIdx {
35
ARMASIdx_TagS = 3,
36
} ARMASIdx;
37
38
+static inline ARMMMUIdx arm_space_to_phys(ARMSecuritySpace space)
31
+{
39
+{
32
+ const char clocknames[] = "clk_xin\0clk_ahb";
40
+ /* Assert the relative order of the physical mmu indexes. */
33
+ const char compat[] = "arasan,sdhci-8.9a";
41
+ QEMU_BUILD_BUG_ON(ARMSS_Secure != 0);
34
+ int i;
42
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS != ARMMMUIdx_Phys_S + ARMSS_NonSecure);
43
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Root != ARMMMUIdx_Phys_S + ARMSS_Root);
44
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Realm != ARMMMUIdx_Phys_S + ARMSS_Realm);
35
+
45
+
36
+ for (i = ARRAY_SIZE(s->soc.pmc.iou.sd) - 1; i >= 0; i--) {
46
+ return ARMMMUIdx_Phys_S + space;
37
+ uint64_t addr = MM_PMC_SD0 + MM_PMC_SD0_SIZE * i;
38
+ char *name = g_strdup_printf("/sdhci@%" PRIx64, addr);
39
+
40
+ qemu_fdt_add_subnode(s->fdt, name);
41
+
42
+ qemu_fdt_setprop_cells(s->fdt, name, "clocks",
43
+ s->phandle.clk_25Mhz, s->phandle.clk_25Mhz);
44
+ qemu_fdt_setprop(s->fdt, name, "clock-names",
45
+ clocknames, sizeof(clocknames));
46
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
47
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_SD0_IRQ_0 + i * 2,
48
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
49
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
50
+ 2, addr, 2, MM_PMC_SD0_SIZE);
51
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
52
+ g_free(name);
53
+ }
54
+}
47
+}
55
+
48
+
56
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
49
+static inline ARMSecuritySpace arm_phys_to_space(ARMMMUIdx idx)
57
{
58
Error *err = NULL;
59
@@ -XXX,XX +XXX,XX @@ static void create_virtio_regions(VersalVirt *s)
60
}
61
}
62
63
+static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
64
+{
50
+{
65
+ BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
51
+ assert(idx >= ARMMMUIdx_Phys_S && idx <= ARMMMUIdx_Phys_Realm);
66
+ DeviceState *card;
52
+ return idx - ARMMMUIdx_Phys_S;
67
+
68
+ card = qdev_create(qdev_get_child_bus(DEVICE(sd), "sd-bus"), TYPE_SD_CARD);
69
+ object_property_add_child(OBJECT(sd), "card[*]", OBJECT(card),
70
+ &error_fatal);
71
+ qdev_prop_set_drive(card, "drive", blk, &error_fatal);
72
+ object_property_set_bool(OBJECT(card), true, "realized", &error_fatal);
73
+}
53
+}
74
+
54
+
75
static void versal_virt_init(MachineState *machine)
55
static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
76
{
56
{
77
VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine);
57
/* If all the CLIDR.Ctypem bits are 0 there are no caches, and
78
int psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
58
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
79
+ int i;
59
index XXXXXXX..XXXXXXX 100644
80
60
--- a/target/arm/ptw.c
81
/*
61
+++ b/target/arm/ptw.c
82
* If the user provides an Operating System to be loaded, we expect them
62
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
83
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
63
case ARMMMUIdx_E3:
84
fdt_add_gic_nodes(s);
64
break;
85
fdt_add_timer_nodes(s);
65
86
fdt_add_zdma_nodes(s);
66
- case ARMMMUIdx_Phys_NS:
87
+ fdt_add_sd_nodes(s);
67
case ARMMMUIdx_Phys_S:
88
fdt_add_cpu_nodes(s, psci_conduit);
68
+ case ARMMMUIdx_Phys_NS:
89
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
69
+ case ARMMMUIdx_Phys_Root:
90
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
70
+ case ARMMMUIdx_Phys_Realm:
91
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
71
/* No translation for physical address spaces. */
92
memory_region_add_subregion_overlap(get_system_memory(),
72
return true;
93
0, &s->soc.fpd.apu.mr, 0);
73
94
74
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
95
+ /* Plugin SD cards. */
75
switch (mmu_idx) {
96
+ for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
76
case ARMMMUIdx_Stage2:
97
+ sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
77
case ARMMMUIdx_Stage2_S:
98
+ }
78
- case ARMMMUIdx_Phys_NS:
99
+
79
case ARMMMUIdx_Phys_S:
100
s->binfo.ram_size = machine->ram_size;
80
+ case ARMMMUIdx_Phys_NS:
101
s->binfo.loader_start = 0x0;
81
+ case ARMMMUIdx_Phys_Root:
102
s->binfo.get_dtb = versal_virt_get_dtb;
82
+ case ARMMMUIdx_Phys_Realm:
83
break;
84
85
default:
86
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
87
switch (mmu_idx) {
88
case ARMMMUIdx_Phys_S:
89
case ARMMMUIdx_Phys_NS:
90
+ case ARMMMUIdx_Phys_Root:
91
+ case ARMMMUIdx_Phys_Realm:
92
/* Checking Phys early avoids special casing later vs regime_el. */
93
return get_phys_addr_disabled(env, address, access_type, mmu_idx,
94
is_secure, result, fi);
103
--
95
--
104
2.20.1
96
2.34.1
105
97
106
98
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Fix typo xlnx-ve -> xlnx-versal.
3
This was added in 7e98e21c098 as part of a reorg in which
4
one of the arguments had been legally NULL, and this caught
5
actual instances. Now that the reorg is complete, this
6
serves little purpose.
4
7
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
11
Message-id: 20230620124418.805717-10-richard.henderson@linaro.org
9
Message-id: 20200427181649.26851-4-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
13
---
12
hw/arm/xlnx-versal-virt.c | 2 +-
14
target/arm/ptw.c | 6 ++----
13
1 file changed, 1 insertion(+), 1 deletion(-)
15
1 file changed, 2 insertions(+), 4 deletions(-)
14
16
15
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
17
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/xlnx-versal-virt.c
19
--- a/target/arm/ptw.c
18
+++ b/hw/arm/xlnx-versal-virt.c
20
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
21
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
20
psci_conduit = QEMU_PSCI_CONDUIT_SMC;
22
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
21
}
23
uint64_t address,
22
24
MMUAccessType access_type, bool s1_is_el0,
23
- sysbus_init_child_obj(OBJECT(machine), "xlnx-ve", &s->soc,
25
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
24
+ sysbus_init_child_obj(OBJECT(machine), "xlnx-versal", &s->soc,
26
- __attribute__((nonnull));
25
sizeof(s->soc), TYPE_XLNX_VERSAL);
27
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi);
26
object_property_set_link(OBJECT(&s->soc), OBJECT(machine->ram),
28
27
"ddr", &error_abort);
29
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
30
target_ulong address,
31
MMUAccessType access_type,
32
GetPhysAddrResult *result,
33
- ARMMMUFaultInfo *fi)
34
- __attribute__((nonnull));
35
+ ARMMMUFaultInfo *fi);
36
37
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
38
static const uint8_t pamax_map[] = {
28
--
39
--
29
2.20.1
40
2.34.1
30
41
31
42
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add support for the RTC.
3
Add input and output space members to S1Translate. Set and adjust
4
them in S1_ptw_translate, and at the various points at which we drop
5
secure state. Initialize the space in get_phys_addr; for now leave
6
get_phys_addr_with_secure considering only secure vs non-secure spaces.
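As an illustration (not part of the patch itself): after this change a caller seeds the walk with both the security space and the derived secure flag, as in the debug-walk hunk at the end of this patch:

    ARMSecuritySpace ss = arm_security_space(env);
    S1Translate ptw = {
        .in_mmu_idx = arm_mmu_idx(env),
        .in_space = ss,
        .in_secure = arm_space_is_secure(ss),
        .in_debug = true,
    };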
4
7
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20230620124418.805717-11-richard.henderson@linaro.org
8
Message-id: 20200427181649.26851-12-edgar.iglesias@gmail.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
hw/arm/xlnx-versal-virt.c | 22 ++++++++++++++++++++++
13
target/arm/ptw.c | 86 +++++++++++++++++++++++++++++++++++++++---------
12
1 file changed, 22 insertions(+)
14
1 file changed, 71 insertions(+), 15 deletions(-)
13
15
14
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
16
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/xlnx-versal-virt.c
18
--- a/target/arm/ptw.c
17
+++ b/hw/arm/xlnx-versal-virt.c
19
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@ static void fdt_add_sd_nodes(VersalVirt *s)
20
@@ -XXX,XX +XXX,XX @@
19
}
21
typedef struct S1Translate {
22
ARMMMUIdx in_mmu_idx;
23
ARMMMUIdx in_ptw_idx;
24
+ ARMSecuritySpace in_space;
25
bool in_secure;
26
bool in_debug;
27
bool out_secure;
28
bool out_rw;
29
bool out_be;
30
+ ARMSecuritySpace out_space;
31
hwaddr out_virt;
32
hwaddr out_phys;
33
void *out_host;
34
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
35
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
36
hwaddr addr, ARMMMUFaultInfo *fi)
37
{
38
+ ARMSecuritySpace space = ptw->in_space;
39
bool is_secure = ptw->in_secure;
40
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
41
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
42
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
43
.in_mmu_idx = s2_mmu_idx,
44
.in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
45
.in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
46
+ .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
47
+ : space == ARMSS_Realm ? ARMSS_Realm
48
+ : ARMSS_NonSecure),
49
.in_debug = true,
50
};
51
GetPhysAddrResult s2 = { };
52
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
53
ptw->out_phys = s2.f.phys_addr;
54
pte_attrs = s2.cacheattrs.attrs;
55
ptw->out_secure = s2.f.attrs.secure;
56
+ ptw->out_space = s2.f.attrs.space;
57
} else {
58
/* Regime is physical. */
59
ptw->out_phys = addr;
60
pte_attrs = 0;
61
ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
62
+ ptw->out_space = (s2_mmu_idx == ARMMMUIdx_Phys_S ? ARMSS_Secure
63
+ : space == ARMSS_Realm ? ARMSS_Realm
64
+ : ARMSS_NonSecure);
65
}
66
ptw->out_host = NULL;
67
ptw->out_rw = false;
68
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
69
ptw->out_rw = full->prot & PAGE_WRITE;
70
pte_attrs = full->pte_attrs;
71
ptw->out_secure = full->attrs.secure;
72
+ ptw->out_space = full->attrs.space;
73
#else
74
g_assert_not_reached();
75
#endif
76
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
77
}
78
} else {
79
/* Page tables are in MMIO. */
80
- MemTxAttrs attrs = { .secure = ptw->out_secure };
81
+ MemTxAttrs attrs = {
82
+ .secure = ptw->out_secure,
83
+ .space = ptw->out_space,
84
+ };
85
AddressSpace *as = arm_addressspace(cs, attrs);
86
MemTxResult result = MEMTX_OK;
87
88
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
89
#endif
90
} else {
91
/* Page tables are in MMIO. */
92
- MemTxAttrs attrs = { .secure = ptw->out_secure };
93
+ MemTxAttrs attrs = {
94
+ .secure = ptw->out_secure,
95
+ .space = ptw->out_space,
96
+ };
97
AddressSpace *as = arm_addressspace(cs, attrs);
98
MemTxResult result = MEMTX_OK;
99
100
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
101
* regime, because the attribute will already be non-secure.
102
*/
103
result->f.attrs.secure = false;
104
+ result->f.attrs.space = ARMSS_NonSecure;
105
}
106
result->f.phys_addr = phys_addr;
107
return false;
108
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
109
* regime, because the attribute will already be non-secure.
110
*/
111
result->f.attrs.secure = false;
112
+ result->f.attrs.space = ARMSS_NonSecure;
113
}
114
115
if (regime_is_stage2(mmu_idx)) {
116
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
117
*/
118
if (sattrs.ns) {
119
result->f.attrs.secure = false;
120
+ result->f.attrs.space = ARMSS_NonSecure;
121
} else if (!secure) {
122
/*
123
* NS access to S memory must fault.
124
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
125
bool is_secure = ptw->in_secure;
126
bool ret, ipa_secure;
127
ARMCacheAttrs cacheattrs1;
128
+ ARMSecuritySpace ipa_space;
129
bool is_el0;
130
uint64_t hcr;
131
132
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
133
134
ipa = result->f.phys_addr;
135
ipa_secure = result->f.attrs.secure;
136
+ ipa_space = result->f.attrs.space;
137
138
is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
139
ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
140
ptw->in_secure = ipa_secure;
141
+ ptw->in_space = ipa_space;
142
ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
143
144
/*
145
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
146
ARMMMUIdx s1_mmu_idx;
147
148
/*
149
- * The page table entries may downgrade secure to non-secure, but
150
- * cannot upgrade an non-secure translation regime's attributes
151
- * to secure.
152
+ * The page table entries may downgrade Secure to NonSecure, but
153
+ * cannot upgrade a NonSecure translation regime's attributes
154
+ * to Secure or Realm.
155
*/
156
result->f.attrs.secure = is_secure;
157
+ result->f.attrs.space = ptw->in_space;
158
159
switch (mmu_idx) {
160
case ARMMMUIdx_Phys_S:
161
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
162
163
default:
164
/* Single stage uses physical for ptw. */
165
- ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
166
+ ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
167
break;
168
}
169
170
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
171
S1Translate ptw = {
172
.in_mmu_idx = mmu_idx,
173
.in_secure = is_secure,
174
+ .in_space = arm_secure_to_space(is_secure),
175
};
176
return get_phys_addr_with_struct(env, &ptw, address, access_type,
177
result, fi);
178
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
179
MMUAccessType access_type, ARMMMUIdx mmu_idx,
180
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
181
{
182
- bool is_secure;
183
+ S1Translate ptw = {
184
+ .in_mmu_idx = mmu_idx,
185
+ };
186
+ ARMSecuritySpace ss;
187
188
switch (mmu_idx) {
189
case ARMMMUIdx_E10_0:
190
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
191
case ARMMMUIdx_Stage1_E1:
192
case ARMMMUIdx_Stage1_E1_PAN:
193
case ARMMMUIdx_E2:
194
- is_secure = arm_is_secure_below_el3(env);
195
+ ss = arm_security_space_below_el3(env);
196
break;
197
case ARMMMUIdx_Stage2:
198
+ /*
199
+ * For Secure EL2, we need this index to be NonSecure;
200
+ * otherwise this will already be NonSecure or Realm.
201
+ */
202
+ ss = arm_security_space_below_el3(env);
203
+ if (ss == ARMSS_Secure) {
204
+ ss = ARMSS_NonSecure;
205
+ }
206
+ break;
207
case ARMMMUIdx_Phys_NS:
208
case ARMMMUIdx_MPrivNegPri:
209
case ARMMMUIdx_MUserNegPri:
210
case ARMMMUIdx_MPriv:
211
case ARMMMUIdx_MUser:
212
- is_secure = false;
213
+ ss = ARMSS_NonSecure;
214
break;
215
- case ARMMMUIdx_E3:
216
case ARMMMUIdx_Stage2_S:
217
case ARMMMUIdx_Phys_S:
218
case ARMMMUIdx_MSPrivNegPri:
219
case ARMMMUIdx_MSUserNegPri:
220
case ARMMMUIdx_MSPriv:
221
case ARMMMUIdx_MSUser:
222
- is_secure = true;
223
+ ss = ARMSS_Secure;
224
+ break;
225
+ case ARMMMUIdx_E3:
226
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
227
+ cpu_isar_feature(aa64_rme, env_archcpu(env))) {
228
+ ss = ARMSS_Root;
229
+ } else {
230
+ ss = ARMSS_Secure;
231
+ }
232
+ break;
233
+ case ARMMMUIdx_Phys_Root:
234
+ ss = ARMSS_Root;
235
+ break;
236
+ case ARMMMUIdx_Phys_Realm:
237
+ ss = ARMSS_Realm;
238
break;
239
default:
240
g_assert_not_reached();
241
}
242
- return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
243
- is_secure, result, fi);
244
+
245
+ ptw.in_space = ss;
246
+ ptw.in_secure = arm_space_is_secure(ss);
247
+ return get_phys_addr_with_struct(env, &ptw, address, access_type,
248
+ result, fi);
20
}
249
}
21
250
22
+static void fdt_add_rtc_node(VersalVirt *s)
251
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
23
+{
252
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
24
+ const char compat[] = "xlnx,zynqmp-rtc";
25
+ const char interrupt_names[] = "alarm\0sec";
26
+ char *name = g_strdup_printf("/rtc@%x", MM_PMC_RTC);
27
+
28
+ qemu_fdt_add_subnode(s->fdt, name);
29
+
30
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
31
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_ALARM_IRQ,
32
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI,
33
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_SECONDS_IRQ,
34
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
35
+ qemu_fdt_setprop(s->fdt, name, "interrupt-names",
36
+ interrupt_names, sizeof(interrupt_names));
37
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
38
+ 2, MM_PMC_RTC, 2, MM_PMC_RTC_SIZE);
39
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
40
+ g_free(name);
41
+}
42
+
43
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
44
{
253
{
45
Error *err = NULL;
254
ARMCPU *cpu = ARM_CPU(cs);
46
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
255
CPUARMState *env = &cpu->env;
47
fdt_add_timer_nodes(s);
256
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
48
fdt_add_zdma_nodes(s);
257
+ ARMSecuritySpace ss = arm_security_space(env);
49
fdt_add_sd_nodes(s);
258
S1Translate ptw = {
50
+ fdt_add_rtc_node(s);
259
- .in_mmu_idx = arm_mmu_idx(env),
51
fdt_add_cpu_nodes(s, psci_conduit);
260
- .in_secure = arm_is_secure(env),
52
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
261
+ .in_mmu_idx = mmu_idx,
53
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
262
+ .in_space = ss,
263
+ .in_secure = arm_space_is_secure(ss),
264
.in_debug = true,
265
};
266
GetPhysAddrResult res = {};
54
--
267
--
55
2.20.1
268
2.34.1
56
57
1
Convert the Neon "load single structure to all lanes" insns to
1
From: Richard Henderson <richard.henderson@linaro.org>
2
decodetree.
3
2
3
Test in_space instead of in_secure so that we don't
4
switch out of Root space.
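As an illustration (not part of the patch itself): the reworked NSTable check in the hunk below only fires when the walk started in the Secure space, so an EL3 walk that is in Root space is not downgraded:

    if (ptw->in_space == ARMSS_Secure
        && !regime_is_stage2(mmu_idx)
        && extract32(tableattrs, 4, 1)) {
        /* NSTable set: Secure -> NonSecure for the rest of the walk */
        ptw->in_ptw_idx += 1;
        ptw->in_secure = false;
        ptw->in_space = ARMSS_NonSecure;
    }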
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20230620124418.805717-12-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-13-peter.maydell@linaro.org
7
---
10
---
8
target/arm/neon-ls.decode | 5 +++
11
target/arm/ptw.c | 28 ++++++++++++++--------------
9
target/arm/translate-neon.inc.c | 73 +++++++++++++++++++++++++++++++++
12
1 file changed, 14 insertions(+), 14 deletions(-)
10
target/arm/translate.c | 55 +------------------------
11
3 files changed, 80 insertions(+), 53 deletions(-)
12
13
13
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
14
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-ls.decode
16
--- a/target/arm/ptw.c
16
+++ b/target/arm/neon-ls.decode
17
+++ b/target/arm/ptw.c
17
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
18
19
{
19
VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
20
ARMCPU *cpu = env_archcpu(env);
20
vd=%vd_dp
21
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
21
+
22
- bool is_secure = ptw->in_secure;
22
+# Neon load single element to all lanes
23
int32_t level;
23
+
24
ARMVAParameters param;
24
+VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
25
uint64_t ttbr;
25
+ vd=%vd_dp
26
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
26
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
27
uint64_t descaddrmask;
27
index XXXXXXX..XXXXXXX 100644
28
bool aarch64 = arm_el_is_aa64(env, el);
28
--- a/target/arm/translate-neon.inc.c
29
uint64_t descriptor, new_descriptor;
29
+++ b/target/arm/translate-neon.inc.c
30
- bool nstable;
30
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
31
31
gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
32
/* TODO: This code does not support shareability levels. */
32
return true;
33
if (aarch64) {
33
}
34
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
34
+
35
descaddrmask = MAKE_64BIT_MASK(0, 40);
35
+static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
36
}
36
+{
37
descaddrmask &= ~indexmask_grainsize;
37
+ /* Neon load single structure to all lanes */
38
-
38
+ int reg, stride, vec_size;
39
- /*
39
+ int vd = a->vd;
40
- * Secure stage 1 accesses start with the page table in secure memory and
40
+ int size = a->size;
41
- * can be downgraded to non-secure at any step. Non-secure accesses
41
+ int nregs = a->n + 1;
42
- * remain non-secure. We implement this by just ORing in the NSTable/NS
42
+ TCGv_i32 addr, tmp;
43
- * bits at each step.
43
+
44
- * Stage 2 never gets this kind of downgrade.
44
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
45
- */
45
+ return false;
46
- tableattrs = is_secure ? 0 : (1 << 4);
46
+ }
47
+ tableattrs = 0;
47
+
48
48
+ /* UNDEF accesses to D16-D31 if they don't exist */
49
next_level:
49
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
50
descaddr |= (address >> (stride * (4 - level))) & indexmask;
50
+ return false;
51
descaddr &= ~7ULL;
51
+ }
52
- nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1);
52
+
53
- if (nstable && ptw->in_secure) {
53
+ if (size == 3) {
54
+ if (nregs != 4 || a->a == 0) {
55
+ return false;
56
+ }
57
+ /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
58
+ size = 2;
59
+ }
60
+ if (nregs == 1 && a->a == 1 && size == 0) {
61
+ return false;
62
+ }
63
+ if (nregs == 3 && a->a == 1) {
64
+ return false;
65
+ }
66
+
67
+ if (!vfp_access_check(s)) {
68
+ return true;
69
+ }
70
+
54
+
71
+ /*
55
+ /*
72
+ * VLD1 to all lanes: T bit indicates how many Dregs to write.
56
+ * Process the NSTable bit from the previous level. This changes
73
+ * VLD2/3/4 to all lanes: T bit indicates register stride.
57
+ * the table address space and the output space from Secure to
58
+ * NonSecure. With RME, the EL3 translation regime does not change
59
+ * from Root to NonSecure.
74
+ */
60
+ */
75
+ stride = a->t ? 2 : 1;
61
+ if (ptw->in_space == ARMSS_Secure
76
+ vec_size = nregs == 1 ? stride * 8 : 8;
62
+ && !regime_is_stage2(mmu_idx)
63
+ && extract32(tableattrs, 4, 1)) {
64
/*
65
* Stage2_S -> Stage2 or Phys_S -> Phys_NS
66
* Assert the relative order of the secure/non-secure indexes.
67
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
68
QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
69
ptw->in_ptw_idx += 1;
70
ptw->in_secure = false;
71
+ ptw->in_space = ARMSS_NonSecure;
72
}
77
+
73
+
78
+ tmp = tcg_temp_new_i32();
74
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
79
+ addr = tcg_temp_new_i32();
75
goto do_fault;
80
+ load_reg_var(s, addr, a->rn);
76
}
81
+ for (reg = 0; reg < nregs; reg++) {
77
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
82
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
78
*/
83
+ s->be_data | size);
79
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
84
+ if ((vd & 1) && vec_size == 16) {
80
if (!regime_is_stage2(mmu_idx)) {
85
+ /*
81
- attrs |= nstable << 5; /* NS */
86
+ * We cannot write 16 bytes at once because the
82
+ attrs |= !ptw->in_secure << 5; /* NS */
87
+ * destination is unaligned.
83
if (!param.hpd) {
88
+ */
84
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
89
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
85
/*
90
+ 8, 8, tmp);
91
+ tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
92
+ neon_reg_offset(vd, 0), 8, 8);
93
+ } else {
94
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
95
+ vec_size, vec_size, tmp);
96
+ }
97
+ tcg_gen_addi_i32(addr, addr, 1 << size);
98
+ vd += stride;
99
+ }
100
+ tcg_temp_free_i32(tmp);
101
+ tcg_temp_free_i32(addr);
102
+
103
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
104
+
105
+ return true;
106
+}
107
diff --git a/target/arm/translate.c b/target/arm/translate.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/target/arm/translate.c
110
+++ b/target/arm/translate.c
111
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
112
int size;
113
int reg;
114
int load;
115
- int vec_size;
116
TCGv_i32 addr;
117
TCGv_i32 tmp;
118
119
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
120
} else {
121
size = (insn >> 10) & 3;
122
if (size == 3) {
123
- /* Load single element to all lanes. */
124
- int a = (insn >> 4) & 1;
125
- if (!load) {
126
- return 1;
127
- }
128
- size = (insn >> 6) & 3;
129
- nregs = ((insn >> 8) & 3) + 1;
130
-
131
- if (size == 3) {
132
- if (nregs != 4 || a == 0) {
133
- return 1;
134
- }
135
- /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
136
- size = 2;
137
- }
138
- if (nregs == 1 && a == 1 && size == 0) {
139
- return 1;
140
- }
141
- if (nregs == 3 && a == 1) {
142
- return 1;
143
- }
144
- addr = tcg_temp_new_i32();
145
- load_reg_var(s, addr, rn);
146
-
147
- /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
148
- * VLD2/3/4 to all lanes: bit 5 indicates register stride.
149
- */
150
- stride = (insn & (1 << 5)) ? 2 : 1;
151
- vec_size = nregs == 1 ? stride * 8 : 8;
152
-
153
- tmp = tcg_temp_new_i32();
154
- for (reg = 0; reg < nregs; reg++) {
155
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
156
- s->be_data | size);
157
- if ((rd & 1) && vec_size == 16) {
158
- /* We cannot write 16 bytes at once because the
159
- * destination is unaligned.
160
- */
161
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
162
- 8, 8, tmp);
163
- tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
164
- neon_reg_offset(rd, 0), 8, 8);
165
- } else {
166
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
167
- vec_size, vec_size, tmp);
168
- }
169
- tcg_gen_addi_i32(addr, addr, 1 << size);
170
- rd += stride;
171
- }
172
- tcg_temp_free_i32(tmp);
173
- tcg_temp_free_i32(addr);
174
- stride = (1 << size) * nregs;
175
+ /* Load single element to all lanes -- handled by decodetree */
176
+ return 1;
177
} else {
178
/* Single element. */
179
int idx = (insn >> 4) & 0xf;
180
--
86
--
181
2.20.1
87
2.34.1
182
183
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
By using the TYPE_* definitions for devices, we can:
3
With Realm security state, bit 55 of a block or page descriptor during
4
- quickly find where devices are used with 'git-grep'
4
the stage2 walk becomes the NS bit; during the stage1 walk the
5
- easily rename a device (one-line change).
5
NS bit (bit 5) is RES0. With Root security state, bit 11 of the block or page
6
descriptor during the stage1 walk becomes the NSE bit.
6
7
7
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Rather than collecting an NS bit and applying it later, compute the
8
Message-id: 20200428154650.21991-1-f4bug@amsat.org
9
output pa space from the input pa space and assign it unconditionally.
10
This means that we no longer need to adjust the output space earlier
11
for the NSTable bit.
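As a worked example (not part of the patch itself): in the Root regime the hunk below folds NSE (descriptor bit 11) and NS (bit 5) straight into the output space, relying on the ARMSS_* encoding:

    nse = extract32(attrs, 11, 1);
    ns  = extract32(attrs, 5, 1);
    out_space = (nse << 1) | ns;   /* 0b00 Secure, 0b01 NonSecure,
                                    * 0b10 Root,   0b11 Realm (R_XTYPW) */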
12
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20230620124418.805717-13-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
17
---
12
hw/arm/mps2-tz.c | 2 +-
18
target/arm/ptw.c | 89 +++++++++++++++++++++++++++++++++++++++---------
13
1 file changed, 1 insertion(+), 1 deletion(-)
19
1 file changed, 73 insertions(+), 16 deletions(-)
14
20
15
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
21
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/mps2-tz.c
23
--- a/target/arm/ptw.c
18
+++ b/hw/arm/mps2-tz.c
24
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
25
@@ -XXX,XX +XXX,XX @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
20
exit(EXIT_FAILURE);
26
* @mmu_idx: MMU index indicating required translation regime
27
* @is_aa64: TRUE if AArch64
28
* @ap: The 2-bit simple AP (AP[2:1])
29
- * @ns: NS (non-secure) bit
30
* @xn: XN (execute-never) bit
31
* @pxn: PXN (privileged execute-never) bit
32
+ * @in_pa: The original input pa space
33
+ * @out_pa: The output pa space, modified by NSTable, NS, and NSE
34
*/
35
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
36
- int ap, int ns, int xn, int pxn)
37
+ int ap, int xn, int pxn,
38
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
39
{
40
ARMCPU *cpu = env_archcpu(env);
41
bool is_user = regime_is_user(env, mmu_idx);
42
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
43
}
21
}
44
}
22
45
23
- sysbus_init_child_obj(OBJECT(machine), "iotkit", &mms->iotkit,
46
- if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
24
+ sysbus_init_child_obj(OBJECT(machine), TYPE_IOTKIT, &mms->iotkit,
47
+ if (out_pa == ARMSS_NonSecure && in_pa == ARMSS_Secure &&
25
sizeof(mms->iotkit), mmc->armsse_type);
48
+ (env->cp15.scr_el3 & SCR_SIF)) {
26
iotkitdev = DEVICE(&mms->iotkit);
49
return prot_rw;
27
object_property_set_link(OBJECT(&mms->iotkit), OBJECT(system_memory),
50
}
51
52
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
53
int32_t stride;
54
int addrsize, inputsize, outputsize;
55
uint64_t tcr = regime_tcr(env, mmu_idx);
56
- int ap, ns, xn, pxn;
57
+ int ap, xn, pxn;
58
uint32_t el = regime_el(env, mmu_idx);
59
uint64_t descaddrmask;
60
bool aarch64 = arm_el_is_aa64(env, el);
61
uint64_t descriptor, new_descriptor;
62
+ ARMSecuritySpace out_space;
63
64
/* TODO: This code does not support shareability levels. */
65
if (aarch64) {
66
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
67
}
68
69
ap = extract32(attrs, 6, 2);
70
+ out_space = ptw->in_space;
71
if (regime_is_stage2(mmu_idx)) {
72
- ns = mmu_idx == ARMMMUIdx_Stage2;
73
+ /*
74
+ * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
75
+ * The bit remains ignored for other security states.
76
+ */
77
+ if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
78
+ out_space = ARMSS_NonSecure;
79
+ }
80
xn = extract64(attrs, 53, 2);
81
result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
82
} else {
83
- ns = extract32(attrs, 5, 1);
84
+ int nse, ns = extract32(attrs, 5, 1);
85
+ switch (out_space) {
86
+ case ARMSS_Root:
87
+ /*
88
+ * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
89
+ * R_XTYPW: NSE and NS together select the output pa space.
90
+ */
91
+ nse = extract32(attrs, 11, 1);
92
+ out_space = (nse << 1) | ns;
93
+ if (out_space == ARMSS_Secure &&
94
+ !cpu_isar_feature(aa64_sel2, cpu)) {
95
+ out_space = ARMSS_NonSecure;
96
+ }
97
+ break;
98
+ case ARMSS_Secure:
99
+ if (ns) {
100
+ out_space = ARMSS_NonSecure;
101
+ }
102
+ break;
103
+ case ARMSS_Realm:
104
+ switch (mmu_idx) {
105
+ case ARMMMUIdx_Stage1_E0:
106
+ case ARMMMUIdx_Stage1_E1:
107
+ case ARMMMUIdx_Stage1_E1_PAN:
108
+ /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
109
+ break;
110
+ case ARMMMUIdx_E2:
111
+ case ARMMMUIdx_E20_0:
112
+ case ARMMMUIdx_E20_2:
113
+ case ARMMMUIdx_E20_2_PAN:
114
+ /*
115
+ * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
116
+ * NS changes the output to non-secure space.
117
+ */
118
+ if (ns) {
119
+ out_space = ARMSS_NonSecure;
120
+ }
121
+ break;
122
+ default:
123
+ g_assert_not_reached();
124
+ }
125
+ break;
126
+ case ARMSS_NonSecure:
127
+ /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
128
+ break;
129
+ default:
130
+ g_assert_not_reached();
131
+ }
132
xn = extract64(attrs, 54, 1);
133
pxn = extract64(attrs, 53, 1);
134
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
135
+
136
+ /*
137
+ * Note that we modified ptw->in_space earlier for NSTable, but
138
+ * result->f.attrs retains a copy of the original security space.
139
+ */
140
+ result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
141
+ result->f.attrs.space, out_space);
142
}
143
144
if (!(result->f.prot & (1 << access_type))) {
145
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
146
}
147
}
148
149
- if (ns) {
150
- /*
151
- * The NS bit will (as required by the architecture) have no effect if
152
- * the CPU doesn't support TZ or this is a non-secure translation
153
- * regime, because the attribute will already be non-secure.
154
- */
155
- result->f.attrs.secure = false;
156
- result->f.attrs.space = ARMSS_NonSecure;
157
- }
158
+ result->f.attrs.space = out_space;
159
+ result->f.attrs.secure = arm_space_is_secure(out_space);
160
161
if (regime_is_stage2(mmu_idx)) {
162
result->cacheattrs.is_s2_format = true;
28
--
163
--
29
2.20.1
164
2.34.1
30
31
1
Convert the Neon 3-reg-same VADD and VSUB insns to decodetree.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Note that we don't need the neon_3r_sizes[op] check here because all
3
While Root and Realm may read and write data from other spaces,
4
size values are OK for VADD and VSUB; we'll add this when we convert
4
neither may execute from other pa spaces.
5
the first insn that has size restrictions.
6
5
7
For this we need one of the GVecGen*Fn typedefs currently in
6
This happens for Stage1 EL3, EL2, EL2&0, and Stage2 EL1&0.
8
translate-a64.h; move them all to translate.h as a block so they
9
are visible to the 32-bit decoder.
10
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20230620124418.805717-14-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20200430181003.21682-15-peter.maydell@linaro.org
14
---
12
---
15
target/arm/translate-a64.h | 9 --------
13
target/arm/ptw.c | 52 ++++++++++++++++++++++++++++++++++++++++++------
16
target/arm/translate.h | 9 ++++++++
14
1 file changed, 46 insertions(+), 6 deletions(-)
17
target/arm/neon-dp.decode | 17 +++++++++++++++
18
target/arm/translate-neon.inc.c | 38 +++++++++++++++++++++++++++++++++
19
target/arm/translate.c | 14 ++++--------
20
5 files changed, 68 insertions(+), 19 deletions(-)
21
15
22
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
16
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
23
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/translate-a64.h
18
--- a/target/arm/ptw.c
25
+++ b/target/arm/translate-a64.h
19
+++ b/target/arm/ptw.c
26
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
20
@@ -XXX,XX +XXX,XX @@ do_fault:
27
21
* @xn: XN (execute-never) bits
28
bool disas_sve(DisasContext *, uint32_t);
22
* @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
29
23
*/
30
-/* Note that the gvec expanders operate on offsets + sizes. */
24
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
31
-typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
25
+static int get_S2prot_noexecute(int s2ap)
32
-typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
26
{
33
- uint32_t, uint32_t);
27
int prot = 0;
34
-typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
28
35
- uint32_t, uint32_t, uint32_t);
29
@@ -XXX,XX +XXX,XX @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
36
-typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
30
if (s2ap & 2) {
37
- uint32_t, uint32_t, uint32_t);
31
prot |= PAGE_WRITE;
38
-
32
}
39
#endif /* TARGET_ARM_TRANSLATE_A64_H */
33
+ return prot;
40
diff --git a/target/arm/translate.h b/target/arm/translate.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/translate.h
43
+++ b/target/arm/translate.h
44
@@ -XXX,XX +XXX,XX @@ void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
45
#define dc_isar_feature(name, ctx) \
46
({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
47
48
+/* Note that the gvec expanders operate on offsets + sizes. */
49
+typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
50
+typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
51
+ uint32_t, uint32_t);
52
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
53
+ uint32_t, uint32_t, uint32_t);
54
+typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
55
+ uint32_t, uint32_t, uint32_t);
56
+
57
#endif /* TARGET_ARM_TRANSLATE_H */
58
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/neon-dp.decode
61
+++ b/target/arm/neon-dp.decode
62
@@ -XXX,XX +XXX,XX @@
63
#
64
# This file is processed by scripts/decodetree.py
65
#
66
+# VFP/Neon register fields; same as vfp.decode
67
+%vm_dp 5:1 0:4
68
+%vn_dp 7:1 16:4
69
+%vd_dp 22:1 12:4
70
71
# Encodings for Neon data processing instructions where the T32 encoding
72
# is a simple transformation of the A32 encoding.
73
@@ -XXX,XX +XXX,XX @@
74
# 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
75
# This file works on the A32 encoding only; calling code for T32 has to
76
# transform the insn into the A32 version first.
77
+
78
+######################################################################
79
+# 3-reg-same grouping:
80
+# 1111 001 U 0 D sz:2 Vn:4 Vd:4 opc:4 N Q M op Vm:4
81
+######################################################################
82
+
83
+&3same vm vn vd q size
84
+
85
+@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
86
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
87
+
88
+VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
89
+VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
90
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/translate-neon.inc.c
93
+++ b/target/arm/translate-neon.inc.c
94
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
95
96
return true;
97
}
98
+
99
+static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
100
+{
101
+ int vec_size = a->q ? 16 : 8;
102
+ int rd_ofs = neon_reg_offset(a->vd, 0);
103
+ int rn_ofs = neon_reg_offset(a->vn, 0);
104
+ int rm_ofs = neon_reg_offset(a->vm, 0);
105
+
106
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
107
+ return false;
108
+ }
109
+
110
+ /* UNDEF accesses to D16-D31 if they don't exist. */
111
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
112
+ ((a->vd | a->vn | a->vm) & 0x10)) {
113
+ return false;
114
+ }
115
+
116
+ if ((a->vn | a->vm | a->vd) & a->q) {
117
+ return false;
118
+ }
119
+
120
+ if (!vfp_access_check(s)) {
121
+ return true;
122
+ }
123
+
124
+ fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
125
+ return true;
126
+}
34
+}
127
+
35
+
128
+#define DO_3SAME(INSN, FUNC) \
36
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
129
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
37
+{
130
+ { \
38
+ int prot = get_S2prot_noexecute(s2ap);
131
+ return do_3same(s, a, FUNC); \
39
132
+ }
40
if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
133
+
41
switch (xn) {
134
+DO_3SAME(VADD, tcg_gen_gvec_add)
42
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
135
+DO_3SAME(VSUB, tcg_gen_gvec_sub)
136
diff --git a/target/arm/translate.c b/target/arm/translate.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/target/arm/translate.c
139
+++ b/target/arm/translate.c
140
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
141
}
142
return 0;
143
144
- case NEON_3R_VADD_VSUB:
145
- if (u) {
146
- tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
147
- vec_size, vec_size);
148
- } else {
149
- tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
150
- vec_size, vec_size);
151
- }
152
- return 0;
153
-
154
case NEON_3R_VQADD:
155
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
156
rn_ofs, rm_ofs, vec_size, vec_size,
157
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
158
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
159
u ? &ushl_op[size] : &sshl_op[size]);
160
return 0;
161
+
162
+ case NEON_3R_VADD_VSUB:
163
+ /* Already handled by decodetree */
164
+ return 1;
165
}
43
}
166
44
}
167
if (size == 3) {
45
46
- if (out_pa == ARMSS_NonSecure && in_pa == ARMSS_Secure &&
47
- (env->cp15.scr_el3 & SCR_SIF)) {
48
- return prot_rw;
49
+ if (in_pa != out_pa) {
50
+ switch (in_pa) {
51
+ case ARMSS_Root:
52
+ /*
53
+ * R_ZWRVD: permission fault for insn fetched from non-Root,
54
+ * I_WWBFB: SIF has no effect in EL3.
55
+ */
56
+ return prot_rw;
57
+ case ARMSS_Realm:
58
+ /*
59
+ * R_PKTDS: permission fault for insn fetched from non-Realm,
60
+ * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
61
+ * happens during any stage2 translation.
62
+ */
63
+ switch (mmu_idx) {
64
+ case ARMMMUIdx_E2:
65
+ case ARMMMUIdx_E20_0:
66
+ case ARMMMUIdx_E20_2:
67
+ case ARMMMUIdx_E20_2_PAN:
68
+ return prot_rw;
69
+ default:
70
+ break;
71
+ }
72
+ break;
73
+ case ARMSS_Secure:
74
+ if (env->cp15.scr_el3 & SCR_SIF) {
75
+ return prot_rw;
76
+ }
77
+ break;
78
+ default:
79
+ /* Input NonSecure must have output NonSecure. */
80
+ g_assert_not_reached();
81
+ }
82
}
83
84
/* TODO have_wxn should be replaced with
85
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
86
/*
87
* R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
88
* The bit remains ignored for other security states.
89
+ * R_YMCSL: Executing an insn fetched from non-Realm causes
90
+ * a stage2 permission fault.
91
*/
92
if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
93
out_space = ARMSS_NonSecure;
94
+ result->f.prot = get_S2prot_noexecute(ap);
95
+ } else {
96
+ xn = extract64(attrs, 53, 2);
97
+ result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
98
}
99
- xn = extract64(attrs, 53, 2);
100
- result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
101
} else {
102
int nse, ns = extract32(attrs, 5, 1);
103
switch (out_space) {
168
--
104
--
169
2.20.1
105
2.34.1
170
171
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Embed the ADMAs into the SoC type.
3
Do not provide a fast-path for physical addresses,
4
as those will need to be validated for GPC.
4
5
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20230620124418.805717-15-richard.henderson@linaro.org
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-7-edgar.iglesias@gmail.com
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
include/hw/arm/xlnx-versal.h | 3 ++-
11
target/arm/ptw.c | 44 +++++++++++++++++---------------------------
14
hw/arm/xlnx-versal.c | 14 +++++++-------
12
1 file changed, 17 insertions(+), 27 deletions(-)
15
2 files changed, 9 insertions(+), 8 deletions(-)
16
13
17
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
18
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/arm/xlnx-versal.h
16
--- a/target/arm/ptw.c
20
+++ b/include/hw/arm/xlnx-versal.h
17
+++ b/target/arm/ptw.c
21
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
22
#include "hw/arm/boot.h"
19
* From gdbstub, do not use softmmu so that we don't modify the
23
#include "hw/intc/arm_gicv3.h"
20
* state of the cpu at all, including softmmu tlb contents.
24
#include "hw/char/pl011.h"
21
*/
25
+#include "hw/dma/xlnx-zdma.h"
22
- if (regime_is_stage2(s2_mmu_idx)) {
26
#include "hw/net/cadence_gem.h"
23
- S1Translate s2ptw = {
27
24
- .in_mmu_idx = s2_mmu_idx,
28
#define TYPE_XLNX_VERSAL "xlnx-versal"
25
- .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
29
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
26
- .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
30
struct {
27
- .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
31
PL011State uart[XLNX_VERSAL_NR_UARTS];
28
- : space == ARMSS_Realm ? ARMSS_Realm
32
CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
29
- : ARMSS_NonSecure),
33
- SysBusDevice *adma[XLNX_VERSAL_NR_ADMAS];
30
- .in_debug = true,
34
+ XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
31
- };
35
} iou;
32
- GetPhysAddrResult s2 = { };
36
} lpd;
33
+ S1Translate s2ptw = {
37
34
+ .in_mmu_idx = s2_mmu_idx,
38
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
35
+ .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
39
index XXXXXXX..XXXXXXX 100644
36
+ .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
40
--- a/hw/arm/xlnx-versal.c
37
+ .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
41
+++ b/hw/arm/xlnx-versal.c
38
+ : space == ARMSS_Realm ? ARMSS_Realm
42
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
39
+ : ARMSS_NonSecure),
43
DeviceState *dev;
40
+ .in_debug = true,
44
MemoryRegion *mr;
41
+ };
45
42
+ GetPhysAddrResult s2 = { };
46
- dev = qdev_create(NULL, "xlnx.zdma");
43
47
- s->lpd.iou.adma[i] = SYS_BUS_DEVICE(dev);
44
- if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
48
- object_property_set_int(OBJECT(s->lpd.iou.adma[i]), 128, "bus-width",
45
- false, &s2, fi)) {
49
- &error_abort);
46
- goto fail;
50
- object_property_add_child(OBJECT(s), name, OBJECT(dev), &error_fatal);
47
- }
51
+ sysbus_init_child_obj(OBJECT(s), name,
48
- ptw->out_phys = s2.f.phys_addr;
52
+ &s->lpd.iou.adma[i], sizeof(s->lpd.iou.adma[i]),
49
- pte_attrs = s2.cacheattrs.attrs;
53
+ TYPE_XLNX_ZDMA);
50
- ptw->out_secure = s2.f.attrs.secure;
54
+ dev = DEVICE(&s->lpd.iou.adma[i]);
51
- ptw->out_space = s2.f.attrs.space;
55
+ object_property_set_int(OBJECT(dev), 128, "bus-width", &error_abort);
52
- } else {
56
qdev_init_nofail(dev);
53
- /* Regime is physical. */
57
54
- ptw->out_phys = addr;
58
- mr = sysbus_mmio_get_region(s->lpd.iou.adma[i], 0);
55
- pte_attrs = 0;
59
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
56
- ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
60
memory_region_add_subregion(&s->mr_ps,
57
- ptw->out_space = (s2_mmu_idx == ARMMMUIdx_Phys_S ? ARMSS_Secure
61
MM_ADMA_CH0 + i * MM_ADMA_CH0_SIZE, mr);
58
- : space == ARMSS_Realm ? ARMSS_Realm
62
59
- : ARMSS_NonSecure);
63
- sysbus_connect_irq(s->lpd.iou.adma[i], 0, pic[VERSAL_ADMA_IRQ_0 + i]);
60
+ if (get_phys_addr_with_struct(env, &s2ptw, addr,
64
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_ADMA_IRQ_0 + i]);
61
+ MMU_DATA_LOAD, &s2, fi)) {
65
g_free(name);
62
+ goto fail;
66
}
63
}
67
}
64
+ ptw->out_phys = s2.f.phys_addr;
65
+ pte_attrs = s2.cacheattrs.attrs;
66
ptw->out_host = NULL;
67
ptw->out_rw = false;
68
+ ptw->out_secure = s2.f.attrs.secure;
69
+ ptw->out_space = s2.f.attrs.space;
70
} else {
71
#ifdef CONFIG_TCG
72
CPUTLBEntryFull *full;
68
--
73
--
69
2.20.1
74
2.34.1
70
71
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Embed the APUs into the SoC type.
3
Instead of passing this to get_phys_addr_lpae, stash it
4
in the S1Translate structure.
4
5
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-id: 20230620124418.805717-16-richard.henderson@linaro.org
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20200427181649.26851-8-edgar.iglesias@gmail.com
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
11
---
13
include/hw/arm/xlnx-versal.h | 2 +-
12
target/arm/ptw.c | 27 ++++++++++++---------------
14
hw/arm/xlnx-versal-virt.c | 4 ++--
13
1 file changed, 12 insertions(+), 15 deletions(-)
15
hw/arm/xlnx-versal.c | 19 +++++--------------
16
3 files changed, 8 insertions(+), 17 deletions(-)
17
14
18
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/arm/xlnx-versal.h
17
--- a/target/arm/ptw.c
21
+++ b/include/hw/arm/xlnx-versal.h
18
+++ b/target/arm/ptw.c
22
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
19
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
23
struct {
20
ARMSecuritySpace in_space;
24
struct {
21
bool in_secure;
25
MemoryRegion mr;
22
bool in_debug;
26
- ARMCPU *cpu[XLNX_VERSAL_NR_ACPUS];
23
+ /*
27
+ ARMCPU cpu[XLNX_VERSAL_NR_ACPUS];
24
+ * If this is stage 2 of a stage 1+2 page table walk, then this must
28
GICv3State gic;
25
+ * be true if stage 1 is an EL0 access; otherwise this is ignored.
29
} apu;
26
+ * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
30
} fpd;
27
+ */
31
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
28
+ bool in_s1_is_el0;
32
index XXXXXXX..XXXXXXX 100644
29
bool out_secure;
33
--- a/hw/arm/xlnx-versal-virt.c
30
bool out_rw;
34
+++ b/hw/arm/xlnx-versal-virt.c
31
bool out_be;
35
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
32
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
36
s->binfo.get_dtb = versal_virt_get_dtb;
33
} S1Translate;
37
s->binfo.modify_dtb = versal_virt_modify_dtb;
34
38
if (machine->kernel_filename) {
35
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
39
- arm_load_kernel(s->soc.fpd.apu.cpu[0], machine, &s->binfo);
36
- uint64_t address,
40
+ arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
37
- MMUAccessType access_type, bool s1_is_el0,
38
+ uint64_t address, MMUAccessType access_type,
39
GetPhysAddrResult *result, ARMMMUFaultInfo *fi);
40
41
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
42
@@ -XXX,XX +XXX,XX @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
43
* @ptw: Current and next stage parameters for the walk.
44
* @address: virtual address to get physical address for
45
* @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
46
- * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
47
- * (so this is a stage 2 page table walk),
48
- * must be true if this is stage 2 of a stage 1+2
49
- * walk for an EL0 access. If @mmu_idx is anything else,
50
- * @s1_is_el0 is ignored.
51
* @result: set on translation success,
52
* @fi: set to fault info if the translation fails
53
*/
54
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
55
uint64_t address,
56
- MMUAccessType access_type, bool s1_is_el0,
57
+ MMUAccessType access_type,
58
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
59
{
60
ARMCPU *cpu = env_archcpu(env);
61
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
62
result->f.prot = get_S2prot_noexecute(ap);
63
} else {
64
xn = extract64(attrs, 53, 2);
65
- result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
66
+ result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
67
}
41
} else {
68
} else {
42
- AddressSpace *as = arm_boot_address_space(s->soc.fpd.apu.cpu[0],
69
int nse, ns = extract32(attrs, 5, 1);
43
+ AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0],
70
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
44
&s->binfo);
71
bool ret, ipa_secure;
45
/* Some boot-loaders (e.g u-boot) don't like blobs at address 0 (NULL).
72
ARMCacheAttrs cacheattrs1;
46
* Offset things by 4K. */
73
ARMSecuritySpace ipa_space;
47
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
74
- bool is_el0;
48
index XXXXXXX..XXXXXXX 100644
75
uint64_t hcr;
49
--- a/hw/arm/xlnx-versal.c
76
50
+++ b/hw/arm/xlnx-versal.c
77
ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
51
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
78
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
52
79
ipa_secure = result->f.attrs.secure;
53
for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) {
80
ipa_space = result->f.attrs.space;
54
Object *obj;
81
55
- char *name;
82
- is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
56
-
83
+ ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
57
- obj = object_new(XLNX_VERSAL_ACPU_TYPE);
84
ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
58
- if (!obj) {
85
ptw->in_secure = ipa_secure;
59
- error_report("Unable to create apu.cpu[%d] of type %s",
86
ptw->in_space = ipa_space;
60
- i, XLNX_VERSAL_ACPU_TYPE);
87
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
61
- exit(EXIT_FAILURE);
88
ret = get_phys_addr_pmsav8(env, ipa, access_type,
62
- }
89
ptw->in_mmu_idx, is_secure, result, fi);
63
-
90
} else {
64
- name = g_strdup_printf("apu-cpu[%d]", i);
91
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
65
- object_property_add_child(OBJECT(s), name, obj, &error_fatal);
92
- is_el0, result, fi);
66
- g_free(name);
93
+ ret = get_phys_addr_lpae(env, ptw, ipa, access_type, result, fi);
67
68
+ object_initialize_child(OBJECT(s), "apu-cpu[*]",
69
+ &s->fpd.apu.cpu[i], sizeof(s->fpd.apu.cpu[i]),
70
+ XLNX_VERSAL_ACPU_TYPE, &error_abort, NULL);
71
+ obj = OBJECT(&s->fpd.apu.cpu[i]);
72
object_property_set_int(obj, s->cfg.psci_conduit,
73
"psci-conduit", &error_abort);
74
if (i) {
75
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
76
object_property_set_link(obj, OBJECT(&s->fpd.apu.mr), "memory",
77
&error_abort);
78
object_property_set_bool(obj, true, "realized", &error_fatal);
79
- s->fpd.apu.cpu[i] = ARM_CPU(obj);
80
}
94
}
81
}
95
fi->s2addr = ipa;
82
96
83
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_gic(Versal *s, qemu_irq *pic)
97
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
84
}
98
}
85
99
86
for (i = 0; i < nr_apu_cpus; i++) {
100
if (regime_using_lpae_format(env, mmu_idx)) {
87
- DeviceState *cpudev = DEVICE(s->fpd.apu.cpu[i]);
101
- return get_phys_addr_lpae(env, ptw, address, access_type, false,
88
+ DeviceState *cpudev = DEVICE(&s->fpd.apu.cpu[i]);
102
- result, fi);
89
int ppibase = XLNX_VERSAL_NR_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
103
+ return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
90
qemu_irq maint_irq;
104
} else if (arm_feature(env, ARM_FEATURE_V7) ||
91
int ti;
105
regime_sctlr(env, mmu_idx) & SCTLR_XP) {
106
return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
92
--
107
--
93
2.20.1
108
2.34.1
94
109
95
110
diff view generated by jsdifflib
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Move misplaced comment.
3
This fixes a bug in which we failed to initialize
4
the result attributes properly after the memset.
4
5
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20230620124418.805717-17-richard.henderson@linaro.org
9
Message-id: 20200427181649.26851-3-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
hw/arm/xlnx-versal.c | 2 +-
12
target/arm/ptw.c | 11 +----------
13
1 file changed, 1 insertion(+), 1 deletion(-)
13
1 file changed, 1 insertion(+), 10 deletions(-)
14
14
15
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/xlnx-versal.c
17
--- a/target/arm/ptw.c
18
+++ b/hw/arm/xlnx-versal.c
18
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
19
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
20
20
void *out_host;
21
obj = object_new(XLNX_VERSAL_ACPU_TYPE);
21
} S1Translate;
22
if (!obj) {
22
23
- /* Secondary CPUs start in PSCI powered-down state */
23
-static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
24
error_report("Unable to create apu.cpu[%d] of type %s",
24
- uint64_t address, MMUAccessType access_type,
25
i, XLNX_VERSAL_ACPU_TYPE);
25
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi);
26
exit(EXIT_FAILURE);
26
-
27
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
27
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
28
object_property_set_int(obj, s->cfg.psci_conduit,
28
target_ulong address,
29
"psci-conduit", &error_abort);
29
MMUAccessType access_type,
30
if (i) {
30
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
31
+ /* Secondary CPUs start in PSCI powered-down state */
31
cacheattrs1 = result->cacheattrs;
32
object_property_set_bool(obj, true,
32
memset(result, 0, sizeof(*result));
33
"start-powered-off", &error_abort);
33
34
}
34
- if (arm_feature(env, ARM_FEATURE_PMSA)) {
35
- ret = get_phys_addr_pmsav8(env, ipa, access_type,
36
- ptw->in_mmu_idx, is_secure, result, fi);
37
- } else {
38
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type, result, fi);
39
- }
40
+ ret = get_phys_addr_with_struct(env, ptw, ipa, access_type, result, fi);
41
fi->s2addr = ipa;
42
43
/* Combine the S1 and S2 perms. */
35
--
44
--
36
2.20.1
45
2.34.1
37
46
38
47
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
hw/arm: versal: Add support for the RTC.
3
The function takes the fields as filled in by
4
the Arm ARM pseudocode for TakeGPCException.
4
5
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20230620124418.805717-18-richard.henderson@linaro.org
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-10-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
include/hw/arm/xlnx-versal.h | 8 ++++++++
11
target/arm/syndrome.h | 10 ++++++++++
13
hw/arm/xlnx-versal.c | 21 +++++++++++++++++++++
12
1 file changed, 10 insertions(+)
14
2 files changed, 29 insertions(+)
15
13
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
14
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/arm/xlnx-versal.h
16
--- a/target/arm/syndrome.h
19
+++ b/include/hw/arm/xlnx-versal.h
17
+++ b/target/arm/syndrome.h
20
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
21
#include "hw/char/pl011.h"
19
EC_SVEACCESSTRAP = 0x19,
22
#include "hw/dma/xlnx-zdma.h"
20
EC_ERETTRAP = 0x1a,
23
#include "hw/net/cadence_gem.h"
21
EC_SMETRAP = 0x1d,
24
+#include "hw/rtc/xlnx-zynqmp-rtc.h"
22
+ EC_GPC = 0x1e,
25
23
EC_INSNABORT = 0x20,
26
#define TYPE_XLNX_VERSAL "xlnx-versal"
24
EC_INSNABORT_SAME_EL = 0x21,
27
#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
25
EC_PCALIGNMENT = 0x22,
28
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
26
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_bxjtrap(int cv, int cond, int rm)
29
struct {
27
(cv << 24) | (cond << 20) | rm;
30
SDHCIState sd[XLNX_VERSAL_NR_SDS];
31
} iou;
32
+
33
+ XlnxZynqMPRTC rtc;
34
} pmc;
35
36
struct {
37
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
38
#define VERSAL_GEM1_IRQ_0 58
39
#define VERSAL_GEM1_WAKE_IRQ_0 59
40
#define VERSAL_ADMA_IRQ_0 60
41
+#define VERSAL_RTC_APB_ERR_IRQ 121
42
#define VERSAL_SD0_IRQ_0 126
43
+#define VERSAL_RTC_ALARM_IRQ 142
44
+#define VERSAL_RTC_SECONDS_IRQ 143
45
46
/* Architecturally reserved IRQs suitable for virtualization. */
47
#define VERSAL_RSVD_IRQ_FIRST 111
48
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
49
#define MM_PMC_SD0_SIZE 0x10000
50
#define MM_PMC_CRP 0xf1260000U
51
#define MM_PMC_CRP_SIZE 0x10000
52
+#define MM_PMC_RTC 0xf12a0000
53
+#define MM_PMC_RTC_SIZE 0x10000
54
#endif
55
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/hw/arm/xlnx-versal.c
58
+++ b/hw/arm/xlnx-versal.c
59
@@ -XXX,XX +XXX,XX @@ static void versal_create_sds(Versal *s, qemu_irq *pic)
60
}
61
}
28
}
62
29
63
+static void versal_create_rtc(Versal *s, qemu_irq *pic)
30
+static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc,
31
+ int cm, int s1ptw, int wnr, int fsc)
64
+{
32
+{
65
+ SysBusDevice *sbd;
33
+ /* TODO: FEAT_NV2 adds VNCR */
66
+ MemoryRegion *mr;
34
+ return (EC_GPC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (s2ptw << 21)
67
+
35
+ | (ind << 20) | (gpcsc << 14) | (cm << 8) | (s1ptw << 7)
68
+ sysbus_init_child_obj(OBJECT(s), "rtc", &s->pmc.rtc, sizeof(s->pmc.rtc),
36
+ | (wnr << 6) | fsc;
69
+ TYPE_XLNX_ZYNQMP_RTC);
70
+ sbd = SYS_BUS_DEVICE(&s->pmc.rtc);
71
+ qdev_init_nofail(DEVICE(sbd));
72
+
73
+ mr = sysbus_mmio_get_region(sbd, 0);
74
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr);
75
+
76
+ /*
77
+ * TODO: Connect the ALARM and SECONDS interrupts once our RTC model
78
+ * supports them.
79
+ */
80
+ sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]);
81
+}
37
+}
82
+
38
+
83
/* This takes the board allocated linear DDR memory and creates aliases
39
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
84
* for each split DDR range/aperture on the Versal address map.
40
{
85
*/
41
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
86
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
87
versal_create_gems(s, pic);
88
versal_create_admas(s, pic);
89
versal_create_sds(s, pic);
90
+ versal_create_rtc(s, pic);
91
versal_map_ddr(s);
92
versal_unimp(s);
93
94
--
42
--
95
2.20.1
43
2.34.1
96
97
1
The ARMv8.2-TTS2UXN feature extends the XN field in stage 2
1
From: Richard Henderson <richard.henderson@linaro.org>
2
translation table descriptors from just bit [54] to bits [54:53],
3
allowing stage 2 to control execution permissions separately for EL0
4
and EL1. Implement the new semantics of the XN field and enable
5
the feature for our 'max' CPU.
6
2
3
Handle GPC Fault types in arm_deliver_fault, reporting as
4
either a GPC exception at EL3, or falling through to insn
5
or data aborts at various exception levels.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20230620124418.805717-19-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20200330210400.11724-5-peter.maydell@linaro.org
11
---
11
---
12
target/arm/cpu.h | 15 +++++++++++++++
12
target/arm/cpu.h | 1 +
13
target/arm/cpu.c | 1 +
13
target/arm/internals.h | 27 +++++++++++
14
target/arm/cpu64.c | 2 ++
14
target/arm/helper.c | 5 ++
15
target/arm/helper.c | 37 +++++++++++++++++++++++++++++++------
15
target/arm/tcg/tlb_helper.c | 96 +++++++++++++++++++++++++++++++++++--
16
4 files changed, 49 insertions(+), 6 deletions(-)
16
4 files changed, 126 insertions(+), 3 deletions(-)
17
17
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.h
20
--- a/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
22
@@ -XXX,XX +XXX,XX @@
23
return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
23
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
24
}
24
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
25
25
#define EXCP_VSERR 24
26
+static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
26
+#define EXCP_GPC 25 /* v9 Granule Protection Check Fault */
27
+{
27
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
28
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
28
29
+}
29
#define ARMV7M_EXCP_RESET 1
30
+
30
diff --git a/target/arm/internals.h b/target/arm/internals.h
31
/*
31
index XXXXXXX..XXXXXXX 100644
32
* 64-bit feature tests via id registers.
32
--- a/target/arm/internals.h
33
*/
33
+++ b/target/arm/internals.h
34
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
34
@@ -XXX,XX +XXX,XX @@ typedef enum ARMFaultType {
35
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
35
ARMFault_ICacheMaint,
36
}
36
ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
37
37
ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
38
+static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
38
+ ARMFault_GPCFOnWalk,
39
+{
39
+ ARMFault_GPCFOnOutput,
40
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
40
} ARMFaultType;
41
+}
41
42
+
42
+typedef enum ARMGPCF {
43
/*
43
+ GPCF_None,
44
* Feature tests for "does this exist in either 32-bit or 64-bit?"
44
+ GPCF_AddressSize,
45
*/
45
+ GPCF_Walk,
46
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
46
+ GPCF_EABT,
47
return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
47
+ GPCF_Fail,
48
}
48
+} ARMGPCF;
49
49
+
50
+static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
50
/**
51
+{
51
* ARMMMUFaultInfo: Information describing an ARM MMU Fault
52
+ return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
52
* @type: Type of fault
53
+}
53
+ * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
54
+
54
* @level: Table walk level (for translation, access flag and permission faults)
55
/*
55
* @domain: Domain of the fault address (for non-LPAE CPUs only)
56
* Forward to the above feature tests given an ARMCPU pointer.
56
* @s2addr: Address that caused a fault at stage 2
57
*/
57
+ * @paddr: physical address that caused a fault for gpc
58
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
58
+ * @paddr_space: physical address space that caused a fault for gpc
59
index XXXXXXX..XXXXXXX 100644
59
* @stage2: True if we faulted at stage 2
60
--- a/target/arm/cpu.c
60
* @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
61
+++ b/target/arm/cpu.c
61
* @s1ns: True if we faulted on a non-secure IPA while in secure state
62
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
62
@@ -XXX,XX +XXX,XX @@ typedef enum ARMFaultType {
63
t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
63
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
64
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
64
struct ARMMMUFaultInfo {
65
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
65
ARMFaultType type;
66
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
66
+ ARMGPCF gpcf;
67
cpu->isar.id_mmfr4 = t;
67
target_ulong s2addr;
68
}
68
+ target_ulong paddr;
69
#endif
69
+ ARMSecuritySpace paddr_space;
70
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
70
int level;
71
index XXXXXXX..XXXXXXX 100644
71
int domain;
72
--- a/target/arm/cpu64.c
72
bool stage2;
73
+++ b/target/arm/cpu64.c
73
@@ -XXX,XX +XXX,XX @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
74
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
74
case ARMFault_Exclusive:
75
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
75
fsc = 0x35;
76
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
76
break;
77
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
77
+ case ARMFault_GPCFOnWalk:
78
+ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
78
+ assert(fi->level >= -1 && fi->level <= 3);
79
cpu->isar.id_aa64mmfr1 = t;
79
+ if (fi->level < 0) {
80
80
+ fsc = 0b100011;
81
t = cpu->isar.id_aa64mmfr2;
81
+ } else {
82
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
82
+ fsc = 0b100100 | fi->level;
83
u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
83
+ }
84
u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
84
+ break;
85
u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
85
+ case ARMFault_GPCFOnOutput:
86
+ u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
86
+ fsc = 0b101000;
87
cpu->isar.id_mmfr4 = u;
87
+ break;
88
88
default:
89
u = cpu->isar.id_aa64dfr0;
89
/* Other faults can't occur in a context that requires a
90
* long-format status code.
90
diff --git a/target/arm/helper.c b/target/arm/helper.c
91
diff --git a/target/arm/helper.c b/target/arm/helper.c
91
index XXXXXXX..XXXXXXX 100644
92
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/helper.c
93
--- a/target/arm/helper.c
93
+++ b/target/arm/helper.c
94
+++ b/target/arm/helper.c
94
@@ -XXX,XX +XXX,XX @@ simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
95
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs)
95
*
96
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
96
* @env: CPUARMState
97
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
97
* @s2ap: The 2-bit stage2 access permissions (S2AP)
98
[EXCP_VSERR] = "Virtual SERR",
98
- * @xn: XN (execute-never) bit
99
+ [EXCP_GPC] = "Granule Protection Check",
99
+ * @xn: XN (execute-never) bits
100
};
100
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
101
101
*/
102
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
102
-static int get_S2prot(CPUARMState *env, int s2ap, int xn)
103
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
103
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
104
}
105
106
switch (cs->exception_index) {
107
+ case EXCP_GPC:
108
+ qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
109
+ env->cp15.mfar_el3);
110
+ /* fall through */
111
case EXCP_PREFETCH_ABORT:
112
case EXCP_DATA_ABORT:
113
/*
114
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/target/arm/tcg/tlb_helper.c
117
+++ b/target/arm/tcg/tlb_helper.c
118
@@ -XXX,XX +XXX,XX @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
119
return fsr;
120
}
121
122
+static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
123
+ ARMMMUFaultInfo *fi)
124
+{
125
+ bool ret;
126
+
127
+ switch (fi->gpcf) {
128
+ case GPCF_None:
129
+ return false;
130
+ case GPCF_AddressSize:
131
+ case GPCF_Walk:
132
+ case GPCF_EABT:
133
+ /* R_PYTGX: GPT faults are reported as GPC. */
134
+ ret = true;
135
+ break;
136
+ case GPCF_Fail:
137
+ /*
138
+ * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
139
+ * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
140
+ * if SCR_EL3.GPF is set, otherwise an insn or data abort.
141
+ */
142
+ ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
143
+ break;
144
+ default:
145
+ g_assert_not_reached();
146
+ }
147
+
148
+ assert(cpu_isar_feature(aa64_rme, cpu));
149
+ assert(fi->type == ARMFault_GPCFOnWalk ||
150
+ fi->type == ARMFault_GPCFOnOutput);
151
+ if (fi->gpcf == GPCF_AddressSize) {
152
+ assert(fi->level == 0);
153
+ } else {
154
+ assert(fi->level >= 0 && fi->level <= 1);
155
+ }
156
+
157
+ return ret;
158
+}
159
+
160
+static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
161
+{
162
+ static uint8_t const gpcsc[] = {
163
+ [GPCF_AddressSize] = 0b000000,
164
+ [GPCF_Walk] = 0b000100,
165
+ [GPCF_Fail] = 0b001100,
166
+ [GPCF_EABT] = 0b010100,
167
+ };
168
+
169
+ /* Note that we've validated fi->gpcf and fi->level above. */
170
+ return gpcsc[fi->gpcf] | fi->level;
171
+}
172
+
173
static G_NORETURN
174
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
175
MMUAccessType access_type,
176
int mmu_idx, ARMMMUFaultInfo *fi)
104
{
177
{
105
int prot = 0;
178
CPUARMState *env = &cpu->env;
106
179
- int target_el;
107
@@ -XXX,XX +XXX,XX @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn)
180
+ int target_el = exception_target_el(env);
108
if (s2ap & 2) {
181
+ int current_el = arm_current_el(env);
109
prot |= PAGE_WRITE;
182
bool same_el;
110
}
183
uint32_t syn, exc, fsr, fsc;
111
- if (!xn) {
184
112
- if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
185
- target_el = exception_target_el(env);
113
+
186
+ if (report_as_gpc_exception(cpu, current_el, fi)) {
114
+ if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
187
+ target_el = 3;
115
+ switch (xn) {
188
+
116
+ case 0:
189
+ fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
117
prot |= PAGE_EXEC;
190
+
118
+ break;
191
+ syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
119
+ case 1:
192
+ access_type == MMU_INST_FETCH,
120
+ if (s1_is_el0) {
193
+ encode_gpcsc(fi), 0, fi->s1ptw,
121
+ prot |= PAGE_EXEC;
194
+ access_type == MMU_DATA_STORE, fsc);
122
+ }
195
+
123
+ break;
196
+ env->cp15.mfar_el3 = fi->paddr;
124
+ case 2:
197
+ switch (fi->paddr_space) {
125
+ break;
198
+ case ARMSS_Secure:
126
+ case 3:
199
+ break;
127
+ if (!s1_is_el0) {
200
+ case ARMSS_NonSecure:
128
+ prot |= PAGE_EXEC;
201
+ env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
129
+ }
202
+ break;
203
+ case ARMSS_Root:
204
+ env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
205
+ break;
206
+ case ARMSS_Realm:
207
+ env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
130
+ break;
208
+ break;
131
+ default:
209
+ default:
132
+ g_assert_not_reached();
210
+ g_assert_not_reached();
133
+ }
211
+ }
134
+ } else {
212
+
135
+ if (!extract32(xn, 1, 1)) {
213
+ exc = EXCP_GPC;
136
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
214
+ goto do_raise;
137
+ prot |= PAGE_EXEC;
215
+ }
138
+ }
216
+
217
+ /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
218
+ if (fi->gpcf == GPCF_Fail && target_el < 2) {
219
+ if (arm_hcr_el2_eff(env) & HCR_GPF) {
220
+ target_el = 2;
221
+ }
222
+ }
223
+
224
if (fi->stage2) {
225
target_el = 2;
226
env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
227
@@ -XXX,XX +XXX,XX @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
228
env->cp15.hpfar_el2 |= HPFAR_NS;
139
}
229
}
140
}
230
}
141
return prot;
231
- same_el = (arm_current_el(env) == target_el);
142
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
232
233
+ same_el = current_el == target_el;
234
fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
235
236
if (access_type == MMU_INST_FETCH) {
237
@@ -XXX,XX +XXX,XX @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
238
exc = EXCP_DATA_ABORT;
143
}
239
}
144
240
145
ap = extract32(attrs, 4, 2);
241
+ do_raise:
146
- xn = extract32(attrs, 12, 1);
242
env->exception.vaddress = addr;
147
243
env->exception.fsr = fsr;
148
if (mmu_idx == ARMMMUIdx_Stage2) {
244
raise_exception(env, exc, syn, target_el);
149
ns = true;
150
- *prot = get_S2prot(env, ap, xn);
151
+ xn = extract32(attrs, 11, 2);
152
+ *prot = get_S2prot(env, ap, xn, s1_is_el0);
153
} else {
154
ns = extract32(attrs, 3, 1);
155
+ xn = extract32(attrs, 12, 1);
156
pxn = extract32(attrs, 11, 1);
157
*prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
158
}
159
--
245
--
160
2.20.1
246
2.34.1
161
162
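A reading aid for the TTS2UXN patch above, restating (not extending) the new get_S2prot() switch from the diff:

/*
 * Two-bit stage 2 xn value as handled in get_S2prot(), where s1_is_el0
 * means "this is stage 2 of a stage 1+2 walk for an EL0 access":
 *   xn == 0 -> executable at EL1 and EL0
 *   xn == 1 -> PAGE_EXEC only when s1_is_el0 (EL0 accesses only)
 *   xn == 2 -> never executable
 *   xn == 3 -> PAGE_EXEC only when !s1_is_el0 (EL1 accesses only)
 */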
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add support for SD.
3
Place the check at the end of get_phys_addr_with_struct,
4
so that we check all physical results.
4
5
5
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20230620124418.805717-20-richard.henderson@linaro.org
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-9-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
include/hw/arm/xlnx-versal.h | 12 ++++++++++++
11
target/arm/ptw.c | 249 +++++++++++++++++++++++++++++++++++++++++++----
13
hw/arm/xlnx-versal.c | 31 +++++++++++++++++++++++++++++++
12
1 file changed, 232 insertions(+), 17 deletions(-)
14
2 files changed, 43 insertions(+)
15
13
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/arm/xlnx-versal.h
16
--- a/target/arm/ptw.c
19
+++ b/include/hw/arm/xlnx-versal.h
17
+++ b/target/arm/ptw.c
20
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ typedef struct S1Translate {
21
19
void *out_host;
22
#include "hw/sysbus.h"
20
} S1Translate;
23
#include "hw/arm/boot.h"
21
24
+#include "hw/sd/sdhci.h"
22
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
25
#include "hw/intc/arm_gicv3.h"
23
- target_ulong address,
26
#include "hw/char/pl011.h"
24
- MMUAccessType access_type,
27
#include "hw/dma/xlnx-zdma.h"
25
- GetPhysAddrResult *result,
28
@@ -XXX,XX +XXX,XX @@
26
- ARMMMUFaultInfo *fi);
29
#define XLNX_VERSAL_NR_UARTS 2
27
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
30
#define XLNX_VERSAL_NR_GEMS 2
28
+ target_ulong address,
31
#define XLNX_VERSAL_NR_ADMAS 8
29
+ MMUAccessType access_type,
32
+#define XLNX_VERSAL_NR_SDS 2
30
+ GetPhysAddrResult *result,
33
#define XLNX_VERSAL_NR_IRQS 192
31
+ ARMMMUFaultInfo *fi);
34
32
+
35
typedef struct Versal {
33
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
36
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
34
+ target_ulong address,
37
} iou;
35
+ MMUAccessType access_type,
38
} lpd;
36
+ GetPhysAddrResult *result,
39
37
+ ARMMMUFaultInfo *fi);
40
+ /* The Platform Management Controller subsystem. */
38
41
+ struct {
39
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
42
+ struct {
40
static const uint8_t pamax_map[] = {
43
+ SDHCIState sd[XLNX_VERSAL_NR_SDS];
41
@@ -XXX,XX +XXX,XX @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
44
+ } iou;
42
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
45
+ } pmc;
43
}
46
+
44
47
struct {
45
+static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
48
MemoryRegion *mr_ddr;
46
+ ARMSecuritySpace pspace,
49
uint32_t psci_conduit;
47
+ ARMMMUFaultInfo *fi)
50
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
48
+{
51
#define VERSAL_GEM1_IRQ_0 58
49
+ MemTxAttrs attrs = {
52
#define VERSAL_GEM1_WAKE_IRQ_0 59
50
+ .secure = true,
53
#define VERSAL_ADMA_IRQ_0 60
51
+ .space = ARMSS_Root,
54
+#define VERSAL_SD0_IRQ_0 126
52
+ };
55
53
+ ARMCPU *cpu = env_archcpu(env);
56
/* Architecturally reserved IRQs suitable for virtualization. */
54
+ uint64_t gpccr = env->cp15.gpccr_el3;
57
#define VERSAL_RSVD_IRQ_FIRST 111
55
+ unsigned pps, pgs, l0gptsz, level = 0;
58
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
56
+ uint64_t tableaddr, pps_mask, align, entry, index;
59
#define MM_FPD_CRF 0xfd1a0000U
57
+ AddressSpace *as;
60
#define MM_FPD_CRF_SIZE 0x140000
58
+ MemTxResult result;
61
59
+ int gpi;
62
+#define MM_PMC_SD0 0xf1040000U
60
+
63
+#define MM_PMC_SD0_SIZE 0x10000
61
+ if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
64
#define MM_PMC_CRP 0xf1260000U
62
+ return true;
65
#define MM_PMC_CRP_SIZE 0x10000
63
+ }
66
#endif
64
+
67
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
65
+ /*
68
index XXXXXXX..XXXXXXX 100644
66
+ * GPC Priority 1 (R_GMGRR):
69
--- a/hw/arm/xlnx-versal.c
67
+ * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
70
+++ b/hw/arm/xlnx-versal.c
68
+ * the access fails as GPT walk fault at level 0.
71
@@ -XXX,XX +XXX,XX @@ static void versal_create_admas(Versal *s, qemu_irq *pic)
69
+ */
70
+
71
+ /*
72
+ * Configuration of PPS to a value exceeding the implemented
73
+ * physical address size is invalid.
74
+ */
75
+ pps = FIELD_EX64(gpccr, GPCCR, PPS);
76
+ if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
77
+ goto fault_walk;
78
+ }
79
+ pps = pamax_map[pps];
80
+ pps_mask = MAKE_64BIT_MASK(0, pps);
81
+
82
+ switch (FIELD_EX64(gpccr, GPCCR, SH)) {
83
+ case 0b10: /* outer shareable */
84
+ break;
85
+ case 0b00: /* non-shareable */
86
+ case 0b11: /* inner shareable */
87
+ /* Inner and Outer non-cacheable requires Outer shareable. */
88
+ if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
89
+ FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
90
+ goto fault_walk;
91
+ }
92
+ break;
93
+ default: /* reserved */
94
+ goto fault_walk;
95
+ }
96
+
97
+ switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
98
+ case 0b00: /* 4KB */
99
+ pgs = 12;
100
+ break;
101
+ case 0b01: /* 64KB */
102
+ pgs = 16;
103
+ break;
104
+ case 0b10: /* 16KB */
105
+ pgs = 14;
106
+ break;
107
+ default: /* reserved */
108
+ goto fault_walk;
109
+ }
110
+
111
+ /* Note this field is read-only and fixed at reset. */
112
+ l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
113
+
114
+ /*
115
+ * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
116
+ * R_CPDSB: A NonSecure physical address input exceeding PPS
117
+ * does not experience any fault.
118
+ */
119
+ if (paddress & ~pps_mask) {
120
+ if (pspace == ARMSS_NonSecure) {
121
+ return true;
122
+ }
123
+ goto fault_size;
124
+ }
125
+
126
+ /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
127
+ tableaddr = env->cp15.gptbr_el3 << 12;
128
+ if (tableaddr & ~pps_mask) {
129
+ goto fault_size;
130
+ }
131
+
132
+ /*
133
+ * BADDR is aligned per a function of PPS and L0GPTSZ.
134
+ * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
135
+ * unlike the RES0 bits of the GPT entries (R_XNKFZ).
136
+ */
137
+ align = MAX(pps - l0gptsz + 3, 12);
138
+ align = MAKE_64BIT_MASK(0, align);
139
+ tableaddr &= ~align;
140
+
141
+ as = arm_addressspace(env_cpu(env), attrs);
142
+
143
+ /* Level 0 lookup. */
144
+ index = extract64(paddress, l0gptsz, pps - l0gptsz);
145
+ tableaddr += index * 8;
146
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
147
+ if (result != MEMTX_OK) {
148
+ goto fault_eabt;
149
+ }
150
+
151
+ switch (extract32(entry, 0, 4)) {
152
+ case 1: /* block descriptor */
153
+ if (entry >> 8) {
154
+ goto fault_walk; /* RES0 bits not 0 */
155
+ }
156
+ gpi = extract32(entry, 4, 4);
157
+ goto found;
158
+ case 3: /* table descriptor */
159
+ tableaddr = entry & ~0xf;
160
+ align = MAX(l0gptsz - pgs - 1, 12);
161
+ align = MAKE_64BIT_MASK(0, align);
162
+ if (tableaddr & (~pps_mask | align)) {
163
+ goto fault_walk; /* RES0 bits not 0 */
164
+ }
165
+ break;
166
+ default: /* invalid */
167
+ goto fault_walk;
168
+ }
169
+
170
+ /* Level 1 lookup */
171
+ level = 1;
172
+ index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
173
+ tableaddr += index * 8;
174
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
175
+ if (result != MEMTX_OK) {
176
+ goto fault_eabt;
177
+ }
178
+
179
+ switch (extract32(entry, 0, 4)) {
180
+ case 1: /* contiguous descriptor */
181
+ if (entry >> 10) {
182
+ goto fault_walk; /* RES0 bits not 0 */
183
+ }
184
+ /*
185
+ * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
186
+ * and because we cannot invalidate by pa, and thus will always
187
+ * flush entire tlbs, we don't actually care about the range here
188
+ * and can simply extract the GPI as the result.
189
+ */
190
+ if (extract32(entry, 8, 2) == 0) {
191
+ goto fault_walk; /* reserved contig */
192
+ }
193
+ gpi = extract32(entry, 4, 4);
194
+ break;
195
+ default:
196
+ index = extract64(paddress, pgs, 4);
197
+ gpi = extract64(entry, index * 4, 4);
198
+ break;
199
+ }
200
+
201
+ found:
202
+ switch (gpi) {
203
+ case 0b0000: /* no access */
204
+ break;
205
+ case 0b1111: /* all access */
206
+ return true;
207
+ case 0b1000:
208
+ case 0b1001:
209
+ case 0b1010:
210
+ case 0b1011:
211
+ if (pspace == (gpi & 3)) {
212
+ return true;
213
+ }
214
+ break;
215
+ default:
216
+ goto fault_walk; /* reserved */
217
+ }
218
+
219
+ fi->gpcf = GPCF_Fail;
220
+ goto fault_common;
221
+ fault_eabt:
222
+ fi->gpcf = GPCF_EABT;
223
+ goto fault_common;
224
+ fault_size:
225
+ fi->gpcf = GPCF_AddressSize;
226
+ goto fault_common;
227
+ fault_walk:
228
+ fi->gpcf = GPCF_Walk;
229
+ fault_common:
230
+ fi->level = level;
231
+ fi->paddr = paddress;
232
+ fi->paddr_space = pspace;
233
+ return false;
234
+}
235
+
236
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
237
{
238
/*
239
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
240
};
241
GetPhysAddrResult s2 = { };
242
243
- if (get_phys_addr_with_struct(env, &s2ptw, addr,
244
- MMU_DATA_LOAD, &s2, fi)) {
245
+ if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
246
goto fail;
247
}
248
+
249
ptw->out_phys = s2.f.phys_addr;
250
pte_attrs = s2.cacheattrs.attrs;
251
ptw->out_host = NULL;
252
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
253
254
fail:
255
assert(fi->type != ARMFault_None);
256
+ if (fi->type == ARMFault_GPCFOnOutput) {
257
+ fi->type = ARMFault_GPCFOnWalk;
258
+ }
259
fi->s2addr = addr;
260
fi->stage2 = true;
261
fi->s1ptw = true;
262
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
263
ARMMMUFaultInfo *fi)
264
{
265
uint8_t memattr = 0x00; /* Device nGnRnE */
266
- uint8_t shareability = 0; /* non-sharable */
267
+ uint8_t shareability = 0; /* non-shareable */
268
int r_el;
269
270
switch (mmu_idx) {
271
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
272
} else {
273
memattr = 0x44; /* Normal, NC, No */
274
}
275
- shareability = 2; /* outer sharable */
276
+ shareability = 2; /* outer shareable */
277
}
278
result->cacheattrs.is_s2_format = false;
279
break;
280
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
281
ARMSecuritySpace ipa_space;
282
uint64_t hcr;
283
284
- ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
285
+ ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
286
287
/* If S1 fails, return early. */
288
if (ret) {
289
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
290
cacheattrs1 = result->cacheattrs;
291
memset(result, 0, sizeof(*result));
292
293
- ret = get_phys_addr_with_struct(env, ptw, ipa, access_type, result, fi);
294
+ ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
295
fi->s2addr = ipa;
296
297
/* Combine the S1 and S2 perms. */
298
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
299
return false;
300
}
301
302
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
303
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
304
target_ulong address,
305
MMUAccessType access_type,
306
GetPhysAddrResult *result,
307
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
72
}
308
}
73
}
309
}
74
310
75
+#define SDHCI_CAPABILITIES 0x280737ec6481 /* Same as on ZynqMP. */
311
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
76
+static void versal_create_sds(Versal *s, qemu_irq *pic)
312
+ target_ulong address,
313
+ MMUAccessType access_type,
314
+ GetPhysAddrResult *result,
315
+ ARMMMUFaultInfo *fi)
77
+{
316
+{
78
+ int i;
317
+ if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
79
+
318
+ return true;
80
+ for (i = 0; i < ARRAY_SIZE(s->pmc.iou.sd); i++) {
319
+ }
81
+ DeviceState *dev;
320
+ if (!granule_protection_check(env, result->f.phys_addr,
82
+ MemoryRegion *mr;
321
+ result->f.attrs.space, fi)) {
83
+
322
+ fi->type = ARMFault_GPCFOnOutput;
84
+ sysbus_init_child_obj(OBJECT(s), "sd[*]",
323
+ return true;
85
+ &s->pmc.iou.sd[i], sizeof(s->pmc.iou.sd[i]),
324
+ }
86
+ TYPE_SYSBUS_SDHCI);
325
+ return false;
87
+ dev = DEVICE(&s->pmc.iou.sd[i]);
88
+
89
+ object_property_set_uint(OBJECT(dev),
90
+ 3, "sd-spec-version", &error_fatal);
91
+ object_property_set_uint(OBJECT(dev), SDHCI_CAPABILITIES, "capareg",
92
+ &error_fatal);
93
+ object_property_set_uint(OBJECT(dev), UHS_I, "uhs", &error_fatal);
94
+ qdev_init_nofail(dev);
95
+
96
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
97
+ memory_region_add_subregion(&s->mr_ps,
98
+ MM_PMC_SD0 + i * MM_PMC_SD0_SIZE, mr);
99
+
100
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
101
+ pic[VERSAL_SD0_IRQ_0 + i * 2]);
102
+ }
103
+}
326
+}
104
+
327
+
105
/* This takes the board allocated linear DDR memory and creates aliases
328
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
106
* for each split DDR range/aperture on the Versal address map.
329
MMUAccessType access_type, ARMMMUIdx mmu_idx,
107
*/
330
bool is_secure, GetPhysAddrResult *result,
108
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
331
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
109
versal_create_uarts(s, pic);
332
.in_secure = is_secure,
110
versal_create_gems(s, pic);
333
.in_space = arm_secure_to_space(is_secure),
111
versal_create_admas(s, pic);
334
};
112
+ versal_create_sds(s, pic);
335
- return get_phys_addr_with_struct(env, &ptw, address, access_type,
113
versal_map_ddr(s);
336
- result, fi);
114
versal_unimp(s);
337
+ return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
115
338
}
339
340
bool get_phys_addr(CPUARMState *env, target_ulong address,
341
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
342
343
ptw.in_space = ss;
344
ptw.in_secure = arm_space_is_secure(ss);
345
- return get_phys_addr_with_struct(env, &ptw, address, access_type,
346
- result, fi);
347
+ return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
348
}
349
350
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
351
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
352
ARMMMUFaultInfo fi = {};
353
bool ret;
354
355
- ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
356
+ ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
357
*attrs = res.f.attrs;
358
359
if (ret) {
116
--
360
--
117
2.20.1
361
2.34.1
118
119
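To make the granule_protection_check() walk above easier to follow, here is a worked example under assumed parameters (none of these numbers are mandated by the patch; they are just one legal configuration): a decoded PPS of 40 bits, PGS of 4KB so pgs == 12, and the default L0GPTSZ of 30, i.e. 1GB level 0 regions.

/*
 * With pps == 40, pgs == 12, l0gptsz == 30:
 *   level 0 index = extract64(paddress, 30, 40 - 30)     -> PA[39:30], one entry per 1GB
 *   level 1 index = extract64(paddress, 12 + 4, 30 - 16) -> PA[29:16], one entry per 16 granules
 *   GPI select    = extract64(paddress, 12, 4)           -> PA[15:12], one of 16 4-bit GPIs
 *                                                           packed into a 64-bit entry
 * 2^14 level 1 entries, each covering 16 * 4KB = 64KB, span the 1GB
 * covered by a single level 0 entry (2^14 * 64KB = 1GB).
 */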
1
Convert the Neon "load/store single structure to one lane" insns to
1
From: Richard Henderson <richard.henderson@linaro.org>
2
decodetree.
3
2
4
As this is the last set of insns in the neon load/store group,
3
Add an x-rme cpu property to enable FEAT_RME.
5
we can remove the whole disas_neon_ls_insn() function.
4
Add an x-l0gptsz property to set GPCCR_EL3.L0GPTSZ,
5
for testing various possible configurations.
6
6
7
We're not currently completely sure whether FEAT_RME will
8
be OK to enable purely as a CPU-level property, or if it will
9
need board co-operation, so we're making these experimental
10
x- properties, so that the people developing the system
11
level software for RME can try to start using this and let
12
us know how it goes. The command line syntax for enabling
13
this will change in future, without backwards-compatibility.
14
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20230620124418.805717-21-richard.henderson@linaro.org
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200430181003.21682-14-peter.maydell@linaro.org
10
---
19
---
11
target/arm/neon-ls.decode | 11 +++
20
target/arm/tcg/cpu64.c | 53 ++++++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-neon.inc.c | 89 +++++++++++++++++++
21
1 file changed, 53 insertions(+)
13
target/arm/translate.c | 147 --------------------------------
14
3 files changed, 100 insertions(+), 147 deletions(-)
15
22
16
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
23
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
17
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/neon-ls.decode
25
--- a/target/arm/tcg/cpu64.c
19
+++ b/target/arm/neon-ls.decode
26
+++ b/target/arm/tcg/cpu64.c
20
@@ -XXX,XX +XXX,XX @@ VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
27
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
21
28
cpu->sve_max_vq = max_vq;
22
VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
29
}
23
vd=%vd_dp
30
24
+
31
+static bool cpu_arm_get_rme(Object *obj, Error **errp)
25
+# Neon load/store single structure to one lane
26
+%imm1_5_p1 5:1 !function=plus1
27
+%imm1_6_p1 6:1 !function=plus1
28
+
29
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
30
+ vd=%vd_dp size=0 stride=1
31
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
32
+ vd=%vd_dp size=1 stride=%imm1_5_p1
33
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
34
+ vd=%vd_dp size=2 stride=%imm1_6_p1
35
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/translate-neon.inc.c
38
+++ b/target/arm/translate-neon.inc.c
39
@@ -XXX,XX +XXX,XX @@
40
* It might be possible to convert it to a standalone .c file eventually.
41
*/
42
43
+static inline int plus1(DisasContext *s, int x)
44
+{
32
+{
45
+ return x + 1;
33
+ ARMCPU *cpu = ARM_CPU(obj);
34
+ return cpu_isar_feature(aa64_rme, cpu);
46
+}
35
+}
47
+
36
+
48
/* Include the generated Neon decoder */
37
+static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
49
#include "decode-neon-dp.inc.c"
38
+{
50
#include "decode-neon-ls.inc.c"
39
+ ARMCPU *cpu = ARM_CPU(obj);
51
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
40
+ uint64_t t;
52
53
return true;
54
}
55
+
41
+
56
+static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
42
+ t = cpu->isar.id_aa64pfr0;
43
+ t = FIELD_DP64(t, ID_AA64PFR0, RME, value);
44
+ cpu->isar.id_aa64pfr0 = t;
45
+}
46
+
47
+static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
48
+ void *opaque, Error **errp)
57
+{
49
+{
58
+ /* Neon load/store single structure to one lane */
50
+ ARMCPU *cpu = ARM_CPU(obj);
59
+ int reg;
51
+ uint32_t value;
60
+ int nregs = a->n + 1;
61
+ int vd = a->vd;
62
+ TCGv_i32 addr, tmp;
63
+
52
+
64
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
53
+ if (!visit_type_uint32(v, name, &value, errp)) {
65
+ return false;
54
+ return;
66
+ }
55
+ }
67
+
56
+
68
+ /* UNDEF accesses to D16-D31 if they don't exist */
57
+ /* Encode the value for the GPCCR_EL3 field. */
69
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
58
+ switch (value) {
70
+ return false;
59
+ case 30:
71
+ }
60
+ case 34:
72
+
61
+ case 36:
73
+ /* Catch the UNDEF cases. This is unavoidably a bit messy. */
62
+ case 39:
74
+ switch (nregs) {
63
+ cpu->reset_l0gptsz = value - 30;
75
+ case 1:
76
+ if (((a->align & (1 << a->size)) != 0) ||
77
+ (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
78
+ return false;
79
+ }
80
+ break;
81
+ case 3:
82
+ if ((a->align & 1) != 0) {
83
+ return false;
84
+ }
85
+ /* fall through */
86
+ case 2:
87
+ if (a->size == 2 && (a->align & 2) != 0) {
88
+ return false;
89
+ }
90
+ break;
91
+ case 4:
92
+ if ((a->size == 2) && ((a->align & 3) == 3)) {
93
+ return false;
94
+ }
95
+ break;
64
+ break;
96
+ default:
65
+ default:
97
+ abort();
66
+ error_setg(errp, "invalid value for l0gptsz");
67
+ error_append_hint(errp, "valid values are 30, 34, 36, 39\n");
68
+ break;
98
+ }
69
+ }
99
+ if ((vd + a->stride * (nregs - 1)) > 31) {
70
+}
100
+ /*
101
+ * Attempts to write off the end of the register file are
102
+ * UNPREDICTABLE; we choose to UNDEF because otherwise we would
103
+ * access off the end of the array that holds the register data.
104
+ */
105
+ return false;
106
+ }
107
+
71
+
108
+ if (!vfp_access_check(s)) {
72
+static void cpu_max_get_l0gptsz(Object *obj, Visitor *v, const char *name,
109
+ return true;
73
+ void *opaque, Error **errp)
110
+ }
74
+{
75
+ ARMCPU *cpu = ARM_CPU(obj);
76
+ uint32_t value = cpu->reset_l0gptsz + 30;
111
+
77
+
112
+ tmp = tcg_temp_new_i32();
78
+ visit_type_uint32(v, name, &value, errp);
113
+ addr = tcg_temp_new_i32();
79
+}
114
+ load_reg_var(s, addr, a->rn);
115
+ /*
116
+ * TODO: if we implemented alignment exceptions, we should check
117
+ * addr against the alignment encoded in a->align here.
118
+ */
119
+ for (reg = 0; reg < nregs; reg++) {
120
+ if (a->l) {
121
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
122
+ s->be_data | a->size);
123
+ neon_store_element(vd, a->reg_idx, a->size, tmp);
124
+ } else { /* Store */
125
+ neon_load_element(tmp, vd, a->reg_idx, a->size);
126
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
127
+ s->be_data | a->size);
128
+ }
129
+ vd += a->stride;
130
+ tcg_gen_addi_i32(addr, addr, 1 << a->size);
131
+ }
132
+ tcg_temp_free_i32(addr);
133
+ tcg_temp_free_i32(tmp);
134
+
80
+
135
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
81
static Property arm_cpu_lpa2_property =
136
+
82
DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
137
+ return true;
83
138
+}
84
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
139
diff --git a/target/arm/translate.c b/target/arm/translate.c
85
aarch64_add_sme_properties(obj);
140
index XXXXXXX..XXXXXXX 100644
86
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
141
--- a/target/arm/translate.c
87
cpu_max_set_sve_max_vq, NULL, NULL);
142
+++ b/target/arm/translate.c
88
+ object_property_add_bool(obj, "x-rme", cpu_arm_get_rme, cpu_arm_set_rme);
143
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
89
+ object_property_add(obj, "x-l0gptsz", "uint32", cpu_max_get_l0gptsz,
144
tcg_temp_free_i32(rd);
90
+ cpu_max_set_l0gptsz, NULL, NULL);
91
qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
145
}
92
}
146
93
147
-
148
-/* Translate a NEON load/store element instruction. Return nonzero if the
149
- instruction is invalid. */
150
-static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
151
-{
152
- int rd, rn, rm;
153
- int nregs;
154
- int stride;
155
- int size;
156
- int reg;
157
- int load;
158
- TCGv_i32 addr;
159
- TCGv_i32 tmp;
160
-
161
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
162
- return 1;
163
- }
164
-
165
- /* FIXME: this access check should not take precedence over UNDEF
166
- * for invalid encodings; we will generate incorrect syndrome information
167
- * for attempts to execute invalid vfp/neon encodings with FP disabled.
168
- */
169
- if (s->fp_excp_el) {
170
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
171
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
172
- return 0;
173
- }
174
-
175
- if (!s->vfp_enabled)
176
- return 1;
177
- VFP_DREG_D(rd, insn);
178
- rn = (insn >> 16) & 0xf;
179
- rm = insn & 0xf;
180
- load = (insn & (1 << 21)) != 0;
181
- if ((insn & (1 << 23)) == 0) {
182
- /* Load store all elements -- handled already by decodetree */
183
- return 1;
184
- } else {
185
- size = (insn >> 10) & 3;
186
- if (size == 3) {
187
- /* Load single element to all lanes -- handled by decodetree */
188
- return 1;
189
- } else {
190
- /* Single element. */
191
- int idx = (insn >> 4) & 0xf;
192
- int reg_idx;
193
- switch (size) {
194
- case 0:
195
- reg_idx = (insn >> 5) & 7;
196
- stride = 1;
197
- break;
198
- case 1:
199
- reg_idx = (insn >> 6) & 3;
200
- stride = (insn & (1 << 5)) ? 2 : 1;
201
- break;
202
- case 2:
203
- reg_idx = (insn >> 7) & 1;
204
- stride = (insn & (1 << 6)) ? 2 : 1;
205
- break;
206
- default:
207
- abort();
208
- }
209
- nregs = ((insn >> 8) & 3) + 1;
210
- /* Catch the UNDEF cases. This is unavoidably a bit messy. */
211
- switch (nregs) {
212
- case 1:
213
- if (((idx & (1 << size)) != 0) ||
214
- (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
215
- return 1;
216
- }
217
- break;
218
- case 3:
219
- if ((idx & 1) != 0) {
220
- return 1;
221
- }
222
- /* fall through */
223
- case 2:
224
- if (size == 2 && (idx & 2) != 0) {
225
- return 1;
226
- }
227
- break;
228
- case 4:
229
- if ((size == 2) && ((idx & 3) == 3)) {
230
- return 1;
231
- }
232
- break;
233
- default:
234
- abort();
235
- }
236
- if ((rd + stride * (nregs - 1)) > 31) {
237
- /* Attempts to write off the end of the register file
238
- * are UNPREDICTABLE; we choose to UNDEF because otherwise
239
- * the neon_load_reg() would write off the end of the array.
240
- */
241
- return 1;
242
- }
243
- tmp = tcg_temp_new_i32();
244
- addr = tcg_temp_new_i32();
245
- load_reg_var(s, addr, rn);
246
- for (reg = 0; reg < nregs; reg++) {
247
- if (load) {
248
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
249
- s->be_data | size);
250
- neon_store_element(rd, reg_idx, size, tmp);
251
- } else { /* Store */
252
- neon_load_element(tmp, rd, reg_idx, size);
253
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
254
- s->be_data | size);
255
- }
256
- rd += stride;
257
- tcg_gen_addi_i32(addr, addr, 1 << size);
258
- }
259
- tcg_temp_free_i32(addr);
260
- tcg_temp_free_i32(tmp);
261
- stride = nregs * (1 << size);
262
- }
263
- }
264
- if (rm != 15) {
265
- TCGv_i32 base;
266
-
267
- base = load_reg(s, rn);
268
- if (rm == 13) {
269
- tcg_gen_addi_i32(base, base, stride);
270
- } else {
271
- TCGv_i32 index;
272
- index = load_reg(s, rm);
273
- tcg_gen_add_i32(base, base, index);
274
- tcg_temp_free_i32(index);
275
- }
276
- store_reg(s, rn, base);
277
- }
278
- return 0;
279
-}
280
-
281
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
282
{
283
switch (size) {
284
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
285
}
286
return;
287
}
288
- if ((insn & 0x0f100000) == 0x04000000) {
289
- /* NEON load/store. */
290
- if (disas_neon_ls_insn(s, insn)) {
291
- goto illegal_op;
292
- }
293
- return;
294
- }
295
if ((insn & 0x0e000f00) == 0x0c000100) {
296
if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
297
/* iWMMXt register transfer. */
298
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
299
}
300
break;
301
case 12:
302
- if ((insn & 0x01100000) == 0x01000000) {
303
- if (disas_neon_ls_insn(s, insn)) {
304
- goto illegal_op;
305
- }
306
- break;
307
- }
308
goto illegal_op;
309
default:
310
illegal_op:
311
--
94
--
312
2.20.1
95
2.34.1
313
314
diff view generated by jsdifflib
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Remove inclusion of arm_gicv3_common.h, this already gets
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
included via xlnx-versal.h.
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
5
Message-id: 20230622143046.1578160-1-richard.henderson@linaro.org
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
[PMM: fixed typo; note experimental status in emulation.rst too]
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20200427181649.26851-2-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
8
---
12
hw/arm/xlnx-versal.c | 1 -
9
docs/system/arm/cpu-features.rst | 23 +++++++++++++++++++++++
13
1 file changed, 1 deletion(-)
10
docs/system/arm/emulation.rst | 1 +
11
2 files changed, 24 insertions(+)
14
12
15
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
13
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/xlnx-versal.c
15
--- a/docs/system/arm/cpu-features.rst
18
+++ b/hw/arm/xlnx-versal.c
16
+++ b/docs/system/arm/cpu-features.rst
19
@@ -XXX,XX +XXX,XX @@
17
@@ -XXX,XX +XXX,XX @@ As with ``sve-default-vector-length``, if the default length is larger
20
#include "hw/arm/boot.h"
18
than the maximum vector length enabled, the actual vector length will
21
#include "kvm_arm.h"
19
be reduced. If this property is set to ``-1`` then the default vector
22
#include "hw/misc/unimp.h"
20
length is set to the maximum possible length.
23
-#include "hw/intc/arm_gicv3_common.h"
21
+
24
#include "hw/arm/xlnx-versal.h"
22
+RME CPU Properties
25
#include "hw/char/pl011.h"
23
+==================
26
24
+
25
+The status of RME support with QEMU is experimental. At this time we
26
+only support RME within the CPU proper, not within the SMMU or GIC.
27
+The feature is enabled by the CPU property ``x-rme``, with the ``x-``
28
+prefix present as a reminder of the experimental status, and defaults off.
29
+
30
+The method for enabling RME will change in some future QEMU release
31
+without notice or backward compatibility.
32
+
33
+RME Level 0 GPT Size Property
34
+-----------------------------
35
+
36
+To aid firmware developers in testing different possible CPU
37
+configurations, ``x-l0gptsz=S`` may be used to specify the value
38
+to encode into ``GPCCR_EL3.L0GPTSZ``, a read-only field that
39
+specifies the size of the Level 0 Granule Protection Table.
40
+Legal values for ``S`` are 30, 34, 36, and 39; the default is 30.
41
+
42
+As with ``x-rme``, the ``x-l0gptsz`` property may be renamed or
43
+removed in some future QEMU release.
44
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
45
index XXXXXXX..XXXXXXX 100644
46
--- a/docs/system/arm/emulation.rst
47
+++ b/docs/system/arm/emulation.rst
48
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
49
- FEAT_RAS (Reliability, availability, and serviceability)
50
- FEAT_RASv1p1 (RAS Extension v1.1)
51
- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
52
+- FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental)
53
- FEAT_RNG (Random number generator)
54
- FEAT_S2FWB (Stage 2 forced Write-Back)
55
- FEAT_SB (Speculation Barrier)
27
--
56
--
28
2.20.1
57
2.34.1
29
58
30
59
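As a concrete illustration of how the two experimental properties documented above combine (this invocation is an example only, not part of the patch, and the machine and CPU model choices are assumptions; any model that gives the CPU an EL3 would do), one might start QEMU with:

    qemu-system-aarch64 -M virt,secure=on -cpu max,x-rme=on,x-l0gptsz=34 ...

which turns on the experimental RME emulation and selects the 34 setting for GPCCR_EL3.L0GPTSZ.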
1
We're going to want at least some of the NeonGen* typedefs
1
We use __builtin_subcll() to do a 64-bit subtract with borrow-in and
2
for the refactored 32-bit Neon decoder, so move them all
2
borrow-out when the host compiler supports it. Unfortunately some
3
to translate.h since it makes more sense to keep them in
3
versions of Apple Clang have a bug in their implementation of this
4
one group.
4
intrinsic which means it returns the wrong value. The effect is that
5
a QEMU built with the affected compiler will hang when emulating x86
6
or m68k float80 division.
5
7
8
The upstream LLVM issue is:
9
https://github.com/llvm/llvm-project/issues/55253
10
11
The commit that introduced the bug apparently never made it into an
12
upstream LLVM release without the subsequent fix
13
https://github.com/llvm/llvm-project/commit/fffb6e6afdbaba563189c1f715058ed401fbc88d
14
but unfortunately it did make it into Apple Clang 14.0, as shipped
15
in Xcode 14.3 (14.2 is reported to be OK). The Apple bug number is
16
FB12210478.
17
18
Add ifdefs to avoid use of __builtin_subcll() on Apple Clang version
19
14 or greater. There is not currently a version of Apple Clang which
20
has the bug fix -- when one appears we should be able to add an upper
21
bound to the ifdef condition so we can start using the builtin again.
22
We make the lower bound a conservative "any Apple clang with major
23
version 14 or greater" because the consequences of incorrectly
24
disabling the builtin when it would work are pretty small and the
25
consequences of not disabling it when we should are pretty bad.
26
27
Many thanks to those users who both reported this bug and also
28
did a lot of work in identifying the root cause; in particular
29
to Daniel Bertalan and osy.
30
31
Cc: qemu-stable@nongnu.org
32
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1631
33
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1659
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
35
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200430181003.21682-23-peter.maydell@linaro.org
36
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
37
Tested-by: Daniel Bertalan <dani@danielbertalan.dev>
38
Tested-by: Solra Bizna <solra@bizna.name>
39
Message-id: 20230622130823.1631719-1-peter.maydell@linaro.org
9
---
40
---
10
target/arm/translate.h | 17 +++++++++++++++++
41
include/qemu/compiler.h | 13 +++++++++++++
11
target/arm/translate-a64.c | 17 -----------------
42
include/qemu/host-utils.h | 2 +-
12
2 files changed, 17 insertions(+), 17 deletions(-)
43
2 files changed, 14 insertions(+), 1 deletion(-)
13
44
14
diff --git a/target/arm/translate.h b/target/arm/translate.h
45
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
15
index XXXXXXX..XXXXXXX 100644
46
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.h
47
--- a/include/qemu/compiler.h
17
+++ b/target/arm/translate.h
48
+++ b/include/qemu/compiler.h
18
@@ -XXX,XX +XXX,XX @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
49
@@ -XXX,XX +XXX,XX @@
19
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
50
#define QEMU_DISABLE_CFI
20
uint32_t, uint32_t, uint32_t);
51
#endif
21
52
22
+/* Function prototype for gen_ functions for calling Neon helpers */
53
+/*
23
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
54
+ * Apple clang version 14 has a bug in its __builtin_subcll(); define
24
+typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
55
+ * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it.
25
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
56
+ * When a version of Apple clang which has this bug fixed is released
26
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
57
+ * we can add an upper bound to this check.
27
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
58
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1631
28
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
59
+ * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details.
29
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
60
+ * The bug never made it into any upstream LLVM releases, only Apple ones.
30
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
61
+ */
31
+typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
62
+#if defined(__apple_build_version__) && __clang_major__ >= 14
32
+typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
63
+#define BUILTIN_SUBCLL_BROKEN
33
+typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
64
+#endif
34
+typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
35
+typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
36
+typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
37
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
38
+
65
+
39
#endif /* TARGET_ARM_TRANSLATE_H */
66
#endif /* COMPILER_H */
40
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
67
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
41
index XXXXXXX..XXXXXXX 100644
68
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/translate-a64.c
69
--- a/include/qemu/host-utils.h
43
+++ b/target/arm/translate-a64.c
70
+++ b/include/qemu/host-utils.h
44
@@ -XXX,XX +XXX,XX @@ typedef struct AArch64DecodeTable {
71
@@ -XXX,XX +XXX,XX @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
45
AArch64DecodeFn *disas_fn;
72
*/
46
} AArch64DecodeTable;
73
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
47
48
-/* Function prototype for gen_ functions for calling Neon helpers */
49
-typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
50
-typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
51
-typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
52
-typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
53
-typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
54
-typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
55
-typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
56
-typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
57
-typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
58
-typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
59
-typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
60
-typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
61
-typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
62
-typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
63
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
64
-
65
/* initialize TCG globals. */
66
void a64_translate_init(void)
67
{
74
{
75
-#if __has_builtin(__builtin_subcll)
76
+#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
77
unsigned long long b = *pborrow;
78
x = __builtin_subcll(x, y, b, &b);
79
*pborrow = b & 1;
68
--
80
--
69
2.20.1
81
2.34.1
70
82
71
83
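To make the arithmetic concrete, here is a minimal portable sketch of the 64-bit subtract-with-borrow that usub64_borrow() has to compute when __builtin_subcll() cannot be used; the helper name is mine and this is an illustration, not the exact fallback code in host-utils.h.

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint64_t usub64_borrow_portable(uint64_t x, uint64_t y, bool *pborrow)
    {
        uint64_t b = *pborrow;          /* incoming borrow: 0 or 1 */
        uint64_t d = x - y;             /* may wrap, which is fine mod 2^64 */
        uint64_t r = d - b;

        /* Borrow out iff x < y + b as unbounded integers. */
        *pborrow = (x < y) || (d < b);
        return r;
    }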
1
Convert the VFM[AS]L (vector) insns to decodetree. This is the last
1
From: Richard Henderson <richard.henderson@linaro.org>
2
insn in the legacy decoder for the 3same_ext group, so we can
3
delete the legacy decoder function for the group entirely.
4
2
5
Note that in disas_thumb2_insn() the parts of this encoding space
3
One cannot test for feature aa32_simd_r32 without first
6
where the decodetree decoder returns false will correctly be directed
4
testing if AArch32 mode is supported at all. This leads to
7
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
8
into disas_coproc_insn() by mistake.
9
5
6
qemu-system-aarch64: ARM CPUs must have both VFP-D32 and Neon or neither
7
8
for Apple M1 cpus.
9
10
We already have a check for ARMv8-A never setting vfp-d32 true,
11
so restructure the code so that AArch64 avoids the test entirely.
12
13
Reported-by: Mads Ynddal <mads@ynddal.dk>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Tested-by: Mads Ynddal <m.ynddal@samsung.com>
17
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
18
Reviewed-by: Cédric Le Goater <clg@kaod.org>
19
Reviewed-by: Mads Ynddal <m.ynddal@samsung.com>
20
Message-id: 20230619140216.402530-1-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20200430181003.21682-8-peter.maydell@linaro.org
13
---
22
---
14
target/arm/neon-shared.decode | 6 +++
23
target/arm/cpu.c | 28 +++++++++++++++-------------
15
target/arm/translate-neon.inc.c | 31 +++++++++++
24
1 file changed, 15 insertions(+), 13 deletions(-)
16
target/arm/translate.c | 92 +--------------------------------
17
3 files changed, 38 insertions(+), 91 deletions(-)
18
25
19
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
26
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
20
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/neon-shared.decode
28
--- a/target/arm/cpu.c
22
+++ b/target/arm/neon-shared.decode
29
+++ b/target/arm/cpu.c
23
@@ -XXX,XX +XXX,XX @@ VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
30
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
24
# VUDOT and VSDOT
31
* KVM does not currently allow us to lie to the guest about its
25
VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
32
* ID/feature registers, so the guest always sees what the host has.
26
vm=%vm_dp vn=%vn_dp vd=%vd_dp
33
*/
27
+
34
- if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
28
+# VFM[AS]L
35
- ? cpu_isar_feature(aa64_fp_simd, cpu)
29
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
36
- : cpu_isar_feature(aa32_vfp, cpu)) {
30
+ vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
37
- cpu->has_vfp = true;
31
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
38
- if (!kvm_enabled()) {
32
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
39
- qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
33
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
40
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
34
index XXXXXXX..XXXXXXX 100644
41
+ if (cpu_isar_feature(aa64_fp_simd, cpu)) {
35
--- a/target/arm/translate-neon.inc.c
42
+ cpu->has_vfp = true;
36
+++ b/target/arm/translate-neon.inc.c
43
+ cpu->has_vfp_d32 = true;
37
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
44
+ if (tcg_enabled() || qtest_enabled()) {
38
opr_sz, opr_sz, 0, fn_gvec);
45
+ qdev_property_add_static(DEVICE(obj),
39
return true;
46
+ &arm_cpu_has_vfp_property);
40
}
47
+ }
41
+
48
}
42
+static bool trans_VFML(DisasContext *s, arg_VFML *a)
43
+{
44
+ int opr_sz;
45
+
46
+ if (!dc_isar_feature(aa32_fhm, s)) {
47
+ return false;
48
+ }
49
+
50
+ /* UNDEF accesses to D16-D31 if they don't exist. */
51
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
52
+ (a->vd & 0x10)) {
53
+ return false;
54
+ }
55
+
56
+ if (a->vd & a->q) {
57
+ return false;
58
+ }
59
+
60
+ if (!vfp_access_check(s)) {
61
+ return true;
62
+ }
63
+
64
+ opr_sz = (1 + a->q) * 8;
65
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
66
+ vfp_reg_offset(a->q, a->vn),
67
+ vfp_reg_offset(a->q, a->vm),
68
+ cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
69
+ gen_helper_gvec_fmlal_a32);
70
+ return true;
71
+}
72
diff --git a/target/arm/translate.c b/target/arm/translate.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/arm/translate.c
75
+++ b/target/arm/translate.c
76
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
77
return 0;
78
}
79
80
-/* Advanced SIMD three registers of the same length extension.
81
- * 31 25 23 22 20 16 12 11 10 9 8 3 0
82
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
83
- * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
84
- * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
85
- */
86
-static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
87
-{
88
- gen_helper_gvec_3 *fn_gvec = NULL;
89
- gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
90
- int rd, rn, rm, opr_sz;
91
- int data = 0;
92
- int off_rn, off_rm;
93
- bool is_long = false, q = extract32(insn, 6, 1);
94
- bool ptr_is_env = false;
95
-
96
- if ((insn & 0xff300f10) == 0xfc200810) {
97
- /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
98
- int is_s = extract32(insn, 23, 1);
99
- if (!dc_isar_feature(aa32_fhm, s)) {
100
- return 1;
101
- }
102
- is_long = true;
103
- data = is_s; /* is_2 == 0 */
104
- fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
105
- ptr_is_env = true;
106
- } else {
107
- return 1;
108
- }
49
- }
109
-
50
-
110
- VFP_DREG_D(rd, insn);
51
- if (cpu->has_vfp && cpu_isar_feature(aa32_simd_r32, cpu)) {
111
- if (rd & q) {
52
- cpu->has_vfp_d32 = true;
112
- return 1;
53
- if (!kvm_enabled()) {
113
- }
54
+ } else if (cpu_isar_feature(aa32_vfp, cpu)) {
114
- if (q || !is_long) {
55
+ cpu->has_vfp = true;
115
- VFP_DREG_N(rn, insn);
56
+ if (cpu_isar_feature(aa32_simd_r32, cpu)) {
116
- VFP_DREG_M(rm, insn);
57
+ cpu->has_vfp_d32 = true;
117
- if ((rn | rm) & q & !is_long) {
58
/*
118
- return 1;
59
* The permitted values of the SIMDReg bits [3:0] on
119
- }
60
* Armv8-A are either 0b0000 and 0b0010. On such CPUs,
120
- off_rn = vfp_reg_offset(1, rn);
61
* make sure that has_vfp_d32 can not be set to false.
121
- off_rm = vfp_reg_offset(1, rm);
62
*/
122
- } else {
63
- if (!(arm_feature(&cpu->env, ARM_FEATURE_V8) &&
123
- rn = VFP_SREG_N(insn);
64
- !arm_feature(&cpu->env, ARM_FEATURE_M))) {
124
- rm = VFP_SREG_M(insn);
65
+ if ((tcg_enabled() || qtest_enabled())
125
- off_rn = vfp_reg_offset(0, rn);
66
+ && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
126
- off_rm = vfp_reg_offset(0, rm);
67
+ && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
127
- }
68
qdev_property_add_static(DEVICE(obj),
128
-
69
&arm_cpu_has_vfp_d32_property);
129
- if (s->fp_excp_el) {
130
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
131
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
132
- return 0;
133
- }
134
- if (!s->vfp_enabled) {
135
- return 1;
136
- }
137
-
138
- opr_sz = (1 + q) * 8;
139
- if (fn_gvec_ptr) {
140
- TCGv_ptr ptr;
141
- if (ptr_is_env) {
142
- ptr = cpu_env;
143
- } else {
144
- ptr = get_fpstatus_ptr(1);
145
- }
146
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
147
- opr_sz, opr_sz, data, fn_gvec_ptr);
148
- if (!ptr_is_env) {
149
- tcg_temp_free_ptr(ptr);
150
- }
151
- } else {
152
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
153
- opr_sz, opr_sz, data, fn_gvec);
154
- }
155
- return 0;
156
-}
157
-
158
/* Advanced SIMD two registers and a scalar extension.
159
* 31 24 23 22 20 16 12 11 10 9 8 3 0
160
* +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
161
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
162
}
163
}
164
}
165
- } else if ((insn & 0x0e000a00) == 0x0c000800
166
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
167
- if (disas_neon_insn_3same_ext(s, insn)) {
168
- goto illegal_op;
169
- }
170
- return;
171
} else if ((insn & 0x0f000a00) == 0x0e000800
172
&& arm_dc_feature(s, ARM_FEATURE_V8)) {
173
if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
174
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
175
}
176
break;
177
}
178
- if ((insn & 0xfe000a00) == 0xfc000800
179
+ if ((insn & 0xff000a00) == 0xfe000800
180
&& arm_dc_feature(s, ARM_FEATURE_V8)) {
181
/* The Thumb2 and ARM encodings are identical. */
182
- if (disas_neon_insn_3same_ext(s, insn)) {
183
- goto illegal_op;
184
- }
185
- } else if ((insn & 0xff000a00) == 0xfe000800
186
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
187
- /* The Thumb2 and ARM encodings are identical. */
188
if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
189
goto illegal_op;
190
}
70
}
191
--
71
--
192
2.20.1
72
2.34.1
193
73
194
74
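Restated as a standalone sketch, with QEMU's arm_feature()/cpu_isar_feature() queries replaced by plain booleans and the user-visible property registration omitted, the post-patch decision for has_vfp/has_vfp_d32 looks like this (illustration only, not the patched arm_cpu_post_init() itself):

    #include <stdbool.h>

    static void pick_vfp_flags(bool is_aarch64, bool aa64_fp_simd,
                               bool aa32_vfp, bool aa32_simd_r32,
                               bool *has_vfp, bool *has_vfp_d32)
    {
        if (is_aarch64) {
            /* AArch64 never reaches the 32-bit-only aa32_simd_r32 test. */
            if (aa64_fp_simd) {
                *has_vfp = true;
                *has_vfp_d32 = true;
            }
        } else if (aa32_vfp) {
            *has_vfp = true;
            if (aa32_simd_r32) {
                *has_vfp_d32 = true;
            }
        }
    }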
1
Convert the Neon "load/store multiple structures" insns to decodetree.
1
From: Shashi Mallela <shashi.mallela@linaro.org>
2
2
3
Create ITS as part of SBSA platform GIC initialization.
4
5
GIC ITS information is in DeviceTree so TF-A can pass it to EDK2.
6
7
Bumping platform version to 0.2 as this is important hardware change.
8
9
Signed-off-by: Shashi Mallela <shashi.mallela@linaro.org>
10
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
11
Message-id: 20230619170913.517373-2-marcin.juszkiewicz@linaro.org
12
Co-authored-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
13
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-12-peter.maydell@linaro.org
6
---
16
---
7
target/arm/neon-ls.decode | 7 ++
17
docs/system/arm/sbsa.rst | 14 ++++++++++++++
8
target/arm/translate-neon.inc.c | 124 ++++++++++++++++++++++++++++++++
18
hw/arm/sbsa-ref.c | 33 ++++++++++++++++++++++++++++++---
9
target/arm/translate.c | 91 +----------------------
19
2 files changed, 44 insertions(+), 3 deletions(-)
10
3 files changed, 133 insertions(+), 89 deletions(-)
11
20
12
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
21
diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst
13
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-ls.decode
23
--- a/docs/system/arm/sbsa.rst
15
+++ b/target/arm/neon-ls.decode
24
+++ b/docs/system/arm/sbsa.rst
16
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@ to be a complete compliant DT. It currently reports:
17
# 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
26
- platform version
18
# This file works on the A32 encoding only; calling code for T32 has to
27
- GIC addresses
19
# transform the insn into the A32 version first.
28
29
+Platform version
30
+''''''''''''''''
20
+
31
+
21
+%vd_dp 22:1 12:4
32
The platform version is only for informing platform firmware about
33
what kind of ``sbsa-ref`` board it is running on. It is neither
34
a QEMU versioned machine type nor a reflection of the level of the
35
@@ -XXX,XX +XXX,XX @@ SBSA/SystemReady SR support provided.
36
The ``machine-version-major`` value is updated when changes breaking
37
fw compatibility are introduced. The ``machine-version-minor`` value
38
is updated when features are added that don't break fw compatibility.
22
+
39
+
23
+# Neon load/store multiple structures
40
+Platform version changes:
24
+
41
+
25
+VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
42
+0.0
26
+ vd=%vd_dp
43
+ Devicetree holds information about CPUs, memory and platform version.
27
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
44
+
45
+0.1
46
+ GIC information is present in devicetree.
47
+
48
+0.2
49
+ GIC ITS information is present in devicetree.
50
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
28
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-neon.inc.c
52
--- a/hw/arm/sbsa-ref.c
30
+++ b/target/arm/translate-neon.inc.c
53
+++ b/hw/arm/sbsa-ref.c
31
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
54
@@ -XXX,XX +XXX,XX @@ enum {
32
gen_helper_gvec_fmlal_idx_a32);
55
SBSA_CPUPERIPHS,
33
return true;
56
SBSA_GIC_DIST,
57
SBSA_GIC_REDIST,
58
+ SBSA_GIC_ITS,
59
SBSA_SECURE_EC,
60
SBSA_GWDT_WS0,
61
SBSA_GWDT_REFRESH,
62
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry sbsa_ref_memmap[] = {
63
[SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 },
64
[SBSA_GIC_DIST] = { 0x40060000, 0x00010000 },
65
[SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 },
66
+ [SBSA_GIC_ITS] = { 0x44081000, 0x00020000 },
67
[SBSA_SECURE_EC] = { 0x50000000, 0x00001000 },
68
[SBSA_GWDT_REFRESH] = { 0x50010000, 0x00001000 },
69
[SBSA_GWDT_CONTROL] = { 0x50011000, 0x00001000 },
70
@@ -XXX,XX +XXX,XX @@ static void sbsa_fdt_add_gic_node(SBSAMachineState *sms)
71
2, sbsa_ref_memmap[SBSA_GIC_REDIST].base,
72
2, sbsa_ref_memmap[SBSA_GIC_REDIST].size);
73
74
+ nodename = g_strdup_printf("/intc/its");
75
+ qemu_fdt_add_subnode(sms->fdt, nodename);
76
+ qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg",
77
+ 2, sbsa_ref_memmap[SBSA_GIC_ITS].base,
78
+ 2, sbsa_ref_memmap[SBSA_GIC_ITS].size);
79
+
80
g_free(nodename);
34
}
81
}
35
+
82
+
36
+static struct {
83
/*
37
+ int nregs;
84
* Firmware on this machine only uses ACPI table to load OS, these limited
38
+ int interleave;
85
* device tree nodes are just to let firmware know the info which varies from
39
+ int spacing;
86
@@ -XXX,XX +XXX,XX @@ static void create_fdt(SBSAMachineState *sms)
40
+} const neon_ls_element_type[11] = {
87
* fw compatibility.
41
+ {1, 4, 1},
88
*/
42
+ {1, 4, 2},
89
qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
43
+ {4, 1, 1},
90
- qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1);
44
+ {2, 2, 2},
91
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 2);
45
+ {1, 3, 1},
92
46
+ {1, 3, 2},
93
if (ms->numa_state->have_numa_distance) {
47
+ {3, 1, 1},
94
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
48
+ {1, 1, 1},
95
@@ -XXX,XX +XXX,XX @@ static void create_secure_ram(SBSAMachineState *sms,
49
+ {1, 2, 1},
96
memory_region_add_subregion(secure_sysmem, base, secram);
50
+ {1, 2, 2},
97
}
51
+ {2, 1, 1}
98
52
+};
99
-static void create_gic(SBSAMachineState *sms)
100
+static void create_its(SBSAMachineState *sms)
101
+{
102
+ const char *itsclass = its_class_name();
103
+ DeviceState *dev;
53
+
104
+
54
+static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
105
+ dev = qdev_new(itsclass);
55
+ int stride)
56
+{
57
+ if (rm != 15) {
58
+ TCGv_i32 base;
59
+
106
+
60
+ base = load_reg(s, rn);
107
+ object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(sms->gic),
61
+ if (rm == 13) {
108
+ &error_abort);
62
+ tcg_gen_addi_i32(base, base, stride);
109
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
63
+ } else {
110
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, sbsa_ref_memmap[SBSA_GIC_ITS].base);
64
+ TCGv_i32 index;
65
+ index = load_reg(s, rm);
66
+ tcg_gen_add_i32(base, base, index);
67
+ tcg_temp_free_i32(index);
68
+ }
69
+ store_reg(s, rn, base);
70
+ }
71
+}
111
+}
72
+
112
+
73
+static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
113
+static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
74
+{
114
{
75
+ /* Neon load/store multiple structures */
115
unsigned int smp_cpus = MACHINE(sms)->smp.cpus;
76
+ int nregs, interleave, spacing, reg, n;
116
SysBusDevice *gicbusdev;
77
+ MemOp endian = s->be_data;
117
@@ -XXX,XX +XXX,XX @@ static void create_gic(SBSAMachineState *sms)
78
+ int mmu_idx = get_mem_index(s);
118
qdev_prop_set_uint32(sms->gic, "len-redist-region-count", 1);
79
+ int size = a->size;
119
qdev_prop_set_uint32(sms->gic, "redist-region-count[0]", redist0_count);
80
+ TCGv_i64 tmp64;
120
81
+ TCGv_i32 addr, tmp;
121
+ object_property_set_link(OBJECT(sms->gic), "sysmem",
122
+ OBJECT(mem), &error_fatal);
123
+ qdev_prop_set_bit(sms->gic, "has-lpi", true);
82
+
124
+
83
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
125
gicbusdev = SYS_BUS_DEVICE(sms->gic);
84
+ return false;
126
sysbus_realize_and_unref(gicbusdev, &error_fatal);
85
+ }
127
sysbus_mmio_map(gicbusdev, 0, sbsa_ref_memmap[SBSA_GIC_DIST].base);
86
+
128
@@ -XXX,XX +XXX,XX @@ static void create_gic(SBSAMachineState *sms)
87
+ /* UNDEF accesses to D16-D31 if they don't exist */
129
sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
88
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
130
qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
89
+ return false;
131
}
90
+ }
132
+ create_its(sms);
91
+ if (a->itype > 10) {
92
+ return false;
93
+ }
94
+ /* Catch UNDEF cases for bad values of align field */
95
+ switch (a->itype & 0xc) {
96
+ case 4:
97
+ if (a->align >= 2) {
98
+ return false;
99
+ }
100
+ break;
101
+ case 8:
102
+ if (a->align == 3) {
103
+ return false;
104
+ }
105
+ break;
106
+ default:
107
+ break;
108
+ }
109
+ nregs = neon_ls_element_type[a->itype].nregs;
110
+ interleave = neon_ls_element_type[a->itype].interleave;
111
+ spacing = neon_ls_element_type[a->itype].spacing;
112
+ if (size == 3 && (interleave | spacing) != 1) {
113
+ return false;
114
+ }
115
+
116
+ if (!vfp_access_check(s)) {
117
+ return true;
118
+ }
119
+
120
+ /* For our purposes, bytes are always little-endian. */
121
+ if (size == 0) {
122
+ endian = MO_LE;
123
+ }
124
+ /*
125
+ * Consecutive little-endian elements from a single register
126
+ * can be promoted to a larger little-endian operation.
127
+ */
128
+ if (interleave == 1 && endian == MO_LE) {
129
+ size = 3;
130
+ }
131
+ tmp64 = tcg_temp_new_i64();
132
+ addr = tcg_temp_new_i32();
133
+ tmp = tcg_const_i32(1 << size);
134
+ load_reg_var(s, addr, a->rn);
135
+ for (reg = 0; reg < nregs; reg++) {
136
+ for (n = 0; n < 8 >> size; n++) {
137
+ int xs;
138
+ for (xs = 0; xs < interleave; xs++) {
139
+ int tt = a->vd + reg + spacing * xs;
140
+
141
+ if (a->l) {
142
+ gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
143
+ neon_store_element64(tt, n, size, tmp64);
144
+ } else {
145
+ neon_load_element64(tmp64, tt, n, size);
146
+ gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
147
+ }
148
+ tcg_gen_add_i32(addr, addr, tmp);
149
+ }
150
+ }
151
+ }
152
+ tcg_temp_free_i32(addr);
153
+ tcg_temp_free_i32(tmp);
154
+ tcg_temp_free_i64(tmp64);
155
+
156
+ gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
157
+ return true;
158
+}
159
diff --git a/target/arm/translate.c b/target/arm/translate.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/arm/translate.c
162
+++ b/target/arm/translate.c
163
@@ -XXX,XX +XXX,XX @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
164
}
133
}
165
134
166
135
static void create_uart(const SBSAMachineState *sms, int uart,
167
-static struct {
136
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
168
- int nregs;
137
169
- int interleave;
138
create_secure_ram(sms, secure_sysmem);
170
- int spacing;
139
171
-} const neon_ls_element_type[11] = {
140
- create_gic(sms);
172
- {1, 4, 1},
141
+ create_gic(sms, sysmem);
173
- {1, 4, 2},
142
174
- {4, 1, 1},
143
create_uart(sms, SBSA_UART, sysmem, serial_hd(0));
175
- {2, 2, 2},
144
create_uart(sms, SBSA_SECURE_UART, secure_sysmem, serial_hd(1));
176
- {1, 3, 1},
177
- {1, 3, 2},
178
- {3, 1, 1},
179
- {1, 1, 1},
180
- {1, 2, 1},
181
- {1, 2, 2},
182
- {2, 1, 1}
183
-};
184
-
185
/* Translate a NEON load/store element instruction. Return nonzero if the
186
instruction is invalid. */
187
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
188
{
189
int rd, rn, rm;
190
- int op;
191
int nregs;
192
- int interleave;
193
- int spacing;
194
int stride;
195
int size;
196
int reg;
197
int load;
198
- int n;
199
int vec_size;
200
- int mmu_idx;
201
- MemOp endian;
202
TCGv_i32 addr;
203
TCGv_i32 tmp;
204
- TCGv_i32 tmp2;
205
- TCGv_i64 tmp64;
206
207
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
208
return 1;
209
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
210
rn = (insn >> 16) & 0xf;
211
rm = insn & 0xf;
212
load = (insn & (1 << 21)) != 0;
213
- endian = s->be_data;
214
- mmu_idx = get_mem_index(s);
215
if ((insn & (1 << 23)) == 0) {
216
- /* Load store all elements. */
217
- op = (insn >> 8) & 0xf;
218
- size = (insn >> 6) & 3;
219
- if (op > 10)
220
- return 1;
221
- /* Catch UNDEF cases for bad values of align field */
222
- switch (op & 0xc) {
223
- case 4:
224
- if (((insn >> 5) & 1) == 1) {
225
- return 1;
226
- }
227
- break;
228
- case 8:
229
- if (((insn >> 4) & 3) == 3) {
230
- return 1;
231
- }
232
- break;
233
- default:
234
- break;
235
- }
236
- nregs = neon_ls_element_type[op].nregs;
237
- interleave = neon_ls_element_type[op].interleave;
238
- spacing = neon_ls_element_type[op].spacing;
239
- if (size == 3 && (interleave | spacing) != 1) {
240
- return 1;
241
- }
242
- /* For our purposes, bytes are always little-endian. */
243
- if (size == 0) {
244
- endian = MO_LE;
245
- }
246
- /* Consecutive little-endian elements from a single register
247
- * can be promoted to a larger little-endian operation.
248
- */
249
- if (interleave == 1 && endian == MO_LE) {
250
- size = 3;
251
- }
252
- tmp64 = tcg_temp_new_i64();
253
- addr = tcg_temp_new_i32();
254
- tmp2 = tcg_const_i32(1 << size);
255
- load_reg_var(s, addr, rn);
256
- for (reg = 0; reg < nregs; reg++) {
257
- for (n = 0; n < 8 >> size; n++) {
258
- int xs;
259
- for (xs = 0; xs < interleave; xs++) {
260
- int tt = rd + reg + spacing * xs;
261
-
262
- if (load) {
263
- gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
264
- neon_store_element64(tt, n, size, tmp64);
265
- } else {
266
- neon_load_element64(tmp64, tt, n, size);
267
- gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
268
- }
269
- tcg_gen_add_i32(addr, addr, tmp2);
270
- }
271
- }
272
- }
273
- tcg_temp_free_i32(addr);
274
- tcg_temp_free_i32(tmp2);
275
- tcg_temp_free_i64(tmp64);
276
- stride = nregs * interleave * 8;
277
+ /* Load store all elements -- handled already by decodetree */
278
+ return 1;
279
} else {
280
size = (insn >> 10) & 3;
281
if (size == 3) {
282
--
145
--
283
2.20.1
146
2.34.1
284
285
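For the "load/store multiple structures" conversion above, the order in which elements are transferred is easiest to see with a small standalone sketch of the same triple loop (a hypothetical helper using the patch's {nregs, interleave, spacing} table; it is not QEMU code):

    #include <stdio.h>

    static const struct { int nregs, interleave, spacing; } neon_ls_element_type[11] = {
        {1, 4, 1}, {1, 4, 2}, {4, 1, 1}, {2, 2, 2}, {1, 3, 1}, {1, 3, 2},
        {3, 1, 1}, {1, 1, 1}, {1, 2, 1}, {1, 2, 2}, {2, 1, 1},
    };

    /* Print the (register, element) order that trans_VLDST_multiple() walks. */
    static void show_element_order(int vd, int itype, int size)
    {
        int nregs = neon_ls_element_type[itype].nregs;
        int interleave = neon_ls_element_type[itype].interleave;
        int spacing = neon_ls_element_type[itype].spacing;

        for (int reg = 0; reg < nregs; reg++) {
            for (int n = 0; n < 8 >> size; n++) {
                for (int xs = 0; xs < interleave; xs++) {
                    /* Same register/element selection as the translator loop. */
                    printf("D%d[%d]\n", vd + reg + spacing * xs, n);
                }
            }
        }
    }

For instance, itype 2 ({4, 1, 1}) transfers every element of D(vd), then every element of D(vd+1), and so on, which matches the nregs * interleave * 8 byte writeback stride computed by the translator.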
1
From: Fredrik Strupe <fredrik@strupe.net>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
According to the Arm ARM, VQDMULL is only valid when U=0, while having
3
Brown bag time: store instead of load results in uninitialized temp.
4
U=1 is unallocated.
5
4
6
Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
5
7
Fixes: 695272dcb976 ("target-arm: Handle UNDEF cases for Neon 3-regs-different-widths")
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1704
7
Reported-by: Mark Rutland <mark.rutland@arm.com>
8
Tested-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20230620134659.817559-1-richard.henderson@linaro.org
11
Fixes: e6dd5e782be ("target/arm: Use tcg_gen_qemu_{ld, st}_i128 in gen_sve_{ld, st}r")
12
Tested-by: Alex Bennée <alex.bennee@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
16
---
11
target/arm/translate.c | 2 +-
17
target/arm/tcg/translate-sve.c | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
18
1 file changed, 1 insertion(+), 1 deletion(-)
13
19
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
20
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
15
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
22
--- a/target/arm/tcg/translate-sve.c
17
+++ b/target/arm/translate.c
23
+++ b/target/arm/tcg/translate-sve.c
18
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
24
@@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
19
{0, 0, 0, 0}, /* VMLSL */
25
/* Predicate register stores can be any multiple of 2. */
20
{0, 0, 0, 9}, /* VQDMLSL */
26
if (len_remain >= 8) {
21
{0, 0, 0, 0}, /* Integer VMULL */
27
t0 = tcg_temp_new_i64();
22
- {0, 0, 0, 1}, /* VQDMULL */
28
- tcg_gen_st_i64(t0, base, vofs + len_align);
23
+ {0, 0, 0, 9}, /* VQDMULL */
29
+ tcg_gen_ld_i64(t0, base, vofs + len_align);
24
{0, 0, 0, 0xa}, /* Polynomial VMULL */
30
tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE);
25
{0, 0, 0, 7}, /* Reserved: always UNDEF */
31
len_remain -= 8;
26
};
32
len_align += 8;
27
--
33
--
28
2.20.1
34
2.34.1
29
35
30
36
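The one-line gen_sve_str() fix above is easier to see with the data flow written out in plain C; this is an analogy of the bug shape only (a hypothetical function, not the TCG code):

    #include <stdint.h>
    #include <string.h>

    /* gen_sve_str() copies predicate bytes from CPU state out to guest memory,
     * so the temporary must be loaded from the CPU state before it is stored. */
    static void copy_final_chunk(const uint8_t *cpu_state, size_t vofs,
                                 uint8_t *guest_mem)
    {
        uint64_t t0;

        /* Buggy shape: store the uninitialized temp into the CPU state, then
         * write that same uninitialized temp out to guest memory. */

        /* Fixed shape: load, then store. */
        memcpy(&t0, cpu_state + vofs, sizeof(t0));
        memcpy(guest_mem, &t0, sizeof(t0));
    }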
Deleted patch
1
In aarch64_max_initfn() we update both 32-bit and 64-bit ID
2
registers. The intended pattern is that for 64-bit ID registers we
3
use FIELD_DP64 and the uint64_t 't' register, while 32-bit ID
4
registers use FIELD_DP32 and the uint32_t 'u' register. For
5
ID_AA64DFR0 we accidentally used 'u', meaning that the top 32 bits of
6
this 64-bit ID register would end up always zero. Luckily at the
7
moment that's what they should be anyway, so this bug has no visible
8
effects.
9
1
10
Use the right-sized variable.
11
12
Fixes: 3bec78447a958d481991
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
15
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
16
Message-id: 20200423110915.10527-1-peter.maydell@linaro.org
17
---
18
target/arm/cpu64.c | 6 +++---
19
1 file changed, 3 insertions(+), 3 deletions(-)
20
21
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/cpu64.c
24
+++ b/target/arm/cpu64.c
25
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
26
u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
27
cpu->isar.id_mmfr4 = u;
28
29
- u = cpu->isar.id_aa64dfr0;
30
- u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
31
- cpu->isar.id_aa64dfr0 = u;
32
+ t = cpu->isar.id_aa64dfr0;
33
+ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
34
+ cpu->isar.id_aa64dfr0 = t;
35
36
u = cpu->isar.id_dfr0;
37
u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
38
--
39
2.20.1
40
41
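The effect fixed above is purely a C integer-width slip and is easy to reproduce in isolation; the snippet below uses hand-expanded masks for the PMUVER field (bits [11:8]) rather than QEMU's FIELD_DP64/FIELD_DP32 macros, so treat it as an illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend some of the top 32 bits of the 64-bit ID register are set. */
        uint64_t id_aa64dfr0 = 0xdead000000000006ULL;

        /* Buggy pattern: a uint32_t temporary silently drops bits [63:32]. */
        uint32_t u = id_aa64dfr0;
        uint64_t buggy = (u & ~0xf00u) | (5u << 8);

        /* Correct pattern: keep the value in a uint64_t throughout. */
        uint64_t t = id_aa64dfr0;
        uint64_t fixed = (t & ~(uint64_t)0xf00) | ((uint64_t)5 << 8);

        printf("buggy = 0x%016llx\nfixed = 0x%016llx\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        return 0;
    }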
Deleted patch
1
Somewhere along the line we accidentally added a duplicate
2
"using D16-D31 when they don't exist" check to do_vfm_dp()
3
(probably an artifact of a patch series rebase). Remove it.
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20200430181003.21682-2-peter.maydell@linaro.org
9
---
10
target/arm/translate-vfp.inc.c | 6 ------
11
1 file changed, 6 deletions(-)
12
13
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-vfp.inc.c
16
+++ b/target/arm/translate-vfp.inc.c
17
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
18
return false;
19
}
20
21
- /* UNDEF accesses to D16-D31 if they don't exist. */
22
- if (!dc_isar_feature(aa32_simd_r32, s) &&
23
- ((a->vd | a->vn | a->vm) & 0x10)) {
24
- return false;
25
- }
26
-
27
if (!vfp_access_check(s)) {
28
return true;
29
}
30
--
31
2.20.1
32
33
Deleted patch
1
We were accidentally permitting decode of Thumb Neon insns even if
2
the CPU didn't have the FEATURE_NEON bit set, because the feature
3
check was being done before the call to disas_neon_data_insn() and
4
disas_neon_ls_insn() in the Arm decoder but was omitted from the
5
Thumb decoder. Push the feature bit check down into the called
6
functions so it is done for both Arm and Thumb encodings.
7
1
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Message-id: 20200430181003.21682-3-peter.maydell@linaro.org
12
---
13
target/arm/translate.c | 16 ++++++++--------
14
1 file changed, 8 insertions(+), 8 deletions(-)
15
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate.c
19
+++ b/target/arm/translate.c
20
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
21
TCGv_i32 tmp2;
22
TCGv_i64 tmp64;
23
24
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
25
+ return 1;
26
+ }
27
+
28
/* FIXME: this access check should not take precedence over UNDEF
29
* for invalid encodings; we will generate incorrect syndrome information
30
* for attempts to execute invalid vfp/neon encodings with FP disabled.
31
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
32
TCGv_ptr ptr1, ptr2, ptr3;
33
TCGv_i64 tmp64;
34
35
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
36
+ return 1;
37
+ }
38
+
39
/* FIXME: this access check should not take precedence over UNDEF
40
* for invalid encodings; we will generate incorrect syndrome information
41
* for attempts to execute invalid vfp/neon encodings with FP disabled.
42
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
43
44
if (((insn >> 25) & 7) == 1) {
45
/* NEON Data processing. */
46
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
47
- goto illegal_op;
48
- }
49
-
50
if (disas_neon_data_insn(s, insn)) {
51
goto illegal_op;
52
}
53
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
54
}
55
if ((insn & 0x0f100000) == 0x04000000) {
56
/* NEON load/store. */
57
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
58
- goto illegal_op;
59
- }
60
-
61
if (disas_neon_ls_insn(s, insn)) {
62
goto illegal_op;
63
}
64
--
65
2.20.1
66
67
Deleted patch
1
Add the infrastructure for building and invoking a decodetree decoder
2
for the AArch32 Neon encodings. At the moment the new decoder covers
3
nothing, so we always fall back to the existing hand-written decode.
4
1
5
We follow the same pattern we did for the VFP decodetree conversion
6
(commit 78e138bc1f672c145ef6ace74617d and following): code that deals
7
with Neon will be moving gradually out to translate-neon.vfp.inc,
8
which we #include into translate.c.
9
10
In order to share the decode files between A32 and T32, we
11
split Neon into 3 parts:
12
* data-processing
13
* load-store
14
* 'shared' encodings
15
16
The first two groups of instructions have similar but not identical
17
A32 and T32 encodings, so we need to manually transform the T32
18
encoding into the A32 one before calling the decoder; the third group
19
covers the Neon instructions which are identical in A32 and T32.
20
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
23
Message-id: 20200430181003.21682-4-peter.maydell@linaro.org
24
---
25
target/arm/neon-dp.decode | 29 ++++++++++++++++++++++++++
26
target/arm/neon-ls.decode | 29 ++++++++++++++++++++++++++
27
target/arm/neon-shared.decode | 27 +++++++++++++++++++++++++
28
target/arm/translate-neon.inc.c | 32 +++++++++++++++++++++++++++++
29
target/arm/translate.c | 36 +++++++++++++++++++++++++++++++--
30
target/arm/Makefile.objs | 18 +++++++++++++++++
31
6 files changed, 169 insertions(+), 2 deletions(-)
32
create mode 100644 target/arm/neon-dp.decode
33
create mode 100644 target/arm/neon-ls.decode
34
create mode 100644 target/arm/neon-shared.decode
35
create mode 100644 target/arm/translate-neon.inc.c
36
37
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
38
new file mode 100644
39
index XXXXXXX..XXXXXXX
40
--- /dev/null
41
+++ b/target/arm/neon-dp.decode
42
@@ -XXX,XX +XXX,XX @@
43
+# AArch32 Neon data-processing instruction descriptions
44
+#
45
+# Copyright (c) 2020 Linaro, Ltd
46
+#
47
+# This library is free software; you can redistribute it and/or
48
+# modify it under the terms of the GNU Lesser General Public
49
+# License as published by the Free Software Foundation; either
50
+# version 2 of the License, or (at your option) any later version.
51
+#
52
+# This library is distributed in the hope that it will be useful,
53
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
54
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
55
+# Lesser General Public License for more details.
56
+#
57
+# You should have received a copy of the GNU Lesser General Public
58
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
59
+
60
+#
61
+# This file is processed by scripts/decodetree.py
62
+#
63
+
64
+# Encodings for Neon data processing instructions where the T32 encoding
65
+# is a simple transformation of the A32 encoding.
66
+# More specifically, this file covers instructions where the A32 encoding is
67
+# 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
68
+# and the T32 encoding is
69
+# 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
70
+# This file works on the A32 encoding only; calling code for T32 has to
71
+# transform the insn into the A32 version first.
72
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
73
new file mode 100644
74
index XXXXXXX..XXXXXXX
75
--- /dev/null
76
+++ b/target/arm/neon-ls.decode
77
@@ -XXX,XX +XXX,XX @@
78
+# AArch32 Neon load/store instruction descriptions
79
+#
80
+# Copyright (c) 2020 Linaro, Ltd
81
+#
82
+# This library is free software; you can redistribute it and/or
83
+# modify it under the terms of the GNU Lesser General Public
84
+# License as published by the Free Software Foundation; either
85
+# version 2 of the License, or (at your option) any later version.
86
+#
87
+# This library is distributed in the hope that it will be useful,
88
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
89
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
90
+# Lesser General Public License for more details.
91
+#
92
+# You should have received a copy of the GNU Lesser General Public
93
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
94
+
95
+#
96
+# This file is processed by scripts/decodetree.py
97
+#
98
+
99
+# Encodings for Neon load/store instructions where the T32 encoding
100
+# is a simple transformation of the A32 encoding.
101
+# More specifically, this file covers instructions where the A32 encoding is
102
+# 0b1111_0100_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
103
+# and the T32 encoding is
104
+# 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
105
+# This file works on the A32 encoding only; calling code for T32 has to
106
+# transform the insn into the A32 version first.
107
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
108
new file mode 100644
109
index XXXXXXX..XXXXXXX
110
--- /dev/null
111
+++ b/target/arm/neon-shared.decode
112
@@ -XXX,XX +XXX,XX @@
113
+# AArch32 Neon instruction descriptions
114
+#
115
+# Copyright (c) 2020 Linaro, Ltd
116
+#
117
+# This library is free software; you can redistribute it and/or
118
+# modify it under the terms of the GNU Lesser General Public
119
+# License as published by the Free Software Foundation; either
120
+# version 2 of the License, or (at your option) any later version.
121
+#
122
+# This library is distributed in the hope that it will be useful,
123
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
124
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
125
+# Lesser General Public License for more details.
126
+#
127
+# You should have received a copy of the GNU Lesser General Public
128
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
129
+
130
+#
131
+# This file is processed by scripts/decodetree.py
132
+#
133
+
134
+# Encodings for Neon instructions whose encoding is the same for
135
+# both A32 and T32.
136
+
137
+# More specifically, this covers:
138
+# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
139
+# 3same ext: 0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
140
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
141
new file mode 100644
142
index XXXXXXX..XXXXXXX
143
--- /dev/null
144
+++ b/target/arm/translate-neon.inc.c
145
@@ -XXX,XX +XXX,XX @@
146
+/*
147
+ * ARM translation: AArch32 Neon instructions
148
+ *
149
+ * Copyright (c) 2003 Fabrice Bellard
150
+ * Copyright (c) 2005-2007 CodeSourcery
151
+ * Copyright (c) 2007 OpenedHand, Ltd.
152
+ * Copyright (c) 2020 Linaro, Ltd.
153
+ *
154
+ * This library is free software; you can redistribute it and/or
155
+ * modify it under the terms of the GNU Lesser General Public
156
+ * License as published by the Free Software Foundation; either
157
+ * version 2 of the License, or (at your option) any later version.
158
+ *
159
+ * This library is distributed in the hope that it will be useful,
160
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
161
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
162
+ * Lesser General Public License for more details.
163
+ *
164
+ * You should have received a copy of the GNU Lesser General Public
165
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
166
+ */
167
+
168
+/*
169
+ * This file is intended to be included from translate.c; it uses
170
+ * some macros and definitions provided by that file.
171
+ * It might be possible to convert it to a standalone .c file eventually.
172
+ */
173
+
174
+/* Include the generated Neon decoder */
175
+#include "decode-neon-dp.inc.c"
176
+#include "decode-neon-ls.inc.c"
177
+#include "decode-neon-shared.inc.c"
178
diff --git a/target/arm/translate.c b/target/arm/translate.c
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/arm/translate.c
181
+++ b/target/arm/translate.c
182
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
183
184
#define ARM_CP_RW_BIT (1 << 20)
185
186
-/* Include the VFP decoder */
187
+/* Include the VFP and Neon decoders */
188
#include "translate-vfp.inc.c"
189
+#include "translate-neon.inc.c"
190
191
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
192
{
193
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
194
/* Unconditional instructions. */
195
/* TODO: Perhaps merge these into one decodetree output file. */
196
if (disas_a32_uncond(s, insn) ||
197
- disas_vfp_uncond(s, insn)) {
198
+ disas_vfp_uncond(s, insn) ||
199
+ disas_neon_dp(s, insn) ||
200
+ disas_neon_ls(s, insn) ||
201
+ disas_neon_shared(s, insn)) {
202
return;
203
}
204
/* fall back to legacy decoder */
205
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
206
ARCH(6T2);
207
}
208
209
+ if ((insn & 0xef000000) == 0xef000000) {
210
+ /*
211
+ * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
212
+ * transform into
213
+ * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
214
+ */
215
+ uint32_t a32_insn = (insn & 0xe2ffffff) |
216
+ ((insn & (1 << 28)) >> 4) | (1 << 28);
217
+
218
+ if (disas_neon_dp(s, a32_insn)) {
219
+ return;
220
+ }
221
+ }
222
+
223
+ if ((insn & 0xff100000) == 0xf9000000) {
224
+ /*
225
+ * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
226
+ * transform into
227
+ * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
228
+ */
229
+ uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
230
+
231
+ if (disas_neon_ls(s, a32_insn)) {
232
+ return;
233
+ }
234
+ }
235
+
236
/*
237
* TODO: Perhaps merge these into one decodetree output file.
238
* Note disas_vfp is written for a32 with cond field in the
239
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
240
*/
241
if (disas_t32(s, insn) ||
242
disas_vfp_uncond(s, insn) ||
243
+ disas_neon_shared(s, insn) ||
244
((insn >> 28) == 0xe && disas_vfp(s, insn))) {
245
return;
246
}
247
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
248
index XXXXXXX..XXXXXXX 100644
249
--- a/target/arm/Makefile.objs
250
+++ b/target/arm/Makefile.objs
251
@@ -XXX,XX +XXX,XX @@ target/arm/decode-sve.inc.c: $(SRC_PATH)/target/arm/sve.decode $(DECODETREE)
252
     $(PYTHON) $(DECODETREE) --decode disas_sve -o $@ $<,\
253
     "GEN", $(TARGET_DIR)$@)
254
255
+target/arm/decode-neon-shared.inc.c: $(SRC_PATH)/target/arm/neon-shared.decode $(DECODETREE)
256
+    $(call quiet-command,\
257
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_shared -o $@ $<,\
258
+     "GEN", $(TARGET_DIR)$@)
259
+
260
+target/arm/decode-neon-dp.inc.c: $(SRC_PATH)/target/arm/neon-dp.decode $(DECODETREE)
261
+    $(call quiet-command,\
262
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_dp -o $@ $<,\
263
+     "GEN", $(TARGET_DIR)$@)
264
+
265
+target/arm/decode-neon-ls.inc.c: $(SRC_PATH)/target/arm/neon-ls.decode $(DECODETREE)
266
+    $(call quiet-command,\
267
+     $(PYTHON) $(DECODETREE) --static-decode disas_neon_ls -o $@ $<,\
268
+     "GEN", $(TARGET_DIR)$@)
269
+
270
target/arm/decode-vfp.inc.c: $(SRC_PATH)/target/arm/vfp.decode $(DECODETREE)
271
    $(call quiet-command,\
272
     $(PYTHON) $(DECODETREE) --static-decode disas_vfp -o $@ $<,\
273
@@ -XXX,XX +XXX,XX @@ target/arm/decode-t16.inc.c: $(SRC_PATH)/target/arm/t16.decode $(DECODETREE)
274
     "GEN", $(TARGET_DIR)$@)
275
276
target/arm/translate-sve.o: target/arm/decode-sve.inc.c
277
+target/arm/translate.o: target/arm/decode-neon-shared.inc.c
278
+target/arm/translate.o: target/arm/decode-neon-dp.inc.c
279
+target/arm/translate.o: target/arm/decode-neon-ls.inc.c
280
target/arm/translate.o: target/arm/decode-vfp.inc.c
281
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
282
target/arm/translate.o: target/arm/decode-a32.inc.c
283
--
284
2.20.1
285
286
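The bit manipulation in the Thumb path of the decodetree infrastructure patch above is worth spelling out; here it is as a standalone helper (the function name is mine, not QEMU's):

    #include <stdint.h>

    /*
     * T32 Neon data-processing encodings look like 0b111p_1111_qqqq_...,
     * while the shared A32 decoder expects      0b1111_001p_qqqq_....
     * The mask keeps bits [31:29], bit 25 and bits [23:0] while clearing
     * bits 28-26 and bit 24; the ORs then copy the old "p" bit (bit 28)
     * into bit 24 and set bit 28, so the top byte becomes 1111_001p.
     */
    static inline uint32_t t32_neon_dp_to_a32(uint32_t insn)
    {
        return (insn & 0xe2ffffff) | ((insn & (1u << 28)) >> 4) | (1u << 28);
    }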
Deleted patch
1
Convert the VCMLA (vector) insns in the 3same extension group to
2
decodetree.
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-5-peter.maydell@linaro.org
7
---
8
target/arm/neon-shared.decode | 11 ++++++++++
9
target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 11 +---------
11
3 files changed, 49 insertions(+), 10 deletions(-)
12
13
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-shared.decode
16
+++ b/target/arm/neon-shared.decode
17
@@ -XXX,XX +XXX,XX @@
18
# More specifically, this covers:
19
# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
20
# 3same ext: 0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
21
+
22
+# VFP/Neon register fields; same as vfp.decode
23
+%vm_dp 5:1 0:4
24
+%vm_sp 0:4 5:1
25
+%vn_dp 7:1 16:4
26
+%vn_sp 16:4 7:1
27
+%vd_dp 22:1 12:4
28
+%vd_sp 12:4 22:1
29
+
30
+VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
31
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
32
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate-neon.inc.c
35
+++ b/target/arm/translate-neon.inc.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "decode-neon-dp.inc.c"
38
#include "decode-neon-ls.inc.c"
39
#include "decode-neon-shared.inc.c"
40
+
41
+static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
42
+{
43
+ int opr_sz;
44
+ TCGv_ptr fpst;
45
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
46
+
47
+ if (!dc_isar_feature(aa32_vcma, s)
48
+ || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
49
+ return false;
50
+ }
51
+
52
+ /* UNDEF accesses to D16-D31 if they don't exist. */
53
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
54
+ ((a->vd | a->vn | a->vm) & 0x10)) {
55
+ return false;
56
+ }
57
+
58
+ if ((a->vn | a->vm | a->vd) & a->q) {
59
+ return false;
60
+ }
61
+
62
+ if (!vfp_access_check(s)) {
63
+ return true;
64
+ }
65
+
66
+ opr_sz = (1 + a->q) * 8;
67
+ fpst = get_fpstatus_ptr(1);
68
+ fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
69
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
70
+ vfp_reg_offset(1, a->vn),
71
+ vfp_reg_offset(1, a->vm),
72
+ fpst, opr_sz, opr_sz, a->rot,
73
+ fn_gvec_ptr);
74
+ tcg_temp_free_ptr(fpst);
75
+ return true;
76
+}
77
diff --git a/target/arm/translate.c b/target/arm/translate.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/translate.c
80
+++ b/target/arm/translate.c
81
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
82
bool is_long = false, q = extract32(insn, 6, 1);
83
bool ptr_is_env = false;
84
85
- if ((insn & 0xfe200f10) == 0xfc200800) {
86
- /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
87
- int size = extract32(insn, 20, 1);
88
- data = extract32(insn, 23, 2); /* rot */
89
- if (!dc_isar_feature(aa32_vcma, s)
90
- || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
91
- return 1;
92
- }
93
- fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
94
- } else if ((insn & 0xfea00f10) == 0xfc800800) {
95
+ if ((insn & 0xfea00f10) == 0xfc800800) {
96
/* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
97
int size = extract32(insn, 20, 1);
98
data = extract32(insn, 24, 1); /* rot */
99
--
100
2.20.1
101
102
Deleted patch
1
Convert the VCADD (vector) insns to decodetree.
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-6-peter.maydell@linaro.org
6
---
7
target/arm/neon-shared.decode | 3 +++
8
target/arm/translate-neon.inc.c | 37 +++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 11 +---------
10
3 files changed, 41 insertions(+), 10 deletions(-)
11
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-shared.decode
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@
17
18
VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
+
21
+VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
22
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
23
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/translate-neon.inc.c
26
+++ b/target/arm/translate-neon.inc.c
27
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
28
tcg_temp_free_ptr(fpst);
29
return true;
30
}
31
+
32
+static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
33
+{
34
+ int opr_sz;
35
+ TCGv_ptr fpst;
36
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
37
+
38
+ if (!dc_isar_feature(aa32_vcma, s)
39
+ || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
40
+ return false;
41
+ }
42
+
43
+ /* UNDEF accesses to D16-D31 if they don't exist. */
44
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
45
+ ((a->vd | a->vn | a->vm) & 0x10)) {
46
+ return false;
47
+ }
48
+
49
+ if ((a->vn | a->vm | a->vd) & a->q) {
50
+ return false;
51
+ }
52
+
53
+ if (!vfp_access_check(s)) {
54
+ return true;
55
+ }
56
+
57
+ opr_sz = (1 + a->q) * 8;
58
+ fpst = get_fpstatus_ptr(1);
59
+ fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
60
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
61
+ vfp_reg_offset(1, a->vn),
62
+ vfp_reg_offset(1, a->vm),
63
+ fpst, opr_sz, opr_sz, a->rot,
64
+ fn_gvec_ptr);
65
+ tcg_temp_free_ptr(fpst);
66
+ return true;
67
+}
68
diff --git a/target/arm/translate.c b/target/arm/translate.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/translate.c
71
+++ b/target/arm/translate.c
72
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
73
bool is_long = false, q = extract32(insn, 6, 1);
74
bool ptr_is_env = false;
75
76
- if ((insn & 0xfea00f10) == 0xfc800800) {
77
- /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
78
- int size = extract32(insn, 20, 1);
79
- data = extract32(insn, 24, 1); /* rot */
80
- if (!dc_isar_feature(aa32_vcma, s)
81
- || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
82
- return 1;
83
- }
84
- fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
85
- } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
86
+ if ((insn & 0xfeb00f00) == 0xfc200d00) {
87
/* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
88
bool u = extract32(insn, 4, 1);
89
if (!dc_isar_feature(aa32_dp, s)) {
90
--
91
2.20.1
92
93
Deleted patch
1
Convert the V[US]DOT (vector) insns to decodetree.
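
One small point worth spelling out (illustration only; the helper name
below is made up for the example and is not in the patch): the
opr_sz = (1 + a->q) * 8 computation used by these trans functions is just
the vector length in bytes, selected by the Q bit.

    /* Q == 0: 64-bit operation on a D register  ->  8 bytes
     * Q == 1: 128-bit operation on a Q register -> 16 bytes
     */
    static int neon_opr_sz_bytes(int q)
    {
        return (1 + q) * 8;
    }
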
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-7-peter.maydell@linaro.org
6
---
7
target/arm/neon-shared.decode | 4 ++++
8
target/arm/translate-neon.inc.c | 32 ++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 9 +--------
10
3 files changed, 37 insertions(+), 8 deletions(-)
11
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-shared.decode
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@ VCMLA 1111 110 rot:2 . 1 size:1 .... .... 1000 . q:1 . 0 .... \
17
18
VCADD 1111 110 rot:1 1 . 0 size:1 .... .... 1000 . q:1 . 0 .... \
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp
20
+
21
+# VUDOT and VSDOT
22
+VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
23
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
24
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-neon.inc.c
27
+++ b/target/arm/translate-neon.inc.c
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
29
tcg_temp_free_ptr(fpst);
30
return true;
31
}
32
+
33
+static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
34
+{
35
+ int opr_sz;
36
+ gen_helper_gvec_3 *fn_gvec;
37
+
38
+ if (!dc_isar_feature(aa32_dp, s)) {
39
+ return false;
40
+ }
41
+
42
+ /* UNDEF accesses to D16-D31 if they don't exist. */
43
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
44
+ ((a->vd | a->vn | a->vm) & 0x10)) {
45
+ return false;
46
+ }
47
+
48
+ if ((a->vn | a->vm | a->vd) & a->q) {
49
+ return false;
50
+ }
51
+
52
+ if (!vfp_access_check(s)) {
53
+ return true;
54
+ }
55
+
56
+ opr_sz = (1 + a->q) * 8;
57
+ fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
58
+ tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
59
+ vfp_reg_offset(1, a->vn),
60
+ vfp_reg_offset(1, a->vm),
61
+ opr_sz, opr_sz, 0, fn_gvec);
62
+ return true;
63
+}
64
diff --git a/target/arm/translate.c b/target/arm/translate.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/translate.c
67
+++ b/target/arm/translate.c
68
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
69
bool is_long = false, q = extract32(insn, 6, 1);
70
bool ptr_is_env = false;
71
72
- if ((insn & 0xfeb00f00) == 0xfc200d00) {
73
- /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
74
- bool u = extract32(insn, 4, 1);
75
- if (!dc_isar_feature(aa32_dp, s)) {
76
- return 1;
77
- }
78
- fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
79
- } else if ((insn & 0xff300f10) == 0xfc200810) {
80
+ if ((insn & 0xff300f10) == 0xfc200810) {
81
/* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
82
int is_s = extract32(insn, 23, 1);
83
if (!dc_isar_feature(aa32_fhm, s)) {
84
--
85
2.20.1
86
87
Deleted patch
1
Convert VCMLA (scalar) in the 2reg-scalar-ext group to decodetree.
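
For clarity (an example only, not project code): the rotation and the
scalar index are packed into the single 'data' immediate that
tcg_gen_gvec_3_ptr() passes through to the helper, so both values travel
in one integer. The function name here is invented for the illustration.

    static void vcmla_scalar_data_example(void)
    {
        int rot = 2, index = 1;            /* example field values      */
        int data = (index << 2) | rot;     /* packed as in the patch: 6 */
        int rot_back = data & 3;           /* one way to unpack: 2      */
        int index_back = data >> 2;        /* and: 1                    */
        (void)rot_back; (void)index_back;
    }
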
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-9-peter.maydell@linaro.org
6
---
7
target/arm/neon-shared.decode | 5 +++++
8
target/arm/translate-neon.inc.c | 40 +++++++++++++++++++++++++++++++++
9
target/arm/translate.c | 26 +--------------------
10
3 files changed, 46 insertions(+), 25 deletions(-)
11
12
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-shared.decode
15
+++ b/target/arm/neon-shared.decode
16
@@ -XXX,XX +XXX,XX @@ VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
17
vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
18
VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
19
vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
20
+
21
+VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
22
+ vn=%vn_dp vd=%vd_dp size=0
23
+VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
24
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
25
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate-neon.inc.c
28
+++ b/target/arm/translate-neon.inc.c
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
30
gen_helper_gvec_fmlal_a32);
31
return true;
32
}
33
+
34
+static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
35
+{
36
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
37
+ int opr_sz;
38
+ TCGv_ptr fpst;
39
+
40
+ if (!dc_isar_feature(aa32_vcma, s)) {
41
+ return false;
42
+ }
43
+ if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
44
+ return false;
45
+ }
46
+
47
+ /* UNDEF accesses to D16-D31 if they don't exist. */
48
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
49
+ ((a->vd | a->vn | a->vm) & 0x10)) {
50
+ return false;
51
+ }
52
+
53
+ if ((a->vd | a->vn) & a->q) {
54
+ return false;
55
+ }
56
+
57
+ if (!vfp_access_check(s)) {
58
+ return true;
59
+ }
60
+
61
+ fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
62
+ : gen_helper_gvec_fcmlah_idx);
63
+ opr_sz = (1 + a->q) * 8;
64
+ fpst = get_fpstatus_ptr(1);
65
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
66
+ vfp_reg_offset(1, a->vn),
67
+ vfp_reg_offset(1, a->vm),
68
+ fpst, opr_sz, opr_sz,
69
+ (a->index << 2) | a->rot, fn_gvec_ptr);
70
+ tcg_temp_free_ptr(fpst);
71
+ return true;
72
+}
73
diff --git a/target/arm/translate.c b/target/arm/translate.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/translate.c
76
+++ b/target/arm/translate.c
77
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
78
bool is_long = false, q = extract32(insn, 6, 1);
79
bool ptr_is_env = false;
80
81
- if ((insn & 0xff000f10) == 0xfe000800) {
82
- /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
83
- int rot = extract32(insn, 20, 2);
84
- int size = extract32(insn, 23, 1);
85
- int index;
86
-
87
- if (!dc_isar_feature(aa32_vcma, s)) {
88
- return 1;
89
- }
90
- if (size == 0) {
91
- if (!dc_isar_feature(aa32_fp16_arith, s)) {
92
- return 1;
93
- }
94
- /* For fp16, rm is just Vm, and index is M. */
95
- rm = extract32(insn, 0, 4);
96
- index = extract32(insn, 5, 1);
97
- } else {
98
- /* For fp32, rm is the usual M:Vm, and index is 0. */
99
- VFP_DREG_M(rm, insn);
100
- index = 0;
101
- }
102
- data = (index << 2) | rot;
103
- fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
104
- : gen_helper_gvec_fcmlah_idx);
105
- } else if ((insn & 0xffb00f00) == 0xfe200d00) {
106
+ if ((insn & 0xffb00f00) == 0xfe200d00) {
107
/* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
108
int u = extract32(insn, 4, 1);
109
110
--
111
2.20.1
112
113
Deleted patch
1
Convert the V[US]DOT (scalar) insns in the 2reg-scalar-ext group
2
to decodetree.
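
A note on the register fields (illustration only; the helper below is
made up for the example): for the scalar form, rm is the raw 4-bit Vm
field and the M bit is the scalar index, whereas the non-scalar forms
combine M:Vm into a 5-bit D-register number, as the legacy decoder
comments being deleted describe.

    static int dreg_from_m_vm(int m, int vm)
    {
        /* M:Vm -> D-register number; e.g. m = 1, vm = 0x3 names d19 */
        return (m << 4) | vm;
    }
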
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-10-peter.maydell@linaro.org
7
---
8
target/arm/neon-shared.decode | 3 +++
9
target/arm/translate-neon.inc.c | 35 +++++++++++++++++++++++++++++++++
10
target/arm/translate.c | 13 +-----------
11
3 files changed, 39 insertions(+), 12 deletions(-)
12
13
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-shared.decode
16
+++ b/target/arm/neon-shared.decode
17
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
18
vn=%vn_dp vd=%vd_dp size=0
19
VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
20
vm=%vm_dp vn=%vn_dp vd=%vd_dp size=1 index=0
21
+
22
+VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
23
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
24
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-neon.inc.c
27
+++ b/target/arm/translate-neon.inc.c
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
29
tcg_temp_free_ptr(fpst);
30
return true;
31
}
32
+
33
+static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
34
+{
35
+ gen_helper_gvec_3 *fn_gvec;
36
+ int opr_sz;
37
+ TCGv_ptr fpst;
38
+
39
+ if (!dc_isar_feature(aa32_dp, s)) {
40
+ return false;
41
+ }
42
+
43
+ /* UNDEF accesses to D16-D31 if they don't exist. */
44
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
45
+ ((a->vd | a->vn) & 0x10)) {
46
+ return false;
47
+ }
48
+
49
+ if ((a->vd | a->vn) & a->q) {
50
+ return false;
51
+ }
52
+
53
+ if (!vfp_access_check(s)) {
54
+ return true;
55
+ }
56
+
57
+ fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
58
+ opr_sz = (1 + a->q) * 8;
59
+ fpst = get_fpstatus_ptr(1);
60
+ tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
61
+ vfp_reg_offset(1, a->vn),
62
+ vfp_reg_offset(1, a->rm),
63
+ opr_sz, opr_sz, a->index, fn_gvec);
64
+ tcg_temp_free_ptr(fpst);
65
+ return true;
66
+}
67
diff --git a/target/arm/translate.c b/target/arm/translate.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/translate.c
70
+++ b/target/arm/translate.c
71
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
72
bool is_long = false, q = extract32(insn, 6, 1);
73
bool ptr_is_env = false;
74
75
- if ((insn & 0xffb00f00) == 0xfe200d00) {
76
- /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
77
- int u = extract32(insn, 4, 1);
78
-
79
- if (!dc_isar_feature(aa32_dp, s)) {
80
- return 1;
81
- }
82
- fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
83
- /* rm is just Vm, and index is M. */
84
- data = extract32(insn, 5, 1); /* index */
85
- rm = extract32(insn, 0, 4);
86
- } else if ((insn & 0xffa00f10) == 0xfe000810) {
87
+ if ((insn & 0xffa00f10) == 0xfe000810) {
88
/* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
89
int is_s = extract32(insn, 20, 1);
90
int vm20 = extract32(insn, 0, 3);
91
--
92
2.20.1
93
94
Deleted patch
1
Convert the VFM[AS]L (scalar) insns in the 2reg-scalar-ext group
2
to decodetree. These are the last ones in the group so we can remove
3
all the legacy decode for the group.
4
1
5
Note that in disas_thumb2_insn() the parts of this encoding space
6
where the decodetree decoder returns false will correctly be directed
7
to illegal_op by the "(insn & (1 << 28))" check so they won't fall
8
into disas_coproc_insn() by mistake.
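
One detail that is easy to miss (illustrative only; the C function below
is just a restatement of the field definition, not code from the patch):
the %vfml_scalar_q1_index field concatenates insn bit 5 (M) as the high
bit and insn bit 3 as the low bit, which matches the "index = m * 2 + vm3"
computation being deleted from the legacy decoder.

    static int vfml_scalar_q1_index(int m, int vm3)
    {
        return (m << 1) | vm3;
    }
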
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20200430181003.21682-11-peter.maydell@linaro.org
13
---
14
target/arm/neon-shared.decode | 7 +++
15
target/arm/translate-neon.inc.c | 32 ++++++++++
16
target/arm/translate.c | 107 +-------------------------------
17
3 files changed, 40 insertions(+), 106 deletions(-)
18
19
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/neon-shared.decode
22
+++ b/target/arm/neon-shared.decode
23
@@ -XXX,XX +XXX,XX @@ VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
24
25
VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
26
vm=%vm_dp vn=%vn_dp vd=%vd_dp
27
+
28
+%vfml_scalar_q0_rm 0:3 5:1
29
+%vfml_scalar_q1_index 5:1 3:1
30
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
31
+ rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
32
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
33
+ index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
34
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate-neon.inc.c
37
+++ b/target/arm/translate-neon.inc.c
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
39
tcg_temp_free_ptr(fpst);
40
return true;
41
}
42
+
43
+static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
44
+{
45
+ int opr_sz;
46
+
47
+ if (!dc_isar_feature(aa32_fhm, s)) {
48
+ return false;
49
+ }
50
+
51
+ /* UNDEF accesses to D16-D31 if they don't exist. */
52
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
53
+ ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
54
+ return false;
55
+ }
56
+
57
+ if (a->vd & a->q) {
58
+ return false;
59
+ }
60
+
61
+ if (!vfp_access_check(s)) {
62
+ return true;
63
+ }
64
+
65
+ opr_sz = (1 + a->q) * 8;
66
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
67
+ vfp_reg_offset(a->q, a->vn),
68
+ vfp_reg_offset(a->q, a->rm),
69
+ cpu_env, opr_sz, opr_sz,
70
+ (a->index << 2) | a->s, /* is_2 == 0 */
71
+ gen_helper_gvec_fmlal_idx_a32);
72
+ return true;
73
+}
74
diff --git a/target/arm/translate.c b/target/arm/translate.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate.c
77
+++ b/target/arm/translate.c
78
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
79
}
80
81
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
82
-#define VFP_SREG(insn, bigbit, smallbit) \
83
- ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
84
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
85
if (dc_isar_feature(aa32_simd_r32, s)) { \
86
reg = (((insn) >> (bigbit)) & 0x0f) \
87
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
88
reg = ((insn) >> (bigbit)) & 0x0f; \
89
}} while (0)
90
91
-#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
92
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
93
-#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
94
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
95
-#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
96
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
97
98
static void gen_neon_dup_low16(TCGv_i32 var)
99
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
100
return 0;
101
}
102
103
-/* Advanced SIMD two registers and a scalar extension.
104
- * 31 24 23 22 20 16 12 11 10 9 8 3 0
105
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
106
- * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
107
- * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
108
- *
109
- */
110
-
111
-static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
112
-{
113
- gen_helper_gvec_3 *fn_gvec = NULL;
114
- gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
115
- int rd, rn, rm, opr_sz, data;
116
- int off_rn, off_rm;
117
- bool is_long = false, q = extract32(insn, 6, 1);
118
- bool ptr_is_env = false;
119
-
120
- if ((insn & 0xffa00f10) == 0xfe000810) {
121
- /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
122
- int is_s = extract32(insn, 20, 1);
123
- int vm20 = extract32(insn, 0, 3);
124
- int vm3 = extract32(insn, 3, 1);
125
- int m = extract32(insn, 5, 1);
126
- int index;
127
-
128
- if (!dc_isar_feature(aa32_fhm, s)) {
129
- return 1;
130
- }
131
- if (q) {
132
- rm = vm20;
133
- index = m * 2 + vm3;
134
- } else {
135
- rm = vm20 * 2 + m;
136
- index = vm3;
137
- }
138
- is_long = true;
139
- data = (index << 2) | is_s; /* is_2 == 0 */
140
- fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
141
- ptr_is_env = true;
142
- } else {
143
- return 1;
144
- }
145
-
146
- VFP_DREG_D(rd, insn);
147
- if (rd & q) {
148
- return 1;
149
- }
150
- if (q || !is_long) {
151
- VFP_DREG_N(rn, insn);
152
- if (rn & q & !is_long) {
153
- return 1;
154
- }
155
- off_rn = vfp_reg_offset(1, rn);
156
- off_rm = vfp_reg_offset(1, rm);
157
- } else {
158
- rn = VFP_SREG_N(insn);
159
- off_rn = vfp_reg_offset(0, rn);
160
- off_rm = vfp_reg_offset(0, rm);
161
- }
162
- if (s->fp_excp_el) {
163
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
164
- syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
165
- return 0;
166
- }
167
- if (!s->vfp_enabled) {
168
- return 1;
169
- }
170
-
171
- opr_sz = (1 + q) * 8;
172
- if (fn_gvec_ptr) {
173
- TCGv_ptr ptr;
174
- if (ptr_is_env) {
175
- ptr = cpu_env;
176
- } else {
177
- ptr = get_fpstatus_ptr(1);
178
- }
179
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
180
- opr_sz, opr_sz, data, fn_gvec_ptr);
181
- if (!ptr_is_env) {
182
- tcg_temp_free_ptr(ptr);
183
- }
184
- } else {
185
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
186
- opr_sz, opr_sz, data, fn_gvec);
187
- }
188
- return 0;
189
-}
190
-
191
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
192
{
193
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
194
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
195
}
196
}
197
}
198
- } else if ((insn & 0x0f000a00) == 0x0e000800
199
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
200
- if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
201
- goto illegal_op;
202
- }
203
- return;
204
}
205
goto illegal_op;
206
}
207
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
208
}
209
break;
210
}
211
- if ((insn & 0xff000a00) == 0xfe000800
212
- && arm_dc_feature(s, ARM_FEATURE_V8)) {
213
- /* The Thumb2 and ARM encodings are identical. */
214
- if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
215
- goto illegal_op;
216
- }
217
- } else if (((insn >> 24) & 3) == 3) {
218
+ if (((insn >> 24) & 3) == 3) {
219
/* Translate into the equivalent ARM encoding. */
220
insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
221
if (disas_neon_data_insn(s, insn)) {
222
--
223
2.20.1
224
225
Deleted patch
1
Convert the Neon logic ops in the 3-reg-same grouping to decodetree.
2
Note that for the logic ops the 'size' field forms part of their
3
decode and the actual operations are always bitwise.
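
To make the operand-order point concrete (a hand expansion for
illustration only), DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs) from the
patch below expands to roughly:

    static void gen_VBIT_3s(unsigned vece, uint32_t rd_ofs,
                            uint32_t rn_ofs, uint32_t rm_ofs,
                            uint32_t oprsz, uint32_t maxsz)
    {
        /* VBIT: copy bits of Vn into Vd where the Vm bit is 1,
         * otherwise keep the existing Vd bit.
         */
        tcg_gen_gvec_bitsel(vece, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                            oprsz, maxsz);
    }
    /* ...and DO_3SAME(VBIT, gen_VBIT_3s) then provides trans_VBIT_3s(). */
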
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20200430181003.21682-16-peter.maydell@linaro.org
8
---
9
target/arm/neon-dp.decode | 12 +++++++++++
10
target/arm/translate-neon.inc.c | 19 +++++++++++++++++
11
target/arm/translate.c | 38 +--------------------------------
12
3 files changed, 32 insertions(+), 37 deletions(-)
13
14
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/neon-dp.decode
17
+++ b/target/arm/neon-dp.decode
18
@@ -XXX,XX +XXX,XX @@
19
@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
20
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
21
22
+@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
23
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0
24
+
25
+VAND_3s 1111 001 0 0 . 00 .... .... 0001 ... 1 .... @3same_logic
26
+VBIC_3s 1111 001 0 0 . 01 .... .... 0001 ... 1 .... @3same_logic
27
+VORR_3s 1111 001 0 0 . 10 .... .... 0001 ... 1 .... @3same_logic
28
+VORN_3s 1111 001 0 0 . 11 .... .... 0001 ... 1 .... @3same_logic
29
+VEOR_3s 1111 001 1 0 . 00 .... .... 0001 ... 1 .... @3same_logic
30
+VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
31
+VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
32
+VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
33
+
34
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
35
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
36
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-neon.inc.c
39
+++ b/target/arm/translate-neon.inc.c
40
@@ -XXX,XX +XXX,XX @@ static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
41
42
DO_3SAME(VADD, tcg_gen_gvec_add)
43
DO_3SAME(VSUB, tcg_gen_gvec_sub)
44
+DO_3SAME(VAND, tcg_gen_gvec_and)
45
+DO_3SAME(VBIC, tcg_gen_gvec_andc)
46
+DO_3SAME(VORR, tcg_gen_gvec_or)
47
+DO_3SAME(VORN, tcg_gen_gvec_orc)
48
+DO_3SAME(VEOR, tcg_gen_gvec_xor)
49
+
50
+/* These insns are all gvec_bitsel but with the inputs in various orders. */
51
+#define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
52
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
53
+ uint32_t rn_ofs, uint32_t rm_ofs, \
54
+ uint32_t oprsz, uint32_t maxsz) \
55
+ { \
56
+ tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
57
+ } \
58
+ DO_3SAME(INSN, gen_##INSN##_3s)
59
+
60
+DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
61
+DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
62
+DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
63
diff --git a/target/arm/translate.c b/target/arm/translate.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/translate.c
66
+++ b/target/arm/translate.c
67
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
68
}
69
return 1;
70
71
- case NEON_3R_LOGIC: /* Logic ops. */
72
- switch ((u << 2) | size) {
73
- case 0: /* VAND */
74
- tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
75
- vec_size, vec_size);
76
- break;
77
- case 1: /* VBIC */
78
- tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
79
- vec_size, vec_size);
80
- break;
81
- case 2: /* VORR */
82
- tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
83
- vec_size, vec_size);
84
- break;
85
- case 3: /* VORN */
86
- tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
87
- vec_size, vec_size);
88
- break;
89
- case 4: /* VEOR */
90
- tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
91
- vec_size, vec_size);
92
- break;
93
- case 5: /* VBSL */
94
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
95
- vec_size, vec_size);
96
- break;
97
- case 6: /* VBIT */
98
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
99
- vec_size, vec_size);
100
- break;
101
- case 7: /* VBIF */
102
- tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
103
- vec_size, vec_size);
104
- break;
105
- }
106
- return 0;
107
-
108
case NEON_3R_VQADD:
109
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
110
rn_ofs, rm_ofs, vec_size, vec_size,
111
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
112
return 0;
113
114
case NEON_3R_VADD_VSUB:
115
+ case NEON_3R_LOGIC:
116
/* Already handled by decodetree */
117
return 1;
118
}
119
--
120
2.20.1
121
122
Deleted patch
1
Convert the Neon 3-reg-same VMAX and VMIN insns to decodetree.
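
Hand-expanding one instance (illustration only),
DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax) in the patch below becomes
roughly:

    static bool trans_VMAX_S_3s(DisasContext *s, arg_3same *a)
    {
        if (a->size == 3) {
            /* These insns have no 64-bit-element form, so size==3 UNDEFs */
            return false;
        }
        return do_3same(s, a, tcg_gen_gvec_smax);
    }
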
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20200430181003.21682-17-peter.maydell@linaro.org
6
---
7
target/arm/neon-dp.decode | 5 +++++
8
target/arm/translate-neon.inc.c | 14 ++++++++++++++
9
target/arm/translate.c | 21 ++-------------------
10
3 files changed, 21 insertions(+), 19 deletions(-)
11
12
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/neon-dp.decode
15
+++ b/target/arm/neon-dp.decode
16
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
17
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
18
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
19
20
+VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
21
+VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
22
+VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
23
+VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
24
+
25
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
26
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
27
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-neon.inc.c
30
+++ b/target/arm/translate-neon.inc.c
31
@@ -XXX,XX +XXX,XX @@ DO_3SAME(VEOR, tcg_gen_gvec_xor)
32
DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
33
DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
34
DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
35
+
36
+#define DO_3SAME_NO_SZ_3(INSN, FUNC) \
37
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
38
+ { \
39
+ if (a->size == 3) { \
40
+ return false; \
41
+ } \
42
+ return do_3same(s, a, FUNC); \
43
+ }
44
+
45
+DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
46
+DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
47
+DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
48
+DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
49
diff --git a/target/arm/translate.c b/target/arm/translate.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/translate.c
52
+++ b/target/arm/translate.c
53
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
54
rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
55
return 0;
56
57
- case NEON_3R_VMAX:
58
- if (u) {
59
- tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
60
- vec_size, vec_size);
61
- } else {
62
- tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
63
- vec_size, vec_size);
64
- }
65
- return 0;
66
- case NEON_3R_VMIN:
67
- if (u) {
68
- tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
69
- vec_size, vec_size);
70
- } else {
71
- tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
72
- vec_size, vec_size);
73
- }
74
- return 0;
75
-
76
case NEON_3R_VSHL:
77
/* Note the operation is vshl vd,vm,vn */
78
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
79
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
80
81
case NEON_3R_VADD_VSUB:
82
case NEON_3R_LOGIC:
83
+ case NEON_3R_VMAX:
84
+ case NEON_3R_VMIN:
85
/* Already handled by decodetree */
86
return 1;
87
}
88
--
89
2.20.1
90
91
Deleted patch
1
Convert the Neon comparison ops in the 3-reg-same grouping
2
to decodetree.
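
As a per-lane model of what the VTST/VCEQ pair added below computes
(illustration only, using a 32-bit lane; these functions are not part of
the patch):

    static uint32_t vtst_lane(uint32_t n, uint32_t m)
    {
        /* VTST: all-ones if Vn AND Vm is non-zero, else zero */
        return (n & m) ? ~0u : 0;
    }

    static uint32_t vceq_lane(uint32_t n, uint32_t m)
    {
        /* VCEQ: all-ones if the lanes are equal, else zero */
        return (n == m) ? ~0u : 0;
    }
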
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200430181003.21682-18-peter.maydell@linaro.org
7
---
8
target/arm/neon-dp.decode | 8 ++++++++
9
target/arm/translate-neon.inc.c | 22 ++++++++++++++++++++++
10
target/arm/translate.c | 23 +++--------------------
11
3 files changed, 33 insertions(+), 20 deletions(-)
12
13
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/neon-dp.decode
16
+++ b/target/arm/neon-dp.decode
17
@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
18
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
19
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
20
21
+VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
22
+VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
23
+VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
24
+VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
25
+
26
VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
27
VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
28
VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
29
@@ -XXX,XX +XXX,XX @@ VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
30
31
VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
32
VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
33
+
34
+VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
35
+VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
36
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-neon.inc.c
39
+++ b/target/arm/translate-neon.inc.c
40
@@ -XXX,XX +XXX,XX @@ DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
41
DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
42
DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
43
DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
44
+
45
+#define DO_3SAME_CMP(INSN, COND) \
46
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
47
+ uint32_t rn_ofs, uint32_t rm_ofs, \
48
+ uint32_t oprsz, uint32_t maxsz) \
49
+ { \
50
+ tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
51
+ } \
52
+ DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
53
+
54
+DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
55
+DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
56
+DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
57
+DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
58
+DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
59
+
60
+static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
61
+ uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
62
+{
63
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
64
+}
65
+DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
66
diff --git a/target/arm/translate.c b/target/arm/translate.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/translate.c
69
+++ b/target/arm/translate.c
70
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
71
u ? &mls_op[size] : &mla_op[size]);
72
return 0;
73
74
- case NEON_3R_VTST_VCEQ:
75
- if (u) { /* VCEQ */
76
- tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
77
- vec_size, vec_size);
78
- } else { /* VTST */
79
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
80
- vec_size, vec_size, &cmtst_op[size]);
81
- }
82
- return 0;
83
-
84
- case NEON_3R_VCGT:
85
- tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
86
- rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
87
- return 0;
88
-
89
- case NEON_3R_VCGE:
90
- tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
91
- rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
92
- return 0;
93
-
94
case NEON_3R_VSHL:
95
/* Note the operation is vshl vd,vm,vn */
96
tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
97
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
98
case NEON_3R_LOGIC:
99
case NEON_3R_VMAX:
100
case NEON_3R_VMIN:
101
+ case NEON_3R_VTST_VCEQ:
102
+ case NEON_3R_VCGT:
103
+ case NEON_3R_VCGE:
104
/* Already handled by decodetree */
105
return 1;
106
}
107
--
108
2.20.1
109
110
Convert the Neon VQADD/VQSUB insns in the 3-reg-same grouping
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200430181003.21682-19-peter.maydell@linaro.org
---
target/arm/neon-dp.decode | 6 ++++++
target/arm/translate-neon.inc.c | 15 +++++++++++++++
target/arm/translate.c | 14 ++------------
3 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@
@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp

+VQADD_S_3s 1111 001 0 0 . .. .... .... 0000 . . . 1 .... @3same
+VQADD_U_3s 1111 001 1 0 . .. .... .... 0000 . . . 1 .... @3same
+
@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
&3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0

@@ -XXX,XX +XXX,XX @@ VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic

+VQSUB_S_3s 1111 001 0 0 . .. .... .... 0010 . . . 1 .... @3same
+VQSUB_U_3s 1111 001 1 0 . .. .... .... 0010 . . . 1 .... @3same
+
VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
diff --git a/target/arm/translate-neon.inc.c b/target/arm/translate-neon.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.inc.c
+++ b/target/arm/translate-neon.inc.c
@@ -XXX,XX +XXX,XX @@ static void gen_VTST_3s(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &cmtst_op[vece]);
}
DO_3SAME_NO_SZ_3(VTST, gen_VTST_3s)
+
+#define DO_3SAME_GVEC4(INSN, OPARRAY) \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc), \
+ rn_ofs, rm_ofs, oprsz, maxsz, &OPARRAY[vece]); \
+ } \
+ DO_3SAME(INSN, gen_##INSN##_3s)
+
+DO_3SAME_GVEC4(VQADD_S, sqadd_op)
+DO_3SAME_GVEC4(VQADD_U, uqadd_op)
+DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
+DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
return 1;

- case NEON_3R_VQADD:
- tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
- rn_ofs, rm_ofs, vec_size, vec_size,
- (u ? uqadd_op : sqadd_op) + size);
- return 0;
-
- case NEON_3R_VQSUB:
- tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
- rn_ofs, rm_ofs, vec_size, vec_size,
- (u ? uqsub_op : sqsub_op) + size);
- return 0;
-
case NEON_3R_VMUL: /* VMUL */
if (u) {
/* Polynomial case allows only P8. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case NEON_3R_VTST_VCEQ:
case NEON_3R_VCGT:
case NEON_3R_VCGE:
+ case NEON_3R_VQADD:
+ case NEON_3R_VQSUB:
/* Already handled by decodetree */
return 1;
}
--
2.20.1


The xkb official name for the Arabic keyboard layout is 'ara'.
However xkb has for at least the past 15 years also permitted it to
be named via the legacy synonym 'ar'. In xkeyboard-config 2.39 this
synonym was removed, which breaks compilation of QEMU:

FAILED: pc-bios/keymaps/ar
/home/fred/qemu-git/src/qemu/build-full/qemu-keymap -f pc-bios/keymaps/ar -l ar
xkbcommon: ERROR: Couldn't find file "symbols/ar" in include paths
xkbcommon: ERROR: 1 include paths searched:
xkbcommon: ERROR:     /usr/share/X11/xkb
xkbcommon: ERROR: 3 include paths could not be added:
xkbcommon: ERROR:     /home/fred/.config/xkb
xkbcommon: ERROR:     /home/fred/.xkb
xkbcommon: ERROR:     /etc/xkb
xkbcommon: ERROR: Abandoning symbols file "(unnamed)"
xkbcommon: ERROR: Failed to compile xkb_symbols
xkbcommon: ERROR: Failed to compile keymap

The upstream xkeyboard-config change removing the compat
mapping is:
https://gitlab.freedesktop.org/xkeyboard-config/xkeyboard-config/-/commit/470ad2cd8fea84d7210377161d86b31999bb5ea6

Make QEMU always ask for the 'ara' xkb layout, which should work on
both older and newer xkeyboard-config. We leave the QEMU name for
this keyboard layout as 'ar'; it is not the only one where our name
for it deviates from the xkb standard name.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-id: 20230620162024.1132013-1-peter.maydell@linaro.org
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1709
---
pc-bios/keymaps/meson.build | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/pc-bios/keymaps/meson.build
+++ b/pc-bios/keymaps/meson.build
@@ -XXX,XX +XXX,XX @@
keymaps = {
- 'ar': '-l ar',
+ 'ar': '-l ara',
'bepo': '-l fr -v dvorak',
'cz': '-l cz',
'da': '-l dk',
--
2.34.1