Big pullreq this week, though none of the new features are
particularly earthshaking. Most of the bulk is from code cleanup
patches from me or rth.

thanks
-- PMM

The following changes since commit b651b80822fa8cb66ca30087ac7fbc75507ae5d2:

  Merge remote-tracking branch 'remotes/vivier2/tags/linux-user-for-5.0-pull-request' into staging (2020-02-20 17:35:42 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200221

for you to fetch changes up to 270a679b3f950d7c4c600f324aab8bff292d0971:

  target/arm: Add missing checks for fpsp_v2 (2020-02-21 12:54:25 +0000)

----------------------------------------------------------------
target-arm queue:
 * aspeed/scu: Implement chip ID register
 * hw/misc/iotkit-secctl: Fix writing to 'PPC Interrupt Clear' register
 * mainstone: Make providing flash images non-mandatory
 * z2: Make providing flash images non-mandatory
 * Fix failures to flush SVE high bits after AdvSIMD INS/ZIP/UZP/TRN/TBL/TBX/EXT
 * Minor performance improvement: spend less time recalculating hflags values
 * Code cleanup to isar_feature function tests
 * Implement ARMv8.1-PMU and ARMv8.4-PMU extensions
 * Bugfix: correct handling of PMCR_EL0.LC bit
 * Bugfix: correct definition of PMCRDP
 * Correctly implement ACTLR2, HACTLR2
 * allwinner: Wire up USB ports
 * Vectorize emulation of USHL, SSHL, PMUL*
 * xilinx_spips: Correct the number of dummy cycles for the FAST_READ_4 cmd
 * sh4: Fix PCI ISA IO memory subregion
 * Code cleanup to use more isar_feature tests and fewer ARM_FEATURE_* tests

----------------------------------------------------------------
Francisco Iglesias (1):
      xilinx_spips: Correct the number of dummy cycles for the FAST_READ_4 cmd

Guenter Roeck (6):
      mainstone: Make providing flash images non-mandatory
      z2: Make providing flash images non-mandatory
      hw: usb: hcd-ohci: Move OHCISysBusState and TYPE_SYSBUS_OHCI to include file
      hcd-ehci: Introduce "companion-enable" sysbus property
      arm: allwinner: Wire up USB ports
      sh4: Fix PCI ISA IO memory subregion

Joel Stanley (2):
      aspeed/scu: Create separate write callbacks
      aspeed/scu: Implement chip ID register

Peter Maydell (21):
      target/arm: Add _aa32_ to isar_feature functions testing 32-bit ID registers
      target/arm: Check aa32_pan in take_aarch32_exception(), not aa64_pan
      target/arm: Add isar_feature_any_fp16 and document naming/usage conventions
      target/arm: Define and use any_predinv isar_feature test
      target/arm: Factor out PMU register definitions
      target/arm: Add and use FIELD definitions for ID_AA64DFR0_EL1
      target/arm: Use FIELD macros for clearing ID_DFR0 PERFMON field
      target/arm: Define an aa32_pmu_8_1 isar feature test function
      target/arm: Add _aa64_ and _any_ versions of pmu_8_1 isar checks
      target/arm: Stop assuming DBGDIDR always exists
      target/arm: Move DBGDIDR into ARMISARegisters
      target/arm: Read debug-related ID registers from KVM
      target/arm: Implement ARMv8.1-PMU extension
      target/arm: Implement ARMv8.4-PMU extension
      target/arm: Provide ARMv8.4-PMU in '-cpu max'
      target/arm: Correct definition of PMCRDP
      target/arm: Correct handling of PMCR_EL0.LC bit
      target/arm: Test correct register in aa32_pan and aa32_ats1e1 checks
      target/arm: Use isar_feature function for testing AA32HPD feature
      target/arm: Use FIELD_EX32 for testing 32-bit fields
      target/arm: Correctly implement ACTLR2, HACTLR2

Philippe Mathieu-Daudé (1):
      hw/misc/iotkit-secctl: Fix writing to 'PPC Interrupt Clear' register

Richard Henderson (21):
      target/arm: Flush high bits of sve register after AdvSIMD EXT
      target/arm: Flush high bits of sve register after AdvSIMD TBL/TBX
      target/arm: Flush high bits of sve register after AdvSIMD ZIP/UZP/TRN
      target/arm: Flush high bits of sve register after AdvSIMD INS
      target/arm: Use bit 55 explicitly for pauth
      target/arm: Fix select for aa64_va_parameters_both
      target/arm: Remove ttbr1_valid check from get_phys_addr_lpae
      target/arm: Split out aa64_va_parameter_tbi, aa64_va_parameter_tbid
      target/arm: Vectorize USHL and SSHL
      target/arm: Convert PMUL.8 to gvec
      target/arm: Convert PMULL.64 to gvec
      target/arm: Convert PMULL.8 to gvec
      target/arm: Rename isar_feature_aa32_simd_r32
      target/arm: Use isar_feature_aa32_simd_r32 more places
      target/arm: Set MVFR0.FPSP for ARMv5 cpus
      target/arm: Add isar_feature_aa32_simd_r16
      target/arm: Rename isar_feature_aa32_fpdp_v2
      target/arm: Add isar_feature_aa32_{fpsp_v2, fpsp_v3, fpdp_v3}
      target/arm: Perform fpdp_v2 check first
      target/arm: Replace ARM_FEATURE_VFP3 checks with fp{sp, dp}_v3
      target/arm: Add missing checks for fpsp_v2

 hw/usb/hcd-ohci.h              |  16 ++
 include/hw/arm/allwinner-a10.h |   6 +
 target/arm/cpu.h               | 173 ++++++++++++---
 target/arm/helper-sve.h        |   2 +
 target/arm/helper.h            |  21 +-
 target/arm/internals.h         |  47 +++-
 target/arm/translate.h         |   6 +
 hw/arm/allwinner-a10.c         |  43 ++++
 hw/arm/mainstone.c             |  11 +-
 hw/arm/z2.c                    |   6 -
 hw/intc/armv7m_nvic.c          |  30 +--
 hw/misc/aspeed_scu.c           |  93 ++++++--
 hw/misc/iotkit-secctl.c        |   2 +-
 hw/sh4/sh_pci.c                |  11 +-
 hw/ssi/xilinx_spips.c          |   2 +-
 hw/usb/hcd-ehci-sysbus.c       |   2 +
 hw/usb/hcd-ohci.c              |  15 --
 linux-user/arm/signal.c        |   4 +-
 linux-user/elfload.c           |   4 +-
 target/arm/arch_dump.c         |  11 +-
 target/arm/cpu.c               | 175 +++++++--------
 target/arm/cpu64.c             |  58 +++--
 target/arm/debug_helper.c      |   6 +-
 target/arm/helper.c            | 472 +++++++++++++++++++++++------------------
 target/arm/kvm32.c             |  25 +++
 target/arm/kvm64.c             |  46 ++++
 target/arm/m_helper.c          |  11 +-
 target/arm/machine.c           |   3 +-
 target/arm/neon_helper.c       | 117 ----------
 target/arm/pauth_helper.c      |   3 +-
 target/arm/translate-a64.c     |  92 ++++----
 target/arm/translate-vfp.inc.c | 263 ++++++++++++++---------
 target/arm/translate.c         | 356 ++++++++++++++++++++++++++-----
 target/arm/vec_helper.c        | 211 ++++++++++++++++++
 target/arm/vfp_helper.c        |   2 +-
 35 files changed, 1564 insertions(+), 781 deletions(-)

From: Joel Stanley <joel@jms.id.au>

This splits the common write callback into separate ast2400 and ast2500
implementations. This makes it clearer when implementing differing
behaviour.

Signed-off-by: Joel Stanley <joel@jms.id.au>
Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200121013302.43839-2-joel@jms.id.au
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/aspeed_scu.c | 80 +++++++++++++++++++++++++++++++-------------
 1 file changed, 57 insertions(+), 23 deletions(-)

diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -XXX,XX +XXX,XX @@ static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
     return s->regs[reg];
 }
 
-static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data,
-                             unsigned size)
+static void aspeed_ast2400_scu_write(void *opaque, hwaddr offset,
+                                     uint64_t data, unsigned size)
+{
+    AspeedSCUState *s = ASPEED_SCU(opaque);
+    int reg = TO_REG(offset);
+
+    if (reg >= ASPEED_SCU_NR_REGS) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        return;
+    }
+
+    if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 &&
+        !s->regs[PROT_KEY]) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__);
+    }
+
+    trace_aspeed_scu_write(offset, size, data);
+
+    switch (reg) {
+    case PROT_KEY:
+        s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0;
+        return;
+    case SILICON_REV:
+    case FREQ_CNTR_EVAL:
+    case VGA_SCRATCH1 ... VGA_SCRATCH8:
+    case RNG_DATA:
+    case FREE_CNTR4:
+    case FREE_CNTR4_EXT:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        return;
+    }
+
+    s->regs[reg] = data;
+}
+
+static void aspeed_ast2500_scu_write(void *opaque, hwaddr offset,
+                                     uint64_t data, unsigned size)
 {
     AspeedSCUState *s = ASPEED_SCU(opaque);
     int reg = TO_REG(offset);
@@ -XXX,XX +XXX,XX @@ static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data,
     case PROT_KEY:
         s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0;
         return;
-    case CLK_SEL:
-        s->regs[reg] = data;
-        break;
     case HW_STRAP1:
-        if (ASPEED_IS_AST2500(s->regs[SILICON_REV])) {
-            s->regs[HW_STRAP1] |= data;
-            return;
-        }
-        /* Jump to assignment below */
-        break;
+        s->regs[HW_STRAP1] |= data;
+        return;
     case SILICON_REV:
-        if (ASPEED_IS_AST2500(s->regs[SILICON_REV])) {
-            s->regs[HW_STRAP1] &= ~data;
-        } else {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
-                          __func__, offset);
-        }
-        /* Avoid assignment below, we've handled everything */
+        s->regs[HW_STRAP1] &= ~data;
         return;
     case FREQ_CNTR_EVAL:
     case VGA_SCRATCH1 ... VGA_SCRATCH8:
@@ -XXX,XX +XXX,XX @@ static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data,
     s->regs[reg] = data;
 }
 
-static const MemoryRegionOps aspeed_scu_ops = {
+static const MemoryRegionOps aspeed_ast2400_scu_ops = {
     .read = aspeed_scu_read,
-    .write = aspeed_scu_write,
+    .write = aspeed_ast2400_scu_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+    .valid.unaligned = false,
+};
+
+static const MemoryRegionOps aspeed_ast2500_scu_ops = {
+    .read = aspeed_scu_read,
+    .write = aspeed_ast2500_scu_write,
     .endianness = DEVICE_LITTLE_ENDIAN,
     .valid.min_access_size = 4,
     .valid.max_access_size = 4,
@@ -XXX,XX +XXX,XX @@ static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data)
     asc->calc_hpll = aspeed_2400_scu_calc_hpll;
     asc->apb_divider = 2;
     asc->nr_regs = ASPEED_SCU_NR_REGS;
-    asc->ops = &aspeed_scu_ops;
+    asc->ops = &aspeed_ast2400_scu_ops;
 }
 
 static const TypeInfo aspeed_2400_scu_info = {
@@ -XXX,XX +XXX,XX @@ static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data)
     asc->calc_hpll = aspeed_2500_scu_calc_hpll;
     asc->apb_divider = 4;
     asc->nr_regs = ASPEED_SCU_NR_REGS;
-    asc->ops = &aspeed_scu_ops;
+    asc->ops = &aspeed_ast2500_scu_ops;
 }
 
 static const TypeInfo aspeed_2500_scu_info = {
--
2.20.1

From: Joel Stanley <joel@jms.id.au>

This returns a fixed but non-zero value for the chip id.

Signed-off-by: Joel Stanley <joel@jms.id.au>
Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200121013302.43839-3-joel@jms.id.au
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/aspeed_scu.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -XXX,XX +XXX,XX @@
 #define CPU2_BASE_SEG4 TO_REG(0x110)
 #define CPU2_BASE_SEG5 TO_REG(0x114)
 #define CPU2_CACHE_CTRL TO_REG(0x118)
+#define CHIP_ID0 TO_REG(0x150)
+#define CHIP_ID1 TO_REG(0x154)
 #define UART_HPLL_CLK TO_REG(0x160)
 #define PCIE_CTRL TO_REG(0x180)
 #define BMC_MMIO_CTRL TO_REG(0x184)
@@ -XXX,XX +XXX,XX @@
 #define AST2600_HW_STRAP2_PROT TO_REG(0x518)
 #define AST2600_RNG_CTRL TO_REG(0x524)
 #define AST2600_RNG_DATA TO_REG(0x540)
+#define AST2600_CHIP_ID0 TO_REG(0x5B0)
+#define AST2600_CHIP_ID1 TO_REG(0x5B4)
 
 #define AST2600_CLK TO_REG(0x40)
 
@@ -XXX,XX +XXX,XX @@ static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = {
     [CPU2_BASE_SEG1]  = 0x80000000U,
     [CPU2_BASE_SEG4]  = 0x1E600000U,
     [CPU2_BASE_SEG5]  = 0xC0000000U,
+    [CHIP_ID0]        = 0x1234ABCDU,
+    [CHIP_ID1]        = 0x88884444U,
     [UART_HPLL_CLK]   = 0x00001903U,
     [PCIE_CTRL]       = 0x0000007BU,
     [BMC_DEV_ID]      = 0x00002402U
@@ -XXX,XX +XXX,XX @@ static void aspeed_ast2500_scu_write(void *opaque, hwaddr offset,
     case RNG_DATA:
     case FREE_CNTR4:
     case FREE_CNTR4_EXT:
+    case CHIP_ID0:
+    case CHIP_ID1:
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
                       __func__, offset);
@@ -XXX,XX +XXX,XX @@ static void aspeed_ast2600_scu_write(void *opaque, hwaddr offset,
     case AST2600_RNG_DATA:
     case AST2600_SILICON_REV:
     case AST2600_SILICON_REV2:
+    case AST2600_CHIP_ID0:
+    case AST2600_CHIP_ID1:
         /* Add read only registers here */
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
                       __func__, offset);
@@ -XXX,XX +XXX,XX @@ static const uint32_t ast2600_a0_resets[ASPEED_AST2600_SCU_NR_REGS] = {
     [AST2600_CLK_STOP_CTRL2]    = 0xFFF0FFF0,
     [AST2600_SDRAM_HANDSHAKE]   = 0x00000040, /* SoC completed DRAM init */
     [AST2600_HPLL_PARAM]        = 0x1000405F,
+    [AST2600_CHIP_ID0]          = 0x1234ABCD,
+    [AST2600_CHIP_ID1]          = 0x88884444,
+
 };
 
 static void aspeed_ast2600_scu_reset(DeviceState *dev)
--
2.20.1

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Fix warning reported by Clang static code analyzer:

    CC      hw/misc/iotkit-secctl.o
  hw/misc/iotkit-secctl.c:343:9: warning: Value stored to 'value' is never read
          value &= 0x00f000f3;
          ^        ~~~~~~~~~~

Fixes: b3717c23e1c
Reported-by: Clang Static Analyzer
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20200217132922.24607-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/iotkit-secctl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/iotkit-secctl.c
+++ b/hw/misc/iotkit-secctl.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult iotkit_secctl_s_write(void *opaque, hwaddr addr,
         qemu_set_irq(s->sec_resp_cfg, s->secrespcfg);
         break;
     case A_SECPPCINTCLR:
-        value &= 0x00f000f3;
+        s->secppcintstat &= ~(value & 0x00f000f3);
         foreach_ppc(s, iotkit_secctl_ppc_update_irq_clear);
         break;
     case A_SECPPCINTEN:
--
2.20.1

From: Guenter Roeck <linux@roeck-us.net>

Up to now, the mainstone machine only boots if two flash images are
provided. This is not really necessary; the machine can boot from initrd
or from SD without it. At the same time, having to provide dummy flash
images is a nuisance and does not add any real value. Make it optional.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200217210824.18513-1-linux@roeck-us.net
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/mainstone.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mainstone.c
+++ b/hw/arm/mainstone.c
@@ -XXX,XX +XXX,XX @@ static void mainstone_common_init(MemoryRegion *address_space_mem,
     /* There are two 32MiB flash devices on the board */
     for (i = 0; i < 2; i ++) {
         dinfo = drive_get(IF_PFLASH, 0, i);
-        if (!dinfo) {
-            if (qtest_enabled()) {
-                break;
-            }
-            error_report("Two flash images must be given with the "
-                         "'pflash' parameter");
-            exit(1);
-        }
-
         if (!pflash_cfi01_register(mainstone_flash_base[i],
                                    i ? "mainstone.flash1" : "mainstone.flash0",
                                    MAINSTONE_FLASH,
-                                   blk_by_legacy_dinfo(dinfo),
+                                   dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
                                    sector_len, 4, 0, 0, 0, 0, be)) {
             error_report("Error registering flash memory");
             exit(1);
--
2.20.1

From: Guenter Roeck <linux@roeck-us.net>

Up to now, the z2 machine only boots if a flash image is provided.
This is not really necessary; the machine can boot from initrd or from
SD without it. At the same time, having to provide dummy flash images
is a nuisance and does not add any real value. Make it optional.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200217210903.18602-1-linux@roeck-us.net
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/z2.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/hw/arm/z2.c b/hw/arm/z2.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/z2.c
+++ b/hw/arm/z2.c
@@ -XXX,XX +XXX,XX @@ static void z2_init(MachineState *machine)
     be = 0;
 #endif
     dinfo = drive_get(IF_PFLASH, 0, 0);
-    if (!dinfo && !qtest_enabled()) {
-        error_report("Flash image must be given with the "
-                     "'pflash' parameter");
-        exit(1);
-    }
-
     if (!pflash_cfi01_register(Z2_FLASH_BASE, "z2.flash0", Z2_FLASH_SIZE,
                                dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
                                sector_len, 4, 0, 0, 0, 0, be)) {
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Writes to AdvSIMD registers flush the bits above 128.

Buglink: https://bugs.launchpad.net/bugs/1863247
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214194643.23317-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_ext(DisasContext *s, uint32_t insn)
     tcg_temp_free_i64(tcg_resl);
     write_vec_element(s, tcg_resh, rd, 1, MO_64);
     tcg_temp_free_i64(tcg_resh);
+    clear_vec_high(s, true, rd);
 }
 
 /* TBL/TBX
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Writes to AdvSIMD registers flush the bits above 128.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214194643.23317-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_tb(DisasContext *s, uint32_t insn)
     tcg_temp_free_i64(tcg_resl);
     write_vec_element(s, tcg_resh, rd, 1, MO_64);
     tcg_temp_free_i64(tcg_resh);
+    clear_vec_high(s, true, rd);
 }
 
 /* ZIP/UZP/TRN
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Writes to AdvSIMD registers flush the bits above 128.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214194643.23317-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
     tcg_temp_free_i64(tcg_resl);
     write_vec_element(s, tcg_resh, rd, 1, MO_64);
     tcg_temp_free_i64(tcg_resh);
+    clear_vec_high(s, true, rd);
 }
 
 /*
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Writes to AdvSIMD registers flush the bits above 128.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214194643.23317-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_simd_inse(DisasContext *s, int rd, int rn,
     write_vec_element(s, tmp, rd, dst_index, size);
 
     tcg_temp_free_i64(tmp);
+
+    /* INS is considered a 128-bit write for SVE. */
+    clear_vec_high(s, true, rd);
 }
 
 
@@ -XXX,XX +XXX,XX @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
 
     idx = extract32(imm5, 1 + size, 4 - size);
     write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
+
+    /* INS is considered a 128-bit write for SVE. */
+    clear_vec_high(s, true, rd);
 }
 
 /*
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

The pseudocode in aarch64/functions/pac/auth/Auth and
aarch64/functions/pac/strip/Strip always uses bit 55 for
extfield and does not consider if the current regime has 2 ranges.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200216194343.21331-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/pauth_helper.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/pauth_helper.c
+++ b/target/arm/pauth_helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
 
 static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
 {
-    uint64_t extfield = -param.select;
+    /* Note that bit 55 is used whether or not the regime has 2 ranges. */
+    uint64_t extfield = sextract64(ptr, 55, 1);
     int bot_pac_bit = 64 - param.tsz;
     int top_pac_bit = 64 - 8 * param.tbi;
 
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Select should always be 0 for a regime with one range.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200216194343.21331-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 46 +++++++++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 22 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
     bool tbi, tbid, epd, hpd, using16k, using64k;
     int select, tsz;
 
-    /*
-     * Bit 55 is always between the two regions, and is canonical for
-     * determining if address tagging is enabled.
-     */
-    select = extract64(va, 55, 1);
-
     if (!regime_has_2_ranges(mmu_idx)) {
+        select = 0;
         tsz = extract32(tcr, 0, 6);
         using64k = extract32(tcr, 14, 1);
         using16k = extract32(tcr, 15, 1);
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
             tbid = extract32(tcr, 29, 1);
         }
         epd = false;
-    } else if (!select) {
-        tsz = extract32(tcr, 0, 6);
-        epd = extract32(tcr, 7, 1);
-        using64k = extract32(tcr, 14, 1);
-        using16k = extract32(tcr, 15, 1);
-        tbi = extract64(tcr, 37, 1);
-        hpd = extract64(tcr, 41, 1);
-        tbid = extract64(tcr, 51, 1);
     } else {
-        int tg = extract32(tcr, 30, 2);
-        using16k = tg == 1;
-        using64k = tg == 3;
-        tsz = extract32(tcr, 16, 6);
-        epd = extract32(tcr, 23, 1);
-        tbi = extract64(tcr, 38, 1);
-        hpd = extract64(tcr, 42, 1);
-        tbid = extract64(tcr, 52, 1);
+        /*
+         * Bit 55 is always between the two regions, and is canonical for
+         * determining if address tagging is enabled.
+         */
+        select = extract64(va, 55, 1);
+        if (!select) {
+            tsz = extract32(tcr, 0, 6);
+            epd = extract32(tcr, 7, 1);
+            using64k = extract32(tcr, 14, 1);
+            using16k = extract32(tcr, 15, 1);
+            tbi = extract64(tcr, 37, 1);
+            hpd = extract64(tcr, 41, 1);
+            tbid = extract64(tcr, 51, 1);
+        } else {
+            int tg = extract32(tcr, 30, 2);
+            using16k = tg == 1;
+            using64k = tg == 3;
+            tsz = extract32(tcr, 16, 6);
+            epd = extract32(tcr, 23, 1);
+            tbi = extract64(tcr, 38, 1);
+            hpd = extract64(tcr, 42, 1);
+            tbid = extract64(tcr, 52, 1);
+        }
     }
     tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
     tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA */
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Now that aa64_va_parameters_both sets select based on the number
of ranges in the regime, the ttbr1_valid check is redundant.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200216194343.21331-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     TCR *tcr = regime_tcr(env, mmu_idx);
     int ap, ns, xn, pxn;
     uint32_t el = regime_el(env, mmu_idx);
-    bool ttbr1_valid;
     uint64_t descaddrmask;
     bool aarch64 = arm_el_is_aa64(env, el);
     bool guarded = false;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
         param = aa64_va_parameters(env, address, mmu_idx,
                                    access_type != MMU_INST_FETCH);
         level = 0;
-        ttbr1_valid = regime_has_2_ranges(mmu_idx);
         addrsize = 64 - 8 * param.tbi;
         inputsize = 64 - param.tsz;
     } else {
         param = aa32_va_parameters(env, address, mmu_idx);
         level = 1;
-        /* There is no TTBR1 for EL2 */
-        ttbr1_valid = (el != 2);
         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
         inputsize = addrsize - param.tsz;
     }
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     if (inputsize < addrsize) {
         target_ulong top_bits = sextract64(address, inputsize,
                                            addrsize - inputsize);
-        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
+        if (-top_bits != param.select) {
             /* The gap between the two regions is a Translation fault */
             fault_type = ARMFault_Translation;
             goto do_fault;
--
2.20.1

1
For external aborts, we will want to be able to specify the EA
1
From: Richard Henderson <richard.henderson@linaro.org>
2
(external abort type) bit in the syndrome field. Allow callers of
3
deliver_fault() to do that by adding a field to ARMMMUFaultInfo which
4
we use when constructing the syndrome values.
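A minimal usage sketch (illustrative only; the real call sites are in op_helper.c): a caller reporting an external abort fills in the new field before the fault is delivered, and the syndrome construction picks it up from there.

ARMMMUFaultInfo fi = {};
fi.ea = true;   /* report the fault as an external abort in the syndrome */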
5
2
3
For the purpose of rebuild_hflags_a64, we do not need to compute
4
all of the va parameters, only tbi. Moreover, we can compute them
5
in a form that is more useful for storing in hflags.
6
7
This eliminates the need for aa64_va_parameter_both, so fold that
8
in to aa64_va_parameter. The remaining calls to aa64_va_parameter
9
are in get_phys_addr_lpae and in pauth_helper.c.
10
11
This reduces the total cpu consumption of aa64_va_parameter in a
12
kernel boot plus a kvm guest kernel boot from 3% to 0.5%.
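A simplified sketch of the approach (the helper name here is invented; the real helpers below also special-case Stage 2, where TBI reads as 0): with two VA ranges, TBI1:TBI0 sit at TCR bits [38:37] and can be returned together as a two-bit field that is later masked by the TBID bits and indexed by 'select'; a single-range regime has only TCR bit 20. extract64() is QEMU's bitfield helper from "qemu/bitops.h".

static int tbi_bits_sketch(uint64_t tcr, bool two_ranges)
{
    return two_ranges ? extract64(tcr, 37, 2)    /* TBI1:TBI0 */
                      : extract64(tcr, 20, 1);   /* TBI */
}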
13
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20200216194343.21331-5-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
---
18
---
10
target/arm/internals.h | 2 ++
19
target/arm/internals.h | 3 --
11
target/arm/op_helper.c | 10 +++++-----
20
target/arm/helper.c | 68 +++++++++++++++++++++++-------------------
12
2 files changed, 7 insertions(+), 5 deletions(-)
21
2 files changed, 37 insertions(+), 34 deletions(-)
13
22
14
diff --git a/target/arm/internals.h b/target/arm/internals.h
23
diff --git a/target/arm/internals.h b/target/arm/internals.h
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/internals.h
25
--- a/target/arm/internals.h
17
+++ b/target/arm/internals.h
26
+++ b/target/arm/internals.h
18
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu);
27
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
19
* @s2addr: Address that caused a fault at stage 2
28
unsigned tsz : 8;
20
* @stage2: True if we faulted at stage 2
29
unsigned select : 1;
21
* @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
30
bool tbi : 1;
22
+ * @ea: True if we should set the EA (external abort type) bit in syndrome
31
- bool tbid : 1;
23
*/
32
bool epd : 1;
24
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
33
bool hpd : 1;
25
struct ARMMMUFaultInfo {
34
bool using16k : 1;
26
target_ulong s2addr;
35
bool using64k : 1;
27
bool stage2;
36
} ARMVAParameters;
28
bool s1ptw;
37
29
+ bool ea;
38
-ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
30
};
39
- ARMMMUIdx mmu_idx);
31
40
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
32
/* Do a page table walk and add page to TLB if possible */
41
ARMMMUIdx mmu_idx, bool data);
33
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
42
43
diff --git a/target/arm/helper.c b/target/arm/helper.c
34
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/op_helper.c
45
--- a/target/arm/helper.c
36
+++ b/target/arm/op_helper.c
46
+++ b/target/arm/helper.c
37
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
47
@@ -XXX,XX +XXX,XX @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
38
48
}
39
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
49
#endif /* !CONFIG_USER_ONLY */
40
unsigned int target_el,
50
41
- bool same_el,
51
-ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
42
+ bool same_el, bool ea,
52
- ARMMMUIdx mmu_idx)
43
bool s1ptw, bool is_write,
53
+static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
44
int fsc)
54
+{
55
+ if (regime_has_2_ranges(mmu_idx)) {
56
+ return extract64(tcr, 37, 2);
57
+ } else if (mmu_idx == ARMMMUIdx_Stage2) {
58
+ return 0; /* VTCR_EL2 */
59
+ } else {
60
+ return extract32(tcr, 20, 1);
61
+ }
62
+}
63
+
64
+static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
65
+{
66
+ if (regime_has_2_ranges(mmu_idx)) {
67
+ return extract64(tcr, 51, 2);
68
+ } else if (mmu_idx == ARMMMUIdx_Stage2) {
69
+ return 0; /* VTCR_EL2 */
70
+ } else {
71
+ return extract32(tcr, 29, 1);
72
+ }
73
+}
74
+
75
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
76
+ ARMMMUIdx mmu_idx, bool data)
45
{
77
{
46
@@ -XXX,XX +XXX,XX @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
78
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
47
*/
79
- bool tbi, tbid, epd, hpd, using16k, using64k;
48
if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
80
- int select, tsz;
49
syn = syn_data_abort_no_iss(same_el,
81
+ bool epd, hpd, using16k, using64k;
50
- 0, 0, s1ptw, is_write, fsc);
82
+ int select, tsz, tbi;
51
+ ea, 0, s1ptw, is_write, fsc);
83
84
if (!regime_has_2_ranges(mmu_idx)) {
85
select = 0;
86
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
87
using16k = extract32(tcr, 15, 1);
88
if (mmu_idx == ARMMMUIdx_Stage2) {
89
/* VTCR_EL2 */
90
- tbi = tbid = hpd = false;
91
+ hpd = false;
92
} else {
93
- tbi = extract32(tcr, 20, 1);
94
hpd = extract32(tcr, 24, 1);
95
- tbid = extract32(tcr, 29, 1);
96
}
97
epd = false;
52
} else {
98
} else {
53
/* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
99
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
54
* syndrome created at translation time.
100
epd = extract32(tcr, 7, 1);
55
@@ -XXX,XX +XXX,XX @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
101
using64k = extract32(tcr, 14, 1);
56
*/
102
using16k = extract32(tcr, 15, 1);
57
syn = syn_data_abort_with_iss(same_el,
103
- tbi = extract64(tcr, 37, 1);
58
0, 0, 0, 0, 0,
104
hpd = extract64(tcr, 41, 1);
59
- 0, 0, s1ptw, is_write, fsc,
105
- tbid = extract64(tcr, 51, 1);
60
+ ea, 0, s1ptw, is_write, fsc,
106
} else {
61
false);
107
int tg = extract32(tcr, 30, 2);
62
/* Merge the runtime syndrome with the template syndrome. */
108
using16k = tg == 1;
63
syn |= template_syn;
109
using64k = tg == 3;
64
@@ -XXX,XX +XXX,XX @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
110
tsz = extract32(tcr, 16, 6);
111
epd = extract32(tcr, 23, 1);
112
- tbi = extract64(tcr, 38, 1);
113
hpd = extract64(tcr, 42, 1);
114
- tbid = extract64(tcr, 52, 1);
115
}
65
}
116
}
66
117
tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */
67
if (access_type == MMU_INST_FETCH) {
118
tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
68
- syn = syn_insn_abort(same_el, 0, fi->s1ptw, fsc);
119
69
+ syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
120
+ /* Present TBI as a composite with TBID. */
70
exc = EXCP_PREFETCH_ABORT;
121
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
71
} else {
122
+ if (!data) {
72
syn = merge_syn_data_abort(env->exception.syndrome, target_el,
123
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
73
- same_el, fi->s1ptw,
124
+ }
74
+ same_el, fi->ea, fi->s1ptw,
125
+ tbi = (tbi >> select) & 1;
75
access_type == MMU_DATA_STORE,
126
+
76
fsc);
127
return (ARMVAParameters) {
77
if (access_type == MMU_DATA_STORE
128
.tsz = tsz,
129
.select = select,
130
.tbi = tbi,
131
- .tbid = tbid,
132
.epd = epd,
133
.hpd = hpd,
134
.using16k = using16k,
135
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
136
};
137
}
138
139
-ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
140
- ARMMMUIdx mmu_idx, bool data)
141
-{
142
- ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
143
-
144
- /* Present TBI as a composite with TBID. */
145
- ret.tbi &= (data || !ret.tbid);
146
- return ret;
147
-}
148
-
149
#ifndef CONFIG_USER_ONLY
150
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
151
ARMMMUIdx mmu_idx)
152
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
153
{
154
uint32_t flags = rebuild_hflags_aprofile(env);
155
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
156
- ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
157
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
158
uint64_t sctlr;
159
int tbii, tbid;
160
161
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
162
163
/* Get control bits for tagged addresses. */
164
- if (regime_has_2_ranges(mmu_idx)) {
165
- ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
166
- tbid = (p1.tbi << 1) | p0.tbi;
167
- tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
168
- } else {
169
- tbid = p0.tbi;
170
- tbii = tbid & !p0.tbid;
171
- }
172
+ tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
173
+ tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
174
175
flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
176
flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
78
--
177
--
79
2.7.4
178
2.20.1
80
179
81
180
1
In the ARM get_phys_addr() code, switch to using the MMUAccessType
1
Enforce a convention that an isar_feature function that tests a
2
enum and its MMU_* values rather than int and literal 0/1/2.
2
32-bit ID register always has _aa32_ in its name, and one that
3
tests a 64-bit ID register always has _aa64_ in its name.
4
We already follow this except for three cases: thumb_div,
5
arm_div and jazelle, which all need _aa32_ adding.
3
6
7
(As noted in the comment, isar_feature_aa32_fp16_arith()
8
is an exception in that it currently tests ID_AA64PFR0_EL1,
9
but will switch to MVFR1 once we've properly implemented
10
FP16 for AArch32.)
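With the convention applied, call sites read like this (illustrative snippet, not taken from the patch):

if (cpu_isar_feature(aa32_arm_div, cpu)) {
    /* the A32 SDIV/UDIV instructions are implemented */
}
if (cpu_isar_feature(aa64_pan, cpu)) {
    /* the AArch64 PAN extension is implemented */
}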
11
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
15
Message-id: 20200214175116.9164-2-peter.maydell@linaro.org
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 1501692241-23310-2-git-send-email-peter.maydell@linaro.org
9
---
16
---
10
target/arm/internals.h | 3 ++-
17
target/arm/cpu.h | 13 ++++++++++---
11
target/arm/helper.c | 30 +++++++++++++++---------------
18
target/arm/internals.h | 2 +-
12
2 files changed, 17 insertions(+), 16 deletions(-)
19
linux-user/elfload.c | 4 ++--
20
target/arm/cpu.c | 6 ++++--
21
target/arm/helper.c | 2 +-
22
target/arm/translate.c | 6 +++---
23
6 files changed, 21 insertions(+), 12 deletions(-)
13
24
25
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/cpu.h
28
+++ b/target/arm/cpu.h
29
@@ -XXX,XX +XXX,XX @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
30
/* Shared between translate-sve.c and sve_helper.c. */
31
extern const uint64_t pred_esz_masks[4];
32
33
+/*
34
+ * Naming convention for isar_feature functions:
35
+ * Functions which test 32-bit ID registers should have _aa32_ in
36
+ * their name. Functions which test 64-bit ID registers should have
37
+ * _aa64_ in their name.
38
+ */
39
+
40
/*
41
* 32-bit feature tests via id registers.
42
*/
43
-static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
44
+static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id)
45
{
46
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
47
}
48
49
-static inline bool isar_feature_arm_div(const ARMISARegisters *id)
50
+static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id)
51
{
52
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
53
}
54
55
-static inline bool isar_feature_jazelle(const ARMISARegisters *id)
56
+static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id)
57
{
58
return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
59
}
14
diff --git a/target/arm/internals.h b/target/arm/internals.h
60
diff --git a/target/arm/internals.h b/target/arm/internals.h
15
index XXXXXXX..XXXXXXX 100644
61
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/internals.h
62
--- a/target/arm/internals.h
17
+++ b/target/arm/internals.h
63
+++ b/target/arm/internals.h
18
@@ -XXX,XX +XXX,XX @@ struct ARMMMUFaultInfo {
64
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
19
};
65
if ((features >> ARM_FEATURE_THUMB2) & 1) {
20
66
valid |= CPSR_IT;
21
/* Do a page table walk and add page to TLB if possible */
67
}
22
-bool arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx,
68
- if (isar_feature_jazelle(id)) {
23
+bool arm_tlb_fill(CPUState *cpu, vaddr address,
69
+ if (isar_feature_aa32_jazelle(id)) {
24
+ MMUAccessType access_type, int mmu_idx,
70
valid |= CPSR_J;
25
uint32_t *fsr, ARMMMUFaultInfo *fi);
71
}
26
72
if (isar_feature_aa32_pan(id)) {
27
/* Return true if the stage 1 translation regime is using LPAE format page
73
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/linux-user/elfload.c
76
+++ b/linux-user/elfload.c
77
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
78
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
79
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
80
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
81
- GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
82
- GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
83
+ GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
84
+ GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
85
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
86
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
87
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
88
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/cpu.c
91
+++ b/target/arm/cpu.c
92
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
93
* Presence of EL2 itself is ARM_FEATURE_EL2, and of the
94
* Security Extensions is ARM_FEATURE_EL3.
95
*/
96
- assert(!tcg_enabled() || no_aa32 || cpu_isar_feature(arm_div, cpu));
97
+ assert(!tcg_enabled() || no_aa32 ||
98
+ cpu_isar_feature(aa32_arm_div, cpu));
99
set_feature(env, ARM_FEATURE_LPAE);
100
set_feature(env, ARM_FEATURE_V7);
101
}
102
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
103
if (arm_feature(env, ARM_FEATURE_V6)) {
104
set_feature(env, ARM_FEATURE_V5);
105
if (!arm_feature(env, ARM_FEATURE_M)) {
106
- assert(!tcg_enabled() || no_aa32 || cpu_isar_feature(jazelle, cpu));
107
+ assert(!tcg_enabled() || no_aa32 ||
108
+ cpu_isar_feature(aa32_jazelle, cpu));
109
set_feature(env, ARM_FEATURE_AUXCR);
110
}
111
}
28
diff --git a/target/arm/helper.c b/target/arm/helper.c
112
diff --git a/target/arm/helper.c b/target/arm/helper.c
29
index XXXXXXX..XXXXXXX 100644
113
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/helper.c
114
--- a/target/arm/helper.c
31
+++ b/target/arm/helper.c
115
+++ b/target/arm/helper.c
116
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
117
if (arm_feature(env, ARM_FEATURE_LPAE)) {
118
define_arm_cp_regs(cpu, lpae_cp_reginfo);
119
}
120
- if (cpu_isar_feature(jazelle, cpu)) {
121
+ if (cpu_isar_feature(aa32_jazelle, cpu)) {
122
define_arm_cp_regs(cpu, jazelle_regs);
123
}
124
/* Slightly awkwardly, the OMAP and StrongARM cores need all of
125
diff --git a/target/arm/translate.c b/target/arm/translate.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/translate.c
128
+++ b/target/arm/translate.c
32
@@ -XXX,XX +XXX,XX @@
129
@@ -XXX,XX +XXX,XX @@
33
130
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
34
#ifndef CONFIG_USER_ONLY
131
/* currently all emulated v5 cores are also v5TE, so don't bother */
35
static bool get_phys_addr(CPUARMState *env, target_ulong address,
132
#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
36
- int access_type, ARMMMUIdx mmu_idx,
133
-#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
37
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
134
+#define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
38
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
135
#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
39
target_ulong *page_size, uint32_t *fsr,
136
#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
40
ARMMMUFaultInfo *fi);
137
#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
41
138
@@ -XXX,XX +XXX,XX @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u)
42
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
139
TCGv_i32 t1, t2;
43
- int access_type, ARMMMUIdx mmu_idx,
140
44
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
141
if (s->thumb
45
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
142
- ? !dc_isar_feature(thumb_div, s)
46
target_ulong *page_size_ptr, uint32_t *fsr,
143
- : !dc_isar_feature(arm_div, s)) {
47
ARMMMUFaultInfo *fi);
144
+ ? !dc_isar_feature(aa32_thumb_div, s)
48
@@ -XXX,XX +XXX,XX @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
145
+ : !dc_isar_feature(aa32_arm_div, s)) {
49
}
146
return false;
50
51
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
52
- int access_type, ARMMMUIdx mmu_idx)
53
+ MMUAccessType access_type, ARMMMUIdx mmu_idx)
54
{
55
hwaddr phys_addr;
56
target_ulong page_size;
57
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
58
59
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
60
{
61
- int access_type = ri->opc2 & 1;
62
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
63
uint64_t par64;
64
ARMMMUIdx mmu_idx;
65
int el = arm_current_el(env);
66
@@ -XXX,XX +XXX,XX @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
67
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
68
uint64_t value)
69
{
70
- int access_type = ri->opc2 & 1;
71
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
72
uint64_t par64;
73
74
par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
75
@@ -XXX,XX +XXX,XX @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
76
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
77
uint64_t value)
78
{
79
- int access_type = ri->opc2 & 1;
80
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
81
ARMMMUIdx mmu_idx;
82
int secure = arm_is_secure_below_el3(env);
83
84
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
85
}
86
87
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
88
- int access_type, ARMMMUIdx mmu_idx,
89
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
90
hwaddr *phys_ptr, int *prot,
91
target_ulong *page_size, uint32_t *fsr,
92
ARMMMUFaultInfo *fi)
93
@@ -XXX,XX +XXX,XX @@ do_fault:
94
}
95
96
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
97
- int access_type, ARMMMUIdx mmu_idx,
98
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
99
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
100
target_ulong *page_size, uint32_t *fsr,
101
ARMMMUFaultInfo *fi)
102
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
103
if (pxn && !regime_is_user(env, mmu_idx)) {
104
xn = 1;
105
}
106
- if (xn && access_type == 2)
107
+ if (xn && access_type == MMU_INST_FETCH)
108
goto do_fault;
109
110
if (arm_feature(env, ARM_FEATURE_V6K) &&
111
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
112
}
113
114
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
115
- int access_type, ARMMMUIdx mmu_idx,
116
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
117
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
118
target_ulong *page_size_ptr, uint32_t *fsr,
119
ARMMMUFaultInfo *fi)
120
@@ -XXX,XX +XXX,XX @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
121
}
122
123
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
124
- int access_type, ARMMMUIdx mmu_idx,
125
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
126
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
127
{
128
ARMCPU *cpu = arm_env_get_cpu(env);
129
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
130
}
131
132
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
133
- int access_type, ARMMMUIdx mmu_idx,
134
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
135
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
136
{
137
int n;
138
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
139
return true;
140
}
147
}
141
148
142
- if (access_type == 2) {
143
+ if (access_type == MMU_INST_FETCH) {
144
mask = env->cp15.pmsav5_insn_ap;
145
} else {
146
mask = env->cp15.pmsav5_data_ap;
147
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
148
* @fsr: set to the DFSR/IFSR value on failure
149
*/
150
static bool get_phys_addr(CPUARMState *env, target_ulong address,
151
- int access_type, ARMMMUIdx mmu_idx,
152
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
153
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
154
target_ulong *page_size, uint32_t *fsr,
155
ARMMMUFaultInfo *fi)
156
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
157
* fsr with ARM DFSR/IFSR fault register format value on failure.
158
*/
159
bool arm_tlb_fill(CPUState *cs, vaddr address,
160
- int access_type, int mmu_idx, uint32_t *fsr,
161
+ MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
162
ARMMMUFaultInfo *fi)
163
{
164
ARMCPU *cpu = ARM_CPU(cs);
165
--
149
--
166
2.7.4
150
2.20.1
167
151
168
152
1
When we switched our handling of exception exit to detect
1
In take_aarch32_exception(), we know we are dealing with a CPU that
2
the magic addresses at translate time rather than via
2
has AArch32, so the right isar_feature test is aa32_pan, not aa64_pan.
3
a do_unassigned_access hook, we forgot to update a
4
comment; correct the omission.
5
3
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 1501692241-23310-8-git-send-email-peter.maydell@linaro.org
6
Message-id: 20200214175116.9164-3-peter.maydell@linaro.org
10
---
7
---
11
target/arm/helper.c | 2 +-
8
target/arm/helper.c | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
9
1 file changed, 1 insertion(+), 1 deletion(-)
13
10
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
11
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
13
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
14
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
15
@@ -XXX,XX +XXX,XX @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
19
bool rettobase = false;
16
env->elr_el[2] = env->regs[15];
20
17
} else {
21
/* We can only get here from an EXCP_EXCEPTION_EXIT, and
18
/* CPSR.PAN is normally preserved unless... */
22
- * arm_v7m_do_unassigned_access() enforces the architectural rule
19
- if (cpu_isar_feature(aa64_pan, env_archcpu(env))) {
23
+ * gen_bx_excret() enforces the architectural rule
20
+ if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
24
* that jumps to magic addresses don't have magic behaviour unless
21
switch (new_el) {
25
* we're in Handler mode (compare pseudocode BXWritePC()).
22
case 3:
26
*/
23
if (!arm_is_secure_below_el3(env)) {
27
--
24
--
28
2.7.4
25
2.20.1
29
26
30
27
1
The ARMv7M architecture specifies that most of the addresses in the
1
Our current usage of the isar_feature feature tests almost always
2
PPB region (which includes the NVIC, systick and system registers)
2
uses an _aa32_ test when the code path is known to be AArch32
3
are not accessible to unprivileged accesses, which should
3
specific and an _aa64_ test when the code path is known to be
4
BusFault with a few exceptions:
4
AArch64 specific. There is just one exception: in the vfp_set_fpscr
5
* the STIR is configurably user-accessible
5
helper we check aa64_fp16 to determine whether the FZ16 bit in
6
* the ITM (which we don't implement at all) is always
6
the FP(S)CR exists, but this code is also used for AArch32.
7
user-accessible
7
There are other places in future where we're likely to want
8
a general "does this feature exist for either AArch32 or
9
AArch64" check (typically where architecturally the feature exists
10
for both CPU states if it exists at all, but the CPU might be
11
AArch32-only or AArch64-only, and so only have one set of ID
12
registers).
8
13
9
Implement this by switching the register access functions
14
Introduce a new category of isar_feature_* functions:
10
to the _with_attrs scheme that lets us distinguish user
15
isar_feature_any_foo() should be tested when what we want to
11
mode accesses.
16
know is "does this feature exist for either AArch32 or AArch64",
17
and always returns the logical OR of isar_feature_aa32_foo()
18
and isar_feature_aa64_foo().
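An illustrative use of the new category (this snippet is not from the patch, though any_fp16 and env_archcpu() are): a caller that only cares whether the feature exists at all no longer has to pick between the aa32 and aa64 flavours by hand.

if (cpu_isar_feature(any_fp16, env_archcpu(env))) {
    /* FZ16 is a real bit in the FP(S)CR */
}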
12
19
13
This allows us to pull the handling of the CCR.USERSETMPEND
20
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
flag up to the level where we can make it generate a BusFault
21
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
as it should for non-permitted accesses.
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Message-id: 20200214175116.9164-4-peter.maydell@linaro.org
24
---
25
target/arm/cpu.h | 19 ++++++++++++++++++-
26
target/arm/vfp_helper.c | 2 +-
27
2 files changed, 19 insertions(+), 2 deletions(-)
16
28
17
Note that until the core ARM CPU code implements turning
29
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
MEMTX_ERROR into a BusFault the registers will continue to
19
act as RAZ/WI to user accesses.
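A bare-bones sketch of the _with_attrs scheme (the function below is invented for illustration and is not the NVIC code itself): the accessor sees the transaction attributes, so an unprivileged access can be failed with MEMTX_ERROR instead of being silently completed.

static MemTxResult ppb_read_with_attrs(void *opaque, hwaddr addr,
                                       uint64_t *data, unsigned size,
                                       MemTxAttrs attrs)
{
    if (attrs.user) {
        return MEMTX_ERROR;   /* unprivileged access: should BusFault */
    }
    *data = 0;                /* privileged access handled as usual */
    return MEMTX_OK;
}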
20
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
23
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
24
Message-id: 1501692241-23310-16-git-send-email-peter.maydell@linaro.org
25
---
26
hw/intc/armv7m_nvic.c | 58 ++++++++++++++++++++++++++++++++++++---------------
27
1 file changed, 41 insertions(+), 17 deletions(-)
28
29
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
30
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/intc/armv7m_nvic.c
31
--- a/target/arm/cpu.h
32
+++ b/hw/intc/armv7m_nvic.c
32
+++ b/target/arm/cpu.h
33
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
33
@@ -XXX,XX +XXX,XX @@ extern const uint64_t pred_esz_masks[4];
34
}
34
* Naming convention for isar_feature functions:
35
case 0xf00: /* Software Triggered Interrupt Register */
35
* Functions which test 32-bit ID registers should have _aa32_ in
36
{
36
* their name. Functions which test 64-bit ID registers should have
37
- /* user mode can only write to STIR if CCR.USERSETMPEND permits it */
37
- * _aa64_ in their name.
38
int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
38
+ * _aa64_ in their name. These must only be used in code where we
39
- if (excnum < s->num_irq &&
39
+ * know for certain that the CPU has AArch32 or AArch64 respectively
40
- (arm_current_el(&cpu->env) ||
40
+ * or where the correct answer for a CPU which doesn't implement that
41
- (cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK))) {
41
+ * CPU state is "false" (eg when generating A32 or A64 code, if adding
42
+ if (excnum < s->num_irq) {
42
+ * system registers that are specific to that CPU state, for "should
43
armv7m_nvic_set_pending(s, excnum);
43
+ * we let this system register bit be set" tests where the 32-bit
44
}
44
+ * flavour of the register doesn't have the bit, and so on).
45
break;
45
+ * Functions which simply ask "does this feature exist at all" have
46
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
46
+ * _any_ in their name, and always return the logical OR of the _aa64_
47
}
47
+ * and the _aa32_ function.
48
*/
49
50
/*
51
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
52
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
48
}
53
}
49
54
50
-static uint64_t nvic_sysreg_read(void *opaque, hwaddr addr,
55
+/*
51
- unsigned size)
56
+ * Feature tests for "does this exist in either 32-bit or 64-bit?"
52
+static bool nvic_user_access_ok(NVICState *s, hwaddr offset)
57
+ */
58
+static inline bool isar_feature_any_fp16(const ARMISARegisters *id)
53
+{
59
+{
54
+ /* Return true if unprivileged access to this register is permitted. */
60
+ return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id);
55
+ switch (offset) {
56
+ case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
57
+ return s->cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK;
58
+ default:
59
+ /* All other user accesses cause a BusFault unconditionally */
60
+ return false;
61
+ }
62
+}
61
+}
63
+
62
+
64
+static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
63
/*
65
+ uint64_t *data, unsigned size,
64
* Forward to the above feature tests given an ARMCPU pointer.
66
+ MemTxAttrs attrs)
65
*/
66
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/vfp_helper.c
69
+++ b/target/arm/vfp_helper.c
70
@@ -XXX,XX +XXX,XX @@ uint32_t vfp_get_fpscr(CPUARMState *env)
71
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
67
{
72
{
68
NVICState *s = (NVICState *)opaque;
73
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
69
uint32_t offset = addr;
74
- if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
70
unsigned i, startvec, end;
75
+ if (!cpu_isar_feature(any_fp16, env_archcpu(env))) {
71
uint32_t val;
76
val &= ~FPCR_FZ16;
72
73
+ if (attrs.user && !nvic_user_access_ok(s, addr)) {
74
+ /* Generate BusFault for unprivileged accesses */
75
+ return MEMTX_ERROR;
76
+ }
77
+
78
switch (offset) {
79
/* reads of set and clear both return the status */
80
case 0x100 ... 0x13f: /* NVIC Set enable */
81
@@ -XXX,XX +XXX,XX @@ static uint64_t nvic_sysreg_read(void *opaque, hwaddr addr,
82
}
77
}
83
78
84
trace_nvic_sysreg_read(addr, val, size);
85
- return val;
86
+ *data = val;
87
+ return MEMTX_OK;
88
}
89
90
-static void nvic_sysreg_write(void *opaque, hwaddr addr,
91
- uint64_t value, unsigned size)
92
+static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
93
+ uint64_t value, unsigned size,
94
+ MemTxAttrs attrs)
95
{
96
NVICState *s = (NVICState *)opaque;
97
uint32_t offset = addr;
98
@@ -XXX,XX +XXX,XX @@ static void nvic_sysreg_write(void *opaque, hwaddr addr,
99
100
trace_nvic_sysreg_write(addr, value, size);
101
102
+ if (attrs.user && !nvic_user_access_ok(s, addr)) {
103
+ /* Generate BusFault for unprivileged accesses */
104
+ return MEMTX_ERROR;
105
+ }
106
+
107
switch (offset) {
108
case 0x100 ... 0x13f: /* NVIC Set enable */
109
offset += 0x80;
110
@@ -XXX,XX +XXX,XX @@ static void nvic_sysreg_write(void *opaque, hwaddr addr,
111
}
112
}
113
nvic_irq_update(s);
114
- return;
115
+ return MEMTX_OK;
116
case 0x200 ... 0x23f: /* NVIC Set pend */
117
/* the special logic in armv7m_nvic_set_pending()
118
* is not needed since IRQs are never escalated
119
@@ -XXX,XX +XXX,XX @@ static void nvic_sysreg_write(void *opaque, hwaddr addr,
120
}
121
}
122
nvic_irq_update(s);
123
- return;
124
+ return MEMTX_OK;
125
case 0x300 ... 0x33f: /* NVIC Active */
126
- return; /* R/O */
127
+ return MEMTX_OK; /* R/O */
128
case 0x400 ... 0x5ef: /* NVIC Priority */
129
startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
130
131
@@ -XXX,XX +XXX,XX @@ static void nvic_sysreg_write(void *opaque, hwaddr addr,
132
set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
133
}
134
nvic_irq_update(s);
135
- return;
136
+ return MEMTX_OK;
137
case 0xd18 ... 0xd23: /* System Handler Priority. */
138
for (i = 0; i < size; i++) {
139
unsigned hdlidx = (offset - 0xd14) + i;
140
set_prio(s, hdlidx, (value >> (i * 8)) & 0xff);
141
}
142
nvic_irq_update(s);
143
- return;
144
+ return MEMTX_OK;
145
}
146
if (size == 4) {
147
nvic_writel(s, offset, value);
148
- return;
149
+ return MEMTX_OK;
150
}
151
qemu_log_mask(LOG_GUEST_ERROR,
152
"NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
153
+ /* This is UNPREDICTABLE; treat as RAZ/WI */
154
+ return MEMTX_OK;
155
}
156
157
static const MemoryRegionOps nvic_sysreg_ops = {
158
- .read = nvic_sysreg_read,
159
- .write = nvic_sysreg_write,
160
+ .read_with_attrs = nvic_sysreg_read,
161
+ .write_with_attrs = nvic_sysreg_write,
162
.endianness = DEVICE_NATIVE_ENDIAN,
163
};
164
165
--
79
--
166
2.7.4
80
2.20.1
167
81
168
82
1
Remove the comment that claims that some MPU_CTRL bits are stored
1
Instead of open-coding "ARM_FEATURE_AARCH64 ? aa64_predinv: aa32_predinv",
2
in sctlr_el[1]. This has never been true since MPU_CTRL was added
2
define and use an any_predinv isar_feature test function.
3
in commit 29c483a50607 -- the comment is a leftover from
4
Michael Davidsaver's original implementation, which I modified
5
not to use sctlr_el[1]; I forgot to delete the comment then.
6
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Message-id: 20200214175116.9164-5-peter.maydell@linaro.org
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 1501692241-23310-7-git-send-email-peter.maydell@linaro.org
11
---
8
---
12
target/arm/cpu.h | 2 +-
9
target/arm/cpu.h | 5 +++++
13
1 file changed, 1 insertion(+), 1 deletion(-)
10
target/arm/helper.c | 9 +--------
11
2 files changed, 6 insertions(+), 8 deletions(-)
14
12
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
13
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
15
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
16
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
17
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_fp16(const ARMISARegisters *id)
20
uint32_t dfsr; /* Debug Fault Status Register */
18
return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id);
21
uint32_t mmfar; /* MemManage Fault Address */
19
}
22
uint32_t bfar; /* BusFault Address */
20
23
- unsigned mpu_ctrl; /* MPU_CTRL (some bits kept in sctlr_el[1]) */
21
+static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
24
+ unsigned mpu_ctrl; /* MPU_CTRL */
22
+{
25
int exception;
23
+ return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
26
} v7m;
24
+}
25
+
26
/*
27
* Forward to the above feature tests given an ARMCPU pointer.
28
*/
29
diff --git a/target/arm/helper.c b/target/arm/helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/helper.c
32
+++ b/target/arm/helper.c
33
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
34
#endif /*CONFIG_USER_ONLY*/
35
#endif
36
37
- /*
38
- * While all v8.0 cpus support aarch64, QEMU does have configurations
39
- * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
40
- * which will set ID_ISAR6.
41
- */
42
- if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
43
- ? cpu_isar_feature(aa64_predinv, cpu)
44
- : cpu_isar_feature(aa32_predinv, cpu)) {
45
+ if (cpu_isar_feature(any_predinv, cpu)) {
46
define_arm_cp_regs(cpu, predinv_reginfo);
47
}
27
48
28
--
49
--
29
2.7.4
50
2.20.1
30
51
31
52
New patch
1
1
Pull the code that defines the various PMU registers out
2
into its own function, matching the pattern we have
3
already for the debug registers.
4
5
Apart from one style fix to a multi-line comment, this
6
is purely movement of code with no changes to it.
7
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 20200214175116.9164-6-peter.maydell@linaro.org
12
---
13
target/arm/helper.c | 158 +++++++++++++++++++++++---------------------
14
1 file changed, 82 insertions(+), 76 deletions(-)
15
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper.c
19
+++ b/target/arm/helper.c
20
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
21
}
22
}
23
24
+static void define_pmu_regs(ARMCPU *cpu)
25
+{
26
+ /*
27
+ * v7 performance monitor control register: same implementor
28
+ * field as main ID register, and we implement four counters in
29
+ * addition to the cycle count register.
30
+ */
31
+ unsigned int i, pmcrn = 4;
32
+ ARMCPRegInfo pmcr = {
33
+ .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
34
+ .access = PL0_RW,
35
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
36
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
37
+ .accessfn = pmreg_access, .writefn = pmcr_write,
38
+ .raw_writefn = raw_write,
39
+ };
40
+ ARMCPRegInfo pmcr64 = {
41
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
42
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
43
+ .access = PL0_RW, .accessfn = pmreg_access,
44
+ .type = ARM_CP_IO,
45
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
46
+ .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
47
+ .writefn = pmcr_write, .raw_writefn = raw_write,
48
+ };
49
+ define_one_arm_cp_reg(cpu, &pmcr);
50
+ define_one_arm_cp_reg(cpu, &pmcr64);
51
+ for (i = 0; i < pmcrn; i++) {
52
+ char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
53
+ char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
54
+ char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
55
+ char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
56
+ ARMCPRegInfo pmev_regs[] = {
57
+ { .name = pmevcntr_name, .cp = 15, .crn = 14,
58
+ .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
59
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
60
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
61
+ .accessfn = pmreg_access },
62
+ { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
63
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
64
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
65
+ .type = ARM_CP_IO,
66
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
67
+ .raw_readfn = pmevcntr_rawread,
68
+ .raw_writefn = pmevcntr_rawwrite },
69
+ { .name = pmevtyper_name, .cp = 15, .crn = 14,
70
+ .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
71
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
72
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
73
+ .accessfn = pmreg_access },
74
+ { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
75
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
76
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
77
+ .type = ARM_CP_IO,
78
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
79
+ .raw_writefn = pmevtyper_rawwrite },
80
+ REGINFO_SENTINEL
81
+ };
82
+ define_arm_cp_regs(cpu, pmev_regs);
83
+ g_free(pmevcntr_name);
84
+ g_free(pmevcntr_el0_name);
85
+ g_free(pmevtyper_name);
86
+ g_free(pmevtyper_el0_name);
87
+ }
88
+ if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
89
+ FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
90
+ ARMCPRegInfo v81_pmu_regs[] = {
91
+ { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
92
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
93
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
94
+ .resetvalue = extract64(cpu->pmceid0, 32, 32) },
95
+ { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
96
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
97
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
98
+ .resetvalue = extract64(cpu->pmceid1, 32, 32) },
99
+ REGINFO_SENTINEL
100
+ };
101
+ define_arm_cp_regs(cpu, v81_pmu_regs);
102
+ }
103
+}
104
+
105
/* We don't know until after realize whether there's a GICv3
106
* attached, and that is what registers the gicv3 sysregs.
107
* So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
108
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
109
define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
110
}
111
if (arm_feature(env, ARM_FEATURE_V7)) {
112
- /* v7 performance monitor control register: same implementor
113
- * field as main ID register, and we implement four counters in
114
- * addition to the cycle count register.
115
- */
116
- unsigned int i, pmcrn = 4;
117
- ARMCPRegInfo pmcr = {
118
- .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
119
- .access = PL0_RW,
120
- .type = ARM_CP_IO | ARM_CP_ALIAS,
121
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
122
- .accessfn = pmreg_access, .writefn = pmcr_write,
123
- .raw_writefn = raw_write,
124
- };
125
- ARMCPRegInfo pmcr64 = {
126
- .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
127
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
128
- .access = PL0_RW, .accessfn = pmreg_access,
129
- .type = ARM_CP_IO,
130
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
131
- .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
132
- .writefn = pmcr_write, .raw_writefn = raw_write,
133
- };
134
- define_one_arm_cp_reg(cpu, &pmcr);
135
- define_one_arm_cp_reg(cpu, &pmcr64);
136
- for (i = 0; i < pmcrn; i++) {
137
- char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
138
- char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
139
- char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
140
- char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
141
- ARMCPRegInfo pmev_regs[] = {
142
- { .name = pmevcntr_name, .cp = 15, .crn = 14,
143
- .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
144
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
145
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
146
- .accessfn = pmreg_access },
147
- { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
148
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
149
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
150
- .type = ARM_CP_IO,
151
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
152
- .raw_readfn = pmevcntr_rawread,
153
- .raw_writefn = pmevcntr_rawwrite },
154
- { .name = pmevtyper_name, .cp = 15, .crn = 14,
155
- .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
156
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
157
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
158
- .accessfn = pmreg_access },
159
- { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
160
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
161
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
162
- .type = ARM_CP_IO,
163
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
164
- .raw_writefn = pmevtyper_rawwrite },
165
- REGINFO_SENTINEL
166
- };
167
- define_arm_cp_regs(cpu, pmev_regs);
168
- g_free(pmevcntr_name);
169
- g_free(pmevcntr_el0_name);
170
- g_free(pmevtyper_name);
171
- g_free(pmevtyper_el0_name);
172
- }
173
ARMCPRegInfo clidr = {
174
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
175
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
176
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
177
define_one_arm_cp_reg(cpu, &clidr);
178
define_arm_cp_regs(cpu, v7_cp_reginfo);
179
define_debug_regs(cpu);
180
+ define_pmu_regs(cpu);
181
} else {
182
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
183
}
184
- if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
185
- FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
186
- ARMCPRegInfo v81_pmu_regs[] = {
187
- { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
188
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
189
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
190
- .resetvalue = extract64(cpu->pmceid0, 32, 32) },
191
- { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
192
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
193
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
194
- .resetvalue = extract64(cpu->pmceid1, 32, 32) },
195
- REGINFO_SENTINEL
196
- };
197
- define_arm_cp_regs(cpu, v81_pmu_regs);
198
- }
199
if (arm_feature(env, ARM_FEATURE_V8)) {
200
/* AArch64 ID registers, which all have impdef reset values.
201
* Note that within the ID register ranges the unused slots
202
--
203
2.20.1
204
205
New patch
1
Add FIELD() definitions for the ID_AA64DFR0_EL1 and use them
2
where we currently have hard-coded bit values.
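For reference (illustrative line, not from the patch): FIELD(), from "hw/registerfields.h", generates the shift, length and mask constants for a named field, so the hard-coded extract32(..., 12, 4) style accesses can be written symbolically:

uint64_t pmuver = FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, PMUVER);  /* bits [11:8] */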
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Message-id: 20200214175116.9164-7-peter.maydell@linaro.org
8
---
9
target/arm/cpu.h | 10 ++++++++++
10
target/arm/cpu.c | 2 +-
11
target/arm/helper.c | 6 +++---
12
3 files changed, 14 insertions(+), 4 deletions(-)
13
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64MMFR2, BBM, 52, 4)
19
FIELD(ID_AA64MMFR2, EVT, 56, 4)
20
FIELD(ID_AA64MMFR2, E0PD, 60, 4)
21
22
+FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
23
+FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
24
+FIELD(ID_AA64DFR0, PMUVER, 8, 4)
25
+FIELD(ID_AA64DFR0, BRPS, 12, 4)
26
+FIELD(ID_AA64DFR0, WRPS, 20, 4)
27
+FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
28
+FIELD(ID_AA64DFR0, PMSVER, 32, 4)
29
+FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
30
+FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
31
+
32
FIELD(ID_DFR0, COPDBG, 0, 4)
33
FIELD(ID_DFR0, COPSDBG, 4, 4)
34
FIELD(ID_DFR0, MMAPDBG, 8, 4)
35
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/cpu.c
38
+++ b/target/arm/cpu.c
39
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
40
cpu);
41
#endif
42
} else {
43
- cpu->id_aa64dfr0 &= ~0xf00;
44
+ cpu->id_aa64dfr0 = FIELD_DP64(cpu->id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
45
cpu->id_dfr0 &= ~(0xf << 24);
46
cpu->pmceid0 = 0;
47
cpu->pmceid1 = 0;
48
diff --git a/target/arm/helper.c b/target/arm/helper.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/helper.c
51
+++ b/target/arm/helper.c
52
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
53
* check that if they both exist then they agree.
54
*/
55
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
56
- assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
57
- assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
58
- assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
59
+ assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, BRPS) == brps);
60
+ assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, WRPS) == wrps);
61
+ assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) == ctx_cmps);
62
}
63
64
define_one_arm_cp_reg(cpu, &dbgdidr);
65
--
66
2.20.1
67
68
New patch
1
We already define FIELD macros for ID_DFR0, so use them in the
2
one place where we're doing direct bit value manipulation.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Message-id: 20200214175116.9164-8-peter.maydell@linaro.org
8
---
9
target/arm/cpu.c | 2 +-
10
1 file changed, 1 insertion(+), 1 deletion(-)
11
12
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/cpu.c
15
+++ b/target/arm/cpu.c
16
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
17
#endif
18
} else {
19
cpu->id_aa64dfr0 = FIELD_DP64(cpu->id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
20
- cpu->id_dfr0 &= ~(0xf << 24);
21
+ cpu->id_dfr0 = FIELD_DP32(cpu->id_dfr0, ID_DFR0, PERFMON, 0);
22
cpu->pmceid0 = 0;
23
cpu->pmceid1 = 0;
24
}
25
--
26
2.20.1
27
28
1
The armv7m_nvic.h header file was accidentally placed in
1
Instead of open-coding a check on the ID_DFR0 PerfMon ID register
2
include/hw/arm; move it to include/hw/intc to match where
2
field, create a standardly-named isar_feature for "does AArch32 have
3
its corresponding .c file lives.
3
a v8.1 PMUv3" and use it.
4
4
5
This entails moving the id_dfr0 field into the ARMISARegisters struct.
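An illustrative call site for the new test (the definition and the real users are in the patch below):

if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
    /* expose the v8.1 PMU additions, e.g. PMCEID2/PMCEID3 */
}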
6
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-id: 20200214175116.9164-9-peter.maydell@linaro.org
7
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 1501692241-23310-15-git-send-email-peter.maydell@linaro.org
10
---
10
---
11
include/hw/arm/armv7m.h | 2 +-
11
target/arm/cpu.h | 9 ++++++++-
12
include/hw/{arm => intc}/armv7m_nvic.h | 0
12
hw/intc/armv7m_nvic.c | 2 +-
13
hw/intc/armv7m_nvic.c | 2 +-
13
target/arm/cpu.c | 28 ++++++++++++++--------------
14
3 files changed, 2 insertions(+), 2 deletions(-)
14
target/arm/cpu64.c | 6 +++---
15
rename include/hw/{arm => intc}/armv7m_nvic.h (100%)
15
target/arm/helper.c | 5 ++---
16
16
5 files changed, 28 insertions(+), 22 deletions(-)
17
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
17
18
index XXXXXXX..XXXXXXX 100644
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
--- a/include/hw/arm/armv7m.h
19
index XXXXXXX..XXXXXXX 100644
20
+++ b/include/hw/arm/armv7m.h
20
--- a/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@
21
+++ b/target/arm/cpu.h
22
#define HW_ARM_ARMV7M_H
22
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
23
23
uint32_t mvfr0;
24
#include "hw/sysbus.h"
24
uint32_t mvfr1;
25
-#include "hw/arm/armv7m_nvic.h"
25
uint32_t mvfr2;
26
+#include "hw/intc/armv7m_nvic.h"
26
+ uint32_t id_dfr0;
27
27
uint64_t id_aa64isar0;
28
#define TYPE_BITBAND "ARM,bitband-memory"
28
uint64_t id_aa64isar1;
29
#define BITBAND(obj) OBJECT_CHECK(BitBandState, (obj), TYPE_BITBAND)
29
uint64_t id_aa64pfr0;
30
diff --git a/include/hw/arm/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
30
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
31
similarity index 100%
31
uint32_t reset_sctlr;
32
rename from include/hw/arm/armv7m_nvic.h
32
uint32_t id_pfr0;
33
rename to include/hw/intc/armv7m_nvic.h
33
uint32_t id_pfr1;
34
- uint32_t id_dfr0;
35
uint64_t pmceid0;
36
uint64_t pmceid1;
37
uint32_t id_afr0;
38
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
39
return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) >= 2;
40
}
41
42
+static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
43
+{
44
+ /* 0xf means "non-standard IMPDEF PMU" */
45
+ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
46
+ FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
47
+}
48
+
49
/*
50
* 64-bit feature tests via id registers.
51
*/
34
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
52
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
35
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
36
--- a/hw/intc/armv7m_nvic.c
54
--- a/hw/intc/armv7m_nvic.c
37
+++ b/hw/intc/armv7m_nvic.c
55
+++ b/hw/intc/armv7m_nvic.c
38
@@ -XXX,XX +XXX,XX @@
56
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
39
#include "hw/sysbus.h"
57
case 0xd44: /* PFR1. */
40
#include "qemu/timer.h"
58
return cpu->id_pfr1;
41
#include "hw/arm/arm.h"
59
case 0xd48: /* DFR0. */
42
-#include "hw/arm/armv7m_nvic.h"
60
- return cpu->id_dfr0;
43
+#include "hw/intc/armv7m_nvic.h"
61
+ return cpu->isar.id_dfr0;
44
#include "target/arm/cpu.h"
62
case 0xd4c: /* AFR0. */
45
#include "exec/exec-all.h"
63
return cpu->id_afr0;
46
#include "qemu/log.h"
64
case 0xd50: /* MMFR0. */
65
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/cpu.c
68
+++ b/target/arm/cpu.c
69
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
70
#endif
71
} else {
72
cpu->id_aa64dfr0 = FIELD_DP64(cpu->id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
73
- cpu->id_dfr0 = FIELD_DP32(cpu->id_dfr0, ID_DFR0, PERFMON, 0);
74
+ cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
75
cpu->pmceid0 = 0;
76
cpu->pmceid1 = 0;
77
}
78
@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
79
cpu->reset_sctlr = 0x00050078;
80
cpu->id_pfr0 = 0x111;
81
cpu->id_pfr1 = 0x1;
82
- cpu->id_dfr0 = 0x2;
83
+ cpu->isar.id_dfr0 = 0x2;
84
cpu->id_afr0 = 0x3;
85
cpu->id_mmfr0 = 0x01130003;
86
cpu->id_mmfr1 = 0x10030302;
87
@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
88
cpu->reset_sctlr = 0x00050078;
89
cpu->id_pfr0 = 0x111;
90
cpu->id_pfr1 = 0x1;
91
- cpu->id_dfr0 = 0x2;
92
+ cpu->isar.id_dfr0 = 0x2;
93
cpu->id_afr0 = 0x3;
94
cpu->id_mmfr0 = 0x01130003;
95
cpu->id_mmfr1 = 0x10030302;
96
@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
97
cpu->reset_sctlr = 0x00050078;
98
cpu->id_pfr0 = 0x111;
99
cpu->id_pfr1 = 0x11;
100
- cpu->id_dfr0 = 0x33;
101
+ cpu->isar.id_dfr0 = 0x33;
102
cpu->id_afr0 = 0;
103
cpu->id_mmfr0 = 0x01130003;
104
cpu->id_mmfr1 = 0x10030302;
105
@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
106
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
107
cpu->id_pfr0 = 0x111;
108
cpu->id_pfr1 = 0x1;
109
- cpu->id_dfr0 = 0;
110
+ cpu->isar.id_dfr0 = 0;
111
cpu->id_afr0 = 0x2;
112
cpu->id_mmfr0 = 0x01100103;
113
cpu->id_mmfr1 = 0x10020302;
114
@@ -XXX,XX +XXX,XX @@ static void cortex_m3_initfn(Object *obj)
115
cpu->pmsav7_dregion = 8;
116
cpu->id_pfr0 = 0x00000030;
117
cpu->id_pfr1 = 0x00000200;
118
- cpu->id_dfr0 = 0x00100000;
119
+ cpu->isar.id_dfr0 = 0x00100000;
120
cpu->id_afr0 = 0x00000000;
121
cpu->id_mmfr0 = 0x00000030;
122
cpu->id_mmfr1 = 0x00000000;
123
@@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj)
124
cpu->isar.mvfr2 = 0x00000000;
125
cpu->id_pfr0 = 0x00000030;
126
cpu->id_pfr1 = 0x00000200;
127
- cpu->id_dfr0 = 0x00100000;
128
+ cpu->isar.id_dfr0 = 0x00100000;
129
cpu->id_afr0 = 0x00000000;
130
cpu->id_mmfr0 = 0x00000030;
131
cpu->id_mmfr1 = 0x00000000;
132
@@ -XXX,XX +XXX,XX @@ static void cortex_m7_initfn(Object *obj)
133
cpu->isar.mvfr2 = 0x00000040;
134
cpu->id_pfr0 = 0x00000030;
135
cpu->id_pfr1 = 0x00000200;
136
- cpu->id_dfr0 = 0x00100000;
137
+ cpu->isar.id_dfr0 = 0x00100000;
138
cpu->id_afr0 = 0x00000000;
139
cpu->id_mmfr0 = 0x00100030;
140
cpu->id_mmfr1 = 0x00000000;
141
@@ -XXX,XX +XXX,XX @@ static void cortex_m33_initfn(Object *obj)
142
cpu->isar.mvfr2 = 0x00000040;
143
cpu->id_pfr0 = 0x00000030;
144
cpu->id_pfr1 = 0x00000210;
145
- cpu->id_dfr0 = 0x00200000;
146
+ cpu->isar.id_dfr0 = 0x00200000;
147
cpu->id_afr0 = 0x00000000;
148
cpu->id_mmfr0 = 0x00101F40;
149
cpu->id_mmfr1 = 0x00000000;
150
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
151
cpu->midr = 0x411fc153; /* r1p3 */
152
cpu->id_pfr0 = 0x0131;
153
cpu->id_pfr1 = 0x001;
154
- cpu->id_dfr0 = 0x010400;
155
+ cpu->isar.id_dfr0 = 0x010400;
156
cpu->id_afr0 = 0x0;
157
cpu->id_mmfr0 = 0x0210030;
158
cpu->id_mmfr1 = 0x00000000;
159
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
160
cpu->reset_sctlr = 0x00c50078;
161
cpu->id_pfr0 = 0x1031;
162
cpu->id_pfr1 = 0x11;
163
- cpu->id_dfr0 = 0x400;
164
+ cpu->isar.id_dfr0 = 0x400;
165
cpu->id_afr0 = 0;
166
cpu->id_mmfr0 = 0x31100003;
167
cpu->id_mmfr1 = 0x20000000;
168
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
169
cpu->reset_sctlr = 0x00c50078;
170
cpu->id_pfr0 = 0x1031;
171
cpu->id_pfr1 = 0x11;
172
- cpu->id_dfr0 = 0x000;
173
+ cpu->isar.id_dfr0 = 0x000;
174
cpu->id_afr0 = 0;
175
cpu->id_mmfr0 = 0x00100103;
176
cpu->id_mmfr1 = 0x20000000;
177
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
178
cpu->reset_sctlr = 0x00c50078;
179
cpu->id_pfr0 = 0x00001131;
180
cpu->id_pfr1 = 0x00011011;
181
- cpu->id_dfr0 = 0x02010555;
182
+ cpu->isar.id_dfr0 = 0x02010555;
183
cpu->id_afr0 = 0x00000000;
184
cpu->id_mmfr0 = 0x10101105;
185
cpu->id_mmfr1 = 0x40000000;
186
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
187
cpu->reset_sctlr = 0x00c50078;
188
cpu->id_pfr0 = 0x00001131;
189
cpu->id_pfr1 = 0x00011011;
190
- cpu->id_dfr0 = 0x02010555;
191
+ cpu->isar.id_dfr0 = 0x02010555;
192
cpu->id_afr0 = 0x00000000;
193
cpu->id_mmfr0 = 0x10201105;
194
cpu->id_mmfr1 = 0x20000000;
195
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/target/arm/cpu64.c
198
+++ b/target/arm/cpu64.c
199
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
200
cpu->reset_sctlr = 0x00c50838;
201
cpu->id_pfr0 = 0x00000131;
202
cpu->id_pfr1 = 0x00011011;
203
- cpu->id_dfr0 = 0x03010066;
204
+ cpu->isar.id_dfr0 = 0x03010066;
205
cpu->id_afr0 = 0x00000000;
206
cpu->id_mmfr0 = 0x10101105;
207
cpu->id_mmfr1 = 0x40000000;
208
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
209
cpu->reset_sctlr = 0x00c50838;
210
cpu->id_pfr0 = 0x00000131;
211
cpu->id_pfr1 = 0x00011011;
212
- cpu->id_dfr0 = 0x03010066;
213
+ cpu->isar.id_dfr0 = 0x03010066;
214
cpu->id_afr0 = 0x00000000;
215
cpu->id_mmfr0 = 0x10101105;
216
cpu->id_mmfr1 = 0x40000000;
217
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
218
cpu->reset_sctlr = 0x00c50838;
219
cpu->id_pfr0 = 0x00000131;
220
cpu->id_pfr1 = 0x00011011;
221
- cpu->id_dfr0 = 0x03010066;
222
+ cpu->isar.id_dfr0 = 0x03010066;
223
cpu->id_afr0 = 0x00000000;
224
cpu->id_mmfr0 = 0x10201105;
225
cpu->id_mmfr1 = 0x40000000;
226
diff --git a/target/arm/helper.c b/target/arm/helper.c
227
index XXXXXXX..XXXXXXX 100644
228
--- a/target/arm/helper.c
229
+++ b/target/arm/helper.c
230
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
231
g_free(pmevtyper_name);
232
g_free(pmevtyper_el0_name);
233
}
234
- if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
235
- FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
236
+ if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
237
ARMCPRegInfo v81_pmu_regs[] = {
238
{ .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
239
.cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
240
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
241
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
242
.access = PL1_R, .type = ARM_CP_CONST,
243
.accessfn = access_aa32_tid3,
244
- .resetvalue = cpu->id_dfr0 },
245
+ .resetvalue = cpu->isar.id_dfr0 },
246
{ .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
247
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
248
.access = PL1_R, .type = ARM_CP_CONST,
47
--
249
--
48
2.7.4
250
2.20.1
49
251
50
252
1
From: Andrew Jones <drjones@redhat.com>
1
Add the 64-bit version of the "is this a v8.1 PMUv3?"
2
ID register check function, and the _any_ version that
3
checks for either AArch32 or AArch64 support. We'll use
4
this in a later commit.
2
5
3
Mimicking gicv3-maintenance-interrupt, add the PMU's interrupt to
6
We don't (yet) do any isar_feature checks on ID_AA64DFR1_EL1,
4
CPU state.
7
but we move id_aa64dfr1 into the ARMISARegisters struct with
8
id_aa64dfr0, for consistency.
5
9
6
Signed-off-by: Andrew Jones <drjones@redhat.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 1500471597-2517-2-git-send-email-drjones@redhat.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Message-id: 20200214175116.9164-10-peter.maydell@linaro.org
10
---
14
---
11
target/arm/cpu.h | 2 ++
15
target/arm/cpu.h | 15 +++++++++++++--
12
hw/arm/virt.c | 3 +++
16
target/arm/cpu.c | 3 ++-
13
target/arm/cpu.c | 2 ++
17
target/arm/cpu64.c | 6 +++---
14
3 files changed, 7 insertions(+)
18
target/arm/helper.c | 12 +++++++-----
19
4 files changed, 25 insertions(+), 11 deletions(-)
15
20
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
23
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
25
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
21
qemu_irq gt_timer_outputs[NUM_GTIMERS];
26
uint64_t id_aa64mmfr0;
22
/* GPIO output for GICv3 maintenance interrupt signal */
27
uint64_t id_aa64mmfr1;
23
qemu_irq gicv3_maintenance_interrupt;
28
uint64_t id_aa64mmfr2;
24
+ /* GPIO output for the PMU interrupt */
29
+ uint64_t id_aa64dfr0;
25
+ qemu_irq pmu_interrupt;
30
+ uint64_t id_aa64dfr1;
26
31
} isar;
27
/* MemoryRegion to use for secure physical accesses */
32
uint32_t midr;
28
MemoryRegion *secure_memory;
33
uint32_t revidr;
29
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
34
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
30
index XXXXXXX..XXXXXXX 100644
35
uint32_t id_mmfr2;
31
--- a/hw/arm/virt.c
36
uint32_t id_mmfr3;
32
+++ b/hw/arm/virt.c
37
uint32_t id_mmfr4;
33
@@ -XXX,XX +XXX,XX @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
38
- uint64_t id_aa64dfr0;
34
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
39
- uint64_t id_aa64dfr1;
35
qdev_get_gpio_in(gicdev, ppibase
40
uint64_t id_aa64afr0;
36
+ ARCH_GICV3_MAINT_IRQ));
41
uint64_t id_aa64afr1;
37
+ qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
42
uint32_t dbgdidr;
38
+ qdev_get_gpio_in(gicdev, ppibase
43
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
39
+ + VIRTUAL_PMU_IRQ));
44
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
40
45
}
41
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
46
42
sysbus_connect_irq(gicbusdev, i + smp_cpus,
47
+static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id)
48
+{
49
+ return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
50
+ FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
51
+}
52
+
53
/*
54
* Feature tests for "does this exist in either 32-bit or 64-bit?"
55
*/
56
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
57
return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
58
}
59
60
+static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
61
+{
62
+ return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
63
+}
64
+
65
/*
66
* Forward to the above feature tests given an ARMCPU pointer.
67
*/
43
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
68
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
44
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/cpu.c
70
--- a/target/arm/cpu.c
46
+++ b/target/arm/cpu.c
71
+++ b/target/arm/cpu.c
47
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
72
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
48
73
cpu);
49
qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
50
"gicv3-maintenance-interrupt", 1);
51
+ qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
52
+ "pmu-interrupt", 1);
53
#endif
74
#endif
54
75
} else {
55
/* DTB consumers generally don't in fact care what the 'compatible'
76
- cpu->id_aa64dfr0 = FIELD_DP64(cpu->id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
77
+ cpu->isar.id_aa64dfr0 =
78
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
79
cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
80
cpu->pmceid0 = 0;
81
cpu->pmceid1 = 0;
82
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/arm/cpu64.c
85
+++ b/target/arm/cpu64.c
86
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
87
cpu->isar.id_isar5 = 0x00011121;
88
cpu->isar.id_isar6 = 0;
89
cpu->isar.id_aa64pfr0 = 0x00002222;
90
- cpu->id_aa64dfr0 = 0x10305106;
91
+ cpu->isar.id_aa64dfr0 = 0x10305106;
92
cpu->isar.id_aa64isar0 = 0x00011120;
93
cpu->isar.id_aa64mmfr0 = 0x00001124;
94
cpu->dbgdidr = 0x3516d000;
95
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
96
cpu->isar.id_isar5 = 0x00011121;
97
cpu->isar.id_isar6 = 0;
98
cpu->isar.id_aa64pfr0 = 0x00002222;
99
- cpu->id_aa64dfr0 = 0x10305106;
100
+ cpu->isar.id_aa64dfr0 = 0x10305106;
101
cpu->isar.id_aa64isar0 = 0x00011120;
102
cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
103
cpu->dbgdidr = 0x3516d000;
104
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
105
cpu->isar.id_isar4 = 0x00011142;
106
cpu->isar.id_isar5 = 0x00011121;
107
cpu->isar.id_aa64pfr0 = 0x00002222;
108
- cpu->id_aa64dfr0 = 0x10305106;
109
+ cpu->isar.id_aa64dfr0 = 0x10305106;
110
cpu->isar.id_aa64isar0 = 0x00011120;
111
cpu->isar.id_aa64mmfr0 = 0x00001124;
112
cpu->dbgdidr = 0x3516d000;
113
diff --git a/target/arm/helper.c b/target/arm/helper.c
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/arm/helper.c
116
+++ b/target/arm/helper.c
117
@@ -XXX,XX +XXX,XX @@
118
#include "hw/semihosting/semihost.h"
119
#include "sysemu/cpus.h"
120
#include "sysemu/kvm.h"
121
+#include "sysemu/tcg.h"
122
#include "qemu/range.h"
123
#include "qapi/qapi-commands-machine-target.h"
124
#include "qapi/error.h"
125
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
126
* check that if they both exist then they agree.
127
*/
128
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
129
- assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, BRPS) == brps);
130
- assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, WRPS) == wrps);
131
- assert(FIELD_EX64(cpu->id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) == ctx_cmps);
132
+ assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) == brps);
133
+ assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) == wrps);
134
+ assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS)
135
+ == ctx_cmps);
136
}
137
138
define_one_arm_cp_reg(cpu, &dbgdidr);
139
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
140
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
141
.access = PL1_R, .type = ARM_CP_CONST,
142
.accessfn = access_aa64_tid3,
143
- .resetvalue = cpu->id_aa64dfr0 },
144
+ .resetvalue = cpu->isar.id_aa64dfr0 },
145
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
146
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
147
.access = PL1_R, .type = ARM_CP_CONST,
148
.accessfn = access_aa64_tid3,
149
- .resetvalue = cpu->id_aa64dfr1 },
150
+ .resetvalue = cpu->isar.id_aa64dfr1 },
151
{ .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
152
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
153
.access = PL1_R, .type = ARM_CP_CONST,
56
--
154
--
57
2.7.4
155
2.20.1
58
156
59
157
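As a rough guide for readers without the headers to hand: the new PMUVER
tests boil down to a field extract plus two comparisons. A standalone
sketch (the 8/4 shift and width are the architectural position of
ID_AA64DFR0_EL1.PMUVer; the helper names and open-coded masks are
illustrative only, the real code uses FIELD_EX64() from
hw/registerfields.h):

    #include <stdbool.h>
    #include <stdint.h>

    /* ID_AA64DFR0_EL1.PMUVer lives in bits [11:8] */
    static inline unsigned pmuver(uint64_t id_aa64dfr0)
    {
        return (id_aa64dfr0 >> 8) & 0xf;
    }

    /* Roughly what isar_feature_aa64_pmu_8_1() tests: PMUVer >= 4 is a
     * PMUv3 with the v8.1 extensions, and 0xf is reserved for a
     * non-standard IMPDEF PMU, which must not match.
     */
    static inline bool has_aa64_pmu_8_1(uint64_t id_aa64dfr0)
    {
        return pmuver(id_aa64dfr0) >= 4 && pmuver(id_aa64dfr0) != 0xf;
    }

The AArch32 variant is the same shape, except that it looks at
ID_DFR0.PerfMon (bits [27:24]) with FIELD_EX32().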
1
Add a utility function for testing whether the CPU is in Handler
1
The AArch32 DBGDIDR defines properties like the number of
2
mode; this is just a check whether v7m.exception is non-zero, but
2
breakpoints, watchpoints and context-matching comparators. On an
3
we do it in several places and it makes the code a bit easier
3
AArch64 CPU, the register may not even exist if AArch32 is not
4
to read, since you don't have to mentally figure out what the test is testing.
4
supported at EL1.
5
6
Currently we hard-code use of DBGDIDR to identify the number of
7
breakpoints etc; this works for all our TCG CPUs, but will break if
8
we ever add an AArch64-only CPU. We also have an assert() that the
9
AArch32 and AArch64 registers match, which currently works only by
10
luck for KVM because we don't populate either of these ID registers
11
from the KVM vCPU and so they are both zero.
12
13
Clean this up so we have functions for finding the number
14
of breakpoints, watchpoints and context comparators which look
15
in the appropriate ID register.
16
17
This allows us to drop the "check that AArch64 and AArch32 agree
18
on the number of breakpoints etc" asserts:
19
* we no longer look at the AArch32 versions unless that's the
20
right place to be looking
21
* it's valid to have a CPU (eg AArch64-only) where they don't match
22
* we shouldn't have been asserting the validity of ID registers
23
in a codepath used with KVM anyway
5
24
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 1501692241-23310-14-git-send-email-peter.maydell@linaro.org
27
Message-id: 20200214175116.9164-11-peter.maydell@linaro.org
11
---
28
---
12
target/arm/cpu.h | 10 ++++++++--
29
target/arm/cpu.h | 7 +++++++
13
target/arm/helper.c | 8 ++++----
30
target/arm/internals.h | 42 +++++++++++++++++++++++++++++++++++++++
14
2 files changed, 12 insertions(+), 6 deletions(-)
31
target/arm/debug_helper.c | 6 +++---
32
target/arm/helper.c | 21 +++++---------------
33
4 files changed, 57 insertions(+), 19 deletions(-)
15
34
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
35
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
37
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
38
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ static inline int arm_highest_el(CPUARMState *env)
39
@@ -XXX,XX +XXX,XX @@ FIELD(ID_DFR0, MPROFDBG, 20, 4)
21
return 1;
40
FIELD(ID_DFR0, PERFMON, 24, 4)
41
FIELD(ID_DFR0, TRACEFILT, 28, 4)
42
43
+FIELD(DBGDIDR, SE_IMP, 12, 1)
44
+FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
45
+FIELD(DBGDIDR, VERSION, 16, 4)
46
+FIELD(DBGDIDR, CTX_CMPS, 20, 4)
47
+FIELD(DBGDIDR, BRPS, 24, 4)
48
+FIELD(DBGDIDR, WRPS, 28, 4)
49
+
50
FIELD(MVFR0, SIMDREG, 0, 4)
51
FIELD(MVFR0, FPSP, 4, 4)
52
FIELD(MVFR0, FPDP, 8, 4)
53
diff --git a/target/arm/internals.h b/target/arm/internals.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/internals.h
56
+++ b/target/arm/internals.h
57
@@ -XXX,XX +XXX,XX @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
58
}
22
}
59
}
23
60
24
+/* Return true if a v7M CPU is in Handler mode */
61
+/**
25
+static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
62
+ * arm_num_brps: Return number of implemented breakpoints.
63
+ * Note that the ID register BRPS field is "number of bps - 1",
64
+ * and we return the actual number of breakpoints.
65
+ */
66
+static inline int arm_num_brps(ARMCPU *cpu)
26
+{
67
+{
27
+ return env->v7m.exception != 0;
68
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
69
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
70
+ } else {
71
+ return FIELD_EX32(cpu->dbgdidr, DBGDIDR, BRPS) + 1;
72
+ }
28
+}
73
+}
29
+
74
+
30
/* Return the current Exception Level (as per ARMv8; note that this differs
75
+/**
31
* from the ARMv7 Privilege Level).
76
+ * arm_num_wrps: Return number of implemented watchpoints.
77
+ * Note that the ID register WRPS field is "number of wps - 1",
78
+ * and we return the actual number of watchpoints.
79
+ */
80
+static inline int arm_num_wrps(ARMCPU *cpu)
81
+{
82
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
83
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
84
+ } else {
85
+ return FIELD_EX32(cpu->dbgdidr, DBGDIDR, WRPS) + 1;
86
+ }
87
+}
88
+
89
+/**
90
+ * arm_num_ctx_cmps: Return number of implemented context comparators.
91
+ * Note that the ID register CTX_CMPS field is "number of cmps - 1",
92
+ * and we return the actual number of comparators.
93
+ */
94
+static inline int arm_num_ctx_cmps(ARMCPU *cpu)
95
+{
96
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
97
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
98
+ } else {
99
+ return FIELD_EX32(cpu->dbgdidr, DBGDIDR, CTX_CMPS) + 1;
100
+ }
101
+}
102
+
103
/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3.
104
* Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits.
32
*/
105
*/
33
static inline int arm_current_el(CPUARMState *env)
106
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/arm/debug_helper.c
109
+++ b/target/arm/debug_helper.c
110
@@ -XXX,XX +XXX,XX @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
34
{
111
{
35
if (arm_feature(env, ARM_FEATURE_M)) {
112
CPUARMState *env = &cpu->env;
36
- return !((env->v7m.exception == 0) && (env->v7m.control & 1));
113
uint64_t bcr = env->cp15.dbgbcr[lbn];
37
+ return arm_v7m_is_handler_mode(env) || !(env->v7m.control & 1);
114
- int brps = extract32(cpu->dbgdidr, 24, 4);
115
- int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
116
+ int brps = arm_num_brps(cpu);
117
+ int ctx_cmps = arm_num_ctx_cmps(cpu);
118
int bt;
119
uint32_t contextidr;
120
uint64_t hcr_el2;
121
@@ -XXX,XX +XXX,XX @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
122
* case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
123
* We choose the former.
124
*/
125
- if (lbn > brps || lbn < (brps - ctx_cmps)) {
126
+ if (lbn >= brps || lbn < (brps - ctx_cmps)) {
127
return false;
38
}
128
}
39
40
if (is_a64(env)) {
41
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
42
}
43
*flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
44
45
- if (env->v7m.exception != 0) {
46
+ if (arm_v7m_is_handler_mode(env)) {
47
*flags |= ARM_TBFLAG_HANDLER_MASK;
48
}
49
129
50
diff --git a/target/arm/helper.c b/target/arm/helper.c
130
diff --git a/target/arm/helper.c b/target/arm/helper.c
51
index XXXXXXX..XXXXXXX 100644
131
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/helper.c
132
--- a/target/arm/helper.c
53
+++ b/target/arm/helper.c
133
+++ b/target/arm/helper.c
54
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
134
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
55
* that jumps to magic addresses don't have magic behaviour unless
135
};
56
* we're in Handler mode (compare pseudocode BXWritePC()).
136
57
*/
137
/* Note that all these register fields hold "number of Xs minus 1". */
58
- assert(env->v7m.exception != 0);
138
- brps = extract32(cpu->dbgdidr, 24, 4);
59
+ assert(arm_v7m_is_handler_mode(env));
139
- wrps = extract32(cpu->dbgdidr, 28, 4);
60
140
- ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
61
/* In the spec pseudocode ExceptionReturn() is called directly
141
+ brps = arm_num_brps(cpu);
62
* from BXWritePC() and gets the full target PC value including
142
+ wrps = arm_num_wrps(cpu);
63
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
143
+ ctx_cmps = arm_num_ctx_cmps(cpu);
64
* resuming in Thread mode. If that doesn't match what the
144
65
* exception return type specified then this is a UsageFault.
145
assert(ctx_cmps <= brps);
66
*/
146
67
- if (return_to_handler == (env->v7m.exception == 0)) {
147
- /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
68
+ if (return_to_handler != arm_v7m_is_handler_mode(env)) {
148
- * of the debug registers such as number of breakpoints;
69
/* Take an INVPC UsageFault by pushing the stack again. */
149
- * check that if they both exist then they agree.
70
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
150
- */
71
env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
151
- if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
72
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
152
- assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) == brps);
73
if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
153
- assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) == wrps);
74
lr |= 4;
154
- assert(FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS)
155
- == ctx_cmps);
156
- }
157
-
158
define_one_arm_cp_reg(cpu, &dbgdidr);
159
define_arm_cp_regs(cpu, debug_cp_reginfo);
160
161
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
162
define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
75
}
163
}
76
- if (env->v7m.exception == 0) {
164
77
+ if (!arm_v7m_is_handler_mode(env)) {
165
- for (i = 0; i < brps + 1; i++) {
78
lr |= 8;
166
+ for (i = 0; i < brps; i++) {
167
ARMCPRegInfo dbgregs[] = {
168
{ .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
169
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
170
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
171
define_arm_cp_regs(cpu, dbgregs);
79
}
172
}
80
173
81
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
174
- for (i = 0; i < wrps + 1; i++) {
82
* switch_v7m_sp() deals with updating the SPSEL bit in
175
+ for (i = 0; i < wrps; i++) {
83
* env->v7m.control, so we only need update the others.
176
ARMCPRegInfo dbgregs[] = {
84
*/
177
{ .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
85
- if (env->v7m.exception == 0) {
178
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
86
+ if (!arm_v7m_is_handler_mode(env)) {
87
switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
88
}
89
env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
90
--
179
--
91
2.7.4
180
2.20.1
92
181
93
182
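The "minus one" encoding in this patch is easy to misread, so here is a
small standalone illustration of what the new helpers and the adjusted
lbn range check are doing (the 24/4 position is the DBGDIDR.BRPS field
defined above; the concrete values are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t dbgdidr = 5u << 24;            /* BRPS field reads as 5 ... */
        int brps = ((dbgdidr >> 24) & 0xf) + 1; /* ... meaning 6 breakpoints,
                                                 * which is what arm_num_brps()
                                                 * now returns */
        assert(brps == 6);

        /* Valid linked-breakpoint numbers are 0..brps-1, so once brps is
         * the real count the out-of-range test becomes lbn >= brps rather
         * than the old lbn > (count - 1) form.
         */
        int lbn = 6;
        assert(lbn >= brps);
        return 0;
    }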
1
Implement the new do_transaction_failed hook for ARM, which should
1
We're going to want to read the DBGDIDR register from KVM in
2
cause the CPU to take a prefetch abort or data abort.
2
a subsequent commit, which means it needs to be in the
3
ARMISARegisters sub-struct. Move it.
3
4
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Message-id: 20200214175116.9164-12-peter.maydell@linaro.org
7
---
8
---
8
target/arm/internals.h | 10 ++++++++++
9
target/arm/cpu.h | 2 +-
9
target/arm/cpu.c | 1 +
10
target/arm/internals.h | 6 +++---
10
target/arm/op_helper.c | 43 +++++++++++++++++++++++++++++++++++++++++++
11
target/arm/cpu.c | 8 ++++----
11
3 files changed, 54 insertions(+)
12
target/arm/cpu64.c | 6 +++---
13
target/arm/helper.c | 2 +-
14
5 files changed, 12 insertions(+), 12 deletions(-)
12
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
21
uint32_t mvfr1;
22
uint32_t mvfr2;
23
uint32_t id_dfr0;
24
+ uint32_t dbgdidr;
25
uint64_t id_aa64isar0;
26
uint64_t id_aa64isar1;
27
uint64_t id_aa64pfr0;
28
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
29
uint32_t id_mmfr4;
30
uint64_t id_aa64afr0;
31
uint64_t id_aa64afr1;
32
- uint32_t dbgdidr;
33
uint32_t clidr;
34
uint64_t mp_affinity; /* MP ID without feature bits */
35
/* The elements of this array are the CCSIDR values for each cache,
13
diff --git a/target/arm/internals.h b/target/arm/internals.h
36
diff --git a/target/arm/internals.h b/target/arm/internals.h
14
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/internals.h
38
--- a/target/arm/internals.h
16
+++ b/target/arm/internals.h
39
+++ b/target/arm/internals.h
17
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
40
@@ -XXX,XX +XXX,XX @@ static inline int arm_num_brps(ARMCPU *cpu)
18
MMUAccessType access_type,
41
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
19
int mmu_idx, uintptr_t retaddr);
42
return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
20
43
} else {
21
+/* arm_cpu_do_transaction_failed: handle a memory system error response
44
- return FIELD_EX32(cpu->dbgdidr, DBGDIDR, BRPS) + 1;
22
+ * (eg "no device/memory present at address") by raising an external abort
45
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
23
+ * exception
46
}
24
+ */
47
}
25
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
48
26
+ vaddr addr, unsigned size,
49
@@ -XXX,XX +XXX,XX @@ static inline int arm_num_wrps(ARMCPU *cpu)
27
+ MMUAccessType access_type,
50
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
28
+ int mmu_idx, MemTxAttrs attrs,
51
return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
29
+ MemTxResult response, uintptr_t retaddr);
52
} else {
30
+
53
- return FIELD_EX32(cpu->dbgdidr, DBGDIDR, WRPS) + 1;
31
/* Call the EL change hook if one has been registered */
54
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
32
static inline void arm_call_el_change_hook(ARMCPU *cpu)
55
}
33
{
56
}
57
58
@@ -XXX,XX +XXX,XX @@ static inline int arm_num_ctx_cmps(ARMCPU *cpu)
59
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
60
return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
61
} else {
62
- return FIELD_EX32(cpu->dbgdidr, DBGDIDR, CTX_CMPS) + 1;
63
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
64
}
65
}
66
34
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
67
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
35
index XXXXXXX..XXXXXXX 100644
68
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/cpu.c
69
--- a/target/arm/cpu.c
37
+++ b/target/arm/cpu.c
70
+++ b/target/arm/cpu.c
38
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
71
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
39
#else
72
cpu->isar.id_isar2 = 0x21232031;
40
cc->do_interrupt = arm_cpu_do_interrupt;
73
cpu->isar.id_isar3 = 0x11112131;
41
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
74
cpu->isar.id_isar4 = 0x00111142;
42
+ cc->do_transaction_failed = arm_cpu_do_transaction_failed;
75
- cpu->dbgdidr = 0x15141000;
43
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
76
+ cpu->isar.dbgdidr = 0x15141000;
44
cc->asidx_from_attrs = arm_asidx_from_attrs;
77
cpu->clidr = (1 << 27) | (2 << 24) | 3;
45
cc->vmsd = &vmstate_arm_cpu;
78
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
46
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
79
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
80
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
81
cpu->isar.id_isar2 = 0x21232041;
82
cpu->isar.id_isar3 = 0x11112131;
83
cpu->isar.id_isar4 = 0x00111142;
84
- cpu->dbgdidr = 0x35141000;
85
+ cpu->isar.dbgdidr = 0x35141000;
86
cpu->clidr = (1 << 27) | (1 << 24) | 3;
87
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
88
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
89
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
90
cpu->isar.id_isar2 = 0x21232041;
91
cpu->isar.id_isar3 = 0x11112131;
92
cpu->isar.id_isar4 = 0x10011142;
93
- cpu->dbgdidr = 0x3515f005;
94
+ cpu->isar.dbgdidr = 0x3515f005;
95
cpu->clidr = 0x0a200023;
96
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
97
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
98
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
99
cpu->isar.id_isar2 = 0x21232041;
100
cpu->isar.id_isar3 = 0x11112131;
101
cpu->isar.id_isar4 = 0x10011142;
102
- cpu->dbgdidr = 0x3515f021;
103
+ cpu->isar.dbgdidr = 0x3515f021;
104
cpu->clidr = 0x0a200023;
105
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
106
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
107
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
47
index XXXXXXX..XXXXXXX 100644
108
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/op_helper.c
109
--- a/target/arm/cpu64.c
49
+++ b/target/arm/op_helper.c
110
+++ b/target/arm/cpu64.c
50
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
111
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
51
deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
112
cpu->isar.id_aa64dfr0 = 0x10305106;
52
}
113
cpu->isar.id_aa64isar0 = 0x00011120;
53
114
cpu->isar.id_aa64mmfr0 = 0x00001124;
54
+/* arm_cpu_do_transaction_failed: handle a memory system error response
115
- cpu->dbgdidr = 0x3516d000;
55
+ * (eg "no device/memory present at address") by raising an external abort
116
+ cpu->isar.dbgdidr = 0x3516d000;
56
+ * exception
117
cpu->clidr = 0x0a200023;
57
+ */
118
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
58
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
119
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
59
+ vaddr addr, unsigned size,
120
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
60
+ MMUAccessType access_type,
121
cpu->isar.id_aa64dfr0 = 0x10305106;
61
+ int mmu_idx, MemTxAttrs attrs,
122
cpu->isar.id_aa64isar0 = 0x00011120;
62
+ MemTxResult response, uintptr_t retaddr)
123
cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
63
+{
124
- cpu->dbgdidr = 0x3516d000;
64
+ ARMCPU *cpu = ARM_CPU(cs);
125
+ cpu->isar.dbgdidr = 0x3516d000;
65
+ CPUARMState *env = &cpu->env;
126
cpu->clidr = 0x0a200023;
66
+ uint32_t fsr, fsc;
127
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
67
+ ARMMMUFaultInfo fi = {};
128
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
68
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
129
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
69
+
130
cpu->isar.id_aa64dfr0 = 0x10305106;
70
+ if (retaddr) {
131
cpu->isar.id_aa64isar0 = 0x00011120;
71
+ /* now we have a real cpu fault */
132
cpu->isar.id_aa64mmfr0 = 0x00001124;
72
+ cpu_restore_state(cs, retaddr);
133
- cpu->dbgdidr = 0x3516d000;
73
+ }
134
+ cpu->isar.dbgdidr = 0x3516d000;
74
+
135
cpu->clidr = 0x0a200023;
75
+ /* The EA bit in syndromes and fault status registers is an
136
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
76
+ * IMPDEF classification of external aborts. ARM implementations
137
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
77
+ * usually use this to indicate AXI bus Decode error (0) or
138
diff --git a/target/arm/helper.c b/target/arm/helper.c
78
+ * Slave error (1); in QEMU we follow that.
139
index XXXXXXX..XXXXXXX 100644
79
+ */
140
--- a/target/arm/helper.c
80
+ fi.ea = (response != MEMTX_DECODE_ERROR);
141
+++ b/target/arm/helper.c
81
+
142
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
82
+ /* The fault status register format depends on whether we're using
143
ARMCPRegInfo dbgdidr = {
83
+ * the LPAE long descriptor format, or the short descriptor format.
144
.name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
84
+ */
145
.access = PL0_R, .accessfn = access_tda,
85
+ if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
146
- .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
86
+ /* long descriptor form, STATUS 0b010000: synchronous ext abort */
147
+ .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
87
+ fsr = (fi.ea << 12) | (1 << 9) | 0x10;
148
};
88
+ } else {
149
89
+ /* short descriptor form, FSR 0b01000 : synchronous ext abort */
150
/* Note that all these register fields hold "number of Xs minus 1". */
90
+ fsr = (fi.ea << 12) | 0x8;
91
+ }
92
+ fsc = 0x10;
93
+
94
+ deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
95
+}
96
+
97
#endif /* !defined(CONFIG_USER_ONLY) */
98
99
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
100
--
151
--
101
2.7.4
152
2.20.1
102
153
103
154
1
For M profile the XPSR has a similar but not identical format to the
1
Now that we have isar_feature test functions that look at fields in the
2
A profile CPSR/SPSR. (For instance the Thumb bit is in a different
2
ID_AA64DFR0_EL1 and ID_DFR0 ID registers, add the code that reads
3
place.) For guest accesses we make the M profile code go through
3
these register values from KVM so that the checks behave correctly
4
xpsr_read() and xpsr_write() which handle the different layout.
4
when we're using KVM.
5
However for migration we use cpsr_read() and cpsr_write() to
6
marshal state into and out of the migration data stream. This
7
is pretty confusing and works more by luck than anything else.
8
Make M profile migration use xpsr_read() and xpsr_write() instead.
9
5
10
The most complicated part of this is handling the possibility
6
No isar_feature function tests ID_AA64DFR1_EL1 or DBGDIDR yet, but we
11
that the migration source is an older QEMU which hands us a
7
add them anyway to maintain the invariant that every field in the
12
CPSR format value; helpfully we can always tell the two apart.
8
ARMISARegisters struct is populated for a KVM CPU and can be relied
9
on. This requirement isn't actually written down yet, so add a note
10
to the relevant comment.
13
11
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 1501692241-23310-11-git-send-email-peter.maydell@linaro.org
14
Message-id: 20200214175116.9164-13-peter.maydell@linaro.org
17
---
15
---
18
target/arm/machine.c | 49 ++++++++++++++++++++++++++++++++++---------------
16
target/arm/cpu.h | 5 +++++
19
1 file changed, 34 insertions(+), 15 deletions(-)
17
target/arm/kvm32.c | 8 ++++++++
18
target/arm/kvm64.c | 36 ++++++++++++++++++++++++++++++++++++
19
3 files changed, 49 insertions(+)
20
20
21
diff --git a/target/arm/machine.c b/target/arm/machine.c
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/machine.c
23
--- a/target/arm/cpu.h
24
+++ b/target/arm/machine.c
24
+++ b/target/arm/cpu.h
25
@@ -XXX,XX +XXX,XX @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
25
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
26
uint32_t val = qemu_get_be32(f);
26
* prefix means a constant register.
27
27
* Some of these registers are split out into a substructure that
28
if (arm_feature(env, ARM_FEATURE_M)) {
28
* is shared with the translators to control the ISA.
29
- /* If the I or F bits are set then this is a migration from
29
+ *
30
- * an old QEMU which still stored the M profile FAULTMASK
30
+ * Note that if you add an ID register to the ARMISARegisters struct
31
- * and PRIMASK in env->daif. Set v7m.faultmask and v7m.primask
31
+ * you need to also update the 32-bit and 64-bit versions of the
32
- * accordingly, and then clear the bits so they don't confuse
32
+ * kvm_arm_get_host_cpu_features() function to correctly populate the
33
- * cpsr_write(). For a new QEMU, the bits here will always be
33
+ * field by reading the value from the KVM vCPU.
34
- * clear, and the data is transferred using the
34
*/
35
- * vmstate_m_faultmask_primask subsection.
35
struct ARMISARegisters {
36
- */
36
uint32_t id_isar0;
37
- if (val & CPSR_F) {
37
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
38
- env->v7m.faultmask = 1;
38
index XXXXXXX..XXXXXXX 100644
39
- }
39
--- a/target/arm/kvm32.c
40
- if (val & CPSR_I) {
40
+++ b/target/arm/kvm32.c
41
- env->v7m.primask = 1;
41
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
42
+ if (val & XPSR_EXCP) {
42
ahcf->isar.id_isar6 = 0;
43
+ /* This is a CPSR format value from an older QEMU. (We can tell
43
}
44
+ * because values transferred in XPSR format always have zero
44
45
+ * for the EXCP field, and CPSR format will always have bit 4
45
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
46
+ * set in CPSR_M.) Rearrange it into XPSR format. The significant
46
+ ARM_CP15_REG32(0, 0, 1, 2));
47
+ * differences are that the T bit is not in the same place, the
48
+ * primask/faultmask info may be in the CPSR I and F bits, and
49
+ * we do not want the mode bits.
50
+ */
51
+ uint32_t newval = val;
52
+
47
+
53
+ newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
48
err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
54
+ if (val & CPSR_T) {
49
KVM_REG_ARM | KVM_REG_SIZE_U32 |
55
+ newval |= XPSR_T;
50
KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0);
56
+ }
51
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
57
+ /* If the I or F bits are set then this is a migration from
52
* Fortunately there is not yet anything in there that affects migration.
58
+ * an old QEMU which still stored the M profile FAULTMASK
53
*/
59
+ * and PRIMASK in env->daif. For a new QEMU, the data is
54
60
+ * transferred using the vmstate_m_faultmask_primask subsection.
55
+ /*
61
+ */
56
+ * There is no way to read DBGDIDR, because currently 32-bit KVM
62
+ if (val & CPSR_F) {
57
+ * doesn't implement debug at all. Leave it at zero.
63
+ env->v7m.faultmask = 1;
58
+ */
64
+ }
59
+
65
+ if (val & CPSR_I) {
60
kvm_arm_destroy_scratch_host_vcpu(fdarray);
66
+ env->v7m.primask = 1;
61
67
+ }
62
if (err < 0) {
68
+ val = newval;
63
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
69
}
64
index XXXXXXX..XXXXXXX 100644
70
- val &= ~(CPSR_F | CPSR_I);
65
--- a/target/arm/kvm64.c
71
+ /* Ignore the low bits, they are handled by vmstate_m. */
66
+++ b/target/arm/kvm64.c
72
+ xpsr_write(env, val, ~XPSR_EXCP);
67
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
73
+ return 0;
68
} else {
69
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
70
ARM64_SYS_REG(3, 0, 0, 4, 1));
71
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
72
+ ARM64_SYS_REG(3, 0, 0, 5, 0));
73
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
74
+ ARM64_SYS_REG(3, 0, 0, 5, 1));
75
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
76
ARM64_SYS_REG(3, 0, 0, 6, 0));
77
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
78
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
79
* than skipping the reads and leaving 0, as we must avoid
80
* considering the values in every case.
81
*/
82
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
83
+ ARM64_SYS_REG(3, 0, 0, 1, 2));
84
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
85
ARM64_SYS_REG(3, 0, 0, 2, 0));
86
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
87
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
88
ARM64_SYS_REG(3, 0, 0, 3, 1));
89
err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
90
ARM64_SYS_REG(3, 0, 0, 3, 2));
91
+
92
+ /*
93
+ * DBGDIDR is a bit complicated because the kernel doesn't
94
+ * provide an accessor for it in 64-bit mode, which is what this
95
+ * scratch VM is in, and there's no architected "64-bit sysreg
96
+ * which reads the same as the 32-bit register" the way there is
97
+ * for other ID registers. Instead we synthesize a value from the
98
+ * AArch64 ID_AA64DFR0, the same way the kernel code in
99
+ * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
100
+ * We only do this if the CPU supports AArch32 at EL1.
101
+ */
102
+ if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
103
+ int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
104
+ int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
105
+ int ctx_cmps =
106
+ FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
107
+ int version = 6; /* ARMv8 debug architecture */
108
+ bool has_el3 =
109
+ !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
110
+ uint32_t dbgdidr = 0;
111
+
112
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
113
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
114
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
115
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
116
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
117
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
118
+ dbgdidr |= (1 << 15); /* RES1 bit */
119
+ ahcf->isar.dbgdidr = dbgdidr;
120
+ }
74
}
121
}
75
122
76
env->aarch64 = ((val & PSTATE_nRW) == 0);
123
sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
77
@@ -XXX,XX +XXX,XX @@ static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
78
CPUARMState *env = &cpu->env;
79
uint32_t val;
80
81
- if (is_a64(env)) {
82
+ if (arm_feature(env, ARM_FEATURE_M)) {
83
+ /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
84
+ val = xpsr_read(env) & ~XPSR_EXCP;
85
+ } else if (is_a64(env)) {
86
val = pstate_read(env);
87
} else {
88
val = cpsr_read(env);
89
--
124
--
90
2.7.4
125
2.20.1
91
126
92
127
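The "we can always tell the two apart" claim in the migration patch
rests on one property of the two layouts. A minimal standalone sketch of
the test that get_cpsr() now applies to incoming data (not QEMU code;
only the 0x1ff EXCP mask is taken from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    #define XPSR_EXCP 0x1ffU   /* low 9 bits: current exception number */

    /* XPSR-format values from a new QEMU always have a zero EXCP field,
     * while CPSR-format values from an old QEMU always have bit 4 of the
     * mode field set, so any nonzero low 9 bits means "old CPSR format".
     */
    static bool incoming_val_is_cpsr_format(uint32_t val)
    {
        return (val & XPSR_EXCP) != 0;
    }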
1
Call the new cpu_transaction_failed() hook at the places where
1
The ARMv8.1-PMU extension requires:
2
CPU-generated code interacts with the memory system:
2
* the evtCount field in PMEVTYPER<n>_EL0 is 16 bits, not 10
3
io_readx()
3
* MDCR_EL2.HPMD allows event counting to be disabled at EL2
4
io_writex()
4
* two new required events, STALL_FRONTEND and STALL_BACKEND
5
get_page_addr_code()
5
* ID register bits in ID_AA64DFR0_EL1 and ID_DFR0
6
6
7
Any access from C code (eg via cpu_physical_memory_rw(),
7
We already implement the 16-bit evtCount field and the
8
address_space_rw(), ld/st_*_phys()) will *not* trigger CPU exceptions
8
HPMD bit, so all that is missing is the two new events:
9
via cpu_transaction_failed(). Handling of transaction failures for
9
STALL_FRONTEND
10
this kind of call should be done by using a function which returns a
10
"counts every cycle counted by the CPU_CYCLES event on which no
11
MemTxResult and treating the failure case appropriately in the
11
operation was issued because there are no operations available
12
calling code.
12
to issue to this PE from the frontend"
13
STALL_BACKEND
14
"counts every cycle counted by the CPU_CYCLES event on which no
15
operation was issued because the backend is unable to accept
16
any available operations from the frontend"
13
17
14
In an ideal world we would not generate CPU exceptions for
18
QEMU never stalls in this sense, so our implementation is trivial:
15
instruction fetch failures in get_page_addr_code() but instead wait
19
always return a zero count.
16
until the code translation process tried a load and it failed;
17
however that change would require too great a restructuring and
18
redesign to attempt at this point.
19
20
21
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
23
Message-id: 20200214175116.9164-14-peter.maydell@linaro.org
22
---
24
---
23
softmmu_template.h | 4 ++--
25
target/arm/helper.c | 32 ++++++++++++++++++++++++++++++--
24
accel/tcg/cputlb.c | 32 ++++++++++++++++++++++++++++++--
26
1 file changed, 30 insertions(+), 2 deletions(-)
25
2 files changed, 32 insertions(+), 4 deletions(-)
26
27
27
diff --git a/softmmu_template.h b/softmmu_template.h
28
diff --git a/target/arm/helper.c b/target/arm/helper.c
28
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
29
--- a/softmmu_template.h
30
--- a/target/arm/helper.c
30
+++ b/softmmu_template.h
31
+++ b/target/arm/helper.c
31
@@ -XXX,XX +XXX,XX @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
32
@@ -XXX,XX +XXX,XX @@ static int64_t instructions_ns_per(uint64_t icount)
32
uintptr_t retaddr)
33
{
34
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
35
- return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
36
+ return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE);
37
}
33
}
38
#endif
34
#endif
39
35
40
@@ -XXX,XX +XXX,XX @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
36
+static bool pmu_8_1_events_supported(CPUARMState *env)
41
uintptr_t retaddr)
37
+{
42
{
38
+ /* For events which are supported in any v8.1 PMU */
43
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
39
+ return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
44
- return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
40
+}
45
+ return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE);
46
}
47
48
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
49
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/accel/tcg/cputlb.c
52
+++ b/accel/tcg/cputlb.c
53
@@ -XXX,XX +XXX,XX @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
54
}
55
56
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
57
+ int mmu_idx,
58
target_ulong addr, uintptr_t retaddr, int size)
59
{
60
CPUState *cpu = ENV_GET_CPU(env);
61
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
62
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
63
uint64_t val;
64
bool locked = false;
65
+ MemTxResult r;
66
67
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
68
cpu->mem_io_pc = retaddr;
69
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
70
qemu_mutex_lock_iothread();
71
locked = true;
72
}
73
- memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
74
+ r = memory_region_dispatch_read(mr, physaddr,
75
+ &val, size, iotlbentry->attrs);
76
+ if (r != MEMTX_OK) {
77
+ cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
78
+ mmu_idx, iotlbentry->attrs, r, retaddr);
79
+ }
80
if (locked) {
81
qemu_mutex_unlock_iothread();
82
}
83
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
84
}
85
86
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
87
+ int mmu_idx,
88
uint64_t val, target_ulong addr,
89
uintptr_t retaddr, int size)
90
{
91
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
92
hwaddr physaddr = iotlbentry->addr;
93
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
94
bool locked = false;
95
+ MemTxResult r;
96
97
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
98
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
99
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
100
qemu_mutex_lock_iothread();
101
locked = true;
102
}
103
- memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
104
+ r = memory_region_dispatch_write(mr, physaddr,
105
+ val, size, iotlbentry->attrs);
106
+ if (r != MEMTX_OK) {
107
+ cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
108
+ mmu_idx, iotlbentry->attrs, r, retaddr);
109
+ }
110
if (locked) {
111
qemu_mutex_unlock_iothread();
112
}
113
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
114
MemoryRegion *mr;
115
CPUState *cpu = ENV_GET_CPU(env);
116
CPUIOTLBEntry *iotlbentry;
117
+ hwaddr physaddr;
118
119
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
120
mmu_idx = cpu_mmu_index(env, true);
121
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
122
}
123
qemu_mutex_unlock_iothread();
124
125
+ /* Give the new-style cpu_transaction_failed() hook first chance
126
+ * to handle this.
127
+ * This is not the ideal place to detect and generate CPU
128
+ * exceptions for instruction fetch failure (for instance
129
+ * we don't know the length of the access that the CPU would
130
+ * use, and it would be better to go ahead and try the access
131
+ * and use the MemTXResult it produced). However it is the
132
+ * simplest place we have currently available for the check.
133
+ */
134
+ physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
135
+ cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
136
+ iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
137
+
41
+
138
cpu_unassigned_access(cpu, addr, false, true, 0, 4);
42
+static uint64_t zero_event_get_count(CPUARMState *env)
139
/* The CPU's unassigned access hook might have longjumped out
43
+{
140
* with an exception. If it didn't (or there was no hook) then
44
+ /* For events which on QEMU never fire, so their count is always zero */
45
+ return 0;
46
+}
47
+
48
+static int64_t zero_event_ns_per(uint64_t cycles)
49
+{
50
+ /* An event which never fires can never overflow */
51
+ return -1;
52
+}
53
+
54
static const pm_event pm_events[] = {
55
{ .number = 0x000, /* SW_INCR */
56
.supported = event_always_supported,
57
@@ -XXX,XX +XXX,XX @@ static const pm_event pm_events[] = {
58
.supported = event_always_supported,
59
.get_count = cycles_get_count,
60
.ns_per_count = cycles_ns_per,
61
- }
62
+ },
63
#endif
64
+ { .number = 0x023, /* STALL_FRONTEND */
65
+ .supported = pmu_8_1_events_supported,
66
+ .get_count = zero_event_get_count,
67
+ .ns_per_count = zero_event_ns_per,
68
+ },
69
+ { .number = 0x024, /* STALL_BACKEND */
70
+ .supported = pmu_8_1_events_supported,
71
+ .get_count = zero_event_get_count,
72
+ .ns_per_count = zero_event_ns_per,
73
+ },
74
};
75
76
/*
77
@@ -XXX,XX +XXX,XX @@ static const pm_event pm_events[] = {
78
* should first be updated to something sparse instead of the current
79
* supported_event_map[] array.
80
*/
81
-#define MAX_EVENT_ID 0x11
82
+#define MAX_EVENT_ID 0x24
83
#define UNSUPPORTED_EVENT UINT16_MAX
84
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
85
141
--
86
--
142
2.7.4
87
2.20.1
143
88
144
89
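For the "access from C code" case called out in the commit message, the
expected pattern looks roughly like the fragment below. This is only a
hedged sketch against QEMU's memory API (address_space_ldl() reports the
transaction status through a MemTxResult out-parameter); the function
name is made up and it is not a hunk from this series:

    #include "qemu/osdep.h"
    #include "exec/memory.h"
    #include "exec/address-spaces.h"

    /* Read a word of guest memory from device/board C code. Such
     * accesses never go through io_readx()/io_writex(), so the caller
     * checks the MemTxResult itself instead of expecting the CPU to
     * take a bus-fault exception.
     */
    static uint32_t demo_read_word(hwaddr addr, bool *ok)
    {
        MemTxResult res;
        uint32_t val = address_space_ldl(&address_space_memory, addr,
                                         MEMTXATTRS_UNSPECIFIED, &res);
        *ok = (res == MEMTX_OK);
        return val;
    }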
1
The M profile XPSR is almost the same format as the A profile CPSR,
1
The ARMv8.4-PMU extension adds:
2
but not quite. Define some XPSR_* macros and use them where we are
2
* one new required event, STALL
3
definitely dealing with an XPSR rather than reusing the CPSR ones.
3
* one new system register PMMIR_EL1
4
4
5
(There are also some more L1-cache related events, but since
6
we don't implement any cache we don't provide these, in the
7
same way we don't provide the base-PMUv3 cache events.)
8
9
The STALL event "counts every attributable cycle on which no
10
attributable instruction or operation was sent for execution on this
11
PE". QEMU doesn't stall in this sense, so this is another
12
always-reads-zero event.
13
14
The PMMIR_EL1 register is a read-only register providing
15
implementation-specific information about the PMU; currently it has
16
only one field, SLOTS, which defines behaviour of the STALL_SLOT PMU
17
event. Since QEMU doesn't implement the STALL_SLOT event, we can
18
validly make the register read zero.
19
20
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
22
Message-id: 20200214175116.9164-15-peter.maydell@linaro.org
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 1501692241-23310-9-git-send-email-peter.maydell@linaro.org
9
---
23
---
10
target/arm/cpu.h | 38 ++++++++++++++++++++++++++++----------
24
target/arm/cpu.h | 18 ++++++++++++++++++
11
target/arm/helper.c | 15 ++++++++-------
25
target/arm/helper.c | 22 +++++++++++++++++++++-
12
2 files changed, 36 insertions(+), 17 deletions(-)
26
2 files changed, 39 insertions(+), 1 deletion(-)
13
27
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
28
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
30
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
31
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ void pmccntr_sync(CPUARMState *env);
32
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
19
/* Mask of bits which may be set by exception return copying them from SPSR */
33
FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
20
#define CPSR_ERET_MASK (~CPSR_RESERVED)
34
}
21
35
22
+/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
36
+static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id)
23
+#define XPSR_EXCP 0x1ffU
37
+{
24
+#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
38
+ /* 0xf means "non-standard IMPDEF PMU" */
25
+#define XPSR_IT_2_7 CPSR_IT_2_7
39
+ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
26
+#define XPSR_GE CPSR_GE
40
+ FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
27
+#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
41
+}
28
+#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
29
+#define XPSR_IT_0_1 CPSR_IT_0_1
30
+#define XPSR_Q CPSR_Q
31
+#define XPSR_V CPSR_V
32
+#define XPSR_C CPSR_C
33
+#define XPSR_Z CPSR_Z
34
+#define XPSR_N CPSR_N
35
+#define XPSR_NZCV CPSR_NZCV
36
+#define XPSR_IT CPSR_IT
37
+
42
+
38
#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
43
/*
39
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
44
* 64-bit feature tests via id registers.
40
#define TTBCR_PD0 (1U << 4)
45
*/
41
@@ -XXX,XX +XXX,XX @@ static inline uint32_t xpsr_read(CPUARMState *env)
46
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id)
42
/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
47
FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
43
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
44
{
45
- if (mask & CPSR_NZCV) {
46
- env->ZF = (~val) & CPSR_Z;
47
+ if (mask & XPSR_NZCV) {
48
+ env->ZF = (~val) & XPSR_Z;
49
env->NF = val;
50
env->CF = (val >> 29) & 1;
51
env->VF = (val << 3) & 0x80000000;
52
}
53
- if (mask & CPSR_Q)
54
- env->QF = ((val & CPSR_Q) != 0);
55
- if (mask & (1 << 24))
56
- env->thumb = ((val & (1 << 24)) != 0);
57
- if (mask & CPSR_IT_0_1) {
58
+ if (mask & XPSR_Q) {
59
+ env->QF = ((val & XPSR_Q) != 0);
60
+ }
61
+ if (mask & XPSR_T) {
62
+ env->thumb = ((val & XPSR_T) != 0);
63
+ }
64
+ if (mask & XPSR_IT_0_1) {
65
env->condexec_bits &= ~3;
66
env->condexec_bits |= (val >> 25) & 3;
67
}
68
- if (mask & CPSR_IT_2_7) {
69
+ if (mask & XPSR_IT_2_7) {
70
env->condexec_bits &= 3;
71
env->condexec_bits |= (val >> 8) & 0xfc;
72
}
73
- if (mask & 0x1ff) {
74
- env->v7m.exception = val & 0x1ff;
75
+ if (mask & XPSR_EXCP) {
76
+ env->v7m.exception = val & XPSR_EXCP;
77
}
78
}
48
}
79
49
50
+static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id)
51
+{
52
+ return FIELD_EX32(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
53
+ FIELD_EX32(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
54
+}
55
+
56
/*
57
* Feature tests for "does this exist in either 32-bit or 64-bit?"
58
*/
59
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
60
return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
61
}
62
63
+static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id)
64
+{
65
+ return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id);
66
+}
67
+
68
/*
69
* Forward to the above feature tests given an ARMCPU pointer.
70
*/
80
diff --git a/target/arm/helper.c b/target/arm/helper.c
71
diff --git a/target/arm/helper.c b/target/arm/helper.c
81
index XXXXXXX..XXXXXXX 100644
72
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/helper.c
73
--- a/target/arm/helper.c
83
+++ b/target/arm/helper.c
74
+++ b/target/arm/helper.c
84
@@ -XXX,XX +XXX,XX @@ static void v7m_push_stack(ARMCPU *cpu)
75
@@ -XXX,XX +XXX,XX @@ static bool pmu_8_1_events_supported(CPUARMState *env)
85
/* Align stack pointer if the guest wants that */
76
return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
86
if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
77
}
87
env->regs[13] -= 4;
78
88
- xpsr |= 0x200;
79
+static bool pmu_8_4_events_supported(CPUARMState *env)
89
+ xpsr |= XPSR_SPREALIGN;
80
+{
81
+ /* For events which are supported in any v8.4 PMU */
82
+ return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
83
+}
84
+
85
static uint64_t zero_event_get_count(CPUARMState *env)
86
{
87
/* For events which on QEMU never fire, so their count is always zero */
88
@@ -XXX,XX +XXX,XX @@ static const pm_event pm_events[] = {
89
.get_count = zero_event_get_count,
90
.ns_per_count = zero_event_ns_per,
91
},
92
+ { .number = 0x03c, /* STALL */
93
+ .supported = pmu_8_4_events_supported,
94
+ .get_count = zero_event_get_count,
95
+ .ns_per_count = zero_event_ns_per,
96
+ },
97
};
98
99
/*
100
@@ -XXX,XX +XXX,XX @@ static const pm_event pm_events[] = {
101
* should first be updated to something sparse instead of the current
102
* supported_event_map[] array.
103
*/
104
-#define MAX_EVENT_ID 0x24
105
+#define MAX_EVENT_ID 0x3c
106
#define UNSUPPORTED_EVENT UINT16_MAX
107
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
108
109
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
110
};
111
define_arm_cp_regs(cpu, v81_pmu_regs);
90
}
112
}
91
/* Switch to the handler mode. */
113
+ if (cpu_isar_feature(any_pmu_8_4, cpu)) {
92
v7m_push(env, xpsr);
114
+ static const ARMCPRegInfo v84_pmmir = {
93
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
115
+ .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
94
env->regs[15] &= ~1U;
116
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
95
}
117
+ .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
96
xpsr = v7m_pop(env);
118
+ .resetvalue = 0
97
- xpsr_write(env, xpsr, 0xfffffdff);
119
+ };
98
+ xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
120
+ define_one_arm_cp_reg(cpu, &v84_pmmir);
99
/* Undo stack alignment. */
100
- if (xpsr & 0x200)
101
+ if (xpsr & XPSR_SPREALIGN) {
102
env->regs[13] |= 4;
103
+ }
121
+ }
104
122
}
105
/* The restored xPSR exception field will be zero if we're
123
106
* resuming in Thread mode. If that doesn't match what the
124
/* We don't know until after realize whether there's a GICv3
107
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
108
case 0 ... 7: /* xPSR sub-fields */
109
mask = 0;
110
if ((reg & 1) && el) {
111
- mask |= 0x000001ff; /* IPSR (unpriv. reads as zero) */
112
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
113
}
114
if (!(reg & 4)) {
115
- mask |= 0xf8000000; /* APSR */
116
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
117
}
118
/* EPSR reads as zero */
119
return xpsr_read(env) & mask;
120
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
121
uint32_t apsrmask = 0;
122
123
if (mask & 8) {
124
- apsrmask |= 0xf8000000; /* APSR NZCVQ */
125
+ apsrmask |= XPSR_NZCV | XPSR_Q;
126
}
127
if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
128
- apsrmask |= 0x000f0000; /* APSR GE[3:0] */
129
+ apsrmask |= XPSR_GE;
130
}
131
xpsr_write(env, val, apsrmask);
132
}
133
--
125
--
134
2.7.4
126
2.20.1
135
127
136
128
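As an aside on the new PMUVER test above: a value of 0xf in that ID field means
an IMPLEMENTATION DEFINED PMU, so a bare '>= 5' comparison would wrongly treat
it as an architected v8.4 PMU. A minimal standalone sketch, not part of the
patch (the helper name and open-coded bit positions are illustrative; QEMU
itself uses the FIELD_EX* macros on id_aa64dfr0):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* ID_AA64DFR0_EL1.PMUVer is bits [11:8]: 4 = v8.1 PMU, 5 = v8.4 PMU,
 * 0xf = IMPLEMENTATION DEFINED PMU (not any architected version).
 */
static bool has_arch_v8_4_pmu(uint64_t id_aa64dfr0)
{
    unsigned pmuver = (id_aa64dfr0 >> 8) & 0xf;

    return pmuver >= 5 && pmuver != 0xf;
}

int main(void)
{
    assert(has_arch_v8_4_pmu((uint64_t)0x5 << 8));   /* v8.4 PMU present */
    assert(!has_arch_v8_4_pmu((uint64_t)0xf << 8));  /* IMPDEF PMU: not v8.4 */
    return 0;
}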
Some ELF files have program headers that specify segments that
are of zero size. Ignore them, rather than trying to create
zero-length ROM blobs for them, because the zero-length blob
can falsely trigger the overlapping-ROM-blobs check.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Hua Yanghao <huayanghao@gmail.com>
Message-id: 1502116754-18867-3-git-send-email-peter.maydell@linaro.org
---
include/hw/elf_ops.h | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)

Set the ID register bits to provide ARMv8.4-PMU (and implicitly
also ARMv8.1-PMU) in the 'max' CPU.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200214175116.9164-16-peter.maydell@linaro.org
---
target/arm/cpu64.c | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
11
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/elf_ops.h
13
--- a/target/arm/cpu64.c
18
+++ b/include/hw/elf_ops.h
14
+++ b/target/arm/cpu64.c
19
@@ -XXX,XX +XXX,XX @@ static int glue(load_elf, SZ)(const char *name, int fd,
15
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
20
*pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
16
u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
21
}
17
cpu->id_mmfr3 = u;
22
18
23
- if (load_rom) {
19
+ u = cpu->isar.id_aa64dfr0;
24
- snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
20
+ u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
25
-
21
+ cpu->isar.id_aa64dfr0 = u;
26
- /* rom_add_elf_program() seize the ownership of 'data' */
27
- rom_add_elf_program(label, data, file_size, mem_size, addr, as);
28
- } else {
29
- cpu_physical_memory_write(addr, data, file_size);
30
+ if (mem_size == 0) {
31
+ /* Some ELF files really do have segments of zero size;
32
+ * just ignore them rather than trying to create empty
33
+ * ROM blobs, because the zero-length blob can falsely
34
+ * trigger the overlapping-ROM-blobs check.
35
+ */
36
g_free(data);
37
+ } else {
38
+ if (load_rom) {
39
+ snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
40
+
22
+
41
+ /* rom_add_elf_program() seize the ownership of 'data' */
23
+ u = cpu->isar.id_dfr0;
42
+ rom_add_elf_program(label, data, file_size, mem_size,
24
+ u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
43
+ addr, as);
25
+ cpu->isar.id_dfr0 = u;
44
+ } else {
26
+
45
+ cpu_physical_memory_write(addr, data, file_size);
27
/*
46
+ g_free(data);
28
* FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet,
47
+ }
29
* so do not set MVFR1.FPHP. Strictly speaking this is not legal,
48
}
49
50
total_size += mem_size;
51
--
30
--
52
2.7.4
31
2.20.1
53
32
54
33
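The FIELD_DP64() update in the cpu64.c hunk above is an ordinary
read-modify-write of one ID register field; written out by hand it looks like
the sketch below (the function name is made up for illustration; the field
offset is ID_AA64DFR0_EL1.PMUVer at bits [11:8]):

#include <stdint.h>

/* Clear the PMUVer field, then deposit the new value; every other field
 * of the ID register is left untouched.
 */
static uint64_t set_pmuver(uint64_t id_aa64dfr0, unsigned pmuver)
{
    id_aa64dfr0 &= ~(UINT64_C(0xf) << 8);
    id_aa64dfr0 |= (uint64_t)(pmuver & 0xf) << 8;
    return id_aa64dfr0;
}

Setting PMUVer to 5 satisfies both the v8.1 (>= 4) and the v8.4 (>= 5) feature
tests, which is why the commit message can say the 'max' CPU gets ARMv8.1-PMU
implicitly.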
Move the code in arm_v7m_cpu_do_interrupt() that calculates the
magic LR value down to when we're actually going to use it.
Having the calculation and use so far apart makes the code
a little harder to understand than it needs to be.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1501692241-23310-13-git-send-email-peter.maydell@linaro.org
---
target/arm/helper.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)

The PMCR_EL0.DP bit is bit 5, which is 0x20, not 0x10. 0x10 is 'X'.
Correct our #define of PMCRDP and add the missing PMCRX.

We do have the correct behaviour for handling the DP bit being
set, so this fixes a guest-visible bug.

Fixes: 033614c47de
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200214175116.9164-17-peter.maydell@linaro.org
---
target/arm/helper.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
20
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
20
21
#define PMCRN_MASK 0xf800
21
arm_log_exception(cs->exception_index);
22
#define PMCRN_SHIFT 11
22
23
#define PMCRLC 0x40
23
- lr = 0xfffffff1;
24
-#define PMCRDP 0x10
24
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
25
+#define PMCRDP 0x20
25
- lr |= 4;
26
+#define PMCRX 0x10
26
- }
27
#define PMCRD 0x8
27
- if (env->v7m.exception == 0)
28
#define PMCRC 0x4
28
- lr |= 8;
29
#define PMCRP 0x2
29
-
30
/* For exceptions we just mark as pending on the NVIC, and let that
31
handle it. */
32
switch (cs->exception_index) {
33
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
34
return; /* Never happens. Keep compiler happy. */
35
}
36
37
+ lr = 0xfffffff1;
38
+ if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
39
+ lr |= 4;
40
+ }
41
+ if (env->v7m.exception == 0) {
42
+ lr |= 8;
43
+ }
44
+
45
v7m_push_stack(cpu);
46
v7m_exception_taken(cpu, lr);
47
qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
48
--
30
--
49
2.7.4
31
2.20.1
50
32
51
33
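The PMCRDP/PMCRX fix above is pure bit arithmetic: DP is bit 5 of PMCR_EL0 and
X is bit 4, so the two masks are 1 << 5 and 1 << 4. A two-line sketch of the
corrected constants (illustration only, mirroring the #defines in the patch):

#define PMCRX  (1U << 4)    /* PMCR_EL0.X,  0x10 */
#define PMCRDP (1U << 5)    /* PMCR_EL0.DP, 0x20 */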
Currently get_phys_addr() has PMSAv7 handling before the
"is translation disabled?" check, and then PMSAv5 after it.
Tidy this up by making the PMSAv5 code handle the "MPU disabled"
case itself, so that we have all the PMSA code in one place.
This will make adding the PMSAv8 code slightly cleaner, and
also means that pre-v7 PMSA cores benefit from the MPU lookup
logging that the PMSAv7 codepath had.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1501692241-23310-4-git-send-email-peter.maydell@linaro.org
---
target/arm/helper.c | 38 ++++++++++++++++++++++----------------
1 file changed, 22 insertions(+), 16 deletions(-)

The LC bit in the PMCR_EL0 register is supposed to be:
* read/write
* RES1 on an AArch64-only implementation
* an architecturally UNKNOWN value on reset
(and use of LC==0 by software is deprecated).

We were implementing it incorrectly as read-only always zero,
though we do have all the code needed to test it and behave
accordingly.

Instead make it a read-write bit which resets to 1 always, which
satisfies all the architectural requirements above.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200214175116.9164-18-peter.maydell@linaro.org
---
target/arm/helper.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
22
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper.c
24
--- a/target/arm/helper.c
21
+++ b/target/arm/helper.c
25
+++ b/target/arm/helper.c
22
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
26
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
23
uint32_t base;
27
#define PMCRC 0x4
24
bool is_user = regime_is_user(env, mmu_idx);
28
#define PMCRP 0x2
25
29
#define PMCRE 0x1
26
+ if (regime_translation_disabled(env, mmu_idx)) {
30
+/*
27
+ /* MPU disabled. */
31
+ * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
28
+ *phys_ptr = address;
32
+ * which can be written as 1 to trigger behaviour but which stay RAZ).
29
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
33
+ */
30
+ return false;
34
+#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
31
+ }
35
32
+
36
#define PMXEVTYPER_P 0x80000000
33
*phys_ptr = address;
37
#define PMXEVTYPER_U 0x40000000
34
for (n = 7; n >= 0; n--) {
38
@@ -XXX,XX +XXX,XX @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
35
base = env->cp15.c6_region[n];
36
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
37
}
39
}
38
}
40
}
39
41
40
- /* pmsav7 has special handling for when MPU is disabled so call it before
42
- /* only the DP, X, D and E bits are writable */
41
- * the common MMU/MPU disabled check below.
43
- env->cp15.c9_pmcr &= ~0x39;
42
- */
44
- env->cp15.c9_pmcr |= (value & 0x39);
43
- if (arm_feature(env, ARM_FEATURE_PMSA) &&
45
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
44
- arm_feature(env, ARM_FEATURE_V7)) {
46
+ env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
45
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
47
46
bool ret;
48
pmu_op_finish(env);
47
*page_size = TARGET_PAGE_SIZE;
49
}
48
- ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
50
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
49
- phys_ptr, prot, fsr);
51
.access = PL0_RW, .accessfn = pmreg_access,
50
- qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
52
.type = ARM_CP_IO,
51
+
53
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
52
+ if (arm_feature(env, ARM_FEATURE_V7)) {
54
- .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
53
+ /* PMSAv7 */
55
+ .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
54
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
56
+ PMCRLC,
55
+ phys_ptr, prot, fsr);
57
.writefn = pmcr_write, .raw_writefn = raw_write,
56
+ } else {
58
};
57
+ /* Pre-v7 MPU */
59
define_one_arm_cp_reg(cpu, &pmcr);
58
+ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
59
+ phys_ptr, prot, fsr);
60
+ }
61
+ qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
62
" mmu_idx %u -> %s (prot %c%c%c)\n",
63
access_type == MMU_DATA_LOAD ? "reading" :
64
(access_type == MMU_DATA_STORE ? "writing" : "execute"),
65
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
66
return ret;
67
}
68
69
+ /* Definitely a real MMU, not an MPU */
70
+
71
if (regime_translation_disabled(env, mmu_idx)) {
72
- /* MMU/MPU disabled. */
73
+ /* MMU disabled. */
74
*phys_ptr = address;
75
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
76
*page_size = TARGET_PAGE_SIZE;
77
return 0;
78
}
79
80
- if (arm_feature(env, ARM_FEATURE_PMSA)) {
81
- /* Pre-v7 MPU */
82
- *page_size = TARGET_PAGE_SIZE;
83
- return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
84
- phys_ptr, prot, fsr);
85
- }
86
-
87
if (regime_using_lpae_format(env, mmu_idx)) {
88
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
89
attrs, prot, page_size, fsr, fi);
90
--
60
--
91
2.7.4
61
2.20.1
92
62
93
63
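With LC added to the writeable set, pmcr_write() keeps the same
read-modify-write shape as before; a sketch of what the mask-based update
above amounts to (constants as in the patch, helper name invented for
illustration):

#include <stdint.h>

#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX  0x10
#define PMCRD  0x08
#define PMCRE  0x01
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

/* Guest-writeable bits take the written value; everything else in PMCR,
 * including the read-only fields in the top bits, is preserved.
 */
static uint64_t pmcr_update(uint64_t old_pmcr, uint64_t value)
{
    return (old_pmcr & ~PMCR_WRITEABLE_MASK) | (value & PMCR_WRITEABLE_MASK);
}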
From: Andrew Jones <drjones@redhat.com>

If a KVM PMU init or set-irq attr call fails we just silently stop
the PMU DT node generation. The only way they could fail, though,
is if the attr's respective KVM has-attr call fails. But that should
never happen if KVM advertises the PMU capability, because both
attrs have been available since the capability was introduced. Let's
just abort if this should-never-happen stuff does happen, because,
if it does, then something is obviously horribly wrong.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Message-id: 1500471597-2517-5-git-send-email-drjones@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/kvm_arm.h | 15 ++++-----------
hw/arm/virt.c | 9 +++------
target/arm/kvm32.c | 3 +--
target/arm/kvm64.c | 28 ++++++++++++++++++++--------
4 files changed, 28 insertions(+), 27 deletions(-)

The isar_feature_aa32_pan and isar_feature_aa32_ats1e1 functions
are supposed to be testing fields in ID_MMFR3; but a cut-and-paste
error meant we were looking at MVFR0 instead.

Fix the functions to look at the right register; this requires
us to move at least id_mmfr3 to the ARMISARegisters struct; we
choose to move all the ID_MMFRn registers for consistency.

Fixes: 3d6ad6bb466f
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214175116.9164-19-peter.maydell@linaro.org
---
target/arm/cpu.h | 14 +++---
hw/intc/armv7m_nvic.c | 8 ++--
target/arm/cpu.c | 104 +++++++++++++++++++++---------------------
target/arm/cpu64.c | 28 ++++++------
target/arm/helper.c | 12 ++---
target/arm/kvm32.c | 17 +++++++
target/arm/kvm64.c | 10 ++++
7 files changed, 110 insertions(+), 83 deletions(-)

diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
23
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
24
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/kvm_arm.h
25
--- a/target/arm/cpu.h
26
+++ b/target/arm/kvm_arm.h
26
+++ b/target/arm/cpu.h
27
@@ -XXX,XX +XXX,XX @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
27
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
28
28
uint32_t id_isar4;
29
int kvm_arm_vgic_probe(void);
29
uint32_t id_isar5;
30
30
uint32_t id_isar6;
31
-int kvm_arm_pmu_set_irq(CPUState *cs, int irq);
31
+ uint32_t id_mmfr0;
32
-int kvm_arm_pmu_init(CPUState *cs);
32
+ uint32_t id_mmfr1;
33
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq);
33
+ uint32_t id_mmfr2;
34
+void kvm_arm_pmu_init(CPUState *cs);
34
+ uint32_t id_mmfr3;
35
35
+ uint32_t id_mmfr4;
36
#else
36
uint32_t mvfr0;
37
37
uint32_t mvfr1;
38
@@ -XXX,XX +XXX,XX @@ static inline int kvm_arm_vgic_probe(void)
38
uint32_t mvfr2;
39
return 0;
39
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
40
uint64_t pmceid0;
41
uint64_t pmceid1;
42
uint32_t id_afr0;
43
- uint32_t id_mmfr0;
44
- uint32_t id_mmfr1;
45
- uint32_t id_mmfr2;
46
- uint32_t id_mmfr3;
47
- uint32_t id_mmfr4;
48
uint64_t id_aa64afr0;
49
uint64_t id_aa64afr1;
50
uint32_t clidr;
51
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
52
53
static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
54
{
55
- return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) != 0;
56
+ return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0;
40
}
57
}
41
58
42
-static inline int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
59
static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
43
-{
60
{
44
- return 0;
61
- return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) >= 2;
45
-}
62
+ return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
46
-
63
}
47
-static inline int kvm_arm_pmu_init(CPUState *cs)
64
48
-{
65
static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
49
- return 0;
66
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
50
-}
67
index XXXXXXX..XXXXXXX 100644
51
+static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) {}
68
--- a/hw/intc/armv7m_nvic.c
52
+static inline void kvm_arm_pmu_init(CPUState *cs) {}
69
+++ b/hw/intc/armv7m_nvic.c
53
70
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
71
case 0xd4c: /* AFR0. */
72
return cpu->id_afr0;
73
case 0xd50: /* MMFR0. */
74
- return cpu->id_mmfr0;
75
+ return cpu->isar.id_mmfr0;
76
case 0xd54: /* MMFR1. */
77
- return cpu->id_mmfr1;
78
+ return cpu->isar.id_mmfr1;
79
case 0xd58: /* MMFR2. */
80
- return cpu->id_mmfr2;
81
+ return cpu->isar.id_mmfr2;
82
case 0xd5c: /* MMFR3. */
83
- return cpu->id_mmfr3;
84
+ return cpu->isar.id_mmfr3;
85
case 0xd60: /* ISAR0. */
86
return cpu->isar.id_isar0;
87
case 0xd64: /* ISAR1. */
88
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/cpu.c
91
+++ b/target/arm/cpu.c
92
@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
93
cpu->id_pfr1 = 0x1;
94
cpu->isar.id_dfr0 = 0x2;
95
cpu->id_afr0 = 0x3;
96
- cpu->id_mmfr0 = 0x01130003;
97
- cpu->id_mmfr1 = 0x10030302;
98
- cpu->id_mmfr2 = 0x01222110;
99
+ cpu->isar.id_mmfr0 = 0x01130003;
100
+ cpu->isar.id_mmfr1 = 0x10030302;
101
+ cpu->isar.id_mmfr2 = 0x01222110;
102
cpu->isar.id_isar0 = 0x00140011;
103
cpu->isar.id_isar1 = 0x12002111;
104
cpu->isar.id_isar2 = 0x11231111;
105
@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
106
cpu->id_pfr1 = 0x1;
107
cpu->isar.id_dfr0 = 0x2;
108
cpu->id_afr0 = 0x3;
109
- cpu->id_mmfr0 = 0x01130003;
110
- cpu->id_mmfr1 = 0x10030302;
111
- cpu->id_mmfr2 = 0x01222110;
112
+ cpu->isar.id_mmfr0 = 0x01130003;
113
+ cpu->isar.id_mmfr1 = 0x10030302;
114
+ cpu->isar.id_mmfr2 = 0x01222110;
115
cpu->isar.id_isar0 = 0x00140011;
116
cpu->isar.id_isar1 = 0x12002111;
117
cpu->isar.id_isar2 = 0x11231111;
118
@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
119
cpu->id_pfr1 = 0x11;
120
cpu->isar.id_dfr0 = 0x33;
121
cpu->id_afr0 = 0;
122
- cpu->id_mmfr0 = 0x01130003;
123
- cpu->id_mmfr1 = 0x10030302;
124
- cpu->id_mmfr2 = 0x01222100;
125
+ cpu->isar.id_mmfr0 = 0x01130003;
126
+ cpu->isar.id_mmfr1 = 0x10030302;
127
+ cpu->isar.id_mmfr2 = 0x01222100;
128
cpu->isar.id_isar0 = 0x0140011;
129
cpu->isar.id_isar1 = 0x12002111;
130
cpu->isar.id_isar2 = 0x11231121;
131
@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
132
cpu->id_pfr1 = 0x1;
133
cpu->isar.id_dfr0 = 0;
134
cpu->id_afr0 = 0x2;
135
- cpu->id_mmfr0 = 0x01100103;
136
- cpu->id_mmfr1 = 0x10020302;
137
- cpu->id_mmfr2 = 0x01222000;
138
+ cpu->isar.id_mmfr0 = 0x01100103;
139
+ cpu->isar.id_mmfr1 = 0x10020302;
140
+ cpu->isar.id_mmfr2 = 0x01222000;
141
cpu->isar.id_isar0 = 0x00100011;
142
cpu->isar.id_isar1 = 0x12002111;
143
cpu->isar.id_isar2 = 0x11221011;
144
@@ -XXX,XX +XXX,XX @@ static void cortex_m3_initfn(Object *obj)
145
cpu->id_pfr1 = 0x00000200;
146
cpu->isar.id_dfr0 = 0x00100000;
147
cpu->id_afr0 = 0x00000000;
148
- cpu->id_mmfr0 = 0x00000030;
149
- cpu->id_mmfr1 = 0x00000000;
150
- cpu->id_mmfr2 = 0x00000000;
151
- cpu->id_mmfr3 = 0x00000000;
152
+ cpu->isar.id_mmfr0 = 0x00000030;
153
+ cpu->isar.id_mmfr1 = 0x00000000;
154
+ cpu->isar.id_mmfr2 = 0x00000000;
155
+ cpu->isar.id_mmfr3 = 0x00000000;
156
cpu->isar.id_isar0 = 0x01141110;
157
cpu->isar.id_isar1 = 0x02111000;
158
cpu->isar.id_isar2 = 0x21112231;
159
@@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj)
160
cpu->id_pfr1 = 0x00000200;
161
cpu->isar.id_dfr0 = 0x00100000;
162
cpu->id_afr0 = 0x00000000;
163
- cpu->id_mmfr0 = 0x00000030;
164
- cpu->id_mmfr1 = 0x00000000;
165
- cpu->id_mmfr2 = 0x00000000;
166
- cpu->id_mmfr3 = 0x00000000;
167
+ cpu->isar.id_mmfr0 = 0x00000030;
168
+ cpu->isar.id_mmfr1 = 0x00000000;
169
+ cpu->isar.id_mmfr2 = 0x00000000;
170
+ cpu->isar.id_mmfr3 = 0x00000000;
171
cpu->isar.id_isar0 = 0x01141110;
172
cpu->isar.id_isar1 = 0x02111000;
173
cpu->isar.id_isar2 = 0x21112231;
174
@@ -XXX,XX +XXX,XX @@ static void cortex_m7_initfn(Object *obj)
175
cpu->id_pfr1 = 0x00000200;
176
cpu->isar.id_dfr0 = 0x00100000;
177
cpu->id_afr0 = 0x00000000;
178
- cpu->id_mmfr0 = 0x00100030;
179
- cpu->id_mmfr1 = 0x00000000;
180
- cpu->id_mmfr2 = 0x01000000;
181
- cpu->id_mmfr3 = 0x00000000;
182
+ cpu->isar.id_mmfr0 = 0x00100030;
183
+ cpu->isar.id_mmfr1 = 0x00000000;
184
+ cpu->isar.id_mmfr2 = 0x01000000;
185
+ cpu->isar.id_mmfr3 = 0x00000000;
186
cpu->isar.id_isar0 = 0x01101110;
187
cpu->isar.id_isar1 = 0x02112000;
188
cpu->isar.id_isar2 = 0x20232231;
189
@@ -XXX,XX +XXX,XX @@ static void cortex_m33_initfn(Object *obj)
190
cpu->id_pfr1 = 0x00000210;
191
cpu->isar.id_dfr0 = 0x00200000;
192
cpu->id_afr0 = 0x00000000;
193
- cpu->id_mmfr0 = 0x00101F40;
194
- cpu->id_mmfr1 = 0x00000000;
195
- cpu->id_mmfr2 = 0x01000000;
196
- cpu->id_mmfr3 = 0x00000000;
197
+ cpu->isar.id_mmfr0 = 0x00101F40;
198
+ cpu->isar.id_mmfr1 = 0x00000000;
199
+ cpu->isar.id_mmfr2 = 0x01000000;
200
+ cpu->isar.id_mmfr3 = 0x00000000;
201
cpu->isar.id_isar0 = 0x01101110;
202
cpu->isar.id_isar1 = 0x02212000;
203
cpu->isar.id_isar2 = 0x20232232;
204
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
205
cpu->id_pfr1 = 0x001;
206
cpu->isar.id_dfr0 = 0x010400;
207
cpu->id_afr0 = 0x0;
208
- cpu->id_mmfr0 = 0x0210030;
209
- cpu->id_mmfr1 = 0x00000000;
210
- cpu->id_mmfr2 = 0x01200000;
211
- cpu->id_mmfr3 = 0x0211;
212
+ cpu->isar.id_mmfr0 = 0x0210030;
213
+ cpu->isar.id_mmfr1 = 0x00000000;
214
+ cpu->isar.id_mmfr2 = 0x01200000;
215
+ cpu->isar.id_mmfr3 = 0x0211;
216
cpu->isar.id_isar0 = 0x02101111;
217
cpu->isar.id_isar1 = 0x13112111;
218
cpu->isar.id_isar2 = 0x21232141;
219
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
220
cpu->id_pfr1 = 0x11;
221
cpu->isar.id_dfr0 = 0x400;
222
cpu->id_afr0 = 0;
223
- cpu->id_mmfr0 = 0x31100003;
224
- cpu->id_mmfr1 = 0x20000000;
225
- cpu->id_mmfr2 = 0x01202000;
226
- cpu->id_mmfr3 = 0x11;
227
+ cpu->isar.id_mmfr0 = 0x31100003;
228
+ cpu->isar.id_mmfr1 = 0x20000000;
229
+ cpu->isar.id_mmfr2 = 0x01202000;
230
+ cpu->isar.id_mmfr3 = 0x11;
231
cpu->isar.id_isar0 = 0x00101111;
232
cpu->isar.id_isar1 = 0x12112111;
233
cpu->isar.id_isar2 = 0x21232031;
234
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
235
cpu->id_pfr1 = 0x11;
236
cpu->isar.id_dfr0 = 0x000;
237
cpu->id_afr0 = 0;
238
- cpu->id_mmfr0 = 0x00100103;
239
- cpu->id_mmfr1 = 0x20000000;
240
- cpu->id_mmfr2 = 0x01230000;
241
- cpu->id_mmfr3 = 0x00002111;
242
+ cpu->isar.id_mmfr0 = 0x00100103;
243
+ cpu->isar.id_mmfr1 = 0x20000000;
244
+ cpu->isar.id_mmfr2 = 0x01230000;
245
+ cpu->isar.id_mmfr3 = 0x00002111;
246
cpu->isar.id_isar0 = 0x00101111;
247
cpu->isar.id_isar1 = 0x13112111;
248
cpu->isar.id_isar2 = 0x21232041;
249
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
250
cpu->id_pfr1 = 0x00011011;
251
cpu->isar.id_dfr0 = 0x02010555;
252
cpu->id_afr0 = 0x00000000;
253
- cpu->id_mmfr0 = 0x10101105;
254
- cpu->id_mmfr1 = 0x40000000;
255
- cpu->id_mmfr2 = 0x01240000;
256
- cpu->id_mmfr3 = 0x02102211;
257
+ cpu->isar.id_mmfr0 = 0x10101105;
258
+ cpu->isar.id_mmfr1 = 0x40000000;
259
+ cpu->isar.id_mmfr2 = 0x01240000;
260
+ cpu->isar.id_mmfr3 = 0x02102211;
261
/* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
262
* table 4-41 gives 0x02101110, which includes the arm div insns.
263
*/
264
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
265
cpu->id_pfr1 = 0x00011011;
266
cpu->isar.id_dfr0 = 0x02010555;
267
cpu->id_afr0 = 0x00000000;
268
- cpu->id_mmfr0 = 0x10201105;
269
- cpu->id_mmfr1 = 0x20000000;
270
- cpu->id_mmfr2 = 0x01240000;
271
- cpu->id_mmfr3 = 0x02102211;
272
+ cpu->isar.id_mmfr0 = 0x10201105;
273
+ cpu->isar.id_mmfr1 = 0x20000000;
274
+ cpu->isar.id_mmfr2 = 0x01240000;
275
+ cpu->isar.id_mmfr3 = 0x02102211;
276
cpu->isar.id_isar0 = 0x02101110;
277
cpu->isar.id_isar1 = 0x13112111;
278
cpu->isar.id_isar2 = 0x21232041;
279
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
280
t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
281
cpu->isar.mvfr2 = t;
282
283
- t = cpu->id_mmfr3;
284
+ t = cpu->isar.id_mmfr3;
285
t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
286
- cpu->id_mmfr3 = t;
287
+ cpu->isar.id_mmfr3 = t;
288
289
- t = cpu->id_mmfr4;
290
+ t = cpu->isar.id_mmfr4;
291
t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
292
- cpu->id_mmfr4 = t;
293
+ cpu->isar.id_mmfr4 = t;
294
}
54
#endif
295
#endif
55
296
}
56
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
297
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
57
index XXXXXXX..XXXXXXX 100644
298
index XXXXXXX..XXXXXXX 100644
58
--- a/hw/arm/virt.c
299
--- a/target/arm/cpu64.c
59
+++ b/hw/arm/virt.c
300
+++ b/target/arm/cpu64.c
60
@@ -XXX,XX +XXX,XX @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
301
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
61
return;
302
cpu->id_pfr1 = 0x00011011;
62
}
303
cpu->isar.id_dfr0 = 0x03010066;
63
if (kvm_enabled()) {
304
cpu->id_afr0 = 0x00000000;
64
- if (kvm_irqchip_in_kernel() &&
305
- cpu->id_mmfr0 = 0x10101105;
65
- !kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ))) {
306
- cpu->id_mmfr1 = 0x40000000;
66
- return;
307
- cpu->id_mmfr2 = 0x01260000;
67
- }
308
- cpu->id_mmfr3 = 0x02102211;
68
- if (!kvm_arm_pmu_init(cpu)) {
309
+ cpu->isar.id_mmfr0 = 0x10101105;
69
- return;
310
+ cpu->isar.id_mmfr1 = 0x40000000;
70
+ if (kvm_irqchip_in_kernel()) {
311
+ cpu->isar.id_mmfr2 = 0x01260000;
71
+ kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
312
+ cpu->isar.id_mmfr3 = 0x02102211;
72
}
313
cpu->isar.id_isar0 = 0x02101110;
73
+ kvm_arm_pmu_init(cpu);
314
cpu->isar.id_isar1 = 0x13112111;
315
cpu->isar.id_isar2 = 0x21232042;
316
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
317
cpu->id_pfr1 = 0x00011011;
318
cpu->isar.id_dfr0 = 0x03010066;
319
cpu->id_afr0 = 0x00000000;
320
- cpu->id_mmfr0 = 0x10101105;
321
- cpu->id_mmfr1 = 0x40000000;
322
- cpu->id_mmfr2 = 0x01260000;
323
- cpu->id_mmfr3 = 0x02102211;
324
+ cpu->isar.id_mmfr0 = 0x10101105;
325
+ cpu->isar.id_mmfr1 = 0x40000000;
326
+ cpu->isar.id_mmfr2 = 0x01260000;
327
+ cpu->isar.id_mmfr3 = 0x02102211;
328
cpu->isar.id_isar0 = 0x02101110;
329
cpu->isar.id_isar1 = 0x13112111;
330
cpu->isar.id_isar2 = 0x21232042;
331
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
332
cpu->id_pfr1 = 0x00011011;
333
cpu->isar.id_dfr0 = 0x03010066;
334
cpu->id_afr0 = 0x00000000;
335
- cpu->id_mmfr0 = 0x10201105;
336
- cpu->id_mmfr1 = 0x40000000;
337
- cpu->id_mmfr2 = 0x01260000;
338
- cpu->id_mmfr3 = 0x02102211;
339
+ cpu->isar.id_mmfr0 = 0x10201105;
340
+ cpu->isar.id_mmfr1 = 0x40000000;
341
+ cpu->isar.id_mmfr2 = 0x01260000;
342
+ cpu->isar.id_mmfr3 = 0x02102211;
343
cpu->isar.id_isar0 = 0x02101110;
344
cpu->isar.id_isar1 = 0x13112111;
345
cpu->isar.id_isar2 = 0x21232042;
346
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
347
u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
348
cpu->isar.id_isar6 = u;
349
350
- u = cpu->id_mmfr3;
351
+ u = cpu->isar.id_mmfr3;
352
u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
353
- cpu->id_mmfr3 = u;
354
+ cpu->isar.id_mmfr3 = u;
355
356
u = cpu->isar.id_aa64dfr0;
357
u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
358
diff --git a/target/arm/helper.c b/target/arm/helper.c
359
index XXXXXXX..XXXXXXX 100644
360
--- a/target/arm/helper.c
361
+++ b/target/arm/helper.c
362
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
363
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
364
.access = PL1_R, .type = ARM_CP_CONST,
365
.accessfn = access_aa32_tid3,
366
- .resetvalue = cpu->id_mmfr0 },
367
+ .resetvalue = cpu->isar.id_mmfr0 },
368
{ .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
369
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
370
.access = PL1_R, .type = ARM_CP_CONST,
371
.accessfn = access_aa32_tid3,
372
- .resetvalue = cpu->id_mmfr1 },
373
+ .resetvalue = cpu->isar.id_mmfr1 },
374
{ .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
375
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
376
.access = PL1_R, .type = ARM_CP_CONST,
377
.accessfn = access_aa32_tid3,
378
- .resetvalue = cpu->id_mmfr2 },
379
+ .resetvalue = cpu->isar.id_mmfr2 },
380
{ .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
381
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
382
.access = PL1_R, .type = ARM_CP_CONST,
383
.accessfn = access_aa32_tid3,
384
- .resetvalue = cpu->id_mmfr3 },
385
+ .resetvalue = cpu->isar.id_mmfr3 },
386
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
387
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
388
.access = PL1_R, .type = ARM_CP_CONST,
389
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
390
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
391
.access = PL1_R, .type = ARM_CP_CONST,
392
.accessfn = access_aa32_tid3,
393
- .resetvalue = cpu->id_mmfr4 },
394
+ .resetvalue = cpu->isar.id_mmfr4 },
395
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
396
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
397
.access = PL1_R, .type = ARM_CP_CONST,
398
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
399
define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
400
define_arm_cp_regs(cpu, vmsa_cp_reginfo);
401
/* TTCBR2 is introduced with ARMv8.2-A32HPD. */
402
- if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
403
+ if (FIELD_EX32(cpu->isar.id_mmfr4, ID_MMFR4, HPDS) != 0) {
404
define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
74
}
405
}
75
}
406
}
76
77
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
407
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
78
index XXXXXXX..XXXXXXX 100644
408
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/kvm32.c
409
--- a/target/arm/kvm32.c
80
+++ b/target/arm/kvm32.c
410
+++ b/target/arm/kvm32.c
81
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_hw_debug_active(CPUState *cs)
411
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
82
return false;
412
* Fortunately there is not yet anything in there that affects migration.
83
}
413
*/
84
414
85
-int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
415
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
86
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
416
+ ARM_CP15_REG32(0, 0, 1, 4));
87
{
417
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
88
qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
418
+ ARM_CP15_REG32(0, 0, 1, 5));
89
- return 0;
419
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
90
}
420
+ ARM_CP15_REG32(0, 0, 1, 6));
91
421
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
92
int kvm_arm_pmu_init(CPUState *cs)
422
+ ARM_CP15_REG32(0, 0, 1, 7));
423
+ if (read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
424
+ ARM_CP15_REG32(0, 0, 2, 6))) {
425
+ /*
426
+ * Older kernels don't support reading ID_MMFR4 (a new in v8
427
+ * register); assume it's zero.
428
+ */
429
+ ahcf->isar.id_mmfr4 = 0;
430
+ }
431
+
432
/*
433
* There is no way to read DBGDIDR, because currently 32-bit KVM
434
* doesn't implement debug at all. Leave it at zero.
93
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
435
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
94
index XXXXXXX..XXXXXXX 100644
436
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/kvm64.c
437
--- a/target/arm/kvm64.c
96
+++ b/target/arm/kvm64.c
438
+++ b/target/arm/kvm64.c
97
@@ -XXX,XX +XXX,XX @@ static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
439
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
98
440
*/
99
err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
441
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
100
if (err != 0) {
442
ARM64_SYS_REG(3, 0, 0, 1, 2));
101
+ error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
443
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
102
return false;
444
+ ARM64_SYS_REG(3, 0, 0, 1, 4));
103
}
445
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
104
446
+ ARM64_SYS_REG(3, 0, 0, 1, 5));
105
err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
447
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
106
- if (err < 0) {
448
+ ARM64_SYS_REG(3, 0, 0, 1, 6));
107
- fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
449
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
108
- strerror(-err));
450
+ ARM64_SYS_REG(3, 0, 0, 1, 7));
109
- abort();
451
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
110
+ if (err != 0) {
452
ARM64_SYS_REG(3, 0, 0, 2, 0));
111
+ error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
453
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
112
+ return false;
454
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
113
}
455
ARM64_SYS_REG(3, 0, 0, 2, 4));
114
456
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
115
return true;
457
ARM64_SYS_REG(3, 0, 0, 2, 5));
116
}
458
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
117
459
+ ARM64_SYS_REG(3, 0, 0, 2, 6));
118
-int kvm_arm_pmu_init(CPUState *cs)
460
err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
119
+void kvm_arm_pmu_init(CPUState *cs)
461
ARM64_SYS_REG(3, 0, 0, 2, 7));
120
{
462
121
struct kvm_device_attr attr = {
122
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
123
.attr = KVM_ARM_VCPU_PMU_V3_INIT,
124
};
125
126
- return kvm_arm_pmu_set_attr(cs, &attr);
127
+ if (!ARM_CPU(cs)->has_pmu) {
128
+ return;
129
+ }
130
+ if (!kvm_arm_pmu_set_attr(cs, &attr)) {
131
+ error_report("failed to init PMU");
132
+ abort();
133
+ }
134
}
135
136
-int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
137
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
138
{
139
struct kvm_device_attr attr = {
140
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
141
@@ -XXX,XX +XXX,XX @@ int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
142
.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
143
};
144
145
- return kvm_arm_pmu_set_attr(cs, &attr);
146
+ if (!ARM_CPU(cs)->has_pmu) {
147
+ return;
148
+ }
149
+ if (!kvm_arm_pmu_set_attr(cs, &attr)) {
150
+ error_report("failed to set irq for PMU");
151
+ abort();
152
+ }
153
}
154
155
static inline void set_feature(uint64_t *features, int feature)
156
--
463
--
157
2.7.4
464
2.20.1
158
465
159
466
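The corrected 32-bit tests in the patch above read the PAN field out of
ID_MMFR3 (bits [19:16]) rather than MVFR0; reduced to plain C the checks are
just field extracts. Sketch only: QEMU uses FIELD_EX32 with its ID_MMFR3
field definitions, and the helper names below are invented.

#include <stdbool.h>
#include <stdint.h>

/* ID_MMFR3.PAN: 0 = not implemented, 1 = PAN, 2 = PAN plus ATS1E1. */
static unsigned id_mmfr3_pan(uint32_t id_mmfr3)
{
    return (id_mmfr3 >> 16) & 0xf;
}

static bool aa32_has_pan(uint32_t id_mmfr3)
{
    return id_mmfr3_pan(id_mmfr3) != 0;
}

static bool aa32_has_ats1e1(uint32_t id_mmfr3)
{
    return id_mmfr3_pan(id_mmfr3) >= 2;
}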
We currently have some similar code in tlb_fill() and in
arm_cpu_do_unaligned_access() for delivering a data abort or prefetch
abort. We're also going to want to do the same thing to handle
external aborts. Factor out the common code into a new function
deliver_fault().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
---
target/arm/op_helper.c | 110 +++++++++++++++++++++------------------
1 file changed, 57 insertions(+), 53 deletions(-)

Now we have moved ID_MMFR4 into the ARMISARegisters struct, we
can define and use an isar_feature for the presence of the
ARMv8.2-AA32HPD feature, rather than open-coding the test.

While we're here, correct a comment typo which missed an 'A'
from the feature name.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200214175116.9164-20-peter.maydell@linaro.org
---
target/arm/cpu.h | 5 +++++
target/arm/helper.c | 4 ++--
2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/op_helper.c
18
--- a/target/arm/cpu.h
17
+++ b/target/arm/op_helper.c
19
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
20
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id)
19
return syn;
21
FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
20
}
22
}
21
23
22
+static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
24
+static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
23
+ uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
24
+{
25
+{
25
+ CPUARMState *env = &cpu->env;
26
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
26
+ int target_el;
27
+ bool same_el;
28
+ uint32_t syn, exc;
29
+
30
+ target_el = exception_target_el(env);
31
+ if (fi->stage2) {
32
+ target_el = 2;
33
+ env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
34
+ }
35
+ same_el = (arm_current_el(env) == target_el);
36
+
37
+ if (fsc == 0x3f) {
38
+ /* Caller doesn't have a long-format fault status code. This
39
+ * should only happen if this fault will never actually be reported
40
+ * to an EL that uses a syndrome register. Check that here.
41
+ * 0x3f is a (currently) reserved FSC code, in case the constructed
42
+ * syndrome does leak into the guest somehow.
43
+ */
44
+ assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
45
+ }
46
+
47
+ if (access_type == MMU_INST_FETCH) {
48
+ syn = syn_insn_abort(same_el, 0, fi->s1ptw, fsc);
49
+ exc = EXCP_PREFETCH_ABORT;
50
+ } else {
51
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
52
+ same_el, fi->s1ptw,
53
+ access_type == MMU_DATA_STORE,
54
+ fsc);
55
+ if (access_type == MMU_DATA_STORE
56
+ && arm_feature(env, ARM_FEATURE_V6)) {
57
+ fsr |= (1 << 11);
58
+ }
59
+ exc = EXCP_DATA_ABORT;
60
+ }
61
+
62
+ env->exception.vaddress = addr;
63
+ env->exception.fsr = fsr;
64
+ raise_exception(env, exc, syn, target_el);
65
+}
27
+}
66
+
28
+
67
/* try to fill the TLB and return an exception if error. If retaddr is
29
/*
68
* NULL, it means that the function was called in C code (i.e. not
30
* 64-bit feature tests via id registers.
69
* from generated code or from helper.c)
31
*/
70
@@ -XXX,XX +XXX,XX @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
32
diff --git a/target/arm/helper.c b/target/arm/helper.c
71
ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
33
index XXXXXXX..XXXXXXX 100644
72
if (unlikely(ret)) {
34
--- a/target/arm/helper.c
73
ARMCPU *cpu = ARM_CPU(cs);
35
+++ b/target/arm/helper.c
74
- CPUARMState *env = &cpu->env;
36
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
75
- uint32_t syn, exc, fsc;
37
} else {
76
- unsigned int target_el;
38
define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
77
- bool same_el;
39
define_arm_cp_regs(cpu, vmsa_cp_reginfo);
78
+ uint32_t fsc;
40
- /* TTCBR2 is introduced with ARMv8.2-A32HPD. */
79
41
- if (FIELD_EX32(cpu->isar.id_mmfr4, ID_MMFR4, HPDS) != 0) {
80
if (retaddr) {
42
+ /* TTCBR2 is introduced with ARMv8.2-AA32HPD. */
81
/* now we have a real cpu fault */
43
+ if (cpu_isar_feature(aa32_hpd, cpu)) {
82
cpu_restore_state(cs, retaddr);
44
define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
83
}
45
}
84
85
- target_el = exception_target_el(env);
86
- if (fi.stage2) {
87
- target_el = 2;
88
- env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
89
- }
90
- same_el = arm_current_el(env) == target_el;
91
-
92
if (fsr & (1 << 9)) {
93
/* LPAE format fault status register : bottom 6 bits are
94
* status code in the same form as needed for syndrome
95
@@ -XXX,XX +XXX,XX @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
96
fsc = extract32(fsr, 0, 6);
97
} else {
98
/* Short format FSR : this fault will never actually be reported
99
- * to an EL that uses a syndrome register. Check that here,
100
- * and use a (currently) reserved FSR code in case the constructed
101
- * syndrome does leak into the guest somehow.
102
+ * to an EL that uses a syndrome register. Use a (currently)
103
+ * reserved FSR code in case the constructed syndrome does leak
104
+ * into the guest somehow. deliver_fault will assert that
105
+ * we don't target an EL using the syndrome.
106
*/
107
- assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
108
fsc = 0x3f;
109
}
110
111
- /* For insn and data aborts we assume there is no instruction syndrome
112
- * information; this is always true for exceptions reported to EL1.
113
- */
114
- if (access_type == MMU_INST_FETCH) {
115
- syn = syn_insn_abort(same_el, 0, fi.s1ptw, fsc);
116
- exc = EXCP_PREFETCH_ABORT;
117
- } else {
118
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
119
- same_el, fi.s1ptw,
120
- access_type == MMU_DATA_STORE, fsc);
121
- if (access_type == MMU_DATA_STORE
122
- && arm_feature(env, ARM_FEATURE_V6)) {
123
- fsr |= (1 << 11);
124
- }
125
- exc = EXCP_DATA_ABORT;
126
- }
127
-
128
- env->exception.vaddress = addr;
129
- env->exception.fsr = fsr;
130
- raise_exception(env, exc, syn, target_el);
131
+ deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
132
}
46
}
133
}
134
135
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
136
{
137
ARMCPU *cpu = ARM_CPU(cs);
138
CPUARMState *env = &cpu->env;
139
- int target_el;
140
- bool same_el;
141
- uint32_t syn;
142
+ uint32_t fsr, fsc;
143
+ ARMMMUFaultInfo fi = {};
144
ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
145
146
if (retaddr) {
147
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
148
cpu_restore_state(cs, retaddr);
149
}
150
151
- target_el = exception_target_el(env);
152
- same_el = (arm_current_el(env) == target_el);
153
-
154
- env->exception.vaddress = vaddr;
155
-
156
/* the DFSR for an alignment fault depends on whether we're using
157
* the LPAE long descriptor format, or the short descriptor format
158
*/
159
if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
160
- env->exception.fsr = (1 << 9) | 0x21;
161
+ fsr = (1 << 9) | 0x21;
162
} else {
163
- env->exception.fsr = 0x1;
164
- }
165
-
166
- if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
167
- env->exception.fsr |= (1 << 11);
168
+ fsr = 0x1;
169
}
170
+ fsc = 0x21;
171
172
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
173
- same_el, 0, access_type == MMU_DATA_STORE,
174
- 0x21);
175
- raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
176
+ deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
177
}
178
179
#endif /* !defined(CONFIG_USER_ONLY) */
180
--
47
--
181
2.7.4
48
2.20.1
182
49
183
50
Set the MachineClass flag ignore_memory_transaction_failures
for almost all ARM boards. This means they retain the legacy
behaviour that accesses to unimplemented addresses will RAZ/WI
rather than aborting, when a subsequent commit adds support
for external aborts.

The exceptions are:
* virt -- we know that guests won't try to prod devices
  that we don't describe in the device tree or ACPI tables
* mps2 -- this board was written to use unimplemented-device
  for all the ranges with devices we don't yet handle

New boards should not set the flag, but instead be written
like the mps2.

Cut-and-paste errors mean we're using FIELD_EX64() to extract fields from
some 32-bit ID register fields. Use FIELD_EX32() instead. (This makes
no difference in behaviour, it's just more consistent.)
15
4
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
For the Xilinx boards:
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Message-id: 20200214175116.9164-21-peter.maydell@linaro.org
19
---
8
---
20
hw/arm/aspeed.c | 3 +++
9
target/arm/cpu.h | 18 +++++++++---------
21
hw/arm/collie.c | 1 +
10
1 file changed, 9 insertions(+), 9 deletions(-)
22
hw/arm/cubieboard.c | 1 +
23
hw/arm/digic_boards.c | 1 +
24
hw/arm/exynos4_boards.c | 2 ++
25
hw/arm/gumstix.c | 2 ++
26
hw/arm/highbank.c | 2 ++
27
hw/arm/imx25_pdk.c | 1 +
28
hw/arm/integratorcp.c | 1 +
29
hw/arm/kzm.c | 1 +
30
hw/arm/mainstone.c | 1 +
31
hw/arm/musicpal.c | 1 +
32
hw/arm/netduino2.c | 1 +
33
hw/arm/nseries.c | 2 ++
34
hw/arm/omap_sx1.c | 2 ++
35
hw/arm/palm.c | 1 +
36
hw/arm/raspi.c | 1 +
37
hw/arm/realview.c | 4 ++++
38
hw/arm/sabrelite.c | 1 +
39
hw/arm/spitz.c | 4 ++++
40
hw/arm/stellaris.c | 2 ++
41
hw/arm/tosa.c | 1 +
42
hw/arm/versatilepb.c | 2 ++
43
hw/arm/vexpress.c | 1 +
44
hw/arm/xilinx_zynq.c | 1 +
45
hw/arm/xlnx-ep108.c | 2 ++
46
hw/arm/z2.c | 1 +
47
27 files changed, 43 insertions(+)
48
11
49
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
12
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
50
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/arm/aspeed.c
14
--- a/target/arm/cpu.h
52
+++ b/hw/arm/aspeed.c
15
+++ b/target/arm/cpu.h
53
@@ -XXX,XX +XXX,XX @@ static void palmetto_bmc_class_init(ObjectClass *oc, void *data)
16
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
54
mc->no_floppy = 1;
17
static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
55
mc->no_cdrom = 1;
18
{
56
mc->no_parallel = 1;
19
/* Return true if D16-D31 are implemented */
57
+ mc->ignore_memory_transaction_failures = true;
20
- return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
21
+ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2;
58
}
22
}
59
23
60
static const TypeInfo palmetto_bmc_type = {
24
static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
61
@@ -XXX,XX +XXX,XX @@ static void ast2500_evb_class_init(ObjectClass *oc, void *data)
25
{
62
mc->no_floppy = 1;
26
- return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
63
mc->no_cdrom = 1;
27
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0;
64
mc->no_parallel = 1;
65
+ mc->ignore_memory_transaction_failures = true;
66
}
28
}
67
29
68
static const TypeInfo ast2500_evb_type = {
30
static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
69
@@ -XXX,XX +XXX,XX @@ static void romulus_bmc_class_init(ObjectClass *oc, void *data)
31
{
70
mc->no_floppy = 1;
32
/* Return true if CPU supports double precision floating point */
71
mc->no_cdrom = 1;
33
- return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0;
72
mc->no_parallel = 1;
34
+ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0;
73
+ mc->ignore_memory_transaction_failures = true;
74
}
35
}
75
36
76
static const TypeInfo romulus_bmc_type = {
37
/*
77
diff --git a/hw/arm/collie.c b/hw/arm/collie.c
38
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
78
index XXXXXXX..XXXXXXX 100644
39
*/
79
--- a/hw/arm/collie.c
40
static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id)
80
+++ b/hw/arm/collie.c
81
@@ -XXX,XX +XXX,XX @@ static void collie_machine_init(MachineClass *mc)
82
{
41
{
83
mc->desc = "Sharp SL-5500 (Collie) PDA (SA-1110)";
42
- return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 0;
84
mc->init = collie_init;
43
+ return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0;
85
+ mc->ignore_memory_transaction_failures = true;
86
}
44
}
87
45
88
DEFINE_MACHINE("collie", collie_machine_init)
46
static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id)
89
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
47
{
90
index XXXXXXX..XXXXXXX 100644
48
- return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 1;
91
--- a/hw/arm/cubieboard.c
49
+ return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1;
92
+++ b/hw/arm/cubieboard.c
93
@@ -XXX,XX +XXX,XX @@ static void cubieboard_machine_init(MachineClass *mc)
94
mc->init = cubieboard_init;
95
mc->block_default_type = IF_IDE;
96
mc->units_per_default_bus = 1;
97
+ mc->ignore_memory_transaction_failures = true;
98
}
50
}
99
51
100
DEFINE_MACHINE("cubieboard", cubieboard_machine_init)
52
static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id)
101
diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/hw/arm/digic_boards.c
104
+++ b/hw/arm/digic_boards.c
105
@@ -XXX,XX +XXX,XX @@ static void canon_a1100_machine_init(MachineClass *mc)
106
{
53
{
107
mc->desc = "Canon PowerShot A1100 IS";
54
- return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 1;
108
mc->init = &canon_a1100_init;
55
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1;
109
+ mc->ignore_memory_transaction_failures = true;
110
}
56
}
111
57
112
DEFINE_MACHINE("canon-a1100", canon_a1100_machine_init)
58
static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id)
113
diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c
59
{
114
index XXXXXXX..XXXXXXX 100644
60
- return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 2;
115
--- a/hw/arm/exynos4_boards.c
61
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2;
116
+++ b/hw/arm/exynos4_boards.c
117
@@ -XXX,XX +XXX,XX @@ static void nuri_class_init(ObjectClass *oc, void *data)
118
mc->desc = "Samsung NURI board (Exynos4210)";
119
mc->init = nuri_init;
120
mc->max_cpus = EXYNOS4210_NCPUS;
121
+ mc->ignore_memory_transaction_failures = true;
122
}
62
}
123
63
124
static const TypeInfo nuri_type = {
64
static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id)
125
@@ -XXX,XX +XXX,XX @@ static void smdkc210_class_init(ObjectClass *oc, void *data)
65
{
126
mc->desc = "Samsung SMDKC210 board (Exynos4210)";
66
- return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 3;
127
mc->init = smdkc210_init;
67
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3;
128
mc->max_cpus = EXYNOS4210_NCPUS;
129
+ mc->ignore_memory_transaction_failures = true;
130
}
68
}
131
69
132
static const TypeInfo smdkc210_type = {
70
static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
133
diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c
71
{
134
index XXXXXXX..XXXXXXX 100644
72
- return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4;
135
--- a/hw/arm/gumstix.c
73
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4;
136
+++ b/hw/arm/gumstix.c
137
@@ -XXX,XX +XXX,XX @@ static void connex_class_init(ObjectClass *oc, void *data)
138
139
mc->desc = "Gumstix Connex (PXA255)";
140
mc->init = connex_init;
141
+ mc->ignore_memory_transaction_failures = true;
142
}
74
}
143
75
144
static const TypeInfo connex_type = {
76
static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
145
@@ -XXX,XX +XXX,XX @@ static void verdex_class_init(ObjectClass *oc, void *data)
146
147
mc->desc = "Gumstix Verdex (PXA270)";
148
mc->init = verdex_init;
149
+ mc->ignore_memory_transaction_failures = true;
150
}
151
152
static const TypeInfo verdex_type = {
153
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/hw/arm/highbank.c
156
+++ b/hw/arm/highbank.c
157
@@ -XXX,XX +XXX,XX @@ static void highbank_class_init(ObjectClass *oc, void *data)
158
mc->block_default_type = IF_IDE;
159
mc->units_per_default_bus = 1;
160
mc->max_cpus = 4;
161
+ mc->ignore_memory_transaction_failures = true;
162
}
163
164
static const TypeInfo highbank_type = {
165
@@ -XXX,XX +XXX,XX @@ static void midway_class_init(ObjectClass *oc, void *data)
166
mc->block_default_type = IF_IDE;
167
mc->units_per_default_bus = 1;
168
mc->max_cpus = 4;
169
+ mc->ignore_memory_transaction_failures = true;
170
}
171
172
static const TypeInfo midway_type = {
173
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/hw/arm/imx25_pdk.c
176
+++ b/hw/arm/imx25_pdk.c
177
@@ -XXX,XX +XXX,XX @@ static void imx25_pdk_machine_init(MachineClass *mc)
178
{
179
mc->desc = "ARM i.MX25 PDK board (ARM926)";
180
mc->init = imx25_pdk_init;
181
+ mc->ignore_memory_transaction_failures = true;
182
}
183
184
DEFINE_MACHINE("imx25-pdk", imx25_pdk_machine_init)
185
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
186
index XXXXXXX..XXXXXXX 100644
187
--- a/hw/arm/integratorcp.c
188
+++ b/hw/arm/integratorcp.c
189
@@ -XXX,XX +XXX,XX @@ static void integratorcp_machine_init(MachineClass *mc)
190
{
191
mc->desc = "ARM Integrator/CP (ARM926EJ-S)";
192
mc->init = integratorcp_init;
193
+ mc->ignore_memory_transaction_failures = true;
194
}
195
196
DEFINE_MACHINE("integratorcp", integratorcp_machine_init)
197
diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c
198
index XXXXXXX..XXXXXXX 100644
199
--- a/hw/arm/kzm.c
200
+++ b/hw/arm/kzm.c
201
@@ -XXX,XX +XXX,XX @@ static void kzm_machine_init(MachineClass *mc)
202
{
203
mc->desc = "ARM KZM Emulation Baseboard (ARM1136)";
204
mc->init = kzm_init;
205
+ mc->ignore_memory_transaction_failures = true;
206
}
207
208
DEFINE_MACHINE("kzm", kzm_machine_init)
209
diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c
210
index XXXXXXX..XXXXXXX 100644
211
--- a/hw/arm/mainstone.c
212
+++ b/hw/arm/mainstone.c
213
@@ -XXX,XX +XXX,XX @@ static void mainstone2_machine_init(MachineClass *mc)
214
{
215
mc->desc = "Mainstone II (PXA27x)";
216
mc->init = mainstone_init;
217
+ mc->ignore_memory_transaction_failures = true;
218
}
219
220
DEFINE_MACHINE("mainstone", mainstone2_machine_init)
221
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
222
index XXXXXXX..XXXXXXX 100644
223
--- a/hw/arm/musicpal.c
224
+++ b/hw/arm/musicpal.c
225
@@ -XXX,XX +XXX,XX @@ static void musicpal_machine_init(MachineClass *mc)
226
{
227
mc->desc = "Marvell 88w8618 / MusicPal (ARM926EJ-S)";
228
mc->init = musicpal_init;
229
+ mc->ignore_memory_transaction_failures = true;
230
}
231
232
DEFINE_MACHINE("musicpal", musicpal_machine_init)
233
diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c
234
index XXXXXXX..XXXXXXX 100644
235
--- a/hw/arm/netduino2.c
236
+++ b/hw/arm/netduino2.c
237
@@ -XXX,XX +XXX,XX @@ static void netduino2_machine_init(MachineClass *mc)
238
{
239
mc->desc = "Netduino 2 Machine";
240
mc->init = netduino2_init;
241
+ mc->ignore_memory_transaction_failures = true;
242
}
243
244
DEFINE_MACHINE("netduino2", netduino2_machine_init)
245
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
246
index XXXXXXX..XXXXXXX 100644
247
--- a/hw/arm/nseries.c
248
+++ b/hw/arm/nseries.c
249
@@ -XXX,XX +XXX,XX @@ static void n800_class_init(ObjectClass *oc, void *data)
250
mc->desc = "Nokia N800 tablet aka. RX-34 (OMAP2420)";
251
mc->init = n800_init;
252
mc->default_boot_order = "";
253
+ mc->ignore_memory_transaction_failures = true;
254
}
255
256
static const TypeInfo n800_type = {
257
@@ -XXX,XX +XXX,XX @@ static void n810_class_init(ObjectClass *oc, void *data)
258
mc->desc = "Nokia N810 tablet aka. RX-44 (OMAP2420)";
259
mc->init = n810_init;
260
mc->default_boot_order = "";
261
+ mc->ignore_memory_transaction_failures = true;
262
}
263
264
static const TypeInfo n810_type = {
265
diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c
266
index XXXXXXX..XXXXXXX 100644
267
--- a/hw/arm/omap_sx1.c
268
+++ b/hw/arm/omap_sx1.c
269
@@ -XXX,XX +XXX,XX @@ static void sx1_machine_v2_class_init(ObjectClass *oc, void *data)
270
271
mc->desc = "Siemens SX1 (OMAP310) V2";
272
mc->init = sx1_init_v2;
273
+ mc->ignore_memory_transaction_failures = true;
274
}
275
276
static const TypeInfo sx1_machine_v2_type = {
277
@@ -XXX,XX +XXX,XX @@ static void sx1_machine_v1_class_init(ObjectClass *oc, void *data)
278
279
mc->desc = "Siemens SX1 (OMAP310) V1";
280
mc->init = sx1_init_v1;
281
+ mc->ignore_memory_transaction_failures = true;
282
}
283
284
static const TypeInfo sx1_machine_v1_type = {
285
diff --git a/hw/arm/palm.c b/hw/arm/palm.c
286
index XXXXXXX..XXXXXXX 100644
287
--- a/hw/arm/palm.c
288
+++ b/hw/arm/palm.c
289
@@ -XXX,XX +XXX,XX @@ static void palmte_machine_init(MachineClass *mc)
290
{
291
mc->desc = "Palm Tungsten|E aka. Cheetah PDA (OMAP310)";
292
mc->init = palmte_init;
293
+ mc->ignore_memory_transaction_failures = true;
294
}
295
296
DEFINE_MACHINE("cheetah", palmte_machine_init)
297
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
298
index XXXXXXX..XXXXXXX 100644
299
--- a/hw/arm/raspi.c
300
+++ b/hw/arm/raspi.c
301
@@ -XXX,XX +XXX,XX @@ static void raspi2_machine_init(MachineClass *mc)
302
mc->no_cdrom = 1;
303
mc->max_cpus = BCM2836_NCPUS;
304
mc->default_ram_size = 1024 * 1024 * 1024;
305
+ mc->ignore_memory_transaction_failures = true;
306
};
307
DEFINE_MACHINE("raspi2", raspi2_machine_init)
308
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
309
index XXXXXXX..XXXXXXX 100644
310
--- a/hw/arm/realview.c
311
+++ b/hw/arm/realview.c
312
@@ -XXX,XX +XXX,XX @@ static void realview_eb_class_init(ObjectClass *oc, void *data)
313
mc->desc = "ARM RealView Emulation Baseboard (ARM926EJ-S)";
314
mc->init = realview_eb_init;
315
mc->block_default_type = IF_SCSI;
316
+ mc->ignore_memory_transaction_failures = true;
317
}
318
319
static const TypeInfo realview_eb_type = {
320
@@ -XXX,XX +XXX,XX @@ static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data)
321
mc->init = realview_eb_mpcore_init;
322
mc->block_default_type = IF_SCSI;
323
mc->max_cpus = 4;
324
+ mc->ignore_memory_transaction_failures = true;
325
}
326
327
static const TypeInfo realview_eb_mpcore_type = {
328
@@ -XXX,XX +XXX,XX @@ static void realview_pb_a8_class_init(ObjectClass *oc, void *data)
329
330
mc->desc = "ARM RealView Platform Baseboard for Cortex-A8";
331
mc->init = realview_pb_a8_init;
332
+ mc->ignore_memory_transaction_failures = true;
333
}
334
335
static const TypeInfo realview_pb_a8_type = {
336
@@ -XXX,XX +XXX,XX @@ static void realview_pbx_a9_class_init(ObjectClass *oc, void *data)
337
mc->desc = "ARM RealView Platform Baseboard Explore for Cortex-A9";
338
mc->init = realview_pbx_a9_init;
339
mc->max_cpus = 4;
340
+ mc->ignore_memory_transaction_failures = true;
341
}
342
343
static const TypeInfo realview_pbx_a9_type = {
344
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
345
index XXXXXXX..XXXXXXX 100644
346
--- a/hw/arm/sabrelite.c
347
+++ b/hw/arm/sabrelite.c
348
@@ -XXX,XX +XXX,XX @@ static void sabrelite_machine_init(MachineClass *mc)
349
mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex A9)";
350
mc->init = sabrelite_init;
351
mc->max_cpus = FSL_IMX6_NUM_CPUS;
352
+ mc->ignore_memory_transaction_failures = true;
353
}
354
355
DEFINE_MACHINE("sabrelite", sabrelite_machine_init)
356
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
357
index XXXXXXX..XXXXXXX 100644
358
--- a/hw/arm/spitz.c
359
+++ b/hw/arm/spitz.c
360
@@ -XXX,XX +XXX,XX @@ static void akitapda_class_init(ObjectClass *oc, void *data)
361
362
mc->desc = "Sharp SL-C1000 (Akita) PDA (PXA270)";
363
mc->init = akita_init;
364
+ mc->ignore_memory_transaction_failures = true;
365
}
366
367
static const TypeInfo akitapda_type = {
368
@@ -XXX,XX +XXX,XX @@ static void spitzpda_class_init(ObjectClass *oc, void *data)
369
mc->desc = "Sharp SL-C3000 (Spitz) PDA (PXA270)";
370
mc->init = spitz_init;
371
mc->block_default_type = IF_IDE;
372
+ mc->ignore_memory_transaction_failures = true;
373
}
374
375
static const TypeInfo spitzpda_type = {
376
@@ -XXX,XX +XXX,XX @@ static void borzoipda_class_init(ObjectClass *oc, void *data)
377
mc->desc = "Sharp SL-C3100 (Borzoi) PDA (PXA270)";
378
mc->init = borzoi_init;
379
mc->block_default_type = IF_IDE;
380
+ mc->ignore_memory_transaction_failures = true;
381
}
382
383
static const TypeInfo borzoipda_type = {
384
@@ -XXX,XX +XXX,XX @@ static void terrierpda_class_init(ObjectClass *oc, void *data)
385
mc->desc = "Sharp SL-C3200 (Terrier) PDA (PXA270)";
386
mc->init = terrier_init;
387
mc->block_default_type = IF_IDE;
388
+ mc->ignore_memory_transaction_failures = true;
389
}
390
391
static const TypeInfo terrierpda_type = {
392
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
393
index XXXXXXX..XXXXXXX 100644
394
--- a/hw/arm/stellaris.c
395
+++ b/hw/arm/stellaris.c
396
@@ -XXX,XX +XXX,XX @@ static void lm3s811evb_class_init(ObjectClass *oc, void *data)
397
398
mc->desc = "Stellaris LM3S811EVB";
399
mc->init = lm3s811evb_init;
400
+ mc->ignore_memory_transaction_failures = true;
401
}
402
403
static const TypeInfo lm3s811evb_type = {
404
@@ -XXX,XX +XXX,XX @@ static void lm3s6965evb_class_init(ObjectClass *oc, void *data)
405
406
mc->desc = "Stellaris LM3S6965EVB";
407
mc->init = lm3s6965evb_init;
408
+ mc->ignore_memory_transaction_failures = true;
409
}
410
411
static const TypeInfo lm3s6965evb_type = {
412
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
413
index XXXXXXX..XXXXXXX 100644
414
--- a/hw/arm/tosa.c
415
+++ b/hw/arm/tosa.c
416
@@ -XXX,XX +XXX,XX @@ static void tosapda_machine_init(MachineClass *mc)
417
mc->desc = "Sharp SL-6000 (Tosa) PDA (PXA255)";
418
mc->init = tosa_init;
419
mc->block_default_type = IF_IDE;
420
+ mc->ignore_memory_transaction_failures = true;
421
}
422
423
DEFINE_MACHINE("tosa", tosapda_machine_init)
424
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
425
index XXXXXXX..XXXXXXX 100644
426
--- a/hw/arm/versatilepb.c
427
+++ b/hw/arm/versatilepb.c
428
@@ -XXX,XX +XXX,XX @@ static void versatilepb_class_init(ObjectClass *oc, void *data)
429
mc->desc = "ARM Versatile/PB (ARM926EJ-S)";
430
mc->init = vpb_init;
431
mc->block_default_type = IF_SCSI;
432
+ mc->ignore_memory_transaction_failures = true;
433
}
434
435
static const TypeInfo versatilepb_type = {
436
@@ -XXX,XX +XXX,XX @@ static void versatileab_class_init(ObjectClass *oc, void *data)
437
mc->desc = "ARM Versatile/AB (ARM926EJ-S)";
438
mc->init = vab_init;
439
mc->block_default_type = IF_SCSI;
440
+ mc->ignore_memory_transaction_failures = true;
441
}
442
443
static const TypeInfo versatileab_type = {
444
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
445
index XXXXXXX..XXXXXXX 100644
446
--- a/hw/arm/vexpress.c
447
+++ b/hw/arm/vexpress.c
448
@@ -XXX,XX +XXX,XX @@ static void vexpress_class_init(ObjectClass *oc, void *data)
449
mc->desc = "ARM Versatile Express";
450
mc->init = vexpress_common_init;
451
mc->max_cpus = 4;
452
+ mc->ignore_memory_transaction_failures = true;
453
}
454
455
static void vexpress_a9_class_init(ObjectClass *oc, void *data)
456
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
457
index XXXXXXX..XXXXXXX 100644
458
--- a/hw/arm/xilinx_zynq.c
459
+++ b/hw/arm/xilinx_zynq.c
460
@@ -XXX,XX +XXX,XX @@ static void zynq_machine_init(MachineClass *mc)
461
mc->init = zynq_init;
462
mc->max_cpus = 1;
463
mc->no_sdcard = 1;
464
+ mc->ignore_memory_transaction_failures = true;
465
}
466
467
DEFINE_MACHINE("xilinx-zynq-a9", zynq_machine_init)
468
diff --git a/hw/arm/xlnx-ep108.c b/hw/arm/xlnx-ep108.c
469
index XXXXXXX..XXXXXXX 100644
470
--- a/hw/arm/xlnx-ep108.c
471
+++ b/hw/arm/xlnx-ep108.c
472
@@ -XXX,XX +XXX,XX @@ static void xlnx_ep108_machine_init(MachineClass *mc)
473
mc->init = xlnx_ep108_init;
474
mc->block_default_type = IF_IDE;
475
mc->units_per_default_bus = 1;
476
+ mc->ignore_memory_transaction_failures = true;
477
}
478
479
DEFINE_MACHINE("xlnx-ep108", xlnx_ep108_machine_init)
480
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_machine_init(MachineClass *mc)
481
mc->init = xlnx_ep108_init;
482
mc->block_default_type = IF_IDE;
483
mc->units_per_default_bus = 1;
484
+ mc->ignore_memory_transaction_failures = true;
485
}
486
487
DEFINE_MACHINE("xlnx-zcu102", xlnx_zcu102_machine_init)
488
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
489
index XXXXXXX..XXXXXXX 100644
490
--- a/hw/arm/z2.c
491
+++ b/hw/arm/z2.c
492
@@ -XXX,XX +XXX,XX @@ static void z2_machine_init(MachineClass *mc)
493
{
494
mc->desc = "Zipit Z2 (PXA27x)";
495
mc->init = z2_init;
496
+ mc->ignore_memory_transaction_failures = true;
497
}
498
499
DEFINE_MACHINE("z2", z2_machine_init)
500
--
77
--
501
2.7.4
78
2.20.1
502
79
503
80
1
Currently we have a rather half-baked setup for allowing CPUs to
1
The ACTLR2 and HACTLR2 AArch32 system registers didn't exist in ARMv7
2
generate exceptions on accesses to invalid memory: the CPU has a
2
or the original ARMv8. They were later added as optional registers,
3
cpu_unassigned_access() hook which the memory system calls in
3
whose presence is signaled by the ID_MMFR4.AC2 field. From ARMv8.2
4
unassigned_mem_write() and unassigned_mem_read() if the current_cpu
4
they are mandatory (ie ID_MMFR4.AC2 must be non-zero).
5
pointer is non-NULL. This was originally designed before we
6
implemented the MemTxResult type that allows memory operations to
7
report a success or failure code, which is why the hook is called
8
right at the bottom of the memory system. The major problem with
9
this is that it means that the hook can be called even when the
10
access was not actually done by the CPU: for instance if the CPU
11
writes to a DMA engine register which causes the DMA engine to begin
12
a transaction which has been set up by the guest to operate on
13
invalid memory then this will cause the CPU to take an exception
14
incorrectly. Another minor problem is that currently if a device
15
returns a transaction error then this won't turn into a CPU exception
16
at all.
17
5
18
The right way to do this is to allow the CPU to respond
6
We implemented HACTLR2 in commit 0e0456ab8895a5e85, but we
19
to memory system transaction failures at the point where the
7
incorrectly made it exist for all v8 CPUs, and we didn't implement
20
CPU specific code calls into the memory system.
8
ACTLR2 at all.
21
9
22
Define a new QOM CPU method and utility function
10
Sort this out by implementing both registers only when they are
23
cpu_transaction_failed() which is called in these cases.
11
supposed to exist, and setting the ID_MMFR4 bit for -cpu max.
24
The functionality here overlaps with the existing
25
cpu_unassigned_access() because individual target CPUs will
26
need some work to convert them to the new system. When this
27
transition is complete we can remove the old cpu_unassigned_access()
28
code.
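
(Illustration only, not part of this patch: a minimal sketch of how a
target CPU might wire up the new hook. The signature follows the
CPUClass field added below; the "foo" function and class-init names
are invented for the example.)

    static void foo_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                              vaddr addr, unsigned size,
                                              MMUAccessType access_type,
                                              int mmu_idx, MemTxAttrs attrs,
                                              MemTxResult response,
                                              uintptr_t retaddr)
    {
        /* Record the fault details and raise the target's bus fault
         * (external abort) exception from here.
         */
    }

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->do_transaction_failed = foo_cpu_do_transaction_failed;
    }
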
29
12
13
Note that this removes HACTLR2 from our Cortex-A53, -A57 and -A72
14
CPU models; this is correct, because those CPUs do not implement
15
this register.
16
17
Fixes: 0e0456ab8895a5e85
30
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
31
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
32
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
20
Message-id: 20200214175116.9164-22-peter.maydell@linaro.org
33
---
21
---
34
include/qom/cpu.h | 22 ++++++++++++++++++++++
22
target/arm/cpu.h | 5 +++++
35
1 file changed, 22 insertions(+)
23
target/arm/cpu.c | 1 +
24
target/arm/cpu64.c | 4 ++++
25
target/arm/helper.c | 32 +++++++++++++++++++++++---------
26
4 files changed, 33 insertions(+), 9 deletions(-)
36
27
37
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
28
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
38
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
39
--- a/include/qom/cpu.h
30
--- a/target/arm/cpu.h
40
+++ b/include/qom/cpu.h
31
+++ b/target/arm/cpu.h
41
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock;
32
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
42
* @has_work: Callback for checking if there is work to do.
33
return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
43
* @do_interrupt: Callback for interrupt handling.
44
* @do_unassigned_access: Callback for unassigned access handling.
45
+ * (this is deprecated: new targets should use do_transaction_failed instead)
46
* @do_unaligned_access: Callback for unaligned access handling, if
47
* the target defines #ALIGNED_ONLY.
48
+ * @do_transaction_failed: Callback for handling failed memory transactions
49
+ * (ie bus faults or external aborts; not MMU faults)
50
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
51
* runtime configurable endianness is currently big-endian. Non-configurable
52
* CPUs can use the default implementation of this method. This method should
53
@@ -XXX,XX +XXX,XX @@ typedef struct CPUClass {
54
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
55
MMUAccessType access_type,
56
int mmu_idx, uintptr_t retaddr);
57
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
58
+ unsigned size, MMUAccessType access_type,
59
+ int mmu_idx, MemTxAttrs attrs,
60
+ MemTxResult response, uintptr_t retaddr);
61
bool (*virtio_is_big_endian)(CPUState *cpu);
62
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
63
uint8_t *buf, int len, bool is_write);
64
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
65
66
cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
67
}
34
}
35
36
+static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
37
+{
38
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
39
+}
68
+
40
+
69
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
41
/*
70
+ vaddr addr, unsigned size,
42
* 64-bit feature tests via id registers.
71
+ MMUAccessType access_type,
43
*/
72
+ int mmu_idx, MemTxAttrs attrs,
44
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
73
+ MemTxResult response,
45
index XXXXXXX..XXXXXXX 100644
74
+ uintptr_t retaddr)
46
--- a/target/arm/cpu.c
75
+{
47
+++ b/target/arm/cpu.c
76
+ CPUClass *cc = CPU_GET_CLASS(cpu);
48
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
49
50
t = cpu->isar.id_mmfr4;
51
t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
52
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
53
cpu->isar.id_mmfr4 = t;
54
}
55
#endif
56
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/cpu64.c
59
+++ b/target/arm/cpu64.c
60
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
61
u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
62
cpu->isar.id_mmfr3 = u;
63
64
+ u = cpu->isar.id_mmfr4;
65
+ u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
66
+ cpu->isar.id_mmfr4 = u;
77
+
67
+
78
+ if (cc->do_transaction_failed) {
68
u = cpu->isar.id_aa64dfr0;
79
+ cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
69
u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
80
+ mmu_idx, attrs, response, retaddr);
70
cpu->isar.id_aa64dfr0 = u;
81
+ }
71
diff --git a/target/arm/helper.c b/target/arm/helper.c
82
+}
72
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/helper.c
74
+++ b/target/arm/helper.c
75
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo ats1cp_reginfo[] = {
76
};
83
#endif
77
#endif
84
78
85
#endif /* NEED_CPU_H */
79
+/*
80
+ * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
81
+ * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
82
+ * is non-zero, which is never for ARMv7, optionally in ARMv8
83
+ * and mandatorily for ARMv8.2 and up.
84
+ * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
85
+ * implementation is RAZ/WI we can ignore this detail, as we
86
+ * do for ACTLR.
87
+ */
88
+static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
89
+ { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
90
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
91
+ .access = PL1_RW, .type = ARM_CP_CONST,
92
+ .resetvalue = 0 },
93
+ { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
94
+ .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
95
+ .access = PL2_RW, .type = ARM_CP_CONST,
96
+ .resetvalue = 0 },
97
+ REGINFO_SENTINEL
98
+};
99
+
100
void register_cp_regs_for_features(ARMCPU *cpu)
101
{
102
/* Register all the coprocessor registers based on feature bits */
103
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
104
REGINFO_SENTINEL
105
};
106
define_arm_cp_regs(cpu, auxcr_reginfo);
107
- if (arm_feature(env, ARM_FEATURE_V8)) {
108
- /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
109
- ARMCPRegInfo hactlr2_reginfo = {
110
- .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
111
- .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
112
- .access = PL2_RW, .type = ARM_CP_CONST,
113
- .resetvalue = 0
114
- };
115
- define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
116
+ if (cpu_isar_feature(aa32_ac2, cpu)) {
117
+ define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
118
}
119
}
120
86
--
121
--
87
2.7.4
122
2.20.1
88
123
89
124
1
Move the MemTxResult type to memattrs.h. We're going to want to
1
From: Guenter Roeck <linux@roeck-us.net>
2
use it in qom/cpu.h, which doesn't want to include all of
3
memory.h. In practice MemTxResult and MemTxAttrs are pretty
4
closely linked since both are used for the new-style
5
read_with_attrs and write_with_attrs callbacks, so memattrs.h
6
is a reasonable home for this rather than creating a whole
7
new header file for it.
8
2
3
We need to be able to use OHCISysBusState outside hcd-ohci.c, so move it
4
to its include file.
5
6
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
7
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
8
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
9
Message-id: 20200217204812.9857-2-linux@roeck-us.net
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
12
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
13
---
11
---
14
include/exec/memattrs.h | 10 ++++++++++
12
hw/usb/hcd-ohci.h | 16 ++++++++++++++++
15
include/exec/memory.h | 10 ----------
13
hw/usb/hcd-ohci.c | 15 ---------------
16
2 files changed, 10 insertions(+), 10 deletions(-)
14
2 files changed, 16 insertions(+), 15 deletions(-)
17
15
18
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
16
diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/include/exec/memattrs.h
18
--- a/hw/usb/hcd-ohci.h
21
+++ b/include/exec/memattrs.h
19
+++ b/hw/usb/hcd-ohci.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs {
20
@@ -XXX,XX +XXX,XX @@
23
*/
21
#define HCD_OHCI_H
24
#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
22
25
23
#include "sysemu/dma.h"
26
+/* New-style MMIO accessors can indicate that the transaction failed.
24
+#include "hw/usb.h"
27
+ * A zero (MEMTX_OK) response means success; anything else is a failure
25
28
+ * of some kind. The memory subsystem will bitwise-OR together results
26
/* Number of Downstream Ports on the root hub: */
29
+ * if it is synthesizing an operation from multiple smaller accesses.
27
#define OHCI_MAX_PORTS 15
30
+ */
28
@@ -XXX,XX +XXX,XX @@ typedef struct OHCIState {
31
+#define MEMTX_OK 0
29
void (*ohci_die)(struct OHCIState *ohci);
32
+#define MEMTX_ERROR (1U << 0) /* device returned an error */
30
} OHCIState;
33
+#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
31
34
+typedef uint32_t MemTxResult;
32
+#define TYPE_SYSBUS_OHCI "sysbus-ohci"
33
+#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI)
35
+
34
+
36
#endif
35
+typedef struct {
37
diff --git a/include/exec/memory.h b/include/exec/memory.h
36
+ /*< private >*/
37
+ SysBusDevice parent_obj;
38
+ /*< public >*/
39
+
40
+ OHCIState ohci;
41
+ char *masterbus;
42
+ uint32_t num_ports;
43
+ uint32_t firstport;
44
+ dma_addr_t dma_offset;
45
+} OHCISysBusState;
46
+
47
extern const VMStateDescription vmstate_ohci_state;
48
49
void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
50
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
38
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
39
--- a/include/exec/memory.h
52
--- a/hw/usb/hcd-ohci.c
40
+++ b/include/exec/memory.h
53
+++ b/hw/usb/hcd-ohci.c
41
@@ -XXX,XX +XXX,XX @@ static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
54
@@ -XXX,XX +XXX,XX @@ void ohci_sysbus_die(struct OHCIState *ohci)
42
n->end = end;
55
ohci_bus_stop(ohci);
43
}
56
}
44
57
45
-/* New-style MMIO accessors can indicate that the transaction failed.
58
-#define TYPE_SYSBUS_OHCI "sysbus-ohci"
46
- * A zero (MEMTX_OK) response means success; anything else is a failure
59
-#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI)
47
- * of some kind. The memory subsystem will bitwise-OR together results
48
- * if it is synthesizing an operation from multiple smaller accesses.
49
- */
50
-#define MEMTX_OK 0
51
-#define MEMTX_ERROR (1U << 0) /* device returned an error */
52
-#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
53
-typedef uint32_t MemTxResult;
54
-
60
-
55
/*
61
-typedef struct {
56
* Memory region callbacks
62
- /*< private >*/
57
*/
63
- SysBusDevice parent_obj;
64
- /*< public >*/
65
-
66
- OHCIState ohci;
67
- char *masterbus;
68
- uint32_t num_ports;
69
- uint32_t firstport;
70
- dma_addr_t dma_offset;
71
-} OHCISysBusState;
72
-
73
static void ohci_realize_pxa(DeviceState *dev, Error **errp)
74
{
75
OHCISysBusState *s = SYSBUS_OHCI(dev);
58
--
76
--
59
2.7.4
77
2.20.1
60
78
61
79
1
From: Thomas Huth <thuth@redhat.com>
1
From: Guenter Roeck <linux@roeck-us.net>
2
2
3
QEMU currently aborts if the user is accidentally trying to
3
We'll use this property in a follow-up patch to instantiate an EHCI
4
do something like this:
4
bus with companion support.
5
5
6
$ aarch64-softmmu/qemu-system-aarch64 -S -M integratorcp -nographic
6
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
7
QEMU 2.9.93 monitor - type 'help' for more information
7
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
8
(qemu) device_add ast2400
8
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
9
Unexpected error in error_set_from_qdev_prop_error()
9
Message-id: 20200217204812.9857-3-linux@roeck-us.net
10
at hw/core/qdev-properties.c:1032:
11
Aborted (core dumped)
12
13
The ast2400 SoC devices are clearly not creatable by the user since
14
they are using the serial_hds and nd_table arrays directly in their
15
realize function, so mark them with user_creatable = false.
16
17
Signed-off-by: Thomas Huth <thuth@redhat.com>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
19
Reviewed-by: Cédric Le Goater <clg@kaod.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
11
---
22
hw/arm/aspeed_soc.c | 2 ++
12
hw/usb/hcd-ehci-sysbus.c | 2 ++
23
1 file changed, 2 insertions(+)
13
1 file changed, 2 insertions(+)
24
14
25
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
15
diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c
26
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/arm/aspeed_soc.c
17
--- a/hw/usb/hcd-ehci-sysbus.c
28
+++ b/hw/arm/aspeed_soc.c
18
+++ b/hw/usb/hcd-ehci-sysbus.c
29
@@ -XXX,XX +XXX,XX @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data)
19
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_ehci_sysbus = {
30
20
31
sc->info = (AspeedSoCInfo *) data;
21
static Property ehci_sysbus_properties[] = {
32
dc->realize = aspeed_soc_realize;
22
DEFINE_PROP_UINT32("maxframes", EHCISysBusState, ehci.maxframes, 128),
33
+ /* Reason: Uses serial_hds and nd_table in realize() directly */
23
+ DEFINE_PROP_BOOL("companion-enable", EHCISysBusState, ehci.companion_enable,
34
+ dc->user_creatable = false;
24
+ false),
35
}
25
DEFINE_PROP_END_OF_LIST(),
36
26
};
37
static const TypeInfo aspeed_soc_type_info = {
27
38
--
28
--
39
2.7.4
29
2.20.1
40
30
41
31
1
Define a new MachineClass field ignore_memory_transaction_failures.
1
From: Guenter Roeck <linux@roeck-us.net>
2
If this flag is true then the CPU will ignore memory transaction
3
failures which should cause the CPU to take an exception due to an
4
access to an unassigned physical address; the transaction will
5
instead return zero (for a read) or be ignored (for a write). This
6
should be set only by legacy board models which rely on the old
7
RAZ/WI behaviour for handling devices that QEMU does not yet model.
8
New board models should instead use "unimplemented-device" for all
9
memory ranges where the guest will attempt to probe for a device that
10
QEMU doesn't implement and a stub device is required.
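
(Not part of this patch, just a sketch of the recommended alternative:
a new board model would cover such a region with a stub device, e.g.
via create_unimplemented_device() from "hw/misc/unimp.h". The device
name, base address and size below are invented for illustration.)

    #include "hw/misc/unimp.h"

    /* Guest probes of this range then read as zero and writes are
     * ignored (typically logged), instead of faulting.
     */
    create_unimplemented_device("mysoc.pwm", 0x40012000, 0x1000);
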
11
2
12
We need this for ARM boards, where we're about to implement support for
3
Instantiate EHCI and OHCI controllers on Allwinner A10. OHCI ports are
13
generating external aborts on memory transaction failures. Too many
4
modeled as companions of the respective EHCI ports.
14
of our legacy board models rely on the RAZ/WI behaviour and we
15
would break currently working guests when their "probe for device"
16
code provoked an external abort rather than a RAZ.
17
5
6
With this patch applied, USB controllers are discovered and instantiated
7
when booting the cubieboard machine with a recent Linux kernel.
8
9
ehci-platform 1c14000.usb: EHCI Host Controller
10
ehci-platform 1c14000.usb: new USB bus registered, assigned bus number 1
11
ehci-platform 1c14000.usb: irq 26, io mem 0x01c14000
12
ehci-platform 1c14000.usb: USB 2.0 started, EHCI 1.00
13
ehci-platform 1c1c000.usb: EHCI Host Controller
14
ehci-platform 1c1c000.usb: new USB bus registered, assigned bus number 2
15
ehci-platform 1c1c000.usb: irq 31, io mem 0x01c1c000
16
ehci-platform 1c1c000.usb: USB 2.0 started, EHCI 1.00
17
ohci-platform 1c14400.usb: Generic Platform OHCI controller
18
ohci-platform 1c14400.usb: new USB bus registered, assigned bus number 3
19
ohci-platform 1c14400.usb: irq 27, io mem 0x01c14400
20
ohci-platform 1c1c400.usb: Generic Platform OHCI controller
21
ohci-platform 1c1c400.usb: new USB bus registered, assigned bus number 4
22
ohci-platform 1c1c400.usb: irq 32, io mem 0x01c1c400
23
usb 2-1: new high-speed USB device number 2 using ehci-platform
24
usb-storage 2-1:1.0: USB Mass Storage device detected
25
scsi host1: usb-storage 2-1:1.0
26
usb 3-1: new full-speed USB device number 2 using ohci-platform
27
input: QEMU QEMU USB Mouse as /devices/platform/soc/1c14400.usb/usb3/3-1/3-1:1.0/0003:0627:0001.0001/input/input0
28
29
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
30
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
31
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
32
Message-id: 20200217204812.9857-4-linux@roeck-us.net
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
33
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
34
---
20
include/hw/boards.h | 11 +++++++++++
35
include/hw/arm/allwinner-a10.h | 6 +++++
21
include/qom/cpu.h | 7 ++++++-
36
hw/arm/allwinner-a10.c | 43 ++++++++++++++++++++++++++++++++++
22
qom/cpu.c | 7 +++++++
37
2 files changed, 49 insertions(+)
23
3 files changed, 24 insertions(+), 1 deletion(-)
24
38
25
diff --git a/include/hw/boards.h b/include/hw/boards.h
39
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
26
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
27
--- a/include/hw/boards.h
41
--- a/include/hw/arm/allwinner-a10.h
28
+++ b/include/hw/boards.h
42
+++ b/include/hw/arm/allwinner-a10.h
29
@@ -XXX,XX +XXX,XX @@ typedef struct {
43
@@ -XXX,XX +XXX,XX @@
30
* size than the target architecture's minimum. (Attempting to create
44
#include "hw/intc/allwinner-a10-pic.h"
31
* such a CPU will fail.) Note that changing this is a migration
45
#include "hw/net/allwinner_emac.h"
32
* compatibility break for the machine.
46
#include "hw/ide/ahci.h"
33
+ * @ignore_memory_transaction_failures:
47
+#include "hw/usb/hcd-ohci.h"
34
+ * If this flag is true then the CPU will ignore memory transaction
48
+#include "hw/usb/hcd-ehci.h"
35
+ * failures which should cause the CPU to take an exception due to an
49
36
+ * access to an unassigned physical address; the transaction will instead
50
#include "target/arm/cpu.h"
37
+ * return zero (for a read) or be ignored (for a write). This should be
51
38
+ * set only by legacy board models which rely on the old RAZ/WI behaviour
52
39
+ * for handling devices that QEMU does not yet model. New board models
53
#define AW_A10_SDRAM_BASE 0x40000000
40
+ * should instead use "unimplemented-device" for all memory ranges where
54
41
+ * the guest will attempt to probe for a device that QEMU doesn't
55
+#define AW_A10_NUM_USB 2
42
+ * implement and a stub device is required.
56
+
43
*/
57
#define TYPE_AW_A10 "allwinner-a10"
44
struct MachineClass {
58
#define AW_A10(obj) OBJECT_CHECK(AwA10State, (obj), TYPE_AW_A10)
45
/*< private >*/
59
46
@@ -XXX,XX +XXX,XX @@ struct MachineClass {
60
@@ -XXX,XX +XXX,XX @@ typedef struct AwA10State {
47
bool rom_file_has_mr;
61
AwEmacState emac;
48
int minimum_page_bits;
62
AllwinnerAHCIState sata;
49
bool has_hotpluggable_cpus;
63
MemoryRegion sram_a;
50
+ bool ignore_memory_transaction_failures;
64
+ EHCISysBusState ehci[AW_A10_NUM_USB];
51
int numa_mem_align_shift;
65
+ OHCISysBusState ohci[AW_A10_NUM_USB];
52
void (*numa_auto_assign_ram)(MachineClass *mc, NodeInfo *nodes,
66
} AwA10State;
53
int nb_nodes, ram_addr_t size);
67
54
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
68
#endif
69
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
55
index XXXXXXX..XXXXXXX 100644
70
index XXXXXXX..XXXXXXX 100644
56
--- a/include/qom/cpu.h
71
--- a/hw/arm/allwinner-a10.c
57
+++ b/include/qom/cpu.h
72
+++ b/hw/arm/allwinner-a10.c
58
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
59
* @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
60
* to @trace_dstate).
61
* @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
62
+ * @ignore_memory_transaction_failures: Cached copy of the MachineState
63
+ * flag of the same name: allows the board to suppress calling of the
64
+ * CPU do_transaction_failed hook function.
65
*
66
* State of one CPU core or thread.
67
*/
68
@@ -XXX,XX +XXX,XX @@ struct CPUState {
69
*/
70
bool throttle_thread_scheduled;
71
72
+ bool ignore_memory_transaction_failures;
73
+
74
/* Note that this is accessed at the start of every TB via a negative
75
offset from AREG0. Leave this field at the end so as to make the
76
(absolute value) offset as small as possible. This reduces code
77
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
78
{
79
CPUClass *cc = CPU_GET_CLASS(cpu);
80
81
- if (cc->do_transaction_failed) {
82
+ if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
83
cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
84
mmu_idx, attrs, response, retaddr);
85
}
86
diff --git a/qom/cpu.c b/qom/cpu.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/qom/cpu.c
89
+++ b/qom/cpu.c
90
@@ -XXX,XX +XXX,XX @@
73
@@ -XXX,XX +XXX,XX @@
91
#include "exec/cpu-common.h"
74
#include "hw/arm/allwinner-a10.h"
92
#include "qemu/error-report.h"
75
#include "hw/misc/unimp.h"
93
#include "sysemu/sysemu.h"
76
#include "sysemu/sysemu.h"
94
+#include "hw/boards.h"
77
+#include "hw/boards.h"
95
#include "hw/qdev-properties.h"
78
+#include "hw/usb/hcd-ohci.h"
96
#include "trace-root.h"
79
97
80
#define AW_A10_PIC_REG_BASE 0x01c20400
98
@@ -XXX,XX +XXX,XX @@ static void cpu_common_parse_features(const char *typename, char *features,
81
#define AW_A10_PIT_REG_BASE 0x01c20c00
99
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
82
#define AW_A10_UART0_REG_BASE 0x01c28000
100
{
83
#define AW_A10_EMAC_BASE 0x01c0b000
101
CPUState *cpu = CPU(dev);
84
+#define AW_A10_EHCI_BASE 0x01c14000
102
+ Object *machine = qdev_get_machine();
85
+#define AW_A10_OHCI_BASE 0x01c14400
103
+ ObjectClass *oc = object_get_class(machine);
86
#define AW_A10_SATA_BASE 0x01c18000
104
+ MachineClass *mc = MACHINE_CLASS(oc);
87
88
static void aw_a10_init(Object *obj)
89
@@ -XXX,XX +XXX,XX @@ static void aw_a10_init(Object *obj)
90
91
sysbus_init_child_obj(obj, "sata", &s->sata, sizeof(s->sata),
92
TYPE_ALLWINNER_AHCI);
105
+
93
+
106
+ cpu->ignore_memory_transaction_failures =
94
+ if (machine_usb(current_machine)) {
107
+ mc->ignore_memory_transaction_failures;
95
+ int i;
108
96
+
109
if (dev->hotplugged) {
97
+ for (i = 0; i < AW_A10_NUM_USB; i++) {
110
cpu_synchronize_post_init(cpu);
98
+ sysbus_init_child_obj(obj, "ehci[*]", OBJECT(&s->ehci[i]),
99
+ sizeof(s->ehci[i]), TYPE_PLATFORM_EHCI);
100
+ sysbus_init_child_obj(obj, "ohci[*]", OBJECT(&s->ohci[i]),
101
+ sizeof(s->ohci[i]), TYPE_SYSBUS_OHCI);
102
+ }
103
+ }
104
}
105
106
static void aw_a10_realize(DeviceState *dev, Error **errp)
107
@@ -XXX,XX +XXX,XX @@ static void aw_a10_realize(DeviceState *dev, Error **errp)
108
serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2,
109
qdev_get_gpio_in(dev, 1),
110
115200, serial_hd(0), DEVICE_NATIVE_ENDIAN);
111
+
112
+ if (machine_usb(current_machine)) {
113
+ int i;
114
+
115
+ for (i = 0; i < AW_A10_NUM_USB; i++) {
116
+ char bus[16];
117
+
118
+ sprintf(bus, "usb-bus.%d", i);
119
+
120
+ object_property_set_bool(OBJECT(&s->ehci[i]), true,
121
+ "companion-enable", &error_fatal);
122
+ object_property_set_bool(OBJECT(&s->ehci[i]), true, "realized",
123
+ &error_fatal);
124
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0,
125
+ AW_A10_EHCI_BASE + i * 0x8000);
126
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0,
127
+ qdev_get_gpio_in(dev, 39 + i));
128
+
129
+ object_property_set_str(OBJECT(&s->ohci[i]), bus, "masterbus",
130
+ &error_fatal);
131
+ object_property_set_bool(OBJECT(&s->ohci[i]), true, "realized",
132
+ &error_fatal);
133
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0,
134
+ AW_A10_OHCI_BASE + i * 0x8000);
135
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0,
136
+ qdev_get_gpio_in(dev, 64 + i));
137
+ }
138
+ }
139
}
140
141
static void aw_a10_class_init(ObjectClass *oc, void *data)
111
--
142
--
112
2.7.4
143
2.20.1
113
144
114
145
1
Remove an out of date comment which says there's only one
1
From: Richard Henderson <richard.henderson@linaro.org>
2
item in the NVIC container region -- we put systick into its
3
own device object a while back and so now there are two
4
things in the container.
5
2
3
These instructions shift left or right depending on the sign
4
of the input, and 7 bits are significant to the shift. This
5
requires several masks and selects in addition to the actual
6
shifts to form the complete answer.
7
8
That said, the operation is still a small improvement even for
9
two 64-bit elements -- 13 vector operations instead of 2 * 7
10
integer operations.
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20200216214232.4230-2-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 1501692241-23310-6-git-send-email-peter.maydell@linaro.org
10
---
16
---
11
hw/intc/armv7m_nvic.c | 4 ----
17
target/arm/helper.h | 11 +-
12
1 file changed, 4 deletions(-)
18
target/arm/translate.h | 6 +
19
target/arm/neon_helper.c | 33 ----
20
target/arm/translate-a64.c | 18 +--
21
target/arm/translate.c | 299 +++++++++++++++++++++++++++++++++++--
22
target/arm/vec_helper.c | 88 +++++++++++
23
6 files changed, 389 insertions(+), 66 deletions(-)
13
24
14
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
25
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/intc/armv7m_nvic.c
27
--- a/target/arm/helper.h
17
+++ b/hw/intc/armv7m_nvic.c
28
+++ b/target/arm/helper.h
18
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(neon_abd_s16, i32, i32, i32)
19
* 0xd00..0xd3c - SCS registers
30
DEF_HELPER_2(neon_abd_u32, i32, i32, i32)
20
* 0xd40..0xeff - Reserved or Not implemented
31
DEF_HELPER_2(neon_abd_s32, i32, i32, i32)
21
* 0xf00 - STIR
32
22
- *
33
-DEF_HELPER_2(neon_shl_u8, i32, i32, i32)
23
- * At the moment there is only one thing in the container region,
34
-DEF_HELPER_2(neon_shl_s8, i32, i32, i32)
24
- * but we leave it in place to allow us to pull systick out into
35
DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
25
- * its own device object later.
36
DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
26
*/
37
-DEF_HELPER_2(neon_shl_u32, i32, i32, i32)
27
memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000);
38
-DEF_HELPER_2(neon_shl_s32, i32, i32, i32)
28
/* The system register region goes at the bottom of the priority
39
-DEF_HELPER_2(neon_shl_u64, i64, i64, i64)
40
-DEF_HELPER_2(neon_shl_s64, i64, i64, i64)
41
DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
42
DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
43
DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
44
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
45
DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
46
DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)
47
48
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
49
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
52
+
53
#ifdef TARGET_AARCH64
54
#include "helper-a64.h"
55
#include "helper-sve.h"
56
diff --git a/target/arm/translate.h b/target/arm/translate.h
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/translate.h
59
+++ b/target/arm/translate.h
60
@@ -XXX,XX +XXX,XX @@ uint64_t vfp_expand_imm(int size, uint8_t imm8);
61
extern const GVecGen3 mla_op[4];
62
extern const GVecGen3 mls_op[4];
63
extern const GVecGen3 cmtst_op[4];
64
+extern const GVecGen3 sshl_op[4];
65
+extern const GVecGen3 ushl_op[4];
66
extern const GVecGen2i ssra_op[4];
67
extern const GVecGen2i usra_op[4];
68
extern const GVecGen2i sri_op[4];
69
@@ -XXX,XX +XXX,XX @@ extern const GVecGen4 sqadd_op[4];
70
extern const GVecGen4 uqsub_op[4];
71
extern const GVecGen4 sqsub_op[4];
72
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
73
+void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
74
+void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
75
+void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
76
+void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
77
78
/*
79
* Forward to the isar_feature_* tests given a DisasContext pointer.
80
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/neon_helper.c
83
+++ b/target/arm/neon_helper.c
84
@@ -XXX,XX +XXX,XX @@ NEON_VOP(abd_u32, neon_u32, 1)
85
} else { \
86
dest = src1 << tmp; \
87
}} while (0)
88
-NEON_VOP(shl_u8, neon_u8, 4)
89
NEON_VOP(shl_u16, neon_u16, 2)
90
-NEON_VOP(shl_u32, neon_u32, 1)
91
#undef NEON_FN
92
93
-uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
94
-{
95
- int8_t shift = (int8_t)shiftop;
96
- if (shift >= 64 || shift <= -64) {
97
- val = 0;
98
- } else if (shift < 0) {
99
- val >>= -shift;
100
- } else {
101
- val <<= shift;
102
- }
103
- return val;
104
-}
105
-
106
#define NEON_FN(dest, src1, src2) do { \
107
int8_t tmp; \
108
tmp = (int8_t)src2; \
109
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
110
} else { \
111
dest = src1 << tmp; \
112
}} while (0)
113
-NEON_VOP(shl_s8, neon_s8, 4)
114
NEON_VOP(shl_s16, neon_s16, 2)
115
-NEON_VOP(shl_s32, neon_s32, 1)
116
#undef NEON_FN
117
118
-uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
119
-{
120
- int8_t shift = (int8_t)shiftop;
121
- int64_t val = valop;
122
- if (shift >= 64) {
123
- val = 0;
124
- } else if (shift <= -64) {
125
- val >>= 63;
126
- } else if (shift < 0) {
127
- val >>= -shift;
128
- } else {
129
- val <<= shift;
130
- }
131
- return val;
132
-}
133
-
134
#define NEON_FN(dest, src1, src2) do { \
135
int8_t tmp; \
136
tmp = (int8_t)src2; \
137
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/target/arm/translate-a64.c
140
+++ b/target/arm/translate-a64.c
141
@@ -XXX,XX +XXX,XX @@ static void handle_3same_64(DisasContext *s, int opcode, bool u,
142
break;
143
case 0x8: /* SSHL, USHL */
144
if (u) {
145
- gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
146
+ gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
147
} else {
148
- gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
149
+ gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
150
}
151
break;
152
case 0x9: /* SQSHL, UQSHL */
153
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
154
is_q ? 16 : 8, vec_full_reg_size(s),
155
(u ? uqsub_op : sqsub_op) + size);
156
return;
157
+ case 0x08: /* SSHL, USHL */
158
+ gen_gvec_op3(s, is_q, rd, rn, rm,
159
+ u ? &ushl_op[size] : &sshl_op[size]);
160
+ return;
161
case 0x0c: /* SMAX, UMAX */
162
if (u) {
163
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
164
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
165
genfn = fns[size][u];
166
break;
167
}
168
- case 0x8: /* SSHL, USHL */
169
- {
170
- static NeonGenTwoOpFn * const fns[3][2] = {
171
- { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
172
- { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
173
- { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
174
- };
175
- genfn = fns[size][u];
176
- break;
177
- }
178
case 0x9: /* SQSHL, UQSHL */
179
{
180
static NeonGenTwoOpEnvFn * const fns[3][2] = {
181
diff --git a/target/arm/translate.c b/target/arm/translate.c
182
index XXXXXXX..XXXXXXX 100644
183
--- a/target/arm/translate.c
184
+++ b/target/arm/translate.c
185
@@ -XXX,XX +XXX,XX @@ static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
186
if (u) {
187
switch (size) {
188
case 1: gen_helper_neon_shl_u16(var, var, shift); break;
189
- case 2: gen_helper_neon_shl_u32(var, var, shift); break;
190
+ case 2: gen_ushl_i32(var, var, shift); break;
191
default: abort();
192
}
193
} else {
194
switch (size) {
195
case 1: gen_helper_neon_shl_s16(var, var, shift); break;
196
- case 2: gen_helper_neon_shl_s32(var, var, shift); break;
197
+ case 2: gen_sshl_i32(var, var, shift); break;
198
default: abort();
199
}
200
}
201
@@ -XXX,XX +XXX,XX @@ const GVecGen3 cmtst_op[4] = {
202
.vece = MO_64 },
203
};
204
205
+void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
206
+{
207
+ TCGv_i32 lval = tcg_temp_new_i32();
208
+ TCGv_i32 rval = tcg_temp_new_i32();
209
+ TCGv_i32 lsh = tcg_temp_new_i32();
210
+ TCGv_i32 rsh = tcg_temp_new_i32();
211
+ TCGv_i32 zero = tcg_const_i32(0);
212
+ TCGv_i32 max = tcg_const_i32(32);
213
+
214
+ /*
215
+ * Rely on the TCG guarantee that out of range shifts produce
216
+ * unspecified results, not undefined behaviour (i.e. no trap).
217
+ * Discard out-of-range results after the fact.
218
+ */
219
+ tcg_gen_ext8s_i32(lsh, shift);
220
+ tcg_gen_neg_i32(rsh, lsh);
221
+ tcg_gen_shl_i32(lval, src, lsh);
222
+ tcg_gen_shr_i32(rval, src, rsh);
223
+ tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
224
+ tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
225
+
226
+ tcg_temp_free_i32(lval);
227
+ tcg_temp_free_i32(rval);
228
+ tcg_temp_free_i32(lsh);
229
+ tcg_temp_free_i32(rsh);
230
+ tcg_temp_free_i32(zero);
231
+ tcg_temp_free_i32(max);
232
+}
233
+
234
+void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
235
+{
236
+ TCGv_i64 lval = tcg_temp_new_i64();
237
+ TCGv_i64 rval = tcg_temp_new_i64();
238
+ TCGv_i64 lsh = tcg_temp_new_i64();
239
+ TCGv_i64 rsh = tcg_temp_new_i64();
240
+ TCGv_i64 zero = tcg_const_i64(0);
241
+ TCGv_i64 max = tcg_const_i64(64);
242
+
243
+ /*
244
+ * Rely on the TCG guarantee that out of range shifts produce
245
+ * unspecified results, not undefined behaviour (i.e. no trap).
246
+ * Discard out-of-range results after the fact.
247
+ */
248
+ tcg_gen_ext8s_i64(lsh, shift);
249
+ tcg_gen_neg_i64(rsh, lsh);
250
+ tcg_gen_shl_i64(lval, src, lsh);
251
+ tcg_gen_shr_i64(rval, src, rsh);
252
+ tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
253
+ tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
254
+
255
+ tcg_temp_free_i64(lval);
256
+ tcg_temp_free_i64(rval);
257
+ tcg_temp_free_i64(lsh);
258
+ tcg_temp_free_i64(rsh);
259
+ tcg_temp_free_i64(zero);
260
+ tcg_temp_free_i64(max);
261
+}
262
+
263
+static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
264
+ TCGv_vec src, TCGv_vec shift)
265
+{
266
+ TCGv_vec lval = tcg_temp_new_vec_matching(dst);
267
+ TCGv_vec rval = tcg_temp_new_vec_matching(dst);
268
+ TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
269
+ TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
270
+ TCGv_vec msk, max;
271
+
272
+ tcg_gen_neg_vec(vece, rsh, shift);
273
+ if (vece == MO_8) {
274
+ tcg_gen_mov_vec(lsh, shift);
275
+ } else {
276
+ msk = tcg_temp_new_vec_matching(dst);
277
+ tcg_gen_dupi_vec(vece, msk, 0xff);
278
+ tcg_gen_and_vec(vece, lsh, shift, msk);
279
+ tcg_gen_and_vec(vece, rsh, rsh, msk);
280
+ tcg_temp_free_vec(msk);
281
+ }
282
+
283
+ /*
284
+ * Rely on the TCG guarantee that out of range shifts produce
285
+ * unspecified results, not undefined behaviour (i.e. no trap).
286
+ * Discard out-of-range results after the fact.
287
+ */
288
+ tcg_gen_shlv_vec(vece, lval, src, lsh);
289
+ tcg_gen_shrv_vec(vece, rval, src, rsh);
290
+
291
+ max = tcg_temp_new_vec_matching(dst);
292
+ tcg_gen_dupi_vec(vece, max, 8 << vece);
293
+
294
+ /*
295
+ * The choice of LT (signed) and GEU (unsigned) are biased toward
296
+ * the instructions of the x86_64 host. For MO_8, the whole byte
297
+ * is significant so we must use an unsigned compare; otherwise we
298
+ * have already masked to a byte and so a signed compare works.
299
+ * Other tcg hosts have a full set of comparisons and do not care.
300
+ */
301
+ if (vece == MO_8) {
302
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
303
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
304
+ tcg_gen_andc_vec(vece, lval, lval, lsh);
305
+ tcg_gen_andc_vec(vece, rval, rval, rsh);
306
+ } else {
307
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
308
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
309
+ tcg_gen_and_vec(vece, lval, lval, lsh);
310
+ tcg_gen_and_vec(vece, rval, rval, rsh);
311
+ }
312
+ tcg_gen_or_vec(vece, dst, lval, rval);
313
+
314
+ tcg_temp_free_vec(max);
315
+ tcg_temp_free_vec(lval);
316
+ tcg_temp_free_vec(rval);
317
+ tcg_temp_free_vec(lsh);
318
+ tcg_temp_free_vec(rsh);
319
+}
320
+
321
+static const TCGOpcode ushl_list[] = {
322
+ INDEX_op_neg_vec, INDEX_op_shlv_vec,
323
+ INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
324
+};
325
+
326
+const GVecGen3 ushl_op[4] = {
327
+ { .fniv = gen_ushl_vec,
328
+ .fno = gen_helper_gvec_ushl_b,
329
+ .opt_opc = ushl_list,
330
+ .vece = MO_8 },
331
+ { .fniv = gen_ushl_vec,
332
+ .fno = gen_helper_gvec_ushl_h,
333
+ .opt_opc = ushl_list,
334
+ .vece = MO_16 },
335
+ { .fni4 = gen_ushl_i32,
336
+ .fniv = gen_ushl_vec,
337
+ .opt_opc = ushl_list,
338
+ .vece = MO_32 },
339
+ { .fni8 = gen_ushl_i64,
340
+ .fniv = gen_ushl_vec,
341
+ .opt_opc = ushl_list,
342
+ .vece = MO_64 },
343
+};
344
+
345
+void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
346
+{
347
+ TCGv_i32 lval = tcg_temp_new_i32();
348
+ TCGv_i32 rval = tcg_temp_new_i32();
349
+ TCGv_i32 lsh = tcg_temp_new_i32();
350
+ TCGv_i32 rsh = tcg_temp_new_i32();
351
+ TCGv_i32 zero = tcg_const_i32(0);
352
+ TCGv_i32 max = tcg_const_i32(31);
353
+
354
+ /*
355
+ * Rely on the TCG guarantee that out of range shifts produce
356
+ * unspecified results, not undefined behaviour (i.e. no trap).
357
+ * Discard out-of-range results after the fact.
358
+ */
359
+ tcg_gen_ext8s_i32(lsh, shift);
360
+ tcg_gen_neg_i32(rsh, lsh);
361
+ tcg_gen_shl_i32(lval, src, lsh);
362
+ tcg_gen_umin_i32(rsh, rsh, max);
363
+ tcg_gen_sar_i32(rval, src, rsh);
364
+ tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
365
+ tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
366
+
367
+ tcg_temp_free_i32(lval);
368
+ tcg_temp_free_i32(rval);
369
+ tcg_temp_free_i32(lsh);
370
+ tcg_temp_free_i32(rsh);
371
+ tcg_temp_free_i32(zero);
372
+ tcg_temp_free_i32(max);
373
+}
374
+
375
+void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
376
+{
377
+ TCGv_i64 lval = tcg_temp_new_i64();
378
+ TCGv_i64 rval = tcg_temp_new_i64();
379
+ TCGv_i64 lsh = tcg_temp_new_i64();
380
+ TCGv_i64 rsh = tcg_temp_new_i64();
381
+ TCGv_i64 zero = tcg_const_i64(0);
382
+ TCGv_i64 max = tcg_const_i64(63);
383
+
384
+ /*
385
+ * Rely on the TCG guarantee that out of range shifts produce
386
+ * unspecified results, not undefined behaviour (i.e. no trap).
387
+ * Discard out-of-range results after the fact.
388
+ */
389
+ tcg_gen_ext8s_i64(lsh, shift);
390
+ tcg_gen_neg_i64(rsh, lsh);
391
+ tcg_gen_shl_i64(lval, src, lsh);
392
+ tcg_gen_umin_i64(rsh, rsh, max);
393
+ tcg_gen_sar_i64(rval, src, rsh);
394
+ tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
395
+ tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
396
+
397
+ tcg_temp_free_i64(lval);
398
+ tcg_temp_free_i64(rval);
399
+ tcg_temp_free_i64(lsh);
400
+ tcg_temp_free_i64(rsh);
401
+ tcg_temp_free_i64(zero);
402
+ tcg_temp_free_i64(max);
403
+}
404
+
405
+static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
406
+ TCGv_vec src, TCGv_vec shift)
407
+{
408
+ TCGv_vec lval = tcg_temp_new_vec_matching(dst);
409
+ TCGv_vec rval = tcg_temp_new_vec_matching(dst);
410
+ TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
411
+ TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
412
+ TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
413
+
414
+ /*
415
+ * Rely on the TCG guarantee that out of range shifts produce
416
+ * unspecified results, not undefined behaviour (i.e. no trap).
417
+ * Discard out-of-range results after the fact.
418
+ */
419
+ tcg_gen_neg_vec(vece, rsh, shift);
420
+ if (vece == MO_8) {
421
+ tcg_gen_mov_vec(lsh, shift);
422
+ } else {
423
+ tcg_gen_dupi_vec(vece, tmp, 0xff);
424
+ tcg_gen_and_vec(vece, lsh, shift, tmp);
425
+ tcg_gen_and_vec(vece, rsh, rsh, tmp);
426
+ }
427
+
428
+ /* Bound rsh so out of bound right shift gets -1. */
429
+ tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
430
+ tcg_gen_umin_vec(vece, rsh, rsh, tmp);
431
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
432
+
433
+ tcg_gen_shlv_vec(vece, lval, src, lsh);
434
+ tcg_gen_sarv_vec(vece, rval, src, rsh);
435
+
436
+ /* Select in-bound left shift. */
437
+ tcg_gen_andc_vec(vece, lval, lval, tmp);
438
+
439
+ /* Select between left and right shift. */
440
+ if (vece == MO_8) {
441
+ tcg_gen_dupi_vec(vece, tmp, 0);
442
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
443
+ } else {
444
+ tcg_gen_dupi_vec(vece, tmp, 0x80);
445
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
446
+ }
447
+
448
+ tcg_temp_free_vec(lval);
449
+ tcg_temp_free_vec(rval);
450
+ tcg_temp_free_vec(lsh);
451
+ tcg_temp_free_vec(rsh);
452
+ tcg_temp_free_vec(tmp);
453
+}
454
+
455
+static const TCGOpcode sshl_list[] = {
456
+ INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
457
+ INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
458
+};
459
+
460
+const GVecGen3 sshl_op[4] = {
461
+ { .fniv = gen_sshl_vec,
462
+ .fno = gen_helper_gvec_sshl_b,
463
+ .opt_opc = sshl_list,
464
+ .vece = MO_8 },
465
+ { .fniv = gen_sshl_vec,
466
+ .fno = gen_helper_gvec_sshl_h,
467
+ .opt_opc = sshl_list,
468
+ .vece = MO_16 },
469
+ { .fni4 = gen_sshl_i32,
470
+ .fniv = gen_sshl_vec,
471
+ .opt_opc = sshl_list,
472
+ .vece = MO_32 },
473
+ { .fni8 = gen_sshl_i64,
474
+ .fniv = gen_sshl_vec,
475
+ .opt_opc = sshl_list,
476
+ .vece = MO_64 },
477
+};
478
+
479
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
480
TCGv_vec a, TCGv_vec b)
481
{
482
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
483
vec_size, vec_size);
484
}
485
return 0;
486
+
487
+ case NEON_3R_VSHL:
488
+ /* Note the operation is vshl vd,vm,vn */
489
+ tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
490
+ u ? &ushl_op[size] : &sshl_op[size]);
491
+ return 0;
492
}
493
494
if (size == 3) {
495
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
496
neon_load_reg64(cpu_V0, rn + pass);
497
neon_load_reg64(cpu_V1, rm + pass);
498
switch (op) {
499
- case NEON_3R_VSHL:
500
- if (u) {
501
- gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
502
- } else {
503
- gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
504
- }
505
- break;
506
case NEON_3R_VQSHL:
507
if (u) {
508
gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
509
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
510
}
511
pairwise = 0;
512
switch (op) {
513
- case NEON_3R_VSHL:
514
case NEON_3R_VQSHL:
515
case NEON_3R_VRSHL:
516
case NEON_3R_VQRSHL:
517
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
518
case NEON_3R_VHSUB:
519
GEN_NEON_INTEGER_OP(hsub);
520
break;
521
- case NEON_3R_VSHL:
522
- GEN_NEON_INTEGER_OP(shl);
523
- break;
524
case NEON_3R_VQSHL:
525
GEN_NEON_INTEGER_OP_ENV(qshl);
526
break;
527
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
528
}
529
} else {
530
if (input_unsigned) {
531
- gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
532
+ gen_ushl_i64(cpu_V0, in, tmp64);
533
} else {
534
- gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
535
+ gen_sshl_i64(cpu_V0, in, tmp64);
536
}
537
}
538
tmp = tcg_temp_new_i32();
539
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
540
index XXXXXXX..XXXXXXX 100644
541
--- a/target/arm/vec_helper.c
542
+++ b/target/arm/vec_helper.c
543
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
544
do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
545
get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
546
}
547
+
548
+void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
549
+{
550
+ intptr_t i, opr_sz = simd_oprsz(desc);
551
+ int8_t *d = vd, *n = vn, *m = vm;
552
+
553
+ for (i = 0; i < opr_sz; ++i) {
554
+ int8_t mm = m[i];
555
+ int8_t nn = n[i];
556
+ int8_t res = 0;
557
+ if (mm >= 0) {
558
+ if (mm < 8) {
559
+ res = nn << mm;
560
+ }
561
+ } else {
562
+ res = nn >> (mm > -8 ? -mm : 7);
563
+ }
564
+ d[i] = res;
565
+ }
566
+ clear_tail(d, opr_sz, simd_maxsz(desc));
567
+}
568
+
569
+void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
570
+{
571
+ intptr_t i, opr_sz = simd_oprsz(desc);
572
+ int16_t *d = vd, *n = vn, *m = vm;
573
+
574
+ for (i = 0; i < opr_sz / 2; ++i) {
575
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
576
+ int16_t nn = n[i];
577
+ int16_t res = 0;
578
+ if (mm >= 0) {
579
+ if (mm < 16) {
580
+ res = nn << mm;
581
+ }
582
+ } else {
583
+ res = nn >> (mm > -16 ? -mm : 15);
584
+ }
585
+ d[i] = res;
586
+ }
587
+ clear_tail(d, opr_sz, simd_maxsz(desc));
588
+}
589
+
590
+void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
591
+{
592
+ intptr_t i, opr_sz = simd_oprsz(desc);
593
+ uint8_t *d = vd, *n = vn, *m = vm;
594
+
595
+ for (i = 0; i < opr_sz; ++i) {
596
+ int8_t mm = m[i];
597
+ uint8_t nn = n[i];
598
+ uint8_t res = 0;
599
+ if (mm >= 0) {
600
+ if (mm < 8) {
601
+ res = nn << mm;
602
+ }
603
+ } else {
604
+ if (mm > -8) {
605
+ res = nn >> -mm;
606
+ }
607
+ }
608
+ d[i] = res;
609
+ }
610
+ clear_tail(d, opr_sz, simd_maxsz(desc));
611
+}
612
+
613
+void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
614
+{
615
+ intptr_t i, opr_sz = simd_oprsz(desc);
616
+ uint16_t *d = vd, *n = vn, *m = vm;
617
+
618
+ for (i = 0; i < opr_sz / 2; ++i) {
619
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
620
+ uint16_t nn = n[i];
621
+ uint16_t res = 0;
622
+ if (mm >= 0) {
623
+ if (mm < 16) {
624
+ res = nn << mm;
625
+ }
626
+ } else {
627
+ if (mm > -16) {
628
+ res = nn >> -mm;
629
+ }
630
+ }
631
+ d[i] = res;
632
+ }
633
+ clear_tail(d, opr_sz, simd_maxsz(desc));
634
+}
29
--
635
--
30
2.7.4
636
2.20.1
31
637
32
638
1
M profile cores can never trap on WFI or WFE instructions. Check for
1
From: Richard Henderson <richard.henderson@linaro.org>
2
M profile in check_wfx_trap() to ensure this.
3
2
4
The existing code will do the right thing for v7M cores because
3
The gvec form will be needed for implementing SVE2.
5
the hcr_el2 and scr_el3 registers will be all-zeroes and so we
6
won't attempt to trap, but when we start setting ARM_FEATURE_V8
7
for v8M cores the v8A handling of SCTLR.nTWE and .nTWI will not
8
give the right results.
9
4
5
Extend the implementation to operate on uint64_t instead of uint32_t.
6
Use a counted inner loop instead of terminating when op1 goes to zero,
7
looking toward the required implementation for ARMv8.4-DIT.
8
9
Tested-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20200216214232.4230-3-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 1501692241-23310-3-git-send-email-peter.maydell@linaro.org
14
---
14
---
15
target/arm/op_helper.c | 5 +++++
15
target/arm/helper.h | 3 ++-
16
1 file changed, 5 insertions(+)
16
target/arm/neon_helper.c | 22 ----------------------
17
target/arm/translate-a64.c | 10 +++-------
18
target/arm/translate.c | 11 ++++-------
19
target/arm/vec_helper.c | 30 ++++++++++++++++++++++++++++++
20
5 files changed, 39 insertions(+), 37 deletions(-)
17
21
18
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
22
diff --git a/target/arm/helper.h b/target/arm/helper.h
19
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/op_helper.c
24
--- a/target/arm/helper.h
21
+++ b/target/arm/op_helper.c
25
+++ b/target/arm/helper.h
22
@@ -XXX,XX +XXX,XX @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
23
int cur_el = arm_current_el(env);
27
DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
24
uint64_t mask;
28
DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
25
29
DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
26
+ if (arm_feature(env, ARM_FEATURE_M)) {
30
-DEF_HELPER_2(neon_mul_p8, i32, i32, i32)
27
+ /* M profile cores can never trap WFI/WFE. */
31
DEF_HELPER_2(neon_mull_p8, i64, i32, i32)
28
+ return 0;
32
33
DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
34
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
35
DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
38
+DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
+
40
#ifdef TARGET_AARCH64
41
#include "helper-a64.h"
42
#include "helper-sve.h"
43
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/neon_helper.c
46
+++ b/target/arm/neon_helper.c
47
@@ -XXX,XX +XXX,XX @@ NEON_VOP(mul_u16, neon_u16, 2)
48
49
/* Polynomial multiplication is like integer multiplication except the
50
partial products are XORed, not added. */
51
-uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
52
-{
53
- uint32_t mask;
54
- uint32_t result;
55
- result = 0;
56
- while (op1) {
57
- mask = 0;
58
- if (op1 & 1)
59
- mask |= 0xff;
60
- if (op1 & (1 << 8))
61
- mask |= (0xff << 8);
62
- if (op1 & (1 << 16))
63
- mask |= (0xff << 16);
64
- if (op1 & (1 << 24))
65
- mask |= (0xff << 24);
66
- result ^= op2 & mask;
67
- op1 = (op1 >> 1) & 0x7f7f7f7f;
68
- op2 = (op2 << 1) & 0xfefefefe;
69
- }
70
- return result;
71
-}
72
-
73
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
74
{
75
uint64_t result = 0;
76
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/arm/translate-a64.c
79
+++ b/target/arm/translate-a64.c
80
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
81
case 0x13: /* MUL, PMUL */
82
if (!u) { /* MUL */
83
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
84
- return;
85
+ } else { /* PMUL */
86
+ gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
87
}
88
- break;
89
+ return;
90
case 0x12: /* MLA, MLS */
91
if (u) {
92
gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
93
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
94
genfn = fns[size][u];
95
break;
96
}
97
- case 0x13: /* MUL, PMUL */
98
- assert(u); /* PMUL */
99
- assert(size == 0);
100
- genfn = gen_helper_neon_mul_p8;
101
- break;
102
case 0x16: /* SQDMULH, SQRDMULH */
103
{
104
static NeonGenTwoOpEnvFn * const fns[2][2] = {
105
diff --git a/target/arm/translate.c b/target/arm/translate.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/translate.c
108
+++ b/target/arm/translate.c
109
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
110
111
case NEON_3R_VMUL: /* VMUL */
112
if (u) {
113
- /* Polynomial case allows only P8 and is handled below. */
114
+ /* Polynomial case allows only P8. */
115
if (size != 0) {
116
return 1;
117
}
118
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
119
+ 0, gen_helper_gvec_pmul_b);
120
} else {
121
tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
122
vec_size, vec_size);
123
- return 0;
124
}
125
- break;
126
+ return 0;
127
128
case NEON_3R_VML: /* VMLA, VMLS */
129
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
130
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
131
tmp2 = neon_load_reg(rd, pass);
132
gen_neon_add(size, tmp, tmp2);
133
break;
134
- case NEON_3R_VMUL:
135
- /* VMUL.P8; other cases already eliminated. */
136
- gen_helper_neon_mul_p8(tmp, tmp, tmp2);
137
- break;
138
case NEON_3R_VPMAX:
139
GEN_NEON_INTEGER_OP(pmax);
140
break;
141
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
142
index XXXXXXX..XXXXXXX 100644
143
--- a/target/arm/vec_helper.c
144
+++ b/target/arm/vec_helper.c
145
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
146
}
147
clear_tail(d, opr_sz, simd_maxsz(desc));
148
}
149
+
150
+/*
151
+ * 8x8->8 polynomial multiply.
152
+ *
153
+ * Polynomial multiplication is like integer multiplication except the
154
+ * partial products are XORed, not added.
155
+ *
156
+ * TODO: expose this as a generic vector operation, as it is a common
157
+ * crypto building block.
158
+ */
159
+void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
160
+{
161
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
162
+ uint64_t *d = vd, *n = vn, *m = vm;
163
+
164
+ for (i = 0; i < opr_sz / 8; ++i) {
165
+ uint64_t nn = n[i];
166
+ uint64_t mm = m[i];
167
+ uint64_t rr = 0;
168
+
169
+ for (j = 0; j < 8; ++j) {
170
+ uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
171
+ rr ^= mm & mask;
172
+ mm = (mm << 1) & 0xfefefefefefefefeull;
173
+ nn >>= 1;
174
+ }
175
+ d[i] = rr;
29
+ }
176
+ }
30
+
177
+ clear_tail(d, opr_sz, simd_maxsz(desc));
31
/* If we are currently in EL0 then we need to check if SCTLR is set up for
178
+}
32
* WFx instructions being trapped to EL1. These trap bits don't exist in v7.
33
*/
34
--
179
--
35
2.7.4
180
2.20.1
36
181
37
182
diff view generated by jsdifflib
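The gvec_pmul_b helper above evaluates eight byte lanes at once inside
a uint64_t; as a plainer reference, the same carry-less (GF(2)) product
for a single byte lane looks like this. Sketch only, not part of the
patch; assumes <stdint.h>:

    static inline uint8_t pmul_byte(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;

        for (int i = 0; i < 8; ++i) {
            if (b & (1 << i)) {
                r ^= (uint8_t)(a << i); /* partial products XORed */
            }
        }
        return r;
    }

PMUL keeps only the low 8 bits of the 16-bit carry-less product, which
is why the shifted partial products may simply be truncated here.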
1
Make the arm_cpu_dump_state() debug logging handle the M-profile XPSR
1
From: Richard Henderson <richard.henderson@linaro.org>
2
rather than assuming it's an A-profile CPSR. On M profile the PSR
3
line of a register dump will now look like this:
4
2
5
XPSR=41000000 -Z-- T priv-thread
3
The gvec form will be needed for implementing SVE2.
6
4
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200216214232.4230-4-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 1501692241-23310-12-git-send-email-peter.maydell@linaro.org
11
---
10
---
12
target/arm/translate.c | 58 ++++++++++++++++++++++++++++++++++----------------
11
target/arm/helper.h | 4 +---
13
1 file changed, 40 insertions(+), 18 deletions(-)
12
target/arm/neon_helper.c | 30 ------------------------------
13
target/arm/translate-a64.c | 28 +++-------------------------
14
target/arm/translate.c | 16 ++--------------
15
target/arm/vec_helper.c | 33 +++++++++++++++++++++++++++++++++
16
5 files changed, 39 insertions(+), 72 deletions(-)
14
17
18
diff --git a/target/arm/helper.h b/target/arm/helper.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper.h
21
+++ b/target/arm/helper.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
23
DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
24
DEF_HELPER_2(dc_zva, void, env, i64)
25
26
-DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64)
27
-DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64)
28
-
29
DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
30
void, ptr, ptr, ptr, ptr, i32)
31
DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
32
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
33
DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
35
DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
38
#ifdef TARGET_AARCH64
39
#include "helper-a64.h"
40
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/neon_helper.c
43
+++ b/target/arm/neon_helper.c
44
@@ -XXX,XX +XXX,XX @@ void HELPER(neon_zip16)(void *vd, void *vm)
45
rm[0] = m0;
46
rd[0] = d0;
47
}
48
-
49
-/* Helper function for 64 bit polynomial multiply case:
50
- * perform PolynomialMult(op1, op2) and return either the top or
51
- * bottom half of the 128 bit result.
52
- */
53
-uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
54
-{
55
- int bitnum;
56
- uint64_t res = 0;
57
-
58
- for (bitnum = 0; bitnum < 64; bitnum++) {
59
- if (op1 & (1ULL << bitnum)) {
60
- res ^= op2 << bitnum;
61
- }
62
- }
63
- return res;
64
-}
65
-uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
66
-{
67
- int bitnum;
68
- uint64_t res = 0;
69
-
70
- /* bit 0 of op1 can't influence the high 64 bits at all */
71
- for (bitnum = 1; bitnum < 64; bitnum++) {
72
- if (op1 & (1ULL << bitnum)) {
73
- res ^= op2 >> (64 - bitnum);
74
- }
75
- }
76
- return res;
77
-}
78
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
79
index XXXXXXX..XXXXXXX 100644
80
--- a/target/arm/translate-a64.c
81
+++ b/target/arm/translate-a64.c
82
@@ -XXX,XX +XXX,XX @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
83
clear_vec_high(s, is_q, rd);
84
}
85
86
-static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
87
-{
88
- /* PMULL of 64 x 64 -> 128 is an odd special case because it
89
- * is the only three-reg-diff instruction which produces a
90
- * 128-bit wide result from a single operation. However since
91
- * it's possible to calculate the two halves more or less
92
- * separately we just use two helper calls.
93
- */
94
- TCGv_i64 tcg_op1 = tcg_temp_new_i64();
95
- TCGv_i64 tcg_op2 = tcg_temp_new_i64();
96
- TCGv_i64 tcg_res = tcg_temp_new_i64();
97
-
98
- read_vec_element(s, tcg_op1, rn, is_q, MO_64);
99
- read_vec_element(s, tcg_op2, rm, is_q, MO_64);
100
- gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
101
- write_vec_element(s, tcg_res, rd, 0, MO_64);
102
- gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
103
- write_vec_element(s, tcg_res, rd, 1, MO_64);
104
-
105
- tcg_temp_free_i64(tcg_op1);
106
- tcg_temp_free_i64(tcg_op2);
107
- tcg_temp_free_i64(tcg_res);
108
-}
109
-
110
/* AdvSIMD three different
111
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
112
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
113
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
114
if (!fp_access_check(s)) {
115
return;
116
}
117
- handle_pmull_64(s, is_q, rd, rn, rm);
118
+ /* The Q field specifies lo/hi half input for this insn. */
119
+ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
120
+ gen_helper_gvec_pmull_q);
121
return;
122
}
123
goto is_widening;
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
124
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
index XXXXXXX..XXXXXXX 100644
125
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.c
126
--- a/target/arm/translate.c
18
+++ b/target/arm/translate.c
127
+++ b/target/arm/translate.c
19
@@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
128
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
20
ARMCPU *cpu = ARM_CPU(cs);
129
* outside the loop below as it only performs a single pass.
21
CPUARMState *env = &cpu->env;
130
*/
22
int i;
131
if (op == 14 && size == 2) {
23
- uint32_t psr;
132
- TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
24
- const char *ns_status;
133
-
25
134
if (!dc_isar_feature(aa32_pmull, s)) {
26
if (is_a64(env)) {
135
return 1;
27
aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
136
}
28
@@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
137
- tcg_rn = tcg_temp_new_i64();
29
else
138
- tcg_rm = tcg_temp_new_i64();
30
cpu_fprintf(f, " ");
139
- tcg_rd = tcg_temp_new_i64();
140
- neon_load_reg64(tcg_rn, rn);
141
- neon_load_reg64(tcg_rm, rm);
142
- gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
143
- neon_store_reg64(tcg_rd, rd);
144
- gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
145
- neon_store_reg64(tcg_rd, rd + 1);
146
- tcg_temp_free_i64(tcg_rn);
147
- tcg_temp_free_i64(tcg_rm);
148
- tcg_temp_free_i64(tcg_rd);
149
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
150
+ 0, gen_helper_gvec_pmull_q);
151
return 0;
152
}
153
154
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/target/arm/vec_helper.c
157
+++ b/target/arm/vec_helper.c
158
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
31
}
159
}
32
- psr = cpsr_read(env);
160
clear_tail(d, opr_sz, simd_maxsz(desc));
33
161
}
34
- if (arm_feature(env, ARM_FEATURE_EL3) &&
35
- (psr & CPSR_M) != ARM_CPU_MODE_MON) {
36
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
37
+ if (arm_feature(env, ARM_FEATURE_M)) {
38
+ uint32_t xpsr = xpsr_read(env);
39
+ const char *mode;
40
+
162
+
41
+ if (xpsr & XPSR_EXCP) {
163
+/*
42
+ mode = "handler";
164
+ * 64x64->128 polynomial multiply.
43
+ } else {
165
+ * Because of the lanes are not accessed in strict columns,
44
+ if (env->v7m.control & R_V7M_CONTROL_NPRIV_MASK) {
166
+ * this probably cannot be turned into a generic helper.
45
+ mode = "unpriv-thread";
167
+ */
46
+ } else {
168
+void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
47
+ mode = "priv-thread";
169
+{
48
+ }
170
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
171
+ intptr_t hi = simd_data(desc);
172
+ uint64_t *d = vd, *n = vn, *m = vm;
173
+
174
+ for (i = 0; i < opr_sz / 8; i += 2) {
175
+ uint64_t nn = n[i + hi];
176
+ uint64_t mm = m[i + hi];
177
+ uint64_t rhi = 0;
178
+ uint64_t rlo = 0;
179
+
180
+ /* Bit 0 can only influence the low 64-bit result. */
181
+ if (nn & 1) {
182
+ rlo = mm;
49
+ }
183
+ }
50
+
184
+
51
+ cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s\n",
185
+ for (j = 1; j < 64; ++j) {
52
+ xpsr,
186
+ uint64_t mask = -((nn >> j) & 1);
53
+ xpsr & XPSR_N ? 'N' : '-',
187
+ rlo ^= (mm << j) & mask;
54
+ xpsr & XPSR_Z ? 'Z' : '-',
188
+ rhi ^= (mm >> (64 - j)) & mask;
55
+ xpsr & XPSR_C ? 'C' : '-',
56
+ xpsr & XPSR_V ? 'V' : '-',
57
+ xpsr & XPSR_T ? 'T' : 'A',
58
+ mode);
59
} else {
60
- ns_status = "";
61
- }
62
-
63
- cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
64
- psr,
65
- psr & (1 << 31) ? 'N' : '-',
66
- psr & (1 << 30) ? 'Z' : '-',
67
- psr & (1 << 29) ? 'C' : '-',
68
- psr & (1 << 28) ? 'V' : '-',
69
- psr & CPSR_T ? 'T' : 'A',
70
- ns_status,
71
- cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
72
+ uint32_t psr = cpsr_read(env);
73
+ const char *ns_status = "";
74
+
75
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
76
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
77
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
78
+ }
189
+ }
79
+
190
+ d[i] = rlo;
80
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
191
+ d[i + 1] = rhi;
81
+ psr,
82
+ psr & CPSR_N ? 'N' : '-',
83
+ psr & CPSR_Z ? 'Z' : '-',
84
+ psr & CPSR_C ? 'C' : '-',
85
+ psr & CPSR_V ? 'V' : '-',
86
+ psr & CPSR_T ? 'T' : 'A',
87
+ ns_status,
88
+ cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
89
+ }
192
+ }
90
193
+ clear_tail(d, opr_sz, simd_maxsz(desc));
91
if (flags & CPU_DUMP_FPU) {
194
+}
92
int numvfpregs = 0;
93
--
195
--
94
2.7.4
196
2.20.1
95
197
96
198
diff view generated by jsdifflib
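For reference, the 64x64->128 carry-less multiply that gvec_pmull_q
performs for each selected pair of lanes, written as a scalar routine.
Sketch only (assumes <stdint.h>); rhi:rlo form the 128-bit result:

    static void pmull64(uint64_t n, uint64_t m,
                        uint64_t *rhi, uint64_t *rlo)
    {
        uint64_t hi = 0, lo = 0;

        for (int j = 0; j < 64; ++j) {
            uint64_t mask = -((n >> j) & 1);

            lo ^= (m << j) & mask;
            if (j != 0) {       /* bit 0 cannot reach the high half */
                hi ^= (m >> (64 - j)) & mask;
            }
        }
        *rhi = hi;
        *rlo = lo;
    }

As the helper above shows, the Q field only selects whether the low or
high 64-bit input lanes are multiplied (the n[i + hi]/m[i + hi] reads).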
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
We still need two different helpers, since NEON and SVE2 get the
4
inputs from different locations within the source vector. However,
5
we can convert both to the same internal form for computation.
6
7
The sve2 helper is not used yet, but adding it with this patch
8
helps illustrate why the neon changes are helpful.
9
10
Tested-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20200216214232.4230-5-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
target/arm/helper-sve.h | 2 ++
17
target/arm/helper.h | 3 +-
18
target/arm/neon_helper.c | 32 --------------------
19
target/arm/translate-a64.c | 27 +++++++++++------
20
target/arm/translate.c | 26 ++++++++---------
21
target/arm/vec_helper.c | 60 ++++++++++++++++++++++++++++++++++++++
22
6 files changed, 95 insertions(+), 55 deletions(-)
23
24
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/helper-sve.h
27
+++ b/target/arm/helper-sve.h
28
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
29
void, env, ptr, ptr, ptr, tl, i32)
30
DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
31
void, env, ptr, ptr, ptr, tl, i32)
32
+
33
+DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
diff --git a/target/arm/helper.h b/target/arm/helper.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/helper.h
37
+++ b/target/arm/helper.h
38
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
39
DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
40
DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
41
DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
42
-DEF_HELPER_2(neon_mull_p8, i64, i32, i32)
43
44
DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
45
DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
46
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
47
DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
48
DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
49
50
+DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
51
+
52
#ifdef TARGET_AARCH64
53
#include "helper-a64.h"
54
#include "helper-sve.h"
55
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/neon_helper.c
58
+++ b/target/arm/neon_helper.c
59
@@ -XXX,XX +XXX,XX @@ NEON_VOP(mul_u8, neon_u8, 4)
60
NEON_VOP(mul_u16, neon_u16, 2)
61
#undef NEON_FN
62
63
-/* Polynomial multiplication is like integer multiplication except the
64
- partial products are XORed, not added. */
65
-uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
66
-{
67
- uint64_t result = 0;
68
- uint64_t mask;
69
- uint64_t op2ex = op2;
70
- op2ex = (op2ex & 0xff) |
71
- ((op2ex & 0xff00) << 8) |
72
- ((op2ex & 0xff0000) << 16) |
73
- ((op2ex & 0xff000000) << 24);
74
- while (op1) {
75
- mask = 0;
76
- if (op1 & 1) {
77
- mask |= 0xffff;
78
- }
79
- if (op1 & (1 << 8)) {
80
- mask |= (0xffffU << 16);
81
- }
82
- if (op1 & (1 << 16)) {
83
- mask |= (0xffffULL << 32);
84
- }
85
- if (op1 & (1 << 24)) {
86
- mask |= (0xffffULL << 48);
87
- }
88
- result ^= op2ex & mask;
89
- op1 = (op1 >> 1) & 0x7f7f7f7f;
90
- op2ex <<= 1;
91
- }
92
- return result;
93
-}
94
-
95
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
96
NEON_VOP(tst_u8, neon_u8, 4)
97
NEON_VOP(tst_u16, neon_u16, 2)
98
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate-a64.c
101
+++ b/target/arm/translate-a64.c
102
@@ -XXX,XX +XXX,XX @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
103
gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
104
tcg_passres, tcg_passres);
105
break;
106
- case 14: /* PMULL */
107
- assert(size == 0);
108
- gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
109
- break;
110
default:
111
g_assert_not_reached();
112
}
113
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
114
handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
115
break;
116
case 14: /* PMULL, PMULL2 */
117
- if (is_u || size == 1 || size == 2) {
118
+ if (is_u) {
119
unallocated_encoding(s);
120
return;
121
}
122
- if (size == 3) {
123
+ switch (size) {
124
+ case 0: /* PMULL.P8 */
125
+ if (!fp_access_check(s)) {
126
+ return;
127
+ }
128
+ /* The Q field specifies lo/hi half input for this insn. */
129
+ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
130
+ gen_helper_neon_pmull_h);
131
+ break;
132
+
133
+ case 3: /* PMULL.P64 */
134
if (!dc_isar_feature(aa64_pmull, s)) {
135
unallocated_encoding(s);
136
return;
137
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
138
/* The Q field specifies lo/hi half input for this insn. */
139
gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
140
gen_helper_gvec_pmull_q);
141
- return;
142
+ break;
143
+
144
+ default:
145
+ unallocated_encoding(s);
146
+ break;
147
}
148
- goto is_widening;
149
+ return;
150
case 9: /* SQDMLAL, SQDMLAL2 */
151
case 11: /* SQDMLSL, SQDMLSL2 */
152
case 13: /* SQDMULL, SQDMULL2 */
153
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
154
unallocated_encoding(s);
155
return;
156
}
157
- is_widening:
158
if (!fp_access_check(s)) {
159
return;
160
}
161
diff --git a/target/arm/translate.c b/target/arm/translate.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/target/arm/translate.c
164
+++ b/target/arm/translate.c
165
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
166
return 1;
167
}
168
169
- /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
170
- * outside the loop below as it only performs a single pass.
171
- */
172
- if (op == 14 && size == 2) {
173
- if (!dc_isar_feature(aa32_pmull, s)) {
174
- return 1;
175
+ /* Handle polynomial VMULL in a single pass. */
176
+ if (op == 14) {
177
+ if (size == 0) {
178
+ /* VMULL.P8 */
179
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
180
+ 0, gen_helper_neon_pmull_h);
181
+ } else {
182
+ /* VMULL.P64 */
183
+ if (!dc_isar_feature(aa32_pmull, s)) {
184
+ return 1;
185
+ }
186
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
187
+ 0, gen_helper_gvec_pmull_q);
188
}
189
- tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
190
- 0, gen_helper_gvec_pmull_q);
191
return 0;
192
}
193
194
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
195
/* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
196
gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
197
break;
198
- case 14: /* Polynomial VMULL */
199
- gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
200
- tcg_temp_free_i32(tmp2);
201
- tcg_temp_free_i32(tmp);
202
- break;
203
default: /* 15 is RESERVED: caught earlier */
204
abort();
205
}
206
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
207
index XXXXXXX..XXXXXXX 100644
208
--- a/target/arm/vec_helper.c
209
+++ b/target/arm/vec_helper.c
210
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
211
}
212
clear_tail(d, opr_sz, simd_maxsz(desc));
213
}
214
+
215
+/*
216
+ * 8x8->16 polynomial multiply.
217
+ *
218
+ * The byte inputs are expanded to (or extracted from) half-words.
219
+ * Note that neon and sve2 get the inputs from different positions.
220
+ * This allows 4 bytes to be processed in parallel with uint64_t.
221
+ */
222
+
223
+static uint64_t expand_byte_to_half(uint64_t x)
224
+{
225
+ return (x & 0x000000ff)
226
+ | ((x & 0x0000ff00) << 8)
227
+ | ((x & 0x00ff0000) << 16)
228
+ | ((x & 0xff000000) << 24);
229
+}
230
+
231
+static uint64_t pmull_h(uint64_t op1, uint64_t op2)
232
+{
233
+ uint64_t result = 0;
234
+ int i;
235
+
236
+ for (i = 0; i < 8; ++i) {
237
+ uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
238
+ result ^= op2 & mask;
239
+ op1 >>= 1;
240
+ op2 <<= 1;
241
+ }
242
+ return result;
243
+}
244
+
245
+void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
246
+{
247
+ int hi = simd_data(desc);
248
+ uint64_t *d = vd, *n = vn, *m = vm;
249
+ uint64_t nn = n[hi], mm = m[hi];
250
+
251
+ d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
252
+ nn >>= 32;
253
+ mm >>= 32;
254
+ d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
255
+
256
+ clear_tail(d, 16, simd_maxsz(desc));
257
+}
258
+
259
+#ifdef TARGET_AARCH64
260
+void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
261
+{
262
+ int shift = simd_data(desc) * 8;
263
+ intptr_t i, opr_sz = simd_oprsz(desc);
264
+ uint64_t *d = vd, *n = vn, *m = vm;
265
+
266
+ for (i = 0; i < opr_sz / 8; ++i) {
267
+ uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
268
+ uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;
269
+
270
+ d[i] = pmull_h(nn, mm);
271
+ }
272
+}
273
+#endif
274
--
275
2.20.1
276
277
diff view generated by jsdifflib
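A small illustration of the widening trick used by neon_pmull_h above:
four bytes are spread into four 16-bit lanes so that four 8x8->16
carry-less products can be formed in one uint64_t without crossing
lane boundaries. The function body is the one from the patch; the
example value is mine:

    static uint64_t expand_byte_to_half(uint64_t x)
    {
        return (x & 0x000000ff)
            | ((x & 0x0000ff00) << 8)
            | ((x & 0x00ff0000) << 16)
            | ((x & 0xff000000) << 24);
    }

    /* expand_byte_to_half(0x44332211) == 0x0044003300220011 */

The NEON helper takes its eight input bytes from the low or high
64-bit half of the source vector, while the SVE2 helper masks out the
even- or odd-numbered bytes of every 64-bit lane, which is why the two
wrappers differ only in how they gather their inputs.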
1
From: Pranith Kumar <bobby.prani@gmail.com>
1
From: Francisco Iglesias <francisco.iglesias@xilinx.com>
2
2
3
Fix the following warning:
3
Correct the number of dummy cycles required by the FAST_READ_4 command (to
4
be eight, one dummy byte).
4
5
5
/home/pranith/qemu/hw/intc/arm_gicv3_kvm.c:296:17: warning: logical not is only applied to the left hand side of this bitwise operator [-Wlogical-not-parentheses]
6
Fixes: ef06ca3946 ("xilinx_spips: Add support for RX discard and RX drain")
6
if (!c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) {
7
Suggested-by: Cédric Le Goater <clg@kaod.org>
7
^ ~
8
Signed-off-by: Francisco Iglesias <frasse.iglesias@gmail.com>
8
/home/pranith/qemu/hw/intc/arm_gicv3_kvm.c:296:17: note: add parentheses after the '!' to evaluate the bitwise operator first
9
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
9
if (!c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) {
10
Message-id: 20200218113350.6090-1-frasse.iglesias@gmail.com
10
^
11
/home/pranith/qemu/hw/intc/arm_gicv3_kvm.c:296:17: note: add parentheses around left hand side expression to silence this warning
12
if (!c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) {
13
^
14
15
This logic error meant we were not setting the PTZ
16
bit when we should -- luckily as the comment suggests
17
this wouldn't have had any effects beyond making GIC
18
initialization take a little longer.
19
20
Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
21
Message-id: 20170829173226.7625-1-bobby.prani@gmail.com
22
Cc: qemu-stable@nongnu.org
23
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
---
12
---
26
hw/intc/arm_gicv3_kvm.c | 2 +-
13
hw/ssi/xilinx_spips.c | 2 +-
27
1 file changed, 1 insertion(+), 1 deletion(-)
14
1 file changed, 1 insertion(+), 1 deletion(-)
28
15
29
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
16
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
30
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/intc/arm_gicv3_kvm.c
18
--- a/hw/ssi/xilinx_spips.c
32
+++ b/hw/intc/arm_gicv3_kvm.c
19
+++ b/hw/ssi/xilinx_spips.c
33
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_gicv3_put(GICv3State *s)
20
@@ -XXX,XX +XXX,XX @@ static int xilinx_spips_num_dummies(XilinxQSPIPS *qs, uint8_t command)
34
kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);
21
case FAST_READ:
35
22
case DOR:
36
reg64 = c->gicr_pendbaser;
23
case QOR:
37
- if (!c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) {
24
+ case FAST_READ_4:
38
+ if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
25
case DOR_4:
39
/* Setting PTZ is advised if LPIs are disabled, to reduce
26
case QOR_4:
40
* GIC initialization time.
27
return 1;
41
*/
28
case DIOR:
29
- case FAST_READ_4:
30
case DIOR_4:
31
return 2;
32
case QIOR:
42
--
33
--
43
2.7.4
34
2.20.1
44
35
45
36
diff view generated by jsdifflib
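A side note on the operator-precedence bug fixed in the GICv3 hunk
above: '!' binds more tightly than '&', so the unparenthesised form
negates the whole register first and only then applies the mask, which
is what the compiler warning was pointing at. Tiny illustration with a
made-up value:

    uint32_t gicr_ctlr = 0x2;        /* ENABLE_LPIS (bit 0) is clear */
    int wrong = !gicr_ctlr & 1;      /* (!0x2) & 1  -> 0 & 1 -> 0    */
    int right = !(gicr_ctlr & 1);    /* !(0x2 & 1)  -> !0    -> 1    */

With the parentheses in place, PTZ is set whenever LPIs are disabled,
as the adjacent comment intends.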
1
From: Andrew Jones <drjones@redhat.com>
1
From: Guenter Roeck <linux@roeck-us.net>
2
2
3
Move the in-kernel-irqchip test to only guard the set-irq
3
Booting the r2d machine from flash fails because flash is not discovered.
4
stage, not the init stage of the PMU. Also add the PMU to
4
Looking at the flattened memory tree, we see the following.
5
the KVM device irq line synchronization to enable its use.
6
5
7
Signed-off-by: Andrew Jones <drjones@redhat.com>
6
FlatView #1
8
Reviewed-by: Christoffer Dall <cdall@linaro.org>
7
AS "memory", root: system
9
Message-id: 1500471597-2517-4-git-send-email-drjones@redhat.com
8
AS "cpu-memory-0", root: system
9
AS "sh_pci_host", root: bus master container
10
Root memory region: system
11
0000000000000000-000000000000ffff (prio 0, i/o): io
12
0000000000010000-0000000000ffffff (prio 0, i/o): r2d.flash @0000000000010000
13
14
The overlapping memory region is sh_pci.isa, ie the ISA I/O region bridge.
15
This region is initially assigned to address 0xfe240000, but overwritten
16
with a write into the PCIIOBR register. This write is expected to adjust
17
the PCI memory window, but not to change the region's base address.
18
19
Peter Maydell provided the following detailed explanation.
20
21
"Section 22.3.7 and in particular figure 22.3 (of "SSH7751R user's manual:
22
hardware") are clear about how this is supposed to work: there is a window
23
at 0xfe240000 in the system register space for PCI I/O space. When the CPU
24
makes an access into that area, the PCI controller calculates the PCI
25
address to use by combining bits 0..17 of the system address with the
26
bits 31..18 value that the guest has put into the PCIIOBR. That is, writing
27
to the PCIIOBR changes which section of the IO address space is visible in
28
the 0xfe240000 window. Instead what QEMU's implementation does is move the
29
window to whatever value the guest writes to the PCIIOBR register -- so if
30
the guest writes 0 we put the window at 0 in system address space."
31
32
Fix the problem by calling memory_region_set_alias_offset() instead of
33
removing and re-adding the PCI ISA subregion on writes into PCIIOBR.
34
At the same time, in sh_pci_device_realize(), don't set iobr since
35
it is overwritten later anyway. Instead, pass the base address to
36
memory_region_add_subregion() directly.
37
38
Many thanks to Peter Maydell for the detailed problem analysis, and for
39
providing suggestions on how to fix the problem.
40
41
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
42
Message-id: 20200218201050.15273-1-linux@roeck-us.net
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
43
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
44
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
45
---
13
hw/arm/virt.c | 3 ++-
46
hw/sh4/sh_pci.c | 11 +++--------
14
target/arm/kvm.c | 6 +++++-
47
1 file changed, 3 insertions(+), 8 deletions(-)
15
target/arm/kvm64.c | 3 +--
16
3 files changed, 8 insertions(+), 4 deletions(-)
17
48
18
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
49
diff --git a/hw/sh4/sh_pci.c b/hw/sh4/sh_pci.c
19
index XXXXXXX..XXXXXXX 100644
50
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/virt.c
51
--- a/hw/sh4/sh_pci.c
21
+++ b/hw/arm/virt.c
52
+++ b/hw/sh4/sh_pci.c
22
@@ -XXX,XX +XXX,XX @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
53
@@ -XXX,XX +XXX,XX @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val,
23
return;
54
pcic->mbr = val & 0xff000001;
24
}
55
break;
25
if (kvm_enabled()) {
56
case 0x1c8:
26
- if (!kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ))) {
57
- if ((val & 0xfffc0000) != (pcic->iobr & 0xfffc0000)) {
27
+ if (kvm_irqchip_in_kernel() &&
58
- memory_region_del_subregion(get_system_memory(), &pcic->isa);
28
+ !kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ))) {
59
- pcic->iobr = val & 0xfffc0001;
29
return;
60
- memory_region_add_subregion(get_system_memory(),
30
}
61
- pcic->iobr & 0xfffc0000, &pcic->isa);
31
if (!kvm_arm_pmu_init(cpu)) {
62
- }
32
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
63
+ pcic->iobr = val & 0xfffc0001;
33
index XXXXXXX..XXXXXXX 100644
64
+ memory_region_set_alias_offset(&pcic->isa, val & 0xfffc0000);
34
--- a/target/arm/kvm.c
65
break;
35
+++ b/target/arm/kvm.c
66
case 0x220:
36
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
67
pci_data_write(phb->bus, pcic->par, val, 4);
37
switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
68
@@ -XXX,XX +XXX,XX @@ static void sh_pci_device_realize(DeviceState *dev, Error **errp)
38
}
69
get_system_io(), 0, 0x40000);
39
70
sysbus_init_mmio(sbd, &s->memconfig_p4);
40
- /* XXX PMU IRQ is missing */
71
sysbus_init_mmio(sbd, &s->memconfig_a7);
41
+ if (switched_level & KVM_ARM_DEV_PMU) {
72
- s->iobr = 0xfe240000;
42
+ qemu_set_irq(cpu->pmu_interrupt,
73
- memory_region_add_subregion(get_system_memory(), s->iobr, &s->isa);
43
+ !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
74
+ memory_region_add_subregion(get_system_memory(), 0xfe240000, &s->isa);
44
+ switched_level &= ~KVM_ARM_DEV_PMU;
75
45
+ }
76
s->dev = pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "sh_pci_host");
46
77
}
47
if (switched_level) {
48
qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
49
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/kvm64.c
52
+++ b/target/arm/kvm64.c
53
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
54
if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
55
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
56
}
57
- if (!kvm_irqchip_in_kernel() ||
58
- !kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
59
+ if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
60
cpu->has_pmu = false;
61
}
62
if (cpu->has_pmu) {
63
--
78
--
64
2.7.4
79
2.20.1
65
80
66
81
diff view generated by jsdifflib
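To make the quoted explanation above concrete: the PCIIOBR provides
bits 31..18 of the PCI I/O address and the offset of the access within
the 0xfe240000 window provides bits 17..0, which is exactly what the
memory_region_set_alias_offset(..., val & 0xfffc0000) call models. A
sketch of the address combination (illustrative only, not code from
the patch):

    static inline uint32_t sh_pci_io_address(uint32_t pciiobr,
                                             uint32_t window_offset)
    {
        return (pciiobr & 0xfffc0000) | (window_offset & 0x0003ffff);
    }

So a guest write to PCIIOBR changes which 256KB slice of PCI I/O space
is visible through the fixed window, rather than moving the window
itself around the system address space.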
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
The old name, isar_feature_aa32_fp_d32, does not reflect
4
the MVFR0 field name, SIMDReg.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20200214181547.21408-3-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
[PMM: wrapped one long line]
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
target/arm/cpu.h | 2 +-
14
target/arm/translate-vfp.inc.c | 53 +++++++++++++++++-----------------
15
2 files changed, 28 insertions(+), 27 deletions(-)
16
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
22
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
23
}
24
25
-static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
26
+static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id)
27
{
28
/* Return true if D16-D31 are implemented */
29
return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2;
30
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/translate-vfp.inc.c
33
+++ b/target/arm/translate-vfp.inc.c
34
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
35
}
36
37
/* UNDEF accesses to D16-D31 if they don't exist */
38
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
39
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
40
((a->vm | a->vn | a->vd) & 0x10)) {
41
return false;
42
}
43
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
44
}
45
46
/* UNDEF accesses to D16-D31 if they don't exist */
47
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
48
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
49
((a->vm | a->vn | a->vd) & 0x10)) {
50
return false;
51
}
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
53
}
54
55
/* UNDEF accesses to D16-D31 if they don't exist */
56
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
57
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
58
((a->vm | a->vd) & 0x10)) {
59
return false;
60
}
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
62
}
63
64
/* UNDEF accesses to D16-D31 if they don't exist */
65
- if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
66
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
67
return false;
68
}
69
70
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
71
uint32_t offset;
72
73
/* UNDEF accesses to D16-D31 if they don't exist */
74
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
75
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
76
return false;
77
}
78
79
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
80
uint32_t offset;
81
82
/* UNDEF accesses to D16-D31 if they don't exist */
83
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
84
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
85
return false;
86
}
87
88
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
89
}
90
91
/* UNDEF accesses to D16-D31 if they don't exist */
92
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
93
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
94
return false;
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
98
*/
99
100
/* UNDEF accesses to D16-D31 if they don't exist */
101
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
102
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
103
return false;
104
}
105
106
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
107
TCGv_i64 tmp;
108
109
/* UNDEF accesses to D16-D31 if they don't exist */
110
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
111
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
112
return false;
113
}
114
115
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
116
}
117
118
/* UNDEF accesses to D16-D31 if they don't exist */
119
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
120
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
121
return false;
122
}
123
124
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
125
TCGv_ptr fpst;
126
127
/* UNDEF accesses to D16-D31 if they don't exist */
128
- if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
129
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
130
return false;
131
}
132
133
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
134
TCGv_i64 f0, fd;
135
136
/* UNDEF accesses to D16-D31 if they don't exist */
137
- if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
138
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
139
return false;
140
}
141
142
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
143
}
144
145
/* UNDEF accesses to D16-D31 if they don't exist. */
146
- if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
147
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
148
+ ((a->vd | a->vn | a->vm) & 0x10)) {
149
return false;
150
}
151
152
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
153
vd = a->vd;
154
155
/* UNDEF accesses to D16-D31 if they don't exist. */
156
- if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
157
+ if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
158
return false;
159
}
160
161
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
162
}
163
164
/* UNDEF accesses to D16-D31 if they don't exist. */
165
- if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
166
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
167
return false;
168
}
169
170
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
171
}
172
173
/* UNDEF accesses to D16-D31 if they don't exist. */
174
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
175
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
176
return false;
177
}
178
179
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
180
}
181
182
/* UNDEF accesses to D16-D31 if they don't exist. */
183
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
184
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
185
return false;
186
}
187
188
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
189
}
190
191
/* UNDEF accesses to D16-D31 if they don't exist. */
192
- if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
193
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
194
return false;
195
}
196
197
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
198
}
199
200
/* UNDEF accesses to D16-D31 if they don't exist. */
201
- if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
202
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
203
return false;
204
}
205
206
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
207
}
208
209
/* UNDEF accesses to D16-D31 if they don't exist. */
210
- if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
211
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
212
return false;
213
}
214
215
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
216
TCGv_i32 vm;
217
218
/* UNDEF accesses to D16-D31 if they don't exist. */
219
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
220
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
221
return false;
222
}
223
224
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
225
TCGv_i32 vd;
226
227
/* UNDEF accesses to D16-D31 if they don't exist. */
228
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
229
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
230
return false;
231
}
232
233
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
234
TCGv_ptr fpst;
235
236
/* UNDEF accesses to D16-D31 if they don't exist. */
237
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
238
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
239
return false;
240
}
241
242
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
243
}
244
245
/* UNDEF accesses to D16-D31 if they don't exist. */
246
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
247
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
248
return false;
249
}
250
251
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
252
}
253
254
/* UNDEF accesses to D16-D31 if they don't exist. */
255
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
256
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
257
return false;
258
}
259
260
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
261
TCGv_ptr fpst;
262
263
/* UNDEF accesses to D16-D31 if they don't exist. */
264
- if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
265
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
266
return false;
267
}
268
269
--
270
2.20.1
271
272
diff view generated by jsdifflib
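For context on what the renamed predicate tests: MVFR0.SIMDReg (bits
[3:0]) describes the size of the SIMD/FP register bank, with 1 meaning
D0-D15 are present and 2 meaning D0-D31. Written out without the
FIELD_EX32 machinery (illustrative only, assumes <stdint.h> and
<stdbool.h>):

    static inline bool mvfr0_has_d32(uint32_t mvfr0)
    {
        return (mvfr0 & 0xf) >= 2;   /* MVFR0.SIMDReg, bits [3:0] */
    }

which is why aa32_simd_r32 describes the check better than the old
aa32_fp_d32 name did.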
1
Tighten up the T32 decoder in the places where new v8M instructions
1
From: Richard Henderson <richard.henderson@linaro.org>
2
will be:
3
* TT/TTT/TTA/TTAT are in what was nominally LDREX/STREX r15, ...
4
which is UNPREDICTABLE:
5
make the UNPREDICTABLE behaviour be to UNDEF
6
* BXNS/BLXNS are distinguished from BX/BLX via the low 3 bits,
7
which in previous architectural versions are SBZ:
8
enforce the SBZ via UNDEF rather than ignoring it, and move
9
the "ARCH(5)" UNDEF case up so we don't leak a TCG temporary
10
* SG is in the encoding which would be LDRD/STRD with rn = r15;
11
this is UNPREDICTABLE and we currently UNDEF:
12
move this check further up the code so that we don't leak
13
TCG temporaries in the UNDEF case and have a better place
14
to put the SG decode.
15
2
16
This means that if a v8M binary is accidentally run on v7M
3
Many uses of ARM_FEATURE_VFP3 are testing for the number of simd
17
or if a test case hits something that we haven't implemented
4
registers implemented. Use the proper test vs MVFR0.SIMDReg.
18
yet the behaviour will be obvious (UNDEF) rather than obscure
19
(plough on treating it as a different instruction).
20
5
21
In the process, add some comments about the instruction patterns
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
at these points in the decode. Our Thumb and ARM decoders are
7
Message-id: 20200214181547.21408-4-richard.henderson@linaro.org
23
very difficult to understand currently, but gradually adding
8
[PMM: fix typo in commit message]
24
comments like this should help to clarify what exactly has
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
25
been decoded when.
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/cpu.c | 9 ++++-----
13
target/arm/helper.c | 13 ++++++-------
14
target/arm/translate.c | 2 +-
15
3 files changed, 11 insertions(+), 13 deletions(-)
26
16
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
28
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
18
index XXXXXXX..XXXXXXX 100644
29
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
--- a/target/arm/cpu.c
30
Message-id: 1501692241-23310-5-git-send-email-peter.maydell@linaro.org
20
+++ b/target/arm/cpu.c
31
---
21
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
32
target/arm/translate.c | 48 +++++++++++++++++++++++++++++++++++++++---------
22
33
1 file changed, 39 insertions(+), 9 deletions(-)
23
if (flags & CPU_DUMP_FPU) {
34
24
int numvfpregs = 0;
25
- if (arm_feature(env, ARM_FEATURE_VFP)) {
26
- numvfpregs += 16;
27
- }
28
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
29
- numvfpregs += 16;
30
+ if (cpu_isar_feature(aa32_simd_r32, cpu)) {
31
+ numvfpregs = 32;
32
+ } else if (arm_feature(env, ARM_FEATURE_VFP)) {
33
+ numvfpregs = 16;
34
}
35
for (i = 0; i < numvfpregs; i++) {
36
uint64_t v = *aa32_vfp_dreg(env, i);
37
diff --git a/target/arm/helper.c b/target/arm/helper.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/helper.c
40
+++ b/target/arm/helper.c
41
@@ -XXX,XX +XXX,XX @@ static void switch_mode(CPUARMState *env, int mode);
42
43
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
44
{
45
- int nregs;
46
+ ARMCPU *cpu = env_archcpu(env);
47
+ int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
48
49
/* VFP data registers are always little-endian. */
50
- nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
51
if (reg < nregs) {
52
stq_le_p(buf, *aa32_vfp_dreg(env, reg));
53
return 8;
54
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
55
56
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
57
{
58
- int nregs;
59
+ ARMCPU *cpu = env_archcpu(env);
60
+ int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
61
62
- nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
63
if (reg < nregs) {
64
*aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
65
return 8;
66
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
67
/* VFPv3 and upwards with NEON implement 32 double precision
68
* registers (D0-D31).
69
*/
70
- if (!arm_feature(env, ARM_FEATURE_NEON) ||
71
- !arm_feature(env, ARM_FEATURE_VFP3)) {
72
+ if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
73
/* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
74
value |= (1 << 30);
75
}
76
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
77
} else if (arm_feature(env, ARM_FEATURE_NEON)) {
78
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
79
51, "arm-neon.xml", 0);
80
- } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
81
+ } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
82
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
83
35, "arm-vfp3.xml", 0);
84
} else if (arm_feature(env, ARM_FEATURE_VFP)) {
35
diff --git a/target/arm/translate.c b/target/arm/translate.c
85
diff --git a/target/arm/translate.c b/target/arm/translate.c
36
index XXXXXXX..XXXXXXX 100644
86
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/translate.c
87
--- a/target/arm/translate.c
38
+++ b/target/arm/translate.c
88
+++ b/target/arm/translate.c
39
@@ -XXX,XX +XXX,XX @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
89
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
40
abort();
90
#define VFP_SREG(insn, bigbit, smallbit) \
41
case 4:
91
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
42
if (insn & (1 << 22)) {
92
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
43
- /* Other load/store, table branch. */
93
- if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
44
+ /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
94
+ if (dc_isar_feature(aa32_simd_r32, s)) { \
45
+ * - load/store doubleword, load/store exclusive, ldacq/strel,
95
reg = (((insn) >> (bigbit)) & 0x0f) \
46
+ * table branch.
96
| (((insn) >> ((smallbit) - 4)) & 0x10); \
47
+ */
97
} else { \
48
if (insn & 0x01200000) {
49
- /* Load/store doubleword. */
50
+ /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
51
+ * - load/store dual (post-indexed)
52
+ * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
53
+ * - load/store dual (literal and immediate)
54
+ * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
55
+ * - load/store dual (pre-indexed)
56
+ */
57
if (rn == 15) {
58
+ if (insn & (1 << 21)) {
59
+ /* UNPREDICTABLE */
60
+ goto illegal_op;
61
+ }
62
addr = tcg_temp_new_i32();
63
tcg_gen_movi_i32(addr, s->pc & ~3);
64
} else {
65
@@ -XXX,XX +XXX,XX @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
66
}
67
if (insn & (1 << 21)) {
68
/* Base writeback. */
69
- if (rn == 15)
70
- goto illegal_op;
71
tcg_gen_addi_i32(addr, addr, offset - 4);
72
store_reg(s, rn, addr);
73
} else {
74
tcg_temp_free_i32(addr);
75
}
76
} else if ((insn & (1 << 23)) == 0) {
77
- /* Load/store exclusive word. */
78
+ /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
79
+ * - load/store exclusive word
80
+ */
81
+ if (rs == 15) {
82
+ goto illegal_op;
83
+ }
84
addr = tcg_temp_local_new_i32();
85
load_reg_var(s, addr, rn);
86
tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
87
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
88
break;
89
}
90
if (insn & (1 << 10)) {
91
- /* data processing extended or blx */
92
+ /* 0b0100_01xx_xxxx_xxxx
93
+ * - data processing extended, branch and exchange
94
+ */
95
rd = (insn & 7) | ((insn >> 4) & 8);
96
rm = (insn >> 3) & 0xf;
97
op = (insn >> 8) & 3;
98
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99
tmp = load_reg(s, rm);
100
store_reg(s, rd, tmp);
101
break;
102
- case 3:/* branch [and link] exchange thumb register */
103
- tmp = load_reg(s, rm);
104
- if (insn & (1 << 7)) {
105
+ case 3:
106
+ {
107
+ /* 0b0100_0111_xxxx_xxxx
108
+ * - branch [and link] exchange thumb register
109
+ */
110
+ bool link = insn & (1 << 7);
111
+
112
+ if (insn & 7) {
113
+ goto undef;
114
+ }
115
+ if (link) {
116
ARCH(5);
117
+ }
118
+ tmp = load_reg(s, rm);
119
+ if (link) {
120
val = (uint32_t)s->pc | 1;
121
tmp2 = tcg_temp_new_i32();
122
tcg_gen_movi_i32(tmp2, val);
123
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
124
}
125
break;
126
}
127
+ }
128
break;
129
}
130
131
--
98
--
132
2.7.4
99
2.20.1
133
100
134
101
diff view generated by jsdifflib
1
From: Thomas Huth <thuth@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
QEMU currently shows some unexpected behavior when the user tries to
3
We are going to convert FEATURE tests to ISAR tests,
4
do a "device_add digic" on an unrelated ARM machine like integratorcp
4
so FPSP needs to be set for these cpus, like we have
5
in "-nographic" mode (the device_add command does not immediately
5
already for FPDP.
6
return to the monitor prompt), and trying to "device_del" the device
7
later results in a "qemu/qdev-monitor.c:872:qdev_unplug: assertion
8
failed: (hotplug_ctrl)" error condition.
9
Looking at the realize function of the device, it uses serial_hds
10
directly and this means that the device cannot be added a second
11
time, so let's simply mark it with "user_creatable = false" now.
12
6
13
Signed-off-by: Thomas Huth <thuth@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200214181547.21408-5-richard.henderson@linaro.org
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
11
---
17
hw/arm/digic.c | 2 ++
12
target/arm/cpu.c | 10 ++++++----
18
1 file changed, 2 insertions(+)
13
1 file changed, 6 insertions(+), 4 deletions(-)
19
14
20
diff --git a/hw/arm/digic.c b/hw/arm/digic.c
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/digic.c
17
--- a/target/arm/cpu.c
23
+++ b/hw/arm/digic.c
18
+++ b/target/arm/cpu.c
24
@@ -XXX,XX +XXX,XX @@ static void digic_class_init(ObjectClass *oc, void *data)
19
@@ -XXX,XX +XXX,XX @@ static void arm926_initfn(Object *obj)
25
DeviceClass *dc = DEVICE_CLASS(oc);
20
*/
26
21
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
27
dc->realize = digic_realize;
22
/*
28
+ /* Reason: Uses serial_hds in the realize function --> not usable twice */
23
- * Similarly, we need to set MVFR0 fields to enable double precision
29
+ dc->user_creatable = false;
24
- * and short vector support even though ARMv5 doesn't have this register.
25
+ * Similarly, we need to set MVFR0 fields to enable vfp and short vector
26
+ * support even though ARMv5 doesn't have this register.
27
*/
28
cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
29
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1);
30
cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1);
30
}
31
}
31
32
32
static const TypeInfo digic_type_info = {
33
@@ -XXX,XX +XXX,XX @@ static void arm1026_initfn(Object *obj)
34
*/
35
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
36
/*
37
- * Similarly, we need to set MVFR0 fields to enable double precision
38
- * and short vector support even though ARMv5 doesn't have this register.
39
+ * Similarly, we need to set MVFR0 fields to enable vfp and short vector
40
+ * support even though ARMv5 doesn't have this register.
41
*/
42
cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
43
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1);
44
cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1);
45
46
{
33
--
47
--
34
2.7.4
48
2.20.1
35
49
36
50
diff view generated by jsdifflib
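The FIELD_DP32() calls in the hunks above simply deposit a value into
one field of MVFR0; for the FPSP field (bits [7:4]) the open-coded
equivalent would be the following. Sketch only, QEMU itself always
goes through the registerfields macros:

    static inline uint32_t mvfr0_set_fpsp(uint32_t mvfr0, uint32_t val)
    {
        return (mvfr0 & ~0xf0u) | ((val & 0xf) << 4);
    }

Making the field non-zero is what lets the single-precision isar
feature tests pass on these ARMv5 cores, matching what was already
done for FPDP.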
1
We currently store the M profile CPU register state PRIMASK and
1
From: Richard Henderson <richard.henderson@linaro.org>
2
FAULTMASK in the daif field of the CPU state in its I and F
2
3
bits. This is a legacy from the original implementation, which
3
Use this in the places that were checking ARM_FEATURE_VFP, and
4
tried to share the cpu_exec_interrupt code between A profile
4
are obviously testing for the existence of the register set
5
and M profile. We've since separated out the two cases because
5
as opposed to testing for some particular instruction extension.
6
they are significantly different, so now there is no common
6
7
code between M and A profile which looks at env->daif: all the
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
uses are either in A-only or M-only code paths. Sharing the state
8
Message-id: 20200214181547.21408-6-richard.henderson@linaro.org
9
fields now is just confusing, and will make things awkward
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
when we implement v8M, where the PRIMASK and FAULTMASK
11
registers are banked between security states.
12
13
Switch M profile over to using v7m.faultmask and v7m.primask
14
fields for these registers.
15
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Message-id: 1501692241-23310-10-git-send-email-peter.maydell@linaro.org
19
---
11
---
20
target/arm/cpu.h | 4 +++-
12
target/arm/cpu.h | 6 ++++++
21
hw/intc/armv7m_nvic.c | 4 ++--
13
hw/intc/armv7m_nvic.c | 20 ++++++++++----------
22
target/arm/cpu.c | 5 -----
14
linux-user/arm/signal.c | 4 ++--
23
target/arm/helper.c | 18 +++++-------------
15
target/arm/arch_dump.c | 11 ++++++-----
24
target/arm/machine.c | 33 +++++++++++++++++++++++++++++++++
16
target/arm/cpu.c | 8 ++++----
25
5 files changed, 43 insertions(+), 21 deletions(-)
17
target/arm/helper.c | 4 ++--
18
target/arm/m_helper.c | 11 ++++++-----
19
target/arm/machine.c | 3 +--
20
8 files changed, 37 insertions(+), 30 deletions(-)
26
21
27
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
28
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/cpu.h
24
--- a/target/arm/cpu.h
30
+++ b/target/arm/cpu.h
25
+++ b/target/arm/cpu.h
31
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
26
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
32
uint32_t bfar; /* BusFault Address */
27
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
33
unsigned mpu_ctrl; /* MPU_CTRL */
28
}
34
int exception;
29
35
+ uint32_t primask;
30
+static inline bool isar_feature_aa32_simd_r16(const ARMISARegisters *id)
36
+ uint32_t faultmask;
31
+{
37
} v7m;
32
+ /* Return true if D0-D15 are implemented */
38
33
+ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0;
39
/* Information associated with an exception about to be taken:
34
+}
40
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
35
+
41
* we're in a HardFault or NMI handler.
36
static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id)
42
*/
37
{
43
if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
38
/* Return true if D16-D31 are implemented */
44
- || env->daif & PSTATE_F) {
45
+ || env->v7m.faultmask) {
46
return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
47
}
48
49
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
39
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
50
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/intc/armv7m_nvic.c
41
--- a/hw/intc/armv7m_nvic.c
52
+++ b/hw/intc/armv7m_nvic.c
42
+++ b/hw/intc/armv7m_nvic.c
53
@@ -XXX,XX +XXX,XX @@ static inline int nvic_exec_prio(NVICState *s)
43
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
54
CPUARMState *env = &s->cpu->env;
44
case 0xd84: /* CSSELR */
55
int running;
45
return cpu->env.v7m.csselr[attrs.secure];
56
46
case 0xd88: /* CPACR */
57
- if (env->daif & PSTATE_F) { /* FAULTMASK */
47
- if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
58
+ if (env->v7m.faultmask) {
48
+ if (!cpu_isar_feature(aa32_simd_r16, cpu)) {
59
running = -1;
49
return 0;
60
- } else if (env->daif & PSTATE_I) { /* PRIMASK */
50
}
61
+ } else if (env->v7m.primask) {
51
return cpu->env.v7m.cpacr[attrs.secure];
62
running = 0;
52
case 0xd8c: /* NSACR */
63
} else if (env->v7m.basepri > 0) {
53
- if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
64
running = env->v7m.basepri & nvic_gprio_mask(s);
54
+ if (!attrs.secure || !cpu_isar_feature(aa32_simd_r16, cpu)) {
55
return 0;
56
}
57
return cpu->env.v7m.nsacr;
58
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
59
}
60
return cpu->env.v7m.sfar;
61
case 0xf34: /* FPCCR */
62
- if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
63
+ if (!cpu_isar_feature(aa32_simd_r16, cpu)) {
64
return 0;
65
}
66
if (attrs.secure) {
67
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
68
return value;
69
}
70
case 0xf38: /* FPCAR */
71
- if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
72
+ if (!cpu_isar_feature(aa32_simd_r16, cpu)) {
73
return 0;
74
}
75
return cpu->env.v7m.fpcar[attrs.secure];
76
case 0xf3c: /* FPDSCR */
77
- if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
78
+ if (!cpu_isar_feature(aa32_simd_r16, cpu)) {
79
return 0;
80
}
81
return cpu->env.v7m.fpdscr[attrs.secure];
82
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
83
}
84
break;
85
case 0xd88: /* CPACR */
86
- if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
87
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
88
/* We implement only the Floating Point extension's CP10/CP11 */
89
cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
90
}
91
break;
92
case 0xd8c: /* NSACR */
93
- if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
94
+ if (attrs.secure && cpu_isar_feature(aa32_simd_r16, cpu)) {
95
/* We implement only the Floating Point extension's CP10/CP11 */
96
cpu->env.v7m.nsacr = value & (3 << 10);
97
}
98
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
99
break;
100
}
101
case 0xf34: /* FPCCR */
102
- if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
103
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
104
/* Not all bits here are banked. */
105
uint32_t fpccr_s;
106
107
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
108
}
109
break;
110
case 0xf38: /* FPCAR */
111
- if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
112
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
113
value &= ~7;
114
cpu->env.v7m.fpcar[attrs.secure] = value;
115
}
116
break;
117
case 0xf3c: /* FPDSCR */
118
- if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
119
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
120
value &= 0x07c00000;
121
cpu->env.v7m.fpdscr[attrs.secure] = value;
122
}
123
diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/linux-user/arm/signal.c
126
+++ b/linux-user/arm/signal.c
127
@@ -XXX,XX +XXX,XX @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
128
setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
129
/* Save coprocessor signal frame. */
130
regspace = uc->tuc_regspace;
131
- if (arm_feature(env, ARM_FEATURE_VFP)) {
132
+ if (cpu_isar_feature(aa32_simd_r16, env_archcpu(env))) {
133
regspace = setup_sigframe_v2_vfp(regspace, env);
134
}
135
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
136
@@ -XXX,XX +XXX,XX @@ static int do_sigframe_return_v2(CPUARMState *env,
137
138
/* Restore coprocessor signal frame */
139
regspace = uc->tuc_regspace;
140
- if (arm_feature(env, ARM_FEATURE_VFP)) {
141
+ if (cpu_isar_feature(aa32_simd_r16, env_archcpu(env))) {
142
regspace = restore_sigframe_v2_vfp(env, regspace);
143
if (!regspace) {
144
return 1;
145
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/target/arm/arch_dump.c
148
+++ b/target/arm/arch_dump.c
149
@@ -XXX,XX +XXX,XX @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
150
int cpuid, void *opaque)
151
{
152
struct arm_note note;
153
- CPUARMState *env = &ARM_CPU(cs)->env;
154
+ ARMCPU *cpu = ARM_CPU(cs);
155
+ CPUARMState *env = &cpu->env;
156
DumpState *s = opaque;
157
- int ret, i, fpvalid = !!arm_feature(env, ARM_FEATURE_VFP);
158
+ int ret, i;
159
+ bool fpvalid = cpu_isar_feature(aa32_simd_r16, cpu);
160
161
arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
162
163
@@ -XXX,XX +XXX,XX @@ int cpu_get_dump_info(ArchDumpInfo *info,
164
ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
165
{
166
ARMCPU *cpu = ARM_CPU(first_cpu);
167
- CPUARMState *env = &cpu->env;
168
size_t note_size;
169
170
if (class == ELFCLASS64) {
171
@@ -XXX,XX +XXX,XX @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
172
note_size += AARCH64_PRFPREG_NOTE_SIZE;
173
#ifdef TARGET_AARCH64
174
if (cpu_isar_feature(aa64_sve, cpu)) {
175
- note_size += AARCH64_SVE_NOTE_SIZE(env);
176
+ note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
177
}
178
#endif
179
} else {
180
note_size = ARM_PRSTATUS_NOTE_SIZE;
181
- if (arm_feature(env, ARM_FEATURE_VFP)) {
182
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
183
note_size += ARM_VFP_NOTE_SIZE;
184
}
185
}
65
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
186
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
66
index XXXXXXX..XXXXXXX 100644
187
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/cpu.c
188
--- a/target/arm/cpu.c
68
+++ b/target/arm/cpu.c
189
+++ b/target/arm/cpu.c
69
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
190
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
70
uint32_t initial_pc; /* Loaded from 0x4 */
191
env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
71
uint8_t *rom;
192
}
72
193
73
- /* For M profile we store FAULTMASK and PRIMASK in the
194
- if (arm_feature(env, ARM_FEATURE_VFP)) {
74
- * PSTATE F and I bits; these are both clear at reset.
195
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
75
- */
196
env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
76
- env->daif &= ~(PSTATE_I | PSTATE_F);
197
env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
77
-
198
R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
78
/* The reset value of this bit is IMPDEF, but ARM recommends
199
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
79
* that it resets to 1, so QEMU always does that rather than making
200
int numvfpregs = 0;
80
* it dependent on CPU model.
201
if (cpu_isar_feature(aa32_simd_r32, cpu)) {
202
numvfpregs = 32;
203
- } else if (arm_feature(env, ARM_FEATURE_VFP)) {
204
+ } else if (cpu_isar_feature(aa32_simd_r16, cpu)) {
205
numvfpregs = 16;
206
}
207
for (i = 0; i < numvfpregs; i++) {
208
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
209
* KVM does not currently allow us to lie to the guest about its
210
* ID/feature registers, so the guest always sees what the host has.
211
*/
212
- if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
213
+ if (cpu_isar_feature(aa32_simd_r16, cpu)) {
214
cpu->has_vfp = true;
215
if (!kvm_enabled()) {
216
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
217
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
218
* We rely on no XScale CPU having VFP so we can use the same bits in the
219
* TB flags field for VECSTRIDE and XSCALE_CPAR.
220
*/
221
- assert(!(arm_feature(env, ARM_FEATURE_VFP) &&
222
+ assert(!(cpu_isar_feature(aa32_simd_r16, cpu) &&
223
arm_feature(env, ARM_FEATURE_XSCALE)));
224
225
if (arm_feature(env, ARM_FEATURE_V7) &&
81
diff --git a/target/arm/helper.c b/target/arm/helper.c
226
diff --git a/target/arm/helper.c b/target/arm/helper.c
82
index XXXXXXX..XXXXXXX 100644
227
index XXXXXXX..XXXXXXX 100644
83
--- a/target/arm/helper.c
228
--- a/target/arm/helper.c
84
+++ b/target/arm/helper.c
229
+++ b/target/arm/helper.c
230
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
231
* ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
232
* TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
233
*/
234
- if (arm_feature(env, ARM_FEATURE_VFP)) {
235
+ if (cpu_isar_feature(aa32_simd_r16, env_archcpu(env))) {
236
/* VFP coprocessor: cp10 & cp11 [23:20] */
237
mask |= (1 << 31) | (1 << 30) | (0xf << 20);
238
239
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
240
} else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
241
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
242
35, "arm-vfp3.xml", 0);
243
- } else if (arm_feature(env, ARM_FEATURE_VFP)) {
244
+ } else if (cpu_isar_feature(aa32_simd_r16, cpu)) {
245
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
246
19, "arm-vfp.xml", 0);
247
}
248
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
249
index XXXXXXX..XXXXXXX 100644
250
--- a/target/arm/m_helper.c
251
+++ b/target/arm/m_helper.c
252
@@ -XXX,XX +XXX,XX @@ static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
253
*/
254
uint32_t sig = 0xfefa125a;
255
256
- if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
257
+ if (!cpu_isar_feature(aa32_simd_r16, env_archcpu(env))
258
+ || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
259
sig |= 1;
260
}
261
return sig;
262
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
263
264
if (dotailchain) {
265
/* Sanitize LR FType and PREFIX bits */
266
- if (!arm_feature(env, ARM_FEATURE_VFP)) {
267
+ if (!cpu_isar_feature(aa32_simd_r16, cpu)) {
268
lr |= R_V7M_EXCRET_FTYPE_MASK;
269
}
270
lr = deposit32(lr, 24, 8, 0xff);
85
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
271
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
86
272
87
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
273
ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
88
/* Auto-clear FAULTMASK on return from other than NMI */
274
89
- env->daif &= ~PSTATE_F;
275
- if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
90
+ env->v7m.faultmask = 0;
276
+ if (!ftype && !cpu_isar_feature(aa32_simd_r16, cpu)) {
91
}
277
qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
92
278
"exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
93
switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
279
"if FPU not present\n",
94
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
95
return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
96
env->regs[13] : env->v7m.other_sp;
97
case 16: /* PRIMASK */
98
- return (env->daif & PSTATE_I) != 0;
99
+ return env->v7m.primask;
100
case 17: /* BASEPRI */
101
case 18: /* BASEPRI_MAX */
102
return env->v7m.basepri;
103
case 19: /* FAULTMASK */
104
- return (env->daif & PSTATE_F) != 0;
105
+ return env->v7m.faultmask;
106
default:
107
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
108
" register %d\n", reg);
109
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
280
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
110
}
281
* SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
111
break;
282
* RES0 if the FPU is not present, and is stored in the S bank
112
case 16: /* PRIMASK */
283
*/
113
- if (val & 1) {
284
- if (arm_feature(env, ARM_FEATURE_VFP) &&
114
- env->daif |= PSTATE_I;
285
+ if (cpu_isar_feature(aa32_simd_r16, env_archcpu(env)) &&
115
- } else {
286
extract32(env->v7m.nsacr, 10, 1)) {
116
- env->daif &= ~PSTATE_I;
287
env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
117
- }
288
env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
118
+ env->v7m.primask = val & 1;
119
break;
120
case 17: /* BASEPRI */
121
env->v7m.basepri = val & 0xff;
122
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
289
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
123
env->v7m.basepri = val;
290
env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
124
break;
291
env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
125
case 19: /* FAULTMASK */
292
}
126
- if (val & 1) {
293
- if (arm_feature(env, ARM_FEATURE_VFP)) {
127
- env->daif |= PSTATE_F;
294
+ if (cpu_isar_feature(aa32_simd_r16, env_archcpu(env))) {
128
- } else {
295
/*
129
- env->daif &= ~PSTATE_F;
296
* SFPA is RAZ/WI from NS or if no FPU.
130
- }
297
* FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
131
+ env->v7m.faultmask = val & 1;
132
break;
133
case 20: /* CONTROL */
134
/* Writing to the SPSEL bit only has an effect if we are in
135
diff --git a/target/arm/machine.c b/target/arm/machine.c
298
diff --git a/target/arm/machine.c b/target/arm/machine.c
136
index XXXXXXX..XXXXXXX 100644
299
index XXXXXXX..XXXXXXX 100644
137
--- a/target/arm/machine.c
300
--- a/target/arm/machine.c
138
+++ b/target/arm/machine.c
301
+++ b/target/arm/machine.c
139
@@ -XXX,XX +XXX,XX @@ static bool m_needed(void *opaque)
302
@@ -XXX,XX +XXX,XX @@
140
return arm_feature(env, ARM_FEATURE_M);
303
static bool vfp_needed(void *opaque)
304
{
305
ARMCPU *cpu = opaque;
306
- CPUARMState *env = &cpu->env;
307
308
- return arm_feature(env, ARM_FEATURE_VFP);
309
+ return cpu_isar_feature(aa32_simd_r16, cpu);
141
}
310
}
142
311
143
+static const VMStateDescription vmstate_m_faultmask_primask = {
312
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
144
+ .name = "cpu/m/faultmask-primask",
145
+ .version_id = 1,
146
+ .minimum_version_id = 1,
147
+ .fields = (VMStateField[]) {
148
+ VMSTATE_UINT32(env.v7m.faultmask, ARMCPU),
149
+ VMSTATE_UINT32(env.v7m.primask, ARMCPU),
150
+ VMSTATE_END_OF_LIST()
151
+ }
152
+};
153
+
154
static const VMStateDescription vmstate_m = {
155
.name = "cpu/m",
156
.version_id = 4,
157
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m = {
158
VMSTATE_UINT32(env.v7m.mpu_ctrl, ARMCPU),
159
VMSTATE_INT32(env.v7m.exception, ARMCPU),
160
VMSTATE_END_OF_LIST()
161
+ },
162
+ .subsections = (const VMStateDescription*[]) {
163
+ &vmstate_m_faultmask_primask,
164
+ NULL
165
}
166
};
167
168
@@ -XXX,XX +XXX,XX @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
169
CPUARMState *env = &cpu->env;
170
uint32_t val = qemu_get_be32(f);
171
172
+ if (arm_feature(env, ARM_FEATURE_M)) {
173
+ /* If the I or F bits are set then this is a migration from
174
+ * an old QEMU which still stored the M profile FAULTMASK
175
+ * and PRIMASK in env->daif. Set v7m.faultmask and v7m.primask
176
+ * accordingly, and then clear the bits so they don't confuse
177
+ * cpsr_write(). For a new QEMU, the bits here will always be
178
+ * clear, and the data is transferred using the
179
+ * vmstate_m_faultmask_primask subsection.
180
+ */
181
+ if (val & CPSR_F) {
182
+ env->v7m.faultmask = 1;
183
+ }
184
+ if (val & CPSR_I) {
185
+ env->v7m.primask = 1;
186
+ }
187
+ val &= ~(CPSR_F | CPSR_I);
188
+ }
189
+
190
env->aarch64 = ((val & PSTATE_nRW) == 0);
191
192
if (is_a64(env)) {
193
--
313
--
194
2.7.4
314
2.20.1
195
315
196
316
diff view generated by jsdifflib
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
The old name, isar_feature_aa32_fpdp, does not reflect
4
that the test includes VFPv2. We will introduce further
5
feature tests for VFPv3.
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20200214181547.21408-7-richard.henderson@linaro.org
10
[PMM: fixed grammar in commit message]
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/cpu.h | 4 ++--
15
target/arm/translate-vfp.inc.c | 40 +++++++++++++++++-----------------
16
2 files changed, 22 insertions(+), 22 deletions(-)
17
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
23
return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0;
24
}
25
26
-static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
27
+static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id)
28
{
29
- /* Return true if CPU supports double precision floating point */
30
+ /* Return true if CPU supports double precision floating point, VFPv2 */
31
return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0;
32
}
33
34
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate-vfp.inc.c
37
+++ b/target/arm/translate-vfp.inc.c
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
39
return false;
40
}
41
42
- if (dp && !dc_isar_feature(aa32_fpdp, s)) {
43
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
44
return false;
45
}
46
47
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
48
return false;
49
}
50
51
- if (dp && !dc_isar_feature(aa32_fpdp, s)) {
52
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
53
return false;
54
}
55
56
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
57
return false;
58
}
59
60
- if (dp && !dc_isar_feature(aa32_fpdp, s)) {
61
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
62
return false;
63
}
64
65
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
66
return false;
67
}
68
69
- if (dp && !dc_isar_feature(aa32_fpdp, s)) {
70
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
71
return false;
72
}
73
74
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
75
return false;
76
}
77
78
- if (!dc_isar_feature(aa32_fpdp, s)) {
79
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
80
return false;
81
}
82
83
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
84
return false;
85
}
86
87
- if (!dc_isar_feature(aa32_fpdp, s)) {
88
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
89
return false;
90
}
91
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
93
return false;
94
}
95
96
- if (!dc_isar_feature(aa32_fpdp, s)) {
97
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
98
return false;
99
}
100
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
102
return false;
103
}
104
105
- if (!dc_isar_feature(aa32_fpdp, s)) {
106
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
107
return false;
108
}
109
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
111
return false;
112
}
113
114
- if (!dc_isar_feature(aa32_fpdp, s)) {
115
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
116
return false;
117
}
118
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
120
return false;
121
}
122
123
- if (!dc_isar_feature(aa32_fpdp, s)) {
124
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
125
return false;
126
}
127
128
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
129
return false;
130
}
131
132
- if (!dc_isar_feature(aa32_fpdp, s)) {
133
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
134
return false;
135
}
136
137
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
138
return false;
139
}
140
141
- if (!dc_isar_feature(aa32_fpdp, s)) {
142
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
143
return false;
144
}
145
146
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
147
return false;
148
}
149
150
- if (!dc_isar_feature(aa32_fpdp, s)) {
151
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
152
return false;
153
}
154
155
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
156
return false;
157
}
158
159
- if (!dc_isar_feature(aa32_fpdp, s)) {
160
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
161
return false;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
165
return false;
166
}
167
168
- if (!dc_isar_feature(aa32_fpdp, s)) {
169
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
170
return false;
171
}
172
173
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
174
return false;
175
}
176
177
- if (!dc_isar_feature(aa32_fpdp, s)) {
178
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
179
return false;
180
}
181
182
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
183
return false;
184
}
185
186
- if (!dc_isar_feature(aa32_fpdp, s)) {
187
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
188
return false;
189
}
190
191
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
192
return false;
193
}
194
195
- if (!dc_isar_feature(aa32_fpdp, s)) {
196
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
197
return false;
198
}
199
200
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
201
return false;
202
}
203
204
- if (!dc_isar_feature(aa32_fpdp, s)) {
205
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
206
return false;
207
}
208
209
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
210
return false;
211
}
212
213
- if (!dc_isar_feature(aa32_fpdp, s)) {
214
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
215
return false;
216
}
217
218
--
219
2.20.1
220
221
1
From: Andrew Jones <drjones@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
When adding a PMU with a userspace irqchip we skip the set-irq
3
We will shortly use these to test for VFPv2 and VFPv3
4
stage of device creation. Split the 'create' function into two
4
in different situations.
5
functions 'init' and 'set-irq' so they may be called separately.
6
5
7
Signed-off-by: Andrew Jones <drjones@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Christoffer Dall <cdall@linaro.org>
7
Message-id: 20200214181547.21408-8-richard.henderson@linaro.org
9
Message-id: 1500471597-2517-3-git-send-email-drjones@redhat.com
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/kvm_arm.h | 10 ++++++++--
11
target/arm/cpu.h | 18 ++++++++++++++++++
13
hw/arm/virt.c | 11 +++++++++--
12
1 file changed, 18 insertions(+)
14
target/arm/kvm32.c | 8 +++++++-
15
target/arm/kvm64.c | 52 +++++++++++++++++++++++++---------------------------
16
4 files changed, 49 insertions(+), 32 deletions(-)
17
13
18
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/kvm_arm.h
16
--- a/target/arm/cpu.h
21
+++ b/target/arm/kvm_arm.h
17
+++ b/target/arm/cpu.h
22
@@ -XXX,XX +XXX,XX @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
18
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
23
19
return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0;
24
int kvm_arm_vgic_probe(void);
25
26
-int kvm_arm_pmu_create(CPUState *cs, int irq);
27
+int kvm_arm_pmu_set_irq(CPUState *cs, int irq);
28
+int kvm_arm_pmu_init(CPUState *cs);
29
30
#else
31
32
@@ -XXX,XX +XXX,XX @@ static inline int kvm_arm_vgic_probe(void)
33
return 0;
34
}
20
}
35
21
36
-static inline int kvm_arm_pmu_create(CPUState *cs, int irq)
22
+static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id)
37
+static inline int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
38
+{
23
+{
39
+ return 0;
24
+ /* Return true if CPU supports single precision floating point, VFPv2 */
25
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0;
40
+}
26
+}
41
+
27
+
42
+static inline int kvm_arm_pmu_init(CPUState *cs)
28
+static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id)
43
{
44
return 0;
45
}
46
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/hw/arm/virt.c
49
+++ b/hw/arm/virt.c
50
@@ -XXX,XX +XXX,XX @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
51
52
CPU_FOREACH(cpu) {
53
armcpu = ARM_CPU(cpu);
54
- if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU) ||
55
- (kvm_enabled() && !kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ)))) {
56
+ if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
57
return;
58
}
59
+ if (kvm_enabled()) {
60
+ if (!kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ))) {
61
+ return;
62
+ }
63
+ if (!kvm_arm_pmu_init(cpu)) {
64
+ return;
65
+ }
66
+ }
67
}
68
69
if (vms->gic_version == 2) {
70
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/arm/kvm32.c
73
+++ b/target/arm/kvm32.c
74
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_hw_debug_active(CPUState *cs)
75
return false;
76
}
77
78
-int kvm_arm_pmu_create(CPUState *cs, int irq)
79
+int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
80
+{
29
+{
81
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
30
+ /* Return true if CPU supports single precision floating point, VFPv3 */
82
+ return 0;
31
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2;
83
+}
32
+}
84
+
33
+
85
+int kvm_arm_pmu_init(CPUState *cs)
34
static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id)
86
{
35
{
87
qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
36
/* Return true if CPU supports double precision floating point, VFPv2 */
88
return 0;
37
return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0;
89
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/kvm64.c
92
+++ b/target/arm/kvm64.c
93
@@ -XXX,XX +XXX,XX @@ static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
94
return NULL;
95
}
38
}
96
39
97
-static bool kvm_arm_pmu_support_ctrl(CPUState *cs, struct kvm_device_attr *attr)
40
+static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id)
98
-{
99
- return kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr) == 0;
100
-}
101
-
102
-int kvm_arm_pmu_create(CPUState *cs, int irq)
103
+static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
104
{
105
int err;
106
107
- struct kvm_device_attr attr = {
108
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
109
- .addr = (intptr_t)&irq,
110
- .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
111
- .flags = 0,
112
- };
113
-
114
- if (!kvm_arm_pmu_support_ctrl(cs, &attr)) {
115
- return 0;
116
+ err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
117
+ if (err != 0) {
118
+ return false;
119
}
120
121
- err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
122
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
123
if (err < 0) {
124
fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
125
strerror(-err));
126
abort();
127
}
128
129
- attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
130
- attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
131
- attr.addr = 0;
132
- attr.flags = 0;
133
+ return true;
134
+}
135
136
- err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
137
- if (err < 0) {
138
- fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
139
- strerror(-err));
140
- abort();
141
- }
142
+int kvm_arm_pmu_init(CPUState *cs)
143
+{
41
+{
144
+ struct kvm_device_attr attr = {
42
+ /* Return true if CPU supports double precision floating point, VFPv3 */
145
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
43
+ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2;
146
+ .attr = KVM_ARM_VCPU_PMU_V3_INIT,
147
+ };
148
+
149
+ return kvm_arm_pmu_set_attr(cs, &attr);
150
+}
44
+}
151
+
45
+
152
+int kvm_arm_pmu_set_irq(CPUState *cs, int irq)
46
/*
153
+{
47
* We always set the FP and SIMD FP16 fields to indicate identical
154
+ struct kvm_device_attr attr = {
48
* levels of support (assuming SIMD is implemented at all), so
155
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
156
+ .addr = (intptr_t)&irq,
157
+ .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
158
+ };
159
160
- return 1;
161
+ return kvm_arm_pmu_set_attr(cs, &attr);
162
}
163
164
static inline void set_feature(uint64_t *features, int feature)
165
--
49
--
166
2.7.4
50
2.20.1
167
51
168
52
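Taken together with the fpdp_v2 rename earlier in the series, the new helpers above all decode fields of MVFR0. As a condensed reference (a sketch only, collecting definitions already present in the target/arm/cpu.h hunks in this queue, and assuming that header's ARMISARegisters/FIELD_EX32 context):

    /*
     * MVFR0.SIMDREG > 0   -> D0-D15 implemented          (aa32_simd_r16)
     * MVFR0.FPSP    > 0   -> single-precision FP, VFPv2  (aa32_fpsp_v2)
     * MVFR0.FPSP    >= 2  -> single-precision FP, VFPv3  (aa32_fpsp_v3)
     * MVFR0.FPDP    > 0   -> double-precision FP, VFPv2  (aa32_fpdp_v2)
     * MVFR0.FPDP    >= 2  -> double-precision FP, VFPv3  (aa32_fpdp_v3)
     */
    static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id)
    {
        /* Single precision at VFPv3 level or better */
        return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2;
    }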
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Shuffle the order of the checks so that we test the ISA
4
before we test anything else, such as the register arguments.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20200214181547.21408-9-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/translate-vfp.inc.c | 144 ++++++++++++++++-----------------
12
1 file changed, 72 insertions(+), 72 deletions(-)
13
14
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate-vfp.inc.c
17
+++ b/target/arm/translate-vfp.inc.c
18
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
19
return false;
20
}
21
22
- /* UNDEF accesses to D16-D31 if they don't exist */
23
- if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
24
- ((a->vm | a->vn | a->vd) & 0x10)) {
25
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
26
return false;
27
}
28
29
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
30
+ /* UNDEF accesses to D16-D31 if they don't exist */
31
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
32
+ ((a->vm | a->vn | a->vd) & 0x10)) {
33
return false;
34
}
35
36
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
37
return false;
38
}
39
40
- /* UNDEF accesses to D16-D31 if they don't exist */
41
- if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
42
- ((a->vm | a->vn | a->vd) & 0x10)) {
43
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
44
return false;
45
}
46
47
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
48
+ /* UNDEF accesses to D16-D31 if they don't exist */
49
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
50
+ ((a->vm | a->vn | a->vd) & 0x10)) {
51
return false;
52
}
53
54
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
55
return false;
56
}
57
58
- /* UNDEF accesses to D16-D31 if they don't exist */
59
- if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
60
- ((a->vm | a->vd) & 0x10)) {
61
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
62
return false;
63
}
64
65
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
66
+ /* UNDEF accesses to D16-D31 if they don't exist */
67
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
68
+ ((a->vm | a->vd) & 0x10)) {
69
return false;
70
}
71
72
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
73
return false;
74
}
75
76
- /* UNDEF accesses to D16-D31 if they don't exist */
77
- if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
78
+ if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
79
return false;
80
}
81
82
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
83
+ /* UNDEF accesses to D16-D31 if they don't exist */
84
+ if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
85
return false;
86
}
87
88
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
89
TCGv_i64 f0, f1, fd;
90
TCGv_ptr fpst;
91
92
- /* UNDEF accesses to D16-D31 if they don't exist */
93
- if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
94
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
95
return false;
96
}
97
98
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
99
+ /* UNDEF accesses to D16-D31 if they don't exist */
100
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
101
return false;
102
}
103
104
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
105
int veclen = s->vec_len;
106
TCGv_i64 f0, fd;
107
108
- /* UNDEF accesses to D16-D31 if they don't exist */
109
- if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
110
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
111
return false;
112
}
113
114
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
115
+ /* UNDEF accesses to D16-D31 if they don't exist */
116
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
117
return false;
118
}
119
120
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
121
return false;
122
}
123
124
- /* UNDEF accesses to D16-D31 if they don't exist. */
125
- if (!dc_isar_feature(aa32_simd_r32, s) &&
126
- ((a->vd | a->vn | a->vm) & 0x10)) {
127
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
128
return false;
129
}
130
131
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
132
+ /* UNDEF accesses to D16-D31 if they don't exist. */
133
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
134
+ ((a->vd | a->vn | a->vm) & 0x10)) {
135
return false;
136
}
137
138
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
139
140
vd = a->vd;
141
142
- /* UNDEF accesses to D16-D31 if they don't exist. */
143
- if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
144
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
145
return false;
146
}
147
148
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
149
+ /* UNDEF accesses to D16-D31 if they don't exist. */
150
+ if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
151
return false;
152
}
153
154
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
155
{
156
TCGv_i64 vd, vm;
157
158
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
159
+ return false;
160
+ }
161
+
162
/* Vm/M bits must be zero for the Z variant */
163
if (a->z && a->vm != 0) {
164
return false;
165
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
166
return false;
167
}
168
169
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
170
- return false;
171
- }
172
-
173
if (!vfp_access_check(s)) {
174
return true;
175
}
176
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
177
TCGv_i32 tmp;
178
TCGv_i64 vd;
179
180
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
181
+ return false;
182
+ }
183
+
184
if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
185
return false;
186
}
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
188
return false;
189
}
190
191
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
192
- return false;
193
- }
194
-
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
199
TCGv_i32 tmp;
200
TCGv_i64 vm;
201
202
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
203
+ return false;
204
+ }
205
+
206
if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
207
return false;
208
}
209
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
210
return false;
211
}
212
213
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
214
- return false;
215
- }
216
-
217
if (!vfp_access_check(s)) {
218
return true;
219
}
220
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
221
TCGv_ptr fpst;
222
TCGv_i64 tmp;
223
224
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
225
+ return false;
226
+ }
227
+
228
if (!dc_isar_feature(aa32_vrint, s)) {
229
return false;
230
}
231
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
232
return false;
233
}
234
235
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
236
- return false;
237
- }
238
-
239
if (!vfp_access_check(s)) {
240
return true;
241
}
242
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
243
TCGv_i64 tmp;
244
TCGv_i32 tcg_rmode;
245
246
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
247
+ return false;
248
+ }
249
+
250
if (!dc_isar_feature(aa32_vrint, s)) {
251
return false;
252
}
253
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
254
return false;
255
}
256
257
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
258
- return false;
259
- }
260
-
261
if (!vfp_access_check(s)) {
262
return true;
263
}
264
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
265
TCGv_ptr fpst;
266
TCGv_i64 tmp;
267
268
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
269
+ return false;
270
+ }
271
+
272
if (!dc_isar_feature(aa32_vrint, s)) {
273
return false;
274
}
275
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
276
return false;
277
}
278
279
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
280
- return false;
281
- }
282
-
283
if (!vfp_access_check(s)) {
284
return true;
285
}
286
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
287
TCGv_i64 vd;
288
TCGv_i32 vm;
289
290
- /* UNDEF accesses to D16-D31 if they don't exist. */
291
- if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
292
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
293
return false;
294
}
295
296
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
297
+ /* UNDEF accesses to D16-D31 if they don't exist. */
298
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
299
return false;
300
}
301
302
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
303
TCGv_i64 vm;
304
TCGv_i32 vd;
305
306
- /* UNDEF accesses to D16-D31 if they don't exist. */
307
- if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
308
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
309
return false;
310
}
311
312
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
313
+ /* UNDEF accesses to D16-D31 if they don't exist. */
314
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
315
return false;
316
}
317
318
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
319
TCGv_i64 vd;
320
TCGv_ptr fpst;
321
322
- /* UNDEF accesses to D16-D31 if they don't exist. */
323
- if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
324
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
325
return false;
326
}
327
328
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
329
+ /* UNDEF accesses to D16-D31 if they don't exist. */
330
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
331
return false;
332
}
333
334
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
335
TCGv_i32 vd;
336
TCGv_i64 vm;
337
338
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
339
+ return false;
340
+ }
341
+
342
if (!dc_isar_feature(aa32_jscvt, s)) {
343
return false;
344
}
345
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
346
return false;
347
}
348
349
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
350
- return false;
351
- }
352
-
353
if (!vfp_access_check(s)) {
354
return true;
355
}
356
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
357
TCGv_ptr fpst;
358
int frac_bits;
359
360
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
361
+ return false;
362
+ }
363
+
364
if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
365
return false;
366
}
367
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
368
return false;
369
}
370
371
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
372
- return false;
373
- }
374
-
375
if (!vfp_access_check(s)) {
376
return true;
377
}
378
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
379
TCGv_i64 vm;
380
TCGv_ptr fpst;
381
382
- /* UNDEF accesses to D16-D31 if they don't exist. */
383
- if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
384
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
385
return false;
386
}
387
388
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
389
+ /* UNDEF accesses to D16-D31 if they don't exist. */
390
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
391
return false;
392
}
393
394
--
395
2.20.1
396
397
1
From: Andrew Jeffery <andrew@aj.id.au>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is required to configure differences in behaviour between the
3
Sort this check to the start of a trans_* function.
4
AST2400 and AST2500 watchdog IPs.
4
Merge this with any existing test for fpdp_v2.
5
5
6
Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Cédric Le Goater <clg@kaod.org>
7
Message-id: 20200214181547.21408-10-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
hw/arm/aspeed_soc.c | 2 ++
11
target/arm/translate-vfp.inc.c | 24 ++++++++----------------
12
1 file changed, 2 insertions(+)
12
1 file changed, 8 insertions(+), 16 deletions(-)
13
13
14
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
14
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/aspeed_soc.c
16
--- a/target/arm/translate-vfp.inc.c
17
+++ b/hw/arm/aspeed_soc.c
17
+++ b/target/arm/translate-vfp.inc.c
18
@@ -XXX,XX +XXX,XX @@ static void aspeed_soc_init(Object *obj)
18
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
19
object_initialize(&s->wdt[i], sizeof(s->wdt[i]), TYPE_ASPEED_WDT);
19
* VFPv2 allows access to FPSID from userspace; VFPv3 restricts
20
object_property_add_child(obj, "wdt[*]", OBJECT(&s->wdt[i]), NULL);
20
* all ID registers to privileged access only.
21
qdev_set_parent_bus(DEVICE(&s->wdt[i]), sysbus_get_default());
21
*/
22
+ qdev_prop_set_uint32(DEVICE(&s->wdt[i]), "silicon-rev",
22
- if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
23
+ sc->info->silicon_rev);
23
+ if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
24
return false;
25
}
26
ignore_vfp_enabled = true;
27
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
28
case ARM_VFP_FPINST:
29
case ARM_VFP_FPINST2:
30
/* Not present in VFPv3 */
31
- if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
32
+ if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
33
return false;
34
}
35
break;
36
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
37
38
vd = a->vd;
39
40
- if (!dc_isar_feature(aa32_fpshvec, s) &&
41
- (veclen != 0 || s->vec_stride != 0)) {
42
+ if (!dc_isar_feature(aa32_fpsp_v3, s)) {
43
return false;
24
}
44
}
25
45
26
object_initialize(&s->ftgmac100, sizeof(s->ftgmac100), TYPE_FTGMAC100);
46
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
47
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
48
+ (veclen != 0 || s->vec_stride != 0)) {
49
return false;
50
}
51
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
53
54
vd = a->vd;
55
56
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
57
+ if (!dc_isar_feature(aa32_fpdp_v3, s)) {
58
return false;
59
}
60
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
62
return false;
63
}
64
65
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
66
- return false;
67
- }
68
-
69
if (!vfp_access_check(s)) {
70
return true;
71
}
72
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
73
TCGv_ptr fpst;
74
int frac_bits;
75
76
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
77
+ if (!dc_isar_feature(aa32_fpsp_v3, s)) {
78
return false;
79
}
80
81
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
82
TCGv_ptr fpst;
83
int frac_bits;
84
85
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
86
- return false;
87
- }
88
-
89
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
90
+ if (!dc_isar_feature(aa32_fpdp_v3, s)) {
91
return false;
92
}
93
27
--
94
--
28
2.7.4
95
2.20.1
29
96
30
97
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
We will eventually remove the early ARM_FEATURE_VFP test,
4
so add a proper test for each trans_* that does not already
5
have another ISA test.
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20200214181547.21408-11-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/translate-vfp.inc.c | 78 ++++++++++++++++++++++++++++++----
13
1 file changed, 69 insertions(+), 9 deletions(-)
14
15
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-vfp.inc.c
18
+++ b/target/arm/translate-vfp.inc.c
19
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
20
int pass;
21
uint32_t offset;
22
23
+ /* SIZE == 2 is a VFP instruction; otherwise NEON. */
24
+ if (a->size == 2
25
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
26
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
27
+ return false;
28
+ }
29
+
30
/* UNDEF accesses to D16-D31 if they don't exist */
31
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
32
return false;
33
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
34
pass = extract32(offset, 2, 1);
35
offset = extract32(offset, 0, 2) * 8;
36
37
- if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
38
- return false;
39
- }
40
-
41
if (!vfp_access_check(s)) {
42
return true;
43
}
44
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
45
int pass;
46
uint32_t offset;
47
48
+ /* SIZE == 2 is a VFP instruction; otherwise NEON. */
49
+ if (a->size == 2
50
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
51
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
52
+ return false;
53
+ }
54
+
55
/* UNDEF accesses to D16-D31 if they don't exist */
56
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
57
return false;
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
59
pass = extract32(offset, 2, 1);
60
offset = extract32(offset, 0, 2) * 8;
61
62
- if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
63
- return false;
64
- }
65
-
66
if (!vfp_access_check(s)) {
67
return true;
68
}
69
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
70
TCGv_i32 tmp;
71
bool ignore_vfp_enabled = false;
72
73
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
74
+ return false;
75
+ }
76
+
77
if (arm_dc_feature(s, ARM_FEATURE_M)) {
78
/*
79
* The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
80
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
81
{
82
TCGv_i32 tmp;
83
84
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
85
+ return false;
86
+ }
87
+
88
if (!vfp_access_check(s)) {
89
return true;
90
}
91
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
92
{
93
TCGv_i32 tmp;
94
95
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
96
+ return false;
97
+ }
98
+
99
/*
100
* VMOV between two general-purpose registers and two single precision
101
* floating point registers
102
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
103
104
/*
105
* VMOV between two general-purpose registers and one double precision
106
- * floating point register
107
+ * floating point register. Note that this does not require support
108
+ * for double precision arithmetic.
109
*/
110
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
111
+ return false;
112
+ }
113
114
/* UNDEF accesses to D16-D31 if they don't exist */
115
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
116
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
117
uint32_t offset;
118
TCGv_i32 addr, tmp;
119
120
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
121
+ return false;
122
+ }
123
+
124
if (!vfp_access_check(s)) {
125
return true;
126
}
127
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
128
TCGv_i32 addr;
129
TCGv_i64 tmp;
130
131
+ /* Note that this does not require support for double arithmetic. */
132
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
133
+ return false;
134
+ }
135
+
136
/* UNDEF accesses to D16-D31 if they don't exist */
137
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
138
return false;
139
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
140
TCGv_i32 addr, tmp;
141
int i, n;
142
143
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
144
+ return false;
145
+ }
146
+
147
n = a->imm;
148
149
if (n == 0 || (a->vd + n) > 32) {
150
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
151
TCGv_i64 tmp;
152
int i, n;
153
154
+ /* Note that this does not require support for double arithmetic. */
155
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
156
+ return false;
157
+ }
158
+
159
n = a->imm >> 1;
160
161
if (n == 0 || (a->vd + n) > 32 || n > 16) {
162
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
163
TCGv_i32 f0, f1, fd;
164
TCGv_ptr fpst;
165
166
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
167
+ return false;
168
+ }
169
+
170
if (!dc_isar_feature(aa32_fpshvec, s) &&
171
(veclen != 0 || s->vec_stride != 0)) {
172
return false;
173
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
174
int veclen = s->vec_len;
175
TCGv_i32 f0, fd;
176
177
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
178
+ return false;
179
+ }
180
+
181
if (!dc_isar_feature(aa32_fpshvec, s) &&
182
(veclen != 0 || s->vec_stride != 0)) {
183
return false;
184
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
185
{
186
TCGv_i32 vd, vm;
187
188
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
189
+ return false;
190
+ }
191
+
192
/* Vm/M bits must be zero for the Z variant */
193
if (a->z && a->vm != 0) {
194
return false;
195
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
196
TCGv_i32 vm;
197
TCGv_ptr fpst;
198
199
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
200
+ return false;
201
+ }
202
+
203
if (!vfp_access_check(s)) {
204
return true;
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
207
TCGv_i32 vm;
208
TCGv_ptr fpst;
209
210
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
211
+ return false;
212
+ }
213
+
214
if (!vfp_access_check(s)) {
215
return true;
216
}
217
--
218
2.20.1
219
220