Probably the last arm pullreq before softfreeze...

The following changes since commit 58560ad254fbda71d4daa6622d71683190070ee2:

  Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.2-20191024' into staging (2019-10-24 16:22:58 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20191024

for you to fetch changes up to a01a4a3e85ae8f6fe21adbedc80f7013faabdcf4:

  hw/arm/highbank: Use AddressSpace when using write_secondary_boot() (2019-10-24 17:16:30 +0100)

----------------------------------------------------------------
target-arm queue:
 * raspi boards: some cleanup
 * raspi: implement the bcm2835 system timer device
 * raspi: implement a dummy thermal sensor
 * KVM: support providing SVE to the guest
 * misc devices: switch to ptimer transaction API
 * cache TB flag state to improve performance of cpu_get_tb_cpu_state
 * aspeed: Add an AST2600 eval board

----------------------------------------------------------------
Andrew Jones (9):
      target/arm/monitor: Introduce qmp_query_cpu_model_expansion
      tests: arm: Introduce cpu feature tests
      target/arm: Allow SVE to be disabled via a CPU property
      target/arm/cpu64: max cpu: Introduce sve<N> properties
      target/arm/kvm64: Add kvm_arch_get/put_sve
      target/arm/kvm64: max cpu: Enable SVE when available
      target/arm/kvm: scratch vcpu: Preserve input kvm_vcpu_init features
      target/arm/cpu64: max cpu: Support sve properties with KVM
      target/arm/kvm: host cpu: Add support for sve<N> properties

Cédric Le Goater (2):
      hw/gpio: Fix property accessors of the AST2600 GPIO 1.8V model
      aspeed: Add an AST2600 eval board

Peter Maydell (8):
      hw/net/fsl_etsec/etsec.c: Switch to transaction-based ptimer API
      hw/timer/xilinx_timer.c: Switch to transaction-based ptimer API
      hw/dma/xilinx_axidma.c: Switch to transaction-based ptimer API
      hw/timer/slavio_timer: Remove useless check for NULL t->timer
      hw/timer/slavio_timer.c: Switch to transaction-based ptimer API
      hw/timer/grlib_gptimer.c: Switch to transaction-based ptimer API
      hw/m68k/mcf5206.c: Switch to transaction-based ptimer API
      hw/watchdog/milkymist-sysctl.c: Switch to transaction-based ptimer API

Philippe Mathieu-Daudé (8):
      hw/misc/bcm2835_thermal: Add a dummy BCM2835 thermal sensor
      hw/arm/bcm2835_peripherals: Use the thermal sensor block
      hw/timer/bcm2835: Add the BCM2835 SYS_timer
      hw/arm/bcm2835_peripherals: Use the SYS_timer
      hw/arm/bcm2836: Make the SoC code modular
      hw/arm/bcm2836: Rename cpus[] as cpu[].core
      hw/arm/raspi: Use AddressSpace when using arm_boot::write_secondary_boot
      hw/arm/highbank: Use AddressSpace when using write_secondary_boot()

Richard Henderson (24):
      target/arm: Split out rebuild_hflags_common
      target/arm: Split out rebuild_hflags_a64
      target/arm: Split out rebuild_hflags_common_32
      target/arm: Split arm_cpu_data_is_big_endian
      target/arm: Split out rebuild_hflags_m32
      target/arm: Reduce tests vs M-profile in cpu_get_tb_cpu_state
      target/arm: Split out rebuild_hflags_a32
      target/arm: Split out rebuild_hflags_aprofile
      target/arm: Hoist XSCALE_CPAR, VECLEN, VECSTRIDE in cpu_get_tb_cpu_state
      target/arm: Simplify set of PSTATE_SS in cpu_get_tb_cpu_state
      target/arm: Hoist computation of TBFLAG_A32.VFPEN
      target/arm: Add arm_rebuild_hflags
      target/arm: Split out arm_mmu_idx_el
      target/arm: Hoist store to cs_base in cpu_get_tb_cpu_state
      target/arm: Add HELPER(rebuild_hflags_{a32, a64, m32})
      target/arm: Rebuild hflags at EL changes
      target/arm: Rebuild hflags at MSR writes
      target/arm: Rebuild hflags at CPSR writes
      target/arm: Rebuild hflags at Xscale SCTLR writes
      target/arm: Rebuild hflags for M-profile
      target/arm: Rebuild hflags for M-profile NVIC
      linux-user/aarch64: Rebuild hflags for TARGET_WORDS_BIGENDIAN
      linux-user/arm: Rebuild hflags for TARGET_WORDS_BIGENDIAN
      target/arm: Rely on hflags correct in cpu_get_tb_cpu_state

 hw/misc/Makefile.objs | 1 +
 hw/timer/Makefile.objs | 1 +
 tests/Makefile.include | 5 +-
 qapi/machine-target.json | 6 +-
 hw/net/fsl_etsec/etsec.h | 1 -
 include/hw/arm/aspeed.h | 1 +
 include/hw/arm/bcm2835_peripherals.h | 5 +-
 include/hw/arm/bcm2836.h | 4 +-
 include/hw/arm/raspi_platform.h | 1 +
 include/hw/misc/bcm2835_thermal.h | 27 ++
 include/hw/timer/bcm2835_systmr.h | 33 +++
 include/qemu/bitops.h | 1 +
 target/arm/cpu.h | 105 +++++--
 target/arm/helper.h | 4 +
 target/arm/internals.h | 9 +
 target/arm/kvm_arm.h | 39 +++
 hw/arm/aspeed.c | 23 ++
 hw/arm/bcm2835_peripherals.c | 30 +-
 hw/arm/bcm2836.c | 44 +--
 hw/arm/highbank.c | 3 +-
 hw/arm/raspi.c | 14 +-
 hw/dma/xilinx_axidma.c | 9 +-
 hw/gpio/aspeed_gpio.c | 8 +-
 hw/intc/armv7m_nvic.c | 22 +-
 hw/m68k/mcf5206.c | 15 +-
 hw/misc/bcm2835_thermal.c | 135 +++++++++
 hw/net/fsl_etsec/etsec.c | 9 +-
 hw/timer/bcm2835_systmr.c | 163 +++++++++++
 hw/timer/grlib_gptimer.c | 28 +-
 hw/timer/milkymist-sysctl.c | 25 +-
 hw/timer/slavio_timer.c | 32 ++-
 hw/timer/xilinx_timer.c | 13 +-
 linux-user/aarch64/cpu_loop.c | 1 +
 linux-user/arm/cpu_loop.c | 1 +
 linux-user/syscall.c | 1 +
 target/arm/cpu.c | 26 +-
 target/arm/cpu64.c | 364 +++++++++++++++++++++--
 target/arm/helper-a64.c | 3 +
 target/arm/helper.c | 403 +++++++++++++++++---------
 target/arm/kvm.c | 25 +-
 target/arm/kvm32.c | 6 +-
 target/arm/kvm64.c | 325 ++++++++++++++++++---
 target/arm/m_helper.c | 6 +
 target/arm/machine.c | 1 +
 target/arm/monitor.c | 158 ++++++++++
 target/arm/op_helper.c | 4 +
 target/arm/translate-a64.c | 13 +-
 target/arm/translate.c | 33 ++-
 tests/arm-cpu-features.c | 540 +++++++++++++++++++++++++++++++++++
 docs/arm-cpu-features.rst | 317 ++++++++++++++++++++
 hw/timer/trace-events | 5 +
 51 files changed, 2725 insertions(+), 323 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_thermal.h
 create mode 100644 include/hw/timer/bcm2835_systmr.h
 create mode 100644 hw/misc/bcm2835_thermal.c
 create mode 100644 hw/timer/bcm2835_systmr.c
 create mode 100644 tests/arm-cpu-features.c
 create mode 100644 docs/arm-cpu-features.rst

The following changes since commit 65cc5ccf06a74c98de73ec683d9a543baa302a12:

  Merge tag 'pull-riscv-to-apply-20230120' of https://github.com/alistair23/qemu into staging (2023-01-20 16:17:56 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230123

for you to fetch changes up to 3b07a936d3bfe97b07ddffcfbb532985a88033dd:

  target/arm: Look up ARMCPRegInfo at runtime (2023-01-23 13:32:38 +0000)

----------------------------------------------------------------
target-arm queue:
 * Widen cnthctl_el2 to uint64_t
 * Unify checking for M Main Extension in MRS/MSR
 * bitbang_i2c, versatile_i2c: code cleanups
 * SME: refactor SME SM/ZA handling
 * Fix physical address resolution for MTE
 * Fix in_debug path in S1_ptw_translate
 * Don't set EXC_RETURN.ES if Security Extension not present
 * Implement DBGCLAIM registers
 * Provide stubs for more external debug registers
 * Look up ARMCPRegInfo at runtime, not translate time

----------------------------------------------------------------
David Reiss (1):
      target/arm: Unify checking for M Main Extension in MRS/MSR

Evgeny Iakovlev (2):
      target/arm: implement DBGCLAIM registers
      target/arm: provide stubs for more external debug registers

Peter Maydell (1):
      target/arm: Don't set EXC_RETURN.ES if Security Extension not present

Philippe Mathieu-Daudé (10):
      hw/i2c/bitbang_i2c: Define TYPE_GPIO_I2C in public header
      hw/i2c/bitbang_i2c: Remove unused dummy MemoryRegion
      hw/i2c/bitbang_i2c: Change state calling bitbang_i2c_set_state() helper
      hw/i2c/bitbang_i2c: Trace state changes
      hw/i2c/bitbang_i2c: Convert DPRINTF() to trace events
      hw/i2c/versatile_i2c: Drop useless casts from void * to pointer
      hw/i2c/versatile_i2c: Replace VersatileI2CState -> ArmSbconI2CState
      hw/i2c/versatile_i2c: Replace TYPE_VERSATILE_I2C -> TYPE_ARM_SBCON_I2C
      hw/i2c/versatile_i2c: Use ARM_SBCON_I2C() macro
      hw/i2c/versatile_i2c: Rename versatile_i2c -> arm_sbcon_i2c

Richard Henderson (12):
      target/arm: Widen cnthctl_el2 to uint64_t
      target/arm/sme: Reorg SME access handling in handle_msr_i()
      target/arm/sme: Rebuild hflags in set_pstate() helpers
      target/arm/sme: Introduce aarch64_set_svcr()
      target/arm/sme: Reset SVE state in aarch64_set_svcr()
      target/arm/sme: Reset ZA state in aarch64_set_svcr()
      target/arm/sme: Rebuild hflags in aarch64_set_svcr()
      target/arm/sme: Unify set_pstate() SM/ZA helpers as set_svcr()
      target/arm: Fix physical address resolution for MTE
      target/arm: Fix in_debug path in S1_ptw_translate
      target/arm: Reorg do_coproc_insn
      target/arm: Look up ARMCPRegInfo at runtime

 MAINTAINERS | 1 +
 include/hw/i2c/arm_sbcon_i2c.h | 6 +-
 include/hw/i2c/bitbang_i2c.h | 2 +
 target/arm/cpu.h | 5 +-
 target/arm/helper-sme.h | 3 +-
 target/arm/helper.h | 11 +-
 target/arm/translate.h | 7 +
 hw/arm/musicpal.c | 3 +-
 hw/arm/realview.c | 2 +-
 hw/arm/versatilepb.c | 2 +-
 hw/arm/vexpress.c | 2 +-
 hw/i2c/{versatile_i2c.c => arm_sbcon_i2c.c} | 39 ++-
 hw/i2c/bitbang_i2c.c | 80 ++++--
 linux-user/aarch64/cpu_loop.c | 11 +-
 linux-user/aarch64/signal.c | 13 +-
 target/arm/debug_helper.c | 54 ++++
 target/arm/helper.c | 41 ++-
 target/arm/m_helper.c | 24 +-
 target/arm/mte_helper.c | 2 +-
 target/arm/op_helper.c | 27 +-
 target/arm/ptw.c | 4 +-
 target/arm/sme_helper.c | 37 +--
 target/arm/translate-a64.c | 68 +++--
 target/arm/translate.c | 430 +++++++++++++++-------
 hw/arm/Kconfig | 4 +-
 hw/i2c/Kconfig | 2 +-
 hw/i2c/meson.build | 2 +-
 hw/i2c/trace-events | 7 +
 28 files changed, 506 insertions(+), 383 deletions(-)
 rename hw/i2c/{versatile_i2c.c => arm_sbcon_i2c.c} (70%)
Deleted patch
From: Cédric Le Goater <clg@kaod.org>

The property names of the AST2600 GPIO 1.8V model are one character longer
than the names of the other ASPEED GPIO model. Increase the string
buffer size by one and be more strict on the expected pattern of the
property name.

This fixes the QOM test of the ast2600-evb machine under:

Apple LLVM version 10.0.0 (clang-1000.10.44.4)
Target: x86_64-apple-darwin17.7.0
Thread model: posix
InstalledDir: /Library/Developer/CommandLineTools/usr/bin

Cc: Rashmica Gupta <rashmica.g@gmail.com>
Fixes: 36d737ee82b2 ("hw/gpio: Add in AST2600 specific implementation")
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20191023130455.1347-2-clg@kaod.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/gpio/aspeed_gpio.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/gpio/aspeed_gpio.c
+++ b/hw/gpio/aspeed_gpio.c
@@ -XXX,XX +XXX,XX @@ static void aspeed_gpio_get_pin(Object *obj, Visitor *v, const char *name,
 {
     int pin = 0xfff;
     bool level = true;
-    char group[3];
+    char group[4];
     AspeedGPIOState *s = ASPEED_GPIO(obj);
     int set_idx, group_idx = 0;

     if (sscanf(name, "gpio%2[A-Z]%1d", group, &pin) != 2) {
         /* 1.8V gpio */
-        if (sscanf(name, "gpio%3s%1d", group, &pin) != 2) {
+        if (sscanf(name, "gpio%3[18A-E]%1d", group, &pin) != 2) {
             error_setg(errp, "%s: error reading %s", __func__, name);
             return;
         }
@@ -XXX,XX +XXX,XX @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name,
     Error *local_err = NULL;
     bool level;
     int pin = 0xfff;
-    char group[3];
+    char group[4];
     AspeedGPIOState *s = ASPEED_GPIO(obj);
     int set_idx, group_idx = 0;

@@ -XXX,XX +XXX,XX @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name,
     }
     if (sscanf(name, "gpio%2[A-Z]%1d", group, &pin) != 2) {
         /* 1.8V gpio */
-        if (sscanf(name, "gpio%3s%1d", group, &pin) != 2) {
+        if (sscanf(name, "gpio%3[18A-E]%1d", group, &pin) != 2) {
             error_setg(errp, "%s: error reading %s", __func__, name);
             return;
         }
--
2.20.1
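For readers wondering why the buffer grows from 3 to 4 bytes in the patch above: a "%3[...]" conversion stores up to three matched characters plus a terminating NUL, so the destination must hold at least four bytes. A minimal standalone sketch (not part of the patch; the input string is an invented example) showing the fixed pattern at work:

#include <stdio.h>

int main(void)
{
    char group[4];   /* room for 3 characters + NUL, as in the fixed code */
    int pin;

    /* mirrors the 1.8V case: group names such as "18A" ... "18E" */
    if (sscanf("gpio18E3", "gpio%3[18A-E]%1d", group, &pin) == 2) {
        printf("group=%s pin=%d\n", group, pin);   /* prints: group=18E pin=3 */
    }
    return 0;
}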
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
There are 3 conditions that each enable this flag. M-profile always
3
This is a 64-bit register on AArch64, even if the high 44 bits
4
enables; A-profile with EL1 as AA64 always enables. Both of these
4
are RES0. Because this is defined as ARM_CP_STATE_BOTH, we are
5
conditions can easily be cached. The final condition relies on the
5
asserting that the cpreg field is 64-bits.
6
FPEXC register which we are not prepared to cache.
7
6
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1400
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20191023150057.25731-12-richard.henderson@linaro.org
9
Message-id: 20230115171633.3171890-1-richard.henderson@linaro.org
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
12
---
13
target/arm/cpu.h | 2 +-
13
target/arm/cpu.h | 2 +-
14
target/arm/helper.c | 14 ++++++++++----
14
1 file changed, 1 insertion(+), 1 deletion(-)
15
2 files changed, 11 insertions(+), 5 deletions(-)
16
15
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
18
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
20
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
22
* the same thing as the current security state of the processor!
21
};
23
*/
22
uint64_t c14_cntfrq; /* Counter Frequency register */
24
FIELD(TBFLAG_A32, NS, 6, 1)
23
uint64_t c14_cntkctl; /* Timer Control register */
25
-FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Not cached. */
24
- uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
26
+FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
25
+ uint64_t cnthctl_el2; /* Counter/Timer Hyp Control register */
27
FIELD(TBFLAG_A32, CONDEXEC, 8, 8) /* Not cached. */
26
uint64_t cntvoff_el2; /* Counter Virtual Offset register */
28
FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
27
ARMGenericTimer c14_timer[NUM_GTIMERS];
29
/* For M profile only, set if FPCCR.LSPACT is set */
28
uint32_t c15_cpar; /* XScale Coprocessor Access Register */
30
diff --git a/target/arm/helper.c b/target/arm/helper.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/helper.c
33
+++ b/target/arm/helper.c
34
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
35
{
36
uint32_t flags = 0;
37
38
+ /* v8M always enables the fpu. */
39
+ flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
40
+
41
if (arm_v7m_is_handler_mode(env)) {
42
flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
43
}
44
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
45
ARMMMUIdx mmu_idx)
46
{
47
uint32_t flags = rebuild_hflags_aprofile(env);
48
+
49
+ if (arm_el_is_aa64(env, 1)) {
50
+ flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
51
+ }
52
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
53
}
54
55
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
56
flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
57
env->vfp.vec_stride);
58
}
59
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
60
+ flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
61
+ }
62
}
63
64
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
65
flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
66
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
67
- || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
68
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
69
- }
70
pstate_for_ss = env->uncached_cpsr;
71
}
72
73
--
29
--
74
2.20.1
30
2.34.1
75
76
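The hunks above set and test individual cached TB-flag bits with the FIELD()/FIELD_DP32() macros. As a rough, self-contained illustration of that bitfield pattern (simplified stand-in macros, not QEMU's actual definitions from hw/registerfields.h):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MY_FIELD(reg, field, shift, length) \
    enum { reg##_##field##_SHIFT = (shift), reg##_##field##_LENGTH = (length) };

#define MY_FIELD_MASK(reg, field) \
    ((uint32_t)(((1u << reg##_##field##_LENGTH) - 1) << reg##_##field##_SHIFT))

#define MY_FIELD_DP32(storage, reg, field, val)             \
    (((storage) & ~MY_FIELD_MASK(reg, field)) |             \
     (((uint32_t)(val) << reg##_##field##_SHIFT) & MY_FIELD_MASK(reg, field)))

/* mirrors FIELD(TBFLAG_A32, VFPEN, 7, 1) above */
MY_FIELD(TBFLAG_A32, VFPEN, 7, 1)

int main(void)
{
    uint32_t flags = 0;

    /* mirrors flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1) */
    flags = MY_FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    printf("flags = 0x%08" PRIx32 "\n", flags);   /* prints 0x00000080 */
    return 0;
}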
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: David Reiss <dreiss@meta.com>
2
2
3
Continue setting, but not relying upon, env->hflags.
3
BASEPRI, FAULTMASK, and their _NS equivalents only exist on devices with
4
the Main Extension. However, the MRS instruction did not check this,
5
and the MSR instruction handled it inconsistently (warning BASEPRI, but
6
silently ignoring writes to BASEPRI_NS). Unify this behavior and always
7
warn when reading or writing any of these registers if the extension is
8
not present.
4
9
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: David Reiss <dreiss@meta.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 167330628518.10497.13100425787268927786-0@git.sr.ht
7
Message-id: 20191023150057.25731-21-richard.henderson@linaro.org
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
14
---
10
target/arm/m_helper.c | 6 ++++++
15
target/arm/m_helper.c | 22 ++++++++++++++++++++--
11
target/arm/translate.c | 5 ++++-
16
1 file changed, 20 insertions(+), 2 deletions(-)
12
2 files changed, 10 insertions(+), 1 deletion(-)
13
17
14
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
18
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/m_helper.c
20
--- a/target/arm/m_helper.c
17
+++ b/target/arm/m_helper.c
21
+++ b/target/arm/m_helper.c
18
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
22
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
19
switch_v7m_security_state(env, dest & 1);
23
}
20
env->thumb = 1;
24
return env->v7m.primask[M_REG_NS];
21
env->regs[15] = dest & ~1;
25
case 0x91: /* BASEPRI_NS */
22
+ arm_rebuild_hflags(env);
26
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
23
}
27
+ goto bad_reg;
24
28
+ }
25
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
29
if (!env->v7m.secure) {
26
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
30
return 0;
27
switch_v7m_security_state(env, 0);
31
}
28
env->thumb = 1;
32
return env->v7m.basepri[M_REG_NS];
29
env->regs[15] = dest;
33
case 0x93: /* FAULTMASK_NS */
30
+ arm_rebuild_hflags(env);
34
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
31
}
35
+ goto bad_reg;
32
36
+ }
33
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
37
if (!env->v7m.secure) {
34
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
38
return 0;
35
env->regs[14] = lr;
39
}
36
env->regs[15] = addr & 0xfffffffe;
40
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
37
env->thumb = addr & 1;
41
return env->v7m.primask[env->v7m.secure];
38
+ arm_rebuild_hflags(env);
42
case 17: /* BASEPRI */
39
}
43
case 18: /* BASEPRI_MAX */
40
44
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
41
static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
45
+ goto bad_reg;
42
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
46
+ }
43
47
return env->v7m.basepri[env->v7m.secure];
44
/* Otherwise, we have a successful exception exit. */
48
case 19: /* FAULTMASK */
45
arm_clear_exclusive(env);
49
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
46
+ arm_rebuild_hflags(env);
50
+ goto bad_reg;
47
qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
51
+ }
48
}
52
return env->v7m.faultmask[env->v7m.secure];
49
53
default:
50
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
54
bad_reg:
51
xpsr_write(env, 0, XPSR_IT);
55
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
52
env->thumb = newpc & 1;
56
env->v7m.primask[M_REG_NS] = val & 1;
53
env->regs[15] = newpc & ~1;
57
return;
54
+ arm_rebuild_hflags(env);
58
case 0x91: /* BASEPRI_NS */
55
59
- if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
56
qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
60
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
57
return true;
61
+ goto bad_reg;
58
@@ -XXX,XX +XXX,XX @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
62
+ }
59
switch_v7m_security_state(env, true);
63
+ if (!env->v7m.secure) {
60
xpsr_write(env, 0, XPSR_IT);
64
return;
61
env->regs[15] += 4;
65
}
62
+ arm_rebuild_hflags(env);
66
env->v7m.basepri[M_REG_NS] = val & 0xff;
63
return true;
67
return;
64
68
case 0x93: /* FAULTMASK_NS */
65
gen_invep:
69
- if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
66
diff --git a/target/arm/translate.c b/target/arm/translate.c
70
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
67
index XXXXXXX..XXXXXXX 100644
71
+ goto bad_reg;
68
--- a/target/arm/translate.c
72
+ }
69
+++ b/target/arm/translate.c
73
+ if (!env->v7m.secure) {
70
@@ -XXX,XX +XXX,XX @@ static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
74
return;
71
75
}
72
static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
76
env->v7m.faultmask[M_REG_NS] = val & 1;
73
{
74
- TCGv_i32 addr, reg;
75
+ TCGv_i32 addr, reg, el;
76
77
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
78
return false;
79
@@ -XXX,XX +XXX,XX @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
80
gen_helper_v7m_msr(cpu_env, addr, reg);
81
tcg_temp_free_i32(addr);
82
tcg_temp_free_i32(reg);
83
+ el = tcg_const_i32(s->current_el);
84
+ gen_helper_rebuild_hflags_m32(cpu_env, el);
85
+ tcg_temp_free_i32(el);
86
gen_lookup_tb(s);
87
return true;
88
}
89
--
77
--
90
2.20.1
78
2.34.1
91
92
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
write_secondary_boot() is used in SMP configurations where the
3
Define TYPE_GPIO_I2C in the public "hw/i2c/bitbang_i2c.h"
4
CPU address space might not be the main System Bus.
4
header and use it in hw/arm/musicpal.c.
5
The rom_add_blob_fixed_as() function allows us to specify an
6
address space. Use it to write each boot blob in the corresponding
7
CPU address space.
8
5
9
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20191019234715.25750-15-f4bug@amsat.org
8
Acked-by: Corey Minyard <cminyard@mvista.com>
9
Message-id: 20230111085016.44551-2-philmd@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
11
---
14
hw/arm/highbank.c | 3 ++-
12
include/hw/i2c/bitbang_i2c.h | 2 ++
15
1 file changed, 2 insertions(+), 1 deletion(-)
13
hw/arm/musicpal.c | 3 ++-
14
hw/i2c/bitbang_i2c.c | 1 -
15
3 files changed, 4 insertions(+), 2 deletions(-)
16
16
17
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
17
diff --git a/include/hw/i2c/bitbang_i2c.h b/include/hw/i2c/bitbang_i2c.h
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/highbank.c
19
--- a/include/hw/i2c/bitbang_i2c.h
20
+++ b/hw/arm/highbank.c
20
+++ b/include/hw/i2c/bitbang_i2c.h
21
@@ -XXX,XX +XXX,XX @@ static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
21
@@ -XXX,XX +XXX,XX @@
22
for (n = 0; n < ARRAY_SIZE(smpboot); n++) {
22
23
smpboot[n] = tswap32(smpboot[n]);
23
#include "hw/i2c/i2c.h"
24
}
24
25
- rom_add_blob_fixed("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR);
25
+#define TYPE_GPIO_I2C "gpio_i2c"
26
+ rom_add_blob_fixed_as("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR,
26
+
27
+ arm_boot_address_space(cpu, info));
27
typedef struct bitbang_i2c_interface bitbang_i2c_interface;
28
}
28
29
29
#define BITBANG_I2C_SDA 0
30
static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
30
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/arm/musicpal.c
33
+++ b/hw/arm/musicpal.c
34
@@ -XXX,XX +XXX,XX @@
35
#include "hw/block/flash.h"
36
#include "ui/console.h"
37
#include "hw/i2c/i2c.h"
38
+#include "hw/i2c/bitbang_i2c.h"
39
#include "hw/irq.h"
40
#include "hw/or-irq.h"
41
#include "hw/audio/wm8750.h"
42
@@ -XXX,XX +XXX,XX @@ static void musicpal_init(MachineState *machine)
43
44
dev = sysbus_create_simple(TYPE_MUSICPAL_GPIO, MP_GPIO_BASE,
45
qdev_get_gpio_in(pic, MP_GPIO_IRQ));
46
- i2c_dev = sysbus_create_simple("gpio_i2c", -1, NULL);
47
+ i2c_dev = sysbus_create_simple(TYPE_GPIO_I2C, -1, NULL);
48
i2c = (I2CBus *)qdev_get_child_bus(i2c_dev, "i2c");
49
50
lcd_dev = sysbus_create_simple(TYPE_MUSICPAL_LCD, MP_LCD_BASE, NULL);
51
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/hw/i2c/bitbang_i2c.c
54
+++ b/hw/i2c/bitbang_i2c.c
55
@@ -XXX,XX +XXX,XX @@ void bitbang_i2c_init(bitbang_i2c_interface *s, I2CBus *bus)
56
57
/* GPIO interface. */
58
59
-#define TYPE_GPIO_I2C "gpio_i2c"
60
OBJECT_DECLARE_SIMPLE_TYPE(GPIOI2CState, GPIO_I2C)
61
62
struct GPIOI2CState {
31
--
63
--
32
2.20.1
64
2.34.1
33
65
34
66
1
Switch the xilinx_axidma code away from bottom-half based ptimers to
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
the new transaction-based ptimer API. This just requires adding
3
begin/commit calls around the various places that modify the ptimer
4
state, and using the new ptimer_init() function to create the timer.
5
2
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Acked-by: Corey Minyard <cminyard@mvista.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20230111085016.44551-3-philmd@linaro.org
10
Message-id: 20191017132122.4402-4-peter.maydell@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
8
---
13
hw/dma/xilinx_axidma.c | 9 +++++----
9
hw/i2c/bitbang_i2c.c | 7 ++-----
14
1 file changed, 5 insertions(+), 4 deletions(-)
10
1 file changed, 2 insertions(+), 5 deletions(-)
15
11
16
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
12
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
17
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/dma/xilinx_axidma.c
14
--- a/hw/i2c/bitbang_i2c.c
19
+++ b/hw/dma/xilinx_axidma.c
15
+++ b/hw/i2c/bitbang_i2c.c
20
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ void bitbang_i2c_init(bitbang_i2c_interface *s, I2CBus *bus)
21
#include "hw/ptimer.h"
17
OBJECT_DECLARE_SIMPLE_TYPE(GPIOI2CState, GPIO_I2C)
22
#include "hw/qdev-properties.h"
18
23
#include "qemu/log.h"
19
struct GPIOI2CState {
24
-#include "qemu/main-loop.h"
20
+ /*< private >*/
25
#include "qemu/module.h"
21
SysBusDevice parent_obj;
26
22
+ /*< public >*/
27
#include "hw/stream.h"
23
28
@@ -XXX,XX +XXX,XX @@ enum {
24
- MemoryRegion dummy_iomem;
29
};
25
bitbang_i2c_interface bitbang;
30
26
int last_level;
31
struct Stream {
27
qemu_irq out;
32
- QEMUBH *bh;
28
@@ -XXX,XX +XXX,XX @@ static void gpio_i2c_init(Object *obj)
33
ptimer_state *ptimer;
29
{
34
qemu_irq irq;
30
DeviceState *dev = DEVICE(obj);
35
31
GPIOI2CState *s = GPIO_I2C(obj);
36
@@ -XXX,XX +XXX,XX @@ static void stream_complete(struct Stream *s)
32
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
37
unsigned int comp_delay;
33
I2CBus *bus;
38
34
39
/* Start the delayed timer. */
35
- memory_region_init(&s->dummy_iomem, obj, "gpio_i2c", 0);
40
+ ptimer_transaction_begin(s->ptimer);
36
- sysbus_init_mmio(sbd, &s->dummy_iomem);
41
comp_delay = s->regs[R_DMACR] >> 24;
37
-
42
if (comp_delay) {
38
bus = i2c_init_bus(dev, "i2c");
43
ptimer_stop(s->ptimer);
39
bitbang_i2c_init(&s->bitbang, bus);
44
@@ -XXX,XX +XXX,XX @@ static void stream_complete(struct Stream *s)
45
s->regs[R_DMASR] |= DMASR_IOC_IRQ;
46
stream_reload_complete_cnt(s);
47
}
48
+ ptimer_transaction_commit(s->ptimer);
49
}
50
51
static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
52
@@ -XXX,XX +XXX,XX @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
53
struct Stream *st = &s->streams[i];
54
55
st->nr = i;
56
- st->bh = qemu_bh_new(timer_hit, st);
57
- st->ptimer = ptimer_init_with_bh(st->bh, PTIMER_POLICY_DEFAULT);
58
+ st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
59
+ ptimer_transaction_begin(st->ptimer);
60
ptimer_set_freq(st->ptimer, s->freqhz);
61
+ ptimer_transaction_commit(st->ptimer);
62
}
63
return;
64
40
65
--
41
--
66
2.20.1
42
2.34.1
67
43
68
44
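The ptimer conversion described above follows one pattern throughout the series: create the timer with ptimer_init() instead of a bottom half, and bracket every state change with a transaction. A condensed sketch of that pattern (the device and callback names are invented; this is meant to be read in the context of a QEMU device model, not built standalone):

#include "qemu/osdep.h"
#include "hw/ptimer.h"

static void mydev_timer_hit(void *opaque)
{
    /* timer expiry work for the (hypothetical) device goes here */
}

static void mydev_init_timer(void *opaque, uint32_t freq_hz)
{
    /* was: bh = qemu_bh_new(...); ptimer_init_with_bh(bh, ...); */
    ptimer_state *pt = ptimer_init(mydev_timer_hit, opaque,
                                   PTIMER_POLICY_DEFAULT);

    /* every modification now sits inside a begin/commit pair */
    ptimer_transaction_begin(pt);
    ptimer_set_freq(pt, freq_hz);
    ptimer_transaction_commit(pt);
    /* a real device would keep 'pt' in its state structure */
}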
1
From: Andrew Jones <drjones@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
Extend the SVE vq map initialization and validation with KVM's
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
supported vector lengths when KVM is enabled. In order to determine
5
and select supported lengths we add two new KVM functions for getting
6
and setting the KVM_REG_ARM64_SVE_VLS pseudo-register.
7
8
This patch has been co-authored with Richard Henderson, who reworked
9
the target/arm/cpu64.c changes in order to push all the validation and
10
auto-enabling/disabling steps into the finalizer, resulting in a nice
11
LOC reduction.
12
13
Signed-off-by: Andrew Jones <drjones@redhat.com>
14
Reviewed-by: Eric Auger <eric.auger@redhat.com>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
5
Acked-by: Corey Minyard <cminyard@mvista.com>
17
Message-id: 20191024121808.9612-9-drjones@redhat.com
6
Message-id: 20230111085016.44551-4-philmd@linaro.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
8
---
20
target/arm/kvm_arm.h | 12 +++
9
hw/i2c/bitbang_i2c.c | 23 +++++++++++++++--------
21
target/arm/cpu64.c | 176 ++++++++++++++++++++++++++++----------
10
1 file changed, 15 insertions(+), 8 deletions(-)
22
target/arm/kvm64.c | 100 +++++++++++++++++++++-
23
tests/arm-cpu-features.c | 106 ++++++++++++++++++++++-
24
docs/arm-cpu-features.rst | 45 +++++++---
25
5 files changed, 381 insertions(+), 58 deletions(-)
26
11
27
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
12
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
28
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/kvm_arm.h
14
--- a/hw/i2c/bitbang_i2c.c
30
+++ b/target/arm/kvm_arm.h
15
+++ b/hw/i2c/bitbang_i2c.c
31
@@ -XXX,XX +XXX,XX @@ typedef struct ARMHostCPUFeatures {
16
@@ -XXX,XX +XXX,XX @@ do { printf("bitbang_i2c: " fmt , ## __VA_ARGS__); } while (0)
32
*/
17
#define DPRINTF(fmt, ...) do {} while(0)
33
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
34
35
+/**
36
+ * kvm_arm_sve_get_vls:
37
+ * @cs: CPUState
38
+ * @map: bitmap to fill in
39
+ *
40
+ * Get all the SVE vector lengths supported by the KVM host, setting
41
+ * the bits corresponding to their length in quadwords minus one
42
+ * (vq - 1) in @map up to ARM_MAX_VQ.
43
+ */
44
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map);
45
+
46
/**
47
* kvm_arm_set_cpu_features_from_host:
48
* @cpu: ARMCPU to set the features for
49
@@ -XXX,XX +XXX,XX @@ static inline int kvm_arm_vgic_probe(void)
50
static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) {}
51
static inline void kvm_arm_pmu_init(CPUState *cs) {}
52
53
+static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) {}
54
#endif
18
#endif
55
19
56
static inline const char *gic_class_name(void)
20
+static void bitbang_i2c_set_state(bitbang_i2c_interface *i2c,
57
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
21
+ bitbang_i2c_state state)
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/cpu64.c
60
+++ b/target/arm/cpu64.c
61
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
62
* any of the above. Finally, if SVE is not disabled, then at least one
63
* vector length must be enabled.
64
*/
65
+ DECLARE_BITMAP(kvm_supported, ARM_MAX_VQ);
66
DECLARE_BITMAP(tmp, ARM_MAX_VQ);
67
uint32_t vq, max_vq = 0;
68
69
+ /* Collect the set of vector lengths supported by KVM. */
70
+ bitmap_zero(kvm_supported, ARM_MAX_VQ);
71
+ if (kvm_enabled() && kvm_arm_sve_supported(CPU(cpu))) {
72
+ kvm_arm_sve_get_vls(CPU(cpu), kvm_supported);
73
+ } else if (kvm_enabled()) {
74
+ assert(!cpu_isar_feature(aa64_sve, cpu));
75
+ }
76
+
77
/*
78
* Process explicit sve<N> properties.
79
* From the properties, sve_vq_map<N> implies sve_vq_init<N>.
80
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
81
return;
82
}
83
84
- /* Propagate enabled bits down through required powers-of-two. */
85
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
86
- if (!test_bit(vq - 1, cpu->sve_vq_init)) {
87
- set_bit(vq - 1, cpu->sve_vq_map);
88
+ if (kvm_enabled()) {
89
+ /*
90
+ * For KVM we have to automatically enable all supported unitialized
91
+ * lengths, even when the smaller lengths are not all powers-of-two.
92
+ */
93
+ bitmap_andnot(tmp, kvm_supported, cpu->sve_vq_init, max_vq);
94
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
95
+ } else {
96
+ /* Propagate enabled bits down through required powers-of-two. */
97
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
98
+ if (!test_bit(vq - 1, cpu->sve_vq_init)) {
99
+ set_bit(vq - 1, cpu->sve_vq_map);
100
+ }
101
}
102
}
103
} else if (cpu->sve_max_vq == 0) {
104
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
105
return;
106
}
107
108
- /* Disabling a power-of-two disables all larger lengths. */
109
- if (test_bit(0, cpu->sve_vq_init)) {
110
- error_setg(errp, "cannot disable sve128");
111
- error_append_hint(errp, "Disabling sve128 results in all vector "
112
- "lengths being disabled.\n");
113
- error_append_hint(errp, "With SVE enabled, at least one vector "
114
- "length must be enabled.\n");
115
- return;
116
- }
117
- for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
118
- if (test_bit(vq - 1, cpu->sve_vq_init)) {
119
- break;
120
+ if (kvm_enabled()) {
121
+ /* Disabling a supported length disables all larger lengths. */
122
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
123
+ if (test_bit(vq - 1, cpu->sve_vq_init) &&
124
+ test_bit(vq - 1, kvm_supported)) {
125
+ break;
126
+ }
127
}
128
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
129
+ bitmap_andnot(cpu->sve_vq_map, kvm_supported,
130
+ cpu->sve_vq_init, max_vq);
131
+ if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
132
+ error_setg(errp, "cannot disable sve%d", vq * 128);
133
+ error_append_hint(errp, "Disabling sve%d results in all "
134
+ "vector lengths being disabled.\n",
135
+ vq * 128);
136
+ error_append_hint(errp, "With SVE enabled, at least one "
137
+ "vector length must be enabled.\n");
138
+ return;
139
+ }
140
+ } else {
141
+ /* Disabling a power-of-two disables all larger lengths. */
142
+ if (test_bit(0, cpu->sve_vq_init)) {
143
+ error_setg(errp, "cannot disable sve128");
144
+ error_append_hint(errp, "Disabling sve128 results in all "
145
+ "vector lengths being disabled.\n");
146
+ error_append_hint(errp, "With SVE enabled, at least one "
147
+ "vector length must be enabled.\n");
148
+ return;
149
+ }
150
+ for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
151
+ if (test_bit(vq - 1, cpu->sve_vq_init)) {
152
+ break;
153
+ }
154
+ }
155
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
156
+ bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
157
}
158
- max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
159
160
- bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
161
max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
165
assert(max_vq != 0);
166
bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
167
168
- /* Ensure all required powers-of-two are enabled. */
169
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
170
- if (!test_bit(vq - 1, cpu->sve_vq_map)) {
171
- error_setg(errp, "cannot disable sve%d", vq * 128);
172
- error_append_hint(errp, "sve%d is required as it "
173
- "is a power-of-two length smaller than "
174
- "the maximum, sve%d\n",
175
- vq * 128, max_vq * 128);
176
+ if (kvm_enabled()) {
177
+ /* Ensure the set of lengths matches what KVM supports. */
178
+ bitmap_xor(tmp, cpu->sve_vq_map, kvm_supported, max_vq);
179
+ if (!bitmap_empty(tmp, max_vq)) {
180
+ vq = find_last_bit(tmp, max_vq) + 1;
181
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
182
+ if (cpu->sve_max_vq) {
183
+ error_setg(errp, "cannot set sve-max-vq=%d",
184
+ cpu->sve_max_vq);
185
+ error_append_hint(errp, "This KVM host does not support "
186
+ "the vector length %d-bits.\n",
187
+ vq * 128);
188
+ error_append_hint(errp, "It may not be possible to use "
189
+ "sve-max-vq with this KVM host. Try "
190
+ "using only sve<N> properties.\n");
191
+ } else {
192
+ error_setg(errp, "cannot enable sve%d", vq * 128);
193
+ error_append_hint(errp, "This KVM host does not support "
194
+ "the vector length %d-bits.\n",
195
+ vq * 128);
196
+ }
197
+ } else {
198
+ error_setg(errp, "cannot disable sve%d", vq * 128);
199
+ error_append_hint(errp, "The KVM host requires all "
200
+ "supported vector lengths smaller "
201
+ "than %d bits to also be enabled.\n",
202
+ max_vq * 128);
203
+ }
204
return;
205
}
206
+ } else {
207
+ /* Ensure all required powers-of-two are enabled. */
208
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
209
+ if (!test_bit(vq - 1, cpu->sve_vq_map)) {
210
+ error_setg(errp, "cannot disable sve%d", vq * 128);
211
+ error_append_hint(errp, "sve%d is required as it "
212
+ "is a power-of-two length smaller than "
213
+ "the maximum, sve%d\n",
214
+ vq * 128, max_vq * 128);
215
+ return;
216
+ }
217
+ }
218
}
219
220
/*
221
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
222
{
223
ARMCPU *cpu = ARM_CPU(obj);
224
Error *err = NULL;
225
+ uint32_t max_vq;
226
227
- visit_type_uint32(v, name, &cpu->sve_max_vq, &err);
228
-
229
- if (!err && (cpu->sve_max_vq == 0 || cpu->sve_max_vq > ARM_MAX_VQ)) {
230
- error_setg(&err, "unsupported SVE vector length");
231
- error_append_hint(&err, "Valid sve-max-vq in range [1-%d]\n",
232
- ARM_MAX_VQ);
233
+ visit_type_uint32(v, name, &max_vq, &err);
234
+ if (err) {
235
+ error_propagate(errp, err);
236
+ return;
237
}
238
- error_propagate(errp, err);
239
+
240
+ if (kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
241
+ error_setg(errp, "cannot set sve-max-vq");
242
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
243
+ return;
244
+ }
245
+
246
+ if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
247
+ error_setg(errp, "unsupported SVE vector length");
248
+ error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
249
+ ARM_MAX_VQ);
250
+ return;
251
+ }
252
+
253
+ cpu->sve_max_vq = max_vq;
254
}
255
256
static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
257
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
258
return;
259
}
260
261
+ if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
262
+ error_setg(errp, "cannot enable %s", name);
263
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
264
+ return;
265
+ }
266
+
267
if (value) {
268
set_bit(vq - 1, cpu->sve_vq_map);
269
} else {
270
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
271
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
272
cpu->dcz_blocksize = 7; /* 512 bytes */
273
#endif
274
-
275
- object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
276
- cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
277
-
278
- for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
279
- char name[8];
280
- sprintf(name, "sve%d", vq * 128);
281
- object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
282
- cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
283
- }
284
}
285
286
object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
287
cpu_arm_set_sve, NULL, NULL, &error_fatal);
288
+ object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
289
+ cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
290
+
291
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
292
+ char name[8];
293
+ sprintf(name, "sve%d", vq * 128);
294
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
295
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
296
+ }
297
}
298
299
struct ARMCPUInfo {
300
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
301
index XXXXXXX..XXXXXXX 100644
302
--- a/target/arm/kvm64.c
303
+++ b/target/arm/kvm64.c
304
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_sve_supported(CPUState *cpu)
305
return kvm_check_extension(s, KVM_CAP_ARM_SVE);
306
}
307
308
+QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
309
+
310
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
311
+{
22
+{
312
+ /* Only call this function if kvm_arm_sve_supported() returns true. */
23
+ i2c->state = state;
313
+ static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
314
+ static bool probed;
315
+ uint32_t vq = 0;
316
+ int i, j;
317
+
318
+ bitmap_clear(map, 0, ARM_MAX_VQ);
319
+
320
+ /*
321
+ * KVM ensures all host CPUs support the same set of vector lengths.
322
+ * So we only need to create the scratch VCPUs once and then cache
323
+ * the results.
324
+ */
325
+ if (!probed) {
326
+ struct kvm_vcpu_init init = {
327
+ .target = -1,
328
+ .features[0] = (1 << KVM_ARM_VCPU_SVE),
329
+ };
330
+ struct kvm_one_reg reg = {
331
+ .id = KVM_REG_ARM64_SVE_VLS,
332
+ .addr = (uint64_t)&vls[0],
333
+ };
334
+ int fdarray[3], ret;
335
+
336
+ probed = true;
337
+
338
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
339
+ error_report("failed to create scratch VCPU with SVE enabled");
340
+ abort();
341
+ }
342
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
343
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
344
+ if (ret) {
345
+ error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
346
+ strerror(errno));
347
+ abort();
348
+ }
349
+
350
+ for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
351
+ if (vls[i]) {
352
+ vq = 64 - clz64(vls[i]) + i * 64;
353
+ break;
354
+ }
355
+ }
356
+ if (vq > ARM_MAX_VQ) {
357
+ warn_report("KVM supports vector lengths larger than "
358
+ "QEMU can enable");
359
+ }
360
+ }
361
+
362
+ for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
363
+ if (!vls[i]) {
364
+ continue;
365
+ }
366
+ for (j = 1; j <= 64; ++j) {
367
+ vq = j + i * 64;
368
+ if (vq > ARM_MAX_VQ) {
369
+ return;
370
+ }
371
+ if (vls[i] & (1UL << (j - 1))) {
372
+ set_bit(vq - 1, map);
373
+ }
374
+ }
375
+ }
376
+}
24
+}
377
+
25
+
378
+static int kvm_arm_sve_set_vls(CPUState *cs)
26
static void bitbang_i2c_enter_stop(bitbang_i2c_interface *i2c)
379
+{
27
{
380
+ uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
28
DPRINTF("STOP\n");
381
+ struct kvm_one_reg reg = {
29
if (i2c->current_addr >= 0)
382
+ .id = KVM_REG_ARM64_SVE_VLS,
30
i2c_end_transfer(i2c->bus);
383
+ .addr = (uint64_t)&vls[0],
31
i2c->current_addr = -1;
384
+ };
32
- i2c->state = STOPPED;
385
+ ARMCPU *cpu = ARM_CPU(cs);
33
+ bitbang_i2c_set_state(i2c, STOPPED);
386
+ uint32_t vq;
34
}
387
+ int i, j;
35
388
+
36
/* Set device data pin. */
389
+ assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
37
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
390
+
38
if (level == 0) {
391
+ for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
39
DPRINTF("START\n");
392
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
40
/* START condition. */
393
+ i = (vq - 1) / 64;
41
- i2c->state = SENDING_BIT7;
394
+ j = (vq - 1) % 64;
42
+ bitbang_i2c_set_state(i2c, SENDING_BIT7);
395
+ vls[i] |= 1UL << j;
43
i2c->current_addr = -1;
396
+ }
44
} else {
397
+ }
45
/* STOP condition. */
398
+
46
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
399
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
47
case SENDING_BIT7 ... SENDING_BIT0:
400
+}
48
i2c->buffer = (i2c->buffer << 1) | data;
401
+
49
/* will end up in WAITING_FOR_ACK */
402
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
50
- i2c->state++;
403
51
+ bitbang_i2c_set_state(i2c, i2c->state + 1);
404
int kvm_arch_init_vcpu(CPUState *cs)
52
return bitbang_i2c_ret(i2c, 1);
405
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
53
406
54
case WAITING_FOR_ACK:
407
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
55
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
408
!object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
56
* device we were sending to decided to NACK us).
409
- fprintf(stderr, "KVM is not supported for this guest CPU type\n");
57
*/
410
+ error_report("KVM is not supported for this guest CPU type");
58
DPRINTF("Got NACK\n");
411
return -EINVAL;
59
+ bitbang_i2c_set_state(i2c, SENT_NACK);
60
bitbang_i2c_enter_stop(i2c);
61
return bitbang_i2c_ret(i2c, 1);
62
}
63
if (i2c->current_addr & 1) {
64
- i2c->state = RECEIVING_BIT7;
65
+ bitbang_i2c_set_state(i2c, RECEIVING_BIT7);
66
} else {
67
- i2c->state = SENDING_BIT7;
68
+ bitbang_i2c_set_state(i2c, SENDING_BIT7);
69
}
70
return bitbang_i2c_ret(i2c, 0);
412
}
71
}
413
72
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
414
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
73
case RECEIVING_BIT6 ... RECEIVING_BIT0:
74
data = i2c->buffer >> 7;
75
/* will end up in SENDING_ACK */
76
- i2c->state++;
77
+ bitbang_i2c_set_state(i2c, i2c->state + 1);
78
i2c->buffer <<= 1;
79
return bitbang_i2c_ret(i2c, data);
80
81
case SENDING_ACK:
82
- i2c->state = RECEIVING_BIT7;
83
if (data != 0) {
84
DPRINTF("NACKED\n");
85
- i2c->state = SENT_NACK;
86
+ bitbang_i2c_set_state(i2c, SENT_NACK);
87
i2c_nack(i2c->bus);
88
} else {
89
DPRINTF("ACKED\n");
90
+ bitbang_i2c_set_state(i2c, RECEIVING_BIT7);
91
}
92
return bitbang_i2c_ret(i2c, 1);
415
}
93
}
416
417
if (cpu_isar_feature(aa64_sve, cpu)) {
418
+ ret = kvm_arm_sve_set_vls(cs);
419
+ if (ret) {
420
+ return ret;
421
+ }
422
ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
423
if (ret) {
424
return ret;
425
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
426
index XXXXXXX..XXXXXXX 100644
427
--- a/tests/arm-cpu-features.c
428
+++ b/tests/arm-cpu-features.c
429
@@ -XXX,XX +XXX,XX @@ static QDict *resp_get_props(QDict *resp)
430
return qdict;
431
}
432
433
+static bool resp_get_feature(QDict *resp, const char *feature)
434
+{
435
+ QDict *props;
436
+
437
+ g_assert(resp);
438
+ g_assert(resp_has_props(resp));
439
+ props = resp_get_props(resp);
440
+ g_assert(qdict_get(props, feature));
441
+ return qdict_get_bool(props, feature);
442
+}
443
+
444
#define assert_has_feature(qts, cpu_type, feature) \
445
({ \
446
QDict *_resp = do_query_no_props(qts, cpu_type); \
447
@@ -XXX,XX +XXX,XX @@ static void sve_tests_sve_off(const void *data)
448
qtest_quit(qts);
449
}
450
451
+static void sve_tests_sve_off_kvm(const void *data)
452
+{
453
+ QTestState *qts;
454
+
455
+ qts = qtest_init(MACHINE "-accel kvm -cpu max,sve=off");
456
+
457
+ /*
458
+ * We don't know if this host supports SVE so we don't
459
+ * attempt to test enabling anything. We only test that
460
+ * everything is disabled (as it should be with sve=off)
461
+ * and that using sve<N>=off to explicitly disable vector
462
+ * lengths is OK too.
463
+ */
464
+ assert_sve_vls(qts, "max", 0, NULL);
465
+ assert_sve_vls(qts, "max", 0, "{ 'sve128': false }");
466
+
467
+ qtest_quit(qts);
468
+}
469
+
470
static void test_query_cpu_model_expansion(const void *data)
471
{
472
QTestState *qts;
473
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
474
qts = qtest_init(MACHINE "-accel kvm -cpu host");
475
476
if (g_str_equal(qtest_get_arch(), "aarch64")) {
477
+ bool kvm_supports_sve;
478
+ char max_name[8], name[8];
479
+ uint32_t max_vq, vq;
480
+ uint64_t vls;
481
+ QDict *resp;
482
+ char *error;
483
+
484
assert_has_feature(qts, "host", "aarch64");
485
assert_has_feature(qts, "host", "pmu");
486
487
- assert_has_feature(qts, "max", "sve");
488
-
489
assert_error(qts, "cortex-a15",
490
"We cannot guarantee the CPU type 'cortex-a15' works "
491
"with KVM on this host", NULL);
492
+
493
+ assert_has_feature(qts, "max", "sve");
494
+ resp = do_query_no_props(qts, "max");
495
+ kvm_supports_sve = resp_get_feature(resp, "sve");
496
+ vls = resp_get_sve_vls(resp);
497
+ qobject_unref(resp);
498
+
499
+ if (kvm_supports_sve) {
500
+ g_assert(vls != 0);
501
+ max_vq = 64 - __builtin_clzll(vls);
502
+ sprintf(max_name, "sve%d", max_vq * 128);
503
+
504
+ /* Enabling a supported length is of course fine. */
505
+ assert_sve_vls(qts, "max", vls, "{ %s: true }", max_name);
506
+
507
+ /* Get the next supported length smaller than max-vq. */
508
+ vq = 64 - __builtin_clzll(vls & ~BIT_ULL(max_vq - 1));
509
+ if (vq) {
510
+ /*
511
+ * We have at least one length smaller than max-vq,
512
+ * so we can disable max-vq.
513
+ */
514
+ assert_sve_vls(qts, "max", (vls & ~BIT_ULL(max_vq - 1)),
515
+ "{ %s: false }", max_name);
516
+
517
+ /*
518
+ * Smaller, supported vector lengths cannot be disabled
519
+ * unless all larger, supported vector lengths are also
520
+ * disabled.
521
+ */
522
+ sprintf(name, "sve%d", vq * 128);
523
+ error = g_strdup_printf("cannot disable %s", name);
524
+ assert_error(qts, "max", error,
525
+ "{ %s: true, %s: false }",
526
+ max_name, name);
527
+ g_free(error);
528
+ }
529
+
530
+ /*
531
+ * The smallest, supported vector length is required, because
532
+ * we need at least one vector length enabled.
533
+ */
534
+ vq = __builtin_ffsll(vls);
535
+ sprintf(name, "sve%d", vq * 128);
536
+ error = g_strdup_printf("cannot disable %s", name);
537
+ assert_error(qts, "max", error, "{ %s: false }", name);
538
+ g_free(error);
539
+
540
+ /* Get an unsupported length. */
541
+ for (vq = 1; vq <= max_vq; ++vq) {
542
+ if (!(vls & BIT_ULL(vq - 1))) {
543
+ break;
544
+ }
545
+ }
546
+ if (vq <= SVE_MAX_VQ) {
547
+ sprintf(name, "sve%d", vq * 128);
548
+ error = g_strdup_printf("cannot enable %s", name);
549
+ assert_error(qts, "max", error, "{ %s: true }", name);
550
+ g_free(error);
551
+ }
552
+ } else {
553
+ g_assert(vls == 0);
554
+ }
555
} else {
556
assert_has_not_feature(qts, "host", "aarch64");
557
assert_has_not_feature(qts, "host", "pmu");
558
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
559
if (kvm_available) {
560
qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
561
NULL, test_query_cpu_model_expansion_kvm);
562
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
563
+ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion/sve-off",
564
+ NULL, sve_tests_sve_off_kvm);
565
+ }
566
}
567
568
return g_test_run();
569
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
570
index XXXXXXX..XXXXXXX 100644
571
--- a/docs/arm-cpu-features.rst
572
+++ b/docs/arm-cpu-features.rst
573
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Dependencies and Constraints
574
575
1) At least one vector length must be enabled when `sve` is enabled.
576
577
- 2) If a vector length `N` is enabled, then all power-of-two vector
578
- lengths smaller than `N` must also be enabled. E.g. if `sve512`
579
- is enabled, then the 128-bit and 256-bit vector lengths must also
580
- be enabled.
581
+ 2) If a vector length `N` is enabled, then, when KVM is enabled, all
582
+ smaller, host supported vector lengths must also be enabled. If
583
+ KVM is not enabled, then only all the smaller, power-of-two vector
584
+ lengths must be enabled. E.g. with KVM if the host supports all
585
+ vector lengths up to 512-bits (128, 256, 384, 512), then if `sve512`
586
+ is enabled, the 128-bit vector length, 256-bit vector length, and
587
+ 384-bit vector length must also be enabled. Without KVM, the 384-bit
588
+ vector length would not be required.
589
+
590
+ 3) If KVM is enabled then only vector lengths that the host CPU type
591
+ support may be enabled. If SVE is not supported by the host, then
592
+ no `sve*` properties may be enabled.
593
594
SVE CPU Property Parsing Semantics
595
----------------------------------
596
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Parsing Semantics
597
an error is generated.
598
599
2) If SVE is enabled (`sve=on`), but no `sve<N>` CPU properties are
600
- provided, then all supported vector lengths are enabled, including
601
- the non-power-of-two lengths.
602
+ provided, then all supported vector lengths are enabled, which when
603
+ KVM is not in use means including the non-power-of-two lengths, and,
604
+ when KVM is in use, it means all vector lengths supported by the host
605
+ processor.
606
607
3) If SVE is enabled, then an error is generated when attempting to
608
disable the last enabled vector length (see constraint (1) of "SVE
609
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Parsing Semantics
610
has been explicitly disabled, then an error is generated (see
611
constraint (2) of "SVE CPU Property Dependencies and Constraints").
612
613
- 5) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`,
614
+ 5) When KVM is enabled, if the host does not support SVE, then an error
615
+ is generated when attempting to enable any `sve*` properties (see
616
+ constraint (3) of "SVE CPU Property Dependencies and Constraints").
617
+
618
+ 6) When KVM is enabled, if the host does support SVE, then an error is
619
+ generated when attempting to enable any vector lengths not supported
620
+ by the host (see constraint (3) of "SVE CPU Property Dependencies and
621
+ Constraints").
622
+
623
+ 7) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`,
624
CPU properties are set `on`, then the specified vector lengths are
625
disabled but the default for any unspecified lengths remains enabled.
626
- Disabling a power-of-two vector length also disables all vector
627
- lengths larger than the power-of-two length (see constraint (2) of
628
- "SVE CPU Property Dependencies and Constraints").
629
+ When KVM is not enabled, disabling a power-of-two vector length also
630
+ disables all vector lengths larger than the power-of-two length.
631
+ When KVM is enabled, then disabling any supported vector length also
632
+ disables all larger vector lengths (see constraint (2) of "SVE CPU
633
+ Property Dependencies and Constraints").
634
635
- 6) If one or more `sve<N>` CPU properties are set to `on`, then they
636
+ 8) If one or more `sve<N>` CPU properties are set to `on`, then they
637
are enabled and all unspecified lengths default to disabled, except
638
for the required lengths per constraint (2) of "SVE CPU Property
639
Dependencies and Constraints", which will even be auto-enabled if
640
they were not explicitly enabled.
641
642
- 7) If SVE was disabled (`sve=off`), allowing all vector lengths to be
643
+ 9) If SVE was disabled (`sve=off`), allowing all vector lengths to be
644
explicitly disabled (i.e. avoiding the error specified in (3) of
645
"SVE CPU Property Parsing Semantics"), then if later an `sve=on` is
646
provided an error will be generated. To avoid this error, one must
647
--
94
--
648
2.20.1
95
2.34.1
649
96
650
97
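A note on the vq encoding used throughout the SVE patches above: kvm_arm_sve_get_vls() records a supported vector length of N bits as bit (N/128 - 1) in the bitmap, and the sve<N> properties are named after N. A small standalone illustration (the host's set of supported lengths here is invented):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical host-supported vector lengths, in bits */
    unsigned lengths[] = { 128, 256, 384, 512 };
    uint64_t vq_map = 0;

    for (unsigned i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
        unsigned vq = lengths[i] / 128;          /* length in quadwords */
        vq_map |= UINT64_C(1) << (vq - 1);       /* record as bit vq - 1 */
    }

    /* "sve512" corresponds to vq = 4, i.e. bit 3 of the map */
    printf("map=0x%" PRIx64 ", sve512 %s\n", vq_map,
           (vq_map & (UINT64_C(1) << 3)) ? "supported" : "not supported");
    return 0;
}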
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
write_secondary_boot() is used in SMP configurations where the
3
Trace bitbang state machine changes with trace events.
4
CPU address space might not be the main System Bus.
5
The rom_add_blob_fixed_as() function allows us to specify an
6
address space. Use it to write each boot blob in the corresponding
7
CPU address space.
8
4
9
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
11
Message-id: 20191019234715.25750-11-f4bug@amsat.org
7
Acked-by: Corey Minyard <cminyard@mvista.com>
8
Message-id: 20230111085016.44551-5-philmd@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
10
---
14
hw/arm/raspi.c | 14 ++++++++------
11
hw/i2c/bitbang_i2c.c | 33 ++++++++++++++++++++++++++++-----
15
1 file changed, 8 insertions(+), 6 deletions(-)
12
hw/i2c/trace-events | 3 +++
13
2 files changed, 31 insertions(+), 5 deletions(-)
16
14
17
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
15
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
18
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/raspi.c
17
--- a/hw/i2c/bitbang_i2c.c
20
+++ b/hw/arm/raspi.c
18
+++ b/hw/i2c/bitbang_i2c.c
21
@@ -XXX,XX +XXX,XX @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info)
19
@@ -XXX,XX +XXX,XX @@
22
QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0
20
#include "hw/sysbus.h"
23
|| (BOARDSETUP_ADDR >> 4) >= 0x100);
21
#include "qemu/module.h"
24
22
#include "qom/object.h"
25
- rom_add_blob_fixed("raspi_smpboot", smpboot, sizeof(smpboot),
23
+#include "trace.h"
26
- info->smp_loader_start);
24
27
+ rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot),
25
//#define DEBUG_BITBANG_I2C
28
+ info->smp_loader_start,
26
29
+ arm_boot_address_space(cpu, info));
27
@@ -XXX,XX +XXX,XX @@ do { printf("bitbang_i2c: " fmt , ## __VA_ARGS__); } while (0)
28
#define DPRINTF(fmt, ...) do {} while(0)
29
#endif
30
31
+/* bitbang_i2c_state enum to name */
32
+static const char * const sname[] = {
33
+#define NAME(e) [e] = stringify(e)
34
+ NAME(STOPPED),
35
+ [SENDING_BIT7] = "SENDING_BIT7 (START)",
36
+ NAME(SENDING_BIT6),
37
+ NAME(SENDING_BIT5),
38
+ NAME(SENDING_BIT4),
39
+ NAME(SENDING_BIT3),
40
+ NAME(SENDING_BIT2),
41
+ NAME(SENDING_BIT1),
42
+ NAME(SENDING_BIT0),
43
+ NAME(WAITING_FOR_ACK),
44
+ [RECEIVING_BIT7] = "RECEIVING_BIT7 (ACK)",
45
+ NAME(RECEIVING_BIT6),
46
+ NAME(RECEIVING_BIT5),
47
+ NAME(RECEIVING_BIT4),
48
+ NAME(RECEIVING_BIT3),
49
+ NAME(RECEIVING_BIT2),
50
+ NAME(RECEIVING_BIT1),
51
+ NAME(RECEIVING_BIT0),
52
+ NAME(SENDING_ACK),
53
+ NAME(SENT_NACK)
54
+#undef NAME
55
+};
56
+
57
static void bitbang_i2c_set_state(bitbang_i2c_interface *i2c,
58
bitbang_i2c_state state)
59
{
60
+ trace_bitbang_i2c_state(sname[i2c->state], sname[state]);
61
i2c->state = state;
30
}
62
}
31
63
32
static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info)
64
static void bitbang_i2c_enter_stop(bitbang_i2c_interface *i2c)
33
{
65
{
34
+ AddressSpace *as = arm_boot_address_space(cpu, info);
66
- DPRINTF("STOP\n");
35
/* Unlike the AArch32 version we don't need to call the board setup hook.
67
if (i2c->current_addr >= 0)
36
* The mechanism for doing the spin-table is also entirely different.
68
i2c_end_transfer(i2c->bus);
37
* We must have four 64-bit fields at absolute addresses
69
i2c->current_addr = -1;
38
@@ -XXX,XX +XXX,XX @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info)
70
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
39
0, 0, 0, 0
71
return bitbang_i2c_nop(i2c);
40
};
72
}
41
73
if (level == 0) {
42
- rom_add_blob_fixed("raspi_smpboot", smpboot, sizeof(smpboot),
74
- DPRINTF("START\n");
43
- info->smp_loader_start);
75
/* START condition. */
44
- rom_add_blob_fixed("raspi_spintables", spintables, sizeof(spintables),
76
bitbang_i2c_set_state(i2c, SENDING_BIT7);
45
- SPINTABLE_ADDR);
77
i2c->current_addr = -1;
46
+ rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot),
78
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
47
+ info->smp_loader_start, as);
79
/* NACK (either addressing a nonexistent device, or the
48
+ rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables),
80
* device we were sending to decided to NACK us).
49
+ SPINTABLE_ADDR, as);
81
*/
50
}
82
- DPRINTF("Got NACK\n");
51
83
bitbang_i2c_set_state(i2c, SENT_NACK);
52
static void write_board_setup(ARMCPU *cpu, const struct arm_boot_info *info)
84
bitbang_i2c_enter_stop(i2c);
85
return bitbang_i2c_ret(i2c, 1);
86
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
87
88
case SENDING_ACK:
89
if (data != 0) {
90
- DPRINTF("NACKED\n");
91
bitbang_i2c_set_state(i2c, SENT_NACK);
92
i2c_nack(i2c->bus);
93
} else {
94
- DPRINTF("ACKED\n");
95
bitbang_i2c_set_state(i2c, RECEIVING_BIT7);
96
}
97
return bitbang_i2c_ret(i2c, 1);
98
diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events
99
index XXXXXXX..XXXXXXX 100644
100
--- a/hw/i2c/trace-events
101
+++ b/hw/i2c/trace-events
102
@@ -XXX,XX +XXX,XX @@
103
# See docs/devel/tracing.rst for syntax documentation.
104
105
+# bitbang_i2c.c
106
+bitbang_i2c_state(const char *old_state, const char *new_state) "state %s -> %s"
107
+
108
# core.c
109
110
i2c_event(const char *event, uint8_t address) "%s(addr:0x%02x)"
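
The sname[] table added to hw/i2c/bitbang_i2c.c above pairs C99 designated
initializers with QEMU's stringify() macro so that each state traces under its
source-level name, with hand-written strings for the START/ACK aliases. Below is
a self-contained sketch of the same pattern; demo_state, demo_sname and DEMO_STR()
are local stand-ins, not QEMU identifiers.

#include <stdio.h>

#define DEMO_STR_(x) #x
#define DEMO_STR(x)  DEMO_STR_(x)   /* stand-in for QEMU's stringify() */

typedef enum {
    STOPPED,
    SENDING_BIT7,
    WAITING_FOR_ACK,
} demo_state;

static const char *const demo_sname[] = {
#define NAME(e) [e] = DEMO_STR(e)
    NAME(STOPPED),
    /* states that double as START/ACK markers get explicit strings */
    [SENDING_BIT7] = "SENDING_BIT7 (START)",
    NAME(WAITING_FOR_ACK),
#undef NAME
};

int main(void)
{
    /* prints: state STOPPED -> SENDING_BIT7 (START) */
    printf("state %s -> %s\n", demo_sname[STOPPED], demo_sname[SENDING_BIT7]);
    return 0;
}
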
1
From: Andrew Jones <drjones@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
Enable SVE in the KVM guest when the 'max' cpu type is configured
3
Convert the remaining DPRINTF debug macro uses to tracepoints.
4
and KVM supports it. KVM SVE requires use of the new finalize
5
vcpu ioctl, so we add that now too. For starters SVE can only be
6
turned on or off, getting all vector lengths the host CPU supports
7
when on. We'll add the other SVE CPU properties in later patches.
8
4
9
Signed-off-by: Andrew Jones <drjones@redhat.com>
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Eric Auger <eric.auger@redhat.com>
7
Acked-by: Corey Minyard <cminyard@mvista.com>
12
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
8
Message-id: 20230111085016.44551-6-philmd@linaro.org
13
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
14
Message-id: 20191024121808.9612-7-drjones@redhat.com
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
10
---
17
target/arm/kvm_arm.h | 27 +++++++++++++++++++++++++++
11
hw/i2c/bitbang_i2c.c | 18 ++++++------------
18
target/arm/cpu64.c | 17 ++++++++++++++---
12
hw/i2c/trace-events | 4 ++++
19
target/arm/kvm.c | 5 +++++
13
2 files changed, 10 insertions(+), 12 deletions(-)
20
target/arm/kvm64.c | 20 +++++++++++++++++++-
21
tests/arm-cpu-features.c | 4 ++++
22
5 files changed, 69 insertions(+), 4 deletions(-)
23
14
24
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
15
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
25
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/kvm_arm.h
17
--- a/hw/i2c/bitbang_i2c.c
27
+++ b/target/arm/kvm_arm.h
18
+++ b/hw/i2c/bitbang_i2c.c
28
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@
29
*/
20
#include "qom/object.h"
30
int kvm_arm_vcpu_init(CPUState *cs);
21
#include "trace.h"
31
22
32
+/**
23
-//#define DEBUG_BITBANG_I2C
33
+ * kvm_arm_vcpu_finalize
24
-
34
+ * @cs: CPUState
25
-#ifdef DEBUG_BITBANG_I2C
35
+ * @feature: int
26
-#define DPRINTF(fmt, ...) \
36
+ *
27
-do { printf("bitbang_i2c: " fmt , ## __VA_ARGS__); } while (0)
37
+ * Finalizes the configuration of the specified VCPU feature by
28
-#else
38
+ * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
29
-#define DPRINTF(fmt, ...) do {} while(0)
39
+ * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
30
-#endif
40
+ * KVM's API documentation.
31
41
+ *
32
/* bitbang_i2c_state enum to name */
42
+ * Returns: 0 if success else < 0 error code
33
static const char * const sname[] = {
43
+ */
34
@@ -XXX,XX +XXX,XX @@ static void bitbang_i2c_enter_stop(bitbang_i2c_interface *i2c)
44
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature);
35
/* Set device data pin. */
36
static int bitbang_i2c_ret(bitbang_i2c_interface *i2c, int level)
37
{
38
+ trace_bitbang_i2c_data(i2c->last_clock, i2c->last_data,
39
+ i2c->device_out, level);
40
i2c->device_out = level;
41
- //DPRINTF("%d %d %d\n", i2c->last_clock, i2c->last_data, i2c->device_out);
45
+
42
+
46
/**
43
return level & i2c->last_data;
47
* kvm_arm_register_device:
48
* @mr: memory region for this device
49
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_aarch32_supported(CPUState *cs);
50
*/
51
bool kvm_arm_pmu_supported(CPUState *cs);
52
53
+/**
54
+ * bool kvm_arm_sve_supported:
55
+ * @cs: CPUState
56
+ *
57
+ * Returns true if the KVM VCPU can enable SVE and false otherwise.
58
+ */
59
+bool kvm_arm_sve_supported(CPUState *cs);
60
+
61
/**
62
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
63
* IPA address space supported by KVM
64
@@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_pmu_supported(CPUState *cs)
65
return false;
66
}
44
}
67
45
68
+static inline bool kvm_arm_sve_supported(CPUState *cs)
46
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
69
+{
47
70
+ return false;
48
if (i2c->current_addr < 0) {
71
+}
49
i2c->current_addr = i2c->buffer;
72
+
50
- DPRINTF("Address 0x%02x\n", i2c->current_addr);
73
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
51
+ trace_bitbang_i2c_addr(i2c->current_addr);
74
{
52
ret = i2c_start_transfer(i2c->bus, i2c->current_addr >> 1,
75
return -ENOENT;
53
i2c->current_addr & 1);
76
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
54
} else {
55
- DPRINTF("Sent 0x%02x\n", i2c->buffer);
56
+ trace_bitbang_i2c_send(i2c->buffer);
57
ret = i2c_send(i2c->bus, i2c->buffer);
58
}
59
if (ret) {
60
@@ -XXX,XX +XXX,XX @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
61
}
62
case RECEIVING_BIT7:
63
i2c->buffer = i2c_recv(i2c->bus);
64
- DPRINTF("RX byte 0x%02x\n", i2c->buffer);
65
+ trace_bitbang_i2c_recv(i2c->buffer);
66
/* Fall through... */
67
case RECEIVING_BIT6 ... RECEIVING_BIT0:
68
data = i2c->buffer >> 7;
69
diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events
77
index XXXXXXX..XXXXXXX 100644
70
index XXXXXXX..XXXXXXX 100644
78
--- a/target/arm/cpu64.c
71
--- a/hw/i2c/trace-events
79
+++ b/target/arm/cpu64.c
72
+++ b/hw/i2c/trace-events
80
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
73
@@ -XXX,XX +XXX,XX @@
81
return;
74
82
}
75
# bitbang_i2c.c
83
76
bitbang_i2c_state(const char *old_state, const char *new_state) "state %s -> %s"
84
+ if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
77
+bitbang_i2c_addr(uint8_t addr) "Address 0x%02x"
85
+ error_setg(errp, "'sve' feature not supported by KVM on this host");
78
+bitbang_i2c_send(uint8_t byte) "TX byte 0x%02x"
86
+ return;
79
+bitbang_i2c_recv(uint8_t byte) "RX byte 0x%02x"
87
+ }
80
+bitbang_i2c_data(unsigned dat, unsigned clk, unsigned old_out, unsigned new_out) "dat %u clk %u out %u -> %u"
88
+
81
89
t = cpu->isar.id_aa64pfr0;
82
# core.c
90
t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
83
91
cpu->isar.id_aa64pfr0 = t;
92
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
93
{
94
ARMCPU *cpu = ARM_CPU(obj);
95
uint32_t vq;
96
+ uint64_t t;
97
98
if (kvm_enabled()) {
99
kvm_arm_set_cpu_features_from_host(cpu);
100
+ if (kvm_arm_sve_supported(CPU(cpu))) {
101
+ t = cpu->isar.id_aa64pfr0;
102
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
103
+ cpu->isar.id_aa64pfr0 = t;
104
+ }
105
} else {
106
- uint64_t t;
107
uint32_t u;
108
aarch64_a57_initfn(obj);
109
110
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
111
112
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
113
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
114
- object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
115
- cpu_arm_set_sve, NULL, NULL, &error_fatal);
116
117
for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
118
char name[8];
119
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
120
cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
121
}
122
}
123
+
124
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
125
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
126
}
127
128
struct ARMCPUInfo {
129
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
130
index XXXXXXX..XXXXXXX 100644
131
--- a/target/arm/kvm.c
132
+++ b/target/arm/kvm.c
133
@@ -XXX,XX +XXX,XX @@ int kvm_arm_vcpu_init(CPUState *cs)
134
return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
135
}
136
137
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
138
+{
139
+ return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
140
+}
141
+
142
void kvm_arm_init_serror_injection(CPUState *cs)
143
{
144
cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
145
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/target/arm/kvm64.c
148
+++ b/target/arm/kvm64.c
149
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_aarch32_supported(CPUState *cpu)
150
return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
151
}
152
153
+bool kvm_arm_sve_supported(CPUState *cpu)
154
+{
155
+ KVMState *s = KVM_STATE(current_machine->accelerator);
156
+
157
+ return kvm_check_extension(s, KVM_CAP_ARM_SVE);
158
+}
159
+
160
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
161
162
int kvm_arch_init_vcpu(CPUState *cs)
163
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
164
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
165
}
166
if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
167
- cpu->has_pmu = false;
168
+ cpu->has_pmu = false;
169
}
170
if (cpu->has_pmu) {
171
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
172
} else {
173
unset_feature(&env->features, ARM_FEATURE_PMU);
174
}
175
+ if (cpu_isar_feature(aa64_sve, cpu)) {
176
+ assert(kvm_arm_sve_supported(cs));
177
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
178
+ }
179
180
/* Do KVM_ARM_VCPU_INIT ioctl */
181
ret = kvm_arm_vcpu_init(cs);
182
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
183
return ret;
184
}
185
186
+ if (cpu_isar_feature(aa64_sve, cpu)) {
187
+ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
188
+ if (ret) {
189
+ return ret;
190
+ }
191
+ }
192
+
193
/*
194
* When KVM is in use, PSCI is emulated in-kernel and not by qemu.
195
* Currently KVM has its own idea about MPIDR assignment, so we
196
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
197
index XXXXXXX..XXXXXXX 100644
198
--- a/tests/arm-cpu-features.c
199
+++ b/tests/arm-cpu-features.c
200
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
201
assert_has_feature(qts, "host", "aarch64");
202
assert_has_feature(qts, "host", "pmu");
203
204
+ assert_has_feature(qts, "max", "sve");
205
+
206
assert_error(qts, "cortex-a15",
207
"We cannot guarantee the CPU type 'cortex-a15' works "
208
"with KVM on this host", NULL);
209
} else {
210
assert_has_not_feature(qts, "host", "aarch64");
211
assert_has_not_feature(qts, "host", "pmu");
212
+
213
+ assert_has_not_feature(qts, "max", "sve");
214
}
215
216
qtest_quit(qts);
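
Condensing the kvm64.c hunks above, the ordering this patch relies on is: request
the SVE feature bit in kvm_init_features, perform KVM_ARM_VCPU_INIT, then issue
KVM_ARM_VCPU_FINALIZE for SVE before any vector register access. The sketch below
only mirrors that control flow; the constants and helpers are stubs, not the real
KVM ABI.

#include <stdio.h>

/* Stub values -- purely for illustrating the call order, not the KVM ABI. */
#define DEMO_FEATURE_SVE_BIT  (1u << 0)
#define DEMO_FINALIZE_SVE     0

static int demo_vcpu_init(unsigned int features)
{
    printf("KVM_ARM_VCPU_INIT      features=0x%x\n", features);
    return 0;
}

static int demo_vcpu_finalize(int what)
{
    printf("KVM_ARM_VCPU_FINALIZE  what=%d\n", what);
    return 0;
}

int main(void)
{
    int sve_supported = 1;      /* i.e. the KVM_CAP_ARM_SVE probe succeeded */
    unsigned int features = 0;
    int ret;

    if (sve_supported) {
        features |= DEMO_FEATURE_SVE_BIT;        /* 1. request the feature */
    }
    ret = demo_vcpu_init(features);              /* 2. init the vcpu */
    if (!ret && sve_supported) {
        ret = demo_vcpu_finalize(DEMO_FINALIZE_SVE);  /* 3. finalize SVE */
    }
    return ret ? 1 : 0;
}
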
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
As we are going to add more core-specific fields, add a 'cpu'
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
structure and move the ARMCPU field there as 'core'.
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
5
Message-id: 20230110082508.24038-2-philmd@linaro.org
6
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20191019234715.25750-7-f4bug@amsat.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
7
---
11
include/hw/arm/bcm2836.h | 4 +++-
8
hw/i2c/versatile_i2c.c | 4 ++--
12
hw/arm/bcm2836.c | 26 ++++++++++++++------------
9
1 file changed, 2 insertions(+), 2 deletions(-)
13
2 files changed, 17 insertions(+), 13 deletions(-)
14
10
15
diff --git a/include/hw/arm/bcm2836.h b/include/hw/arm/bcm2836.h
11
diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/arm/bcm2836.h
13
--- a/hw/i2c/versatile_i2c.c
18
+++ b/include/hw/arm/bcm2836.h
14
+++ b/hw/i2c/versatile_i2c.c
19
@@ -XXX,XX +XXX,XX @@ typedef struct BCM283XState {
15
@@ -XXX,XX +XXX,XX @@ REG32(CONTROL_CLR, 4)
20
char *cpu_type;
16
static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
21
uint32_t enabled_cpus;
17
unsigned size)
22
18
{
23
- ARMCPU cpus[BCM283X_NCPUS];
19
- VersatileI2CState *s = (VersatileI2CState *)opaque;
24
+ struct {
20
+ VersatileI2CState *s = opaque;
25
+ ARMCPU core;
21
26
+ } cpu[BCM283X_NCPUS];
22
switch (offset) {
27
BCM2836ControlState control;
23
case A_CONTROL_SET:
28
BCM2835PeripheralState peripherals;
24
@@ -XXX,XX +XXX,XX @@ static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
29
} BCM283XState;
25
static void versatile_i2c_write(void *opaque, hwaddr offset,
30
diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c
26
uint64_t value, unsigned size)
31
index XXXXXXX..XXXXXXX 100644
27
{
32
--- a/hw/arm/bcm2836.c
28
- VersatileI2CState *s = (VersatileI2CState *)opaque;
33
+++ b/hw/arm/bcm2836.c
29
+ VersatileI2CState *s = opaque;
34
@@ -XXX,XX +XXX,XX @@ static void bcm2836_init(Object *obj)
30
35
int n;
31
switch (offset) {
36
32
case A_CONTROL_SET:
37
for (n = 0; n < BCM283X_NCPUS; n++) {
38
- object_initialize_child(obj, "cpu[*]", &s->cpus[n], sizeof(s->cpus[n]),
39
- info->cpu_type, &error_abort, NULL);
40
+ object_initialize_child(obj, "cpu[*]", &s->cpu[n].core,
41
+ sizeof(s->cpu[n].core), info->cpu_type,
42
+ &error_abort, NULL);
43
}
44
45
sysbus_init_child_obj(obj, "control", &s->control, sizeof(s->control),
46
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
47
48
for (n = 0; n < BCM283X_NCPUS; n++) {
49
/* TODO: this should be converted to a property of ARM_CPU */
50
- s->cpus[n].mp_affinity = (info->clusterid << 8) | n;
51
+ s->cpu[n].core.mp_affinity = (info->clusterid << 8) | n;
52
53
/* set periphbase/CBAR value for CPU-local registers */
54
- object_property_set_int(OBJECT(&s->cpus[n]),
55
+ object_property_set_int(OBJECT(&s->cpu[n].core),
56
info->peri_base,
57
"reset-cbar", &err);
58
if (err) {
59
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
60
}
61
62
/* start powered off if not enabled */
63
- object_property_set_bool(OBJECT(&s->cpus[n]), n >= s->enabled_cpus,
64
+ object_property_set_bool(OBJECT(&s->cpu[n].core), n >= s->enabled_cpus,
65
"start-powered-off", &err);
66
if (err) {
67
error_propagate(errp, err);
68
return;
69
}
70
71
- object_property_set_bool(OBJECT(&s->cpus[n]), true, "realized", &err);
72
+ object_property_set_bool(OBJECT(&s->cpu[n].core), true,
73
+ "realized", &err);
74
if (err) {
75
error_propagate(errp, err);
76
return;
77
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
78
79
/* Connect irq/fiq outputs from the interrupt controller. */
80
qdev_connect_gpio_out_named(DEVICE(&s->control), "irq", n,
81
- qdev_get_gpio_in(DEVICE(&s->cpus[n]), ARM_CPU_IRQ));
82
+ qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_IRQ));
83
qdev_connect_gpio_out_named(DEVICE(&s->control), "fiq", n,
84
- qdev_get_gpio_in(DEVICE(&s->cpus[n]), ARM_CPU_FIQ));
85
+ qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_FIQ));
86
87
/* Connect timers from the CPU to the interrupt controller */
88
- qdev_connect_gpio_out(DEVICE(&s->cpus[n]), GTIMER_PHYS,
89
+ qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_PHYS,
90
qdev_get_gpio_in_named(DEVICE(&s->control), "cntpnsirq", n));
91
- qdev_connect_gpio_out(DEVICE(&s->cpus[n]), GTIMER_VIRT,
92
+ qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_VIRT,
93
qdev_get_gpio_in_named(DEVICE(&s->control), "cntvirq", n));
94
- qdev_connect_gpio_out(DEVICE(&s->cpus[n]), GTIMER_HYP,
95
+ qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_HYP,
96
qdev_get_gpio_in_named(DEVICE(&s->control), "cnthpirq", n));
97
- qdev_connect_gpio_out(DEVICE(&s->cpus[n]), GTIMER_SEC,
98
+ qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_SEC,
99
qdev_get_gpio_in_named(DEVICE(&s->control), "cntpsirq", n));
100
}
101
}
102
--
33
--
103
2.20.1
34
2.34.1
104
35
105
36
diff view generated by jsdifflib
1
From: Cédric Le Goater <clg@kaod.org>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
Signed-off-by: Cédric Le Goater <clg@kaod.org>
3
In order to rename TYPE_VERSATILE_I2C as TYPE_ARM_SBCON_I2C
4
Reviewed-by: Joel Stanley <joel@jms.id.au>
4
(the formal ARM naming), start renaming its state.
5
Message-id: 20191023130455.1347-3-clg@kaod.org
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20230110082508.24038-3-philmd@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
10
---
8
include/hw/arm/aspeed.h | 1 +
11
include/hw/i2c/arm_sbcon_i2c.h | 3 +--
9
hw/arm/aspeed.c | 23 +++++++++++++++++++++++
12
hw/i2c/versatile_i2c.c | 10 +++++-----
10
2 files changed, 24 insertions(+)
13
2 files changed, 6 insertions(+), 7 deletions(-)
11
14
12
diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h
15
diff --git a/include/hw/i2c/arm_sbcon_i2c.h b/include/hw/i2c/arm_sbcon_i2c.h
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/include/hw/arm/aspeed.h
17
--- a/include/hw/i2c/arm_sbcon_i2c.h
15
+++ b/include/hw/arm/aspeed.h
18
+++ b/include/hw/i2c/arm_sbcon_i2c.h
16
@@ -XXX,XX +XXX,XX @@ typedef struct AspeedBoardConfig {
19
@@ -XXX,XX +XXX,XX @@
17
const char *desc;
20
#define TYPE_ARM_SBCON_I2C TYPE_VERSATILE_I2C
18
const char *soc_name;
21
19
uint32_t hw_strap1;
22
typedef struct ArmSbconI2CState ArmSbconI2CState;
20
+ uint32_t hw_strap2;
23
-DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, ARM_SBCON_I2C,
21
const char *fmc_model;
24
- TYPE_ARM_SBCON_I2C)
22
const char *spi_model;
25
+DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, ARM_SBCON_I2C, TYPE_ARM_SBCON_I2C)
23
uint32_t num_cs;
26
24
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
27
struct ArmSbconI2CState {
28
/*< private >*/
29
diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c
25
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/arm/aspeed.c
31
--- a/hw/i2c/versatile_i2c.c
27
+++ b/hw/arm/aspeed.c
32
+++ b/hw/i2c/versatile_i2c.c
28
@@ -XXX,XX +XXX,XX @@ struct AspeedBoardState {
33
@@ -XXX,XX +XXX,XX @@
29
/* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */
34
#include "qom/object.h"
30
#define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1
35
31
36
typedef ArmSbconI2CState VersatileI2CState;
32
+/* AST2600 evb hardware value */
37
-DECLARE_INSTANCE_CHECKER(VersatileI2CState, VERSATILE_I2C,
33
+#define AST2600_EVB_HW_STRAP1 0x000000C0
38
+DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, VERSATILE_I2C,
34
+#define AST2600_EVB_HW_STRAP2 0x00000003
39
TYPE_VERSATILE_I2C)
35
+
40
36
/*
41
37
* The max ram region is for firmwares that scan the address space
42
@@ -XXX,XX +XXX,XX @@ REG32(CONTROL_CLR, 4)
38
* with load/store to guess how much RAM the SoC has.
43
static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
39
@@ -XXX,XX +XXX,XX @@ static void aspeed_board_init(MachineState *machine,
44
unsigned size)
40
&error_abort);
41
object_property_set_int(OBJECT(&bmc->soc), cfg->hw_strap1, "hw-strap1",
42
&error_abort);
43
+ object_property_set_int(OBJECT(&bmc->soc), cfg->hw_strap2, "hw-strap2",
44
+ &error_abort);
45
object_property_set_int(OBJECT(&bmc->soc), cfg->num_cs, "num-cs",
46
&error_abort);
47
object_property_set_int(OBJECT(&bmc->soc), machine->smp.cpus, "num-cpus",
48
@@ -XXX,XX +XXX,XX @@ static void ast2500_evb_i2c_init(AspeedBoardState *bmc)
49
i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), "ds1338", 0x32);
50
}
51
52
+static void ast2600_evb_i2c_init(AspeedBoardState *bmc)
53
+{
54
+ /* Start with some devices on our I2C busses */
55
+ ast2500_evb_i2c_init(bmc);
56
+}
57
+
58
static void romulus_bmc_i2c_init(AspeedBoardState *bmc)
59
{
45
{
60
AspeedSoCState *soc = &bmc->soc;
46
- VersatileI2CState *s = opaque;
61
@@ -XXX,XX +XXX,XX @@ static const AspeedBoardConfig aspeed_boards[] = {
47
+ ArmSbconI2CState *s = opaque;
62
.num_cs = 2,
48
63
.i2c_init = witherspoon_bmc_i2c_init,
49
switch (offset) {
64
.ram = 512 * MiB,
50
case A_CONTROL_SET:
65
+ }, {
51
@@ -XXX,XX +XXX,XX @@ static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
66
+ .name = MACHINE_TYPE_NAME("ast2600-evb"),
52
static void versatile_i2c_write(void *opaque, hwaddr offset,
67
+ .desc = "Aspeed AST2600 EVB (Cortex A7)",
53
uint64_t value, unsigned size)
68
+ .soc_name = "ast2600-a0",
54
{
69
+ .hw_strap1 = AST2600_EVB_HW_STRAP1,
55
- VersatileI2CState *s = opaque;
70
+ .hw_strap2 = AST2600_EVB_HW_STRAP2,
56
+ ArmSbconI2CState *s = opaque;
71
+ .fmc_model = "w25q512jv",
57
72
+ .spi_model = "mx66u51235f",
58
switch (offset) {
73
+ .num_cs = 1,
59
case A_CONTROL_SET:
74
+ .i2c_init = ast2600_evb_i2c_init,
60
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps versatile_i2c_ops = {
75
+ .ram = 1 * GiB,
61
static void versatile_i2c_init(Object *obj)
76
},
62
{
63
DeviceState *dev = DEVICE(obj);
64
- VersatileI2CState *s = VERSATILE_I2C(obj);
65
+ ArmSbconI2CState *s = VERSATILE_I2C(obj);
66
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
67
I2CBus *bus;
68
69
@@ -XXX,XX +XXX,XX @@ static void versatile_i2c_init(Object *obj)
70
static const TypeInfo versatile_i2c_info = {
71
.name = TYPE_VERSATILE_I2C,
72
.parent = TYPE_SYS_BUS_DEVICE,
73
- .instance_size = sizeof(VersatileI2CState),
74
+ .instance_size = sizeof(ArmSbconI2CState),
75
.instance_init = versatile_i2c_init,
77
};
76
};
78
77
79
--
78
--
80
2.20.1
79
2.34.1
81
80
82
81
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Create a function to compute the values of the TBFLAG_ANY bits
4
that will be cached. For now, the env->hflags variable is not
5
used, and the results are fed back to cpu_get_tb_cpu_state.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20191023150057.25731-2-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/cpu.h | 29 ++++++++++++++++++-----------
13
target/arm/helper.c | 26 +++++++++++++++++++-------
14
2 files changed, 37 insertions(+), 18 deletions(-)
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
21
uint32_t pstate;
22
uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
23
24
+ /* Cached TBFLAGS state. See below for which bits are included. */
25
+ uint32_t hflags;
26
+
27
/* Frequently accessed CPSR bits are stored separately for efficiency.
28
This contains all the other bits. Use cpsr_{read,write} to access
29
the whole CPSR. */
30
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
31
32
#include "exec/cpu-all.h"
33
34
-/* Bit usage in the TB flags field: bit 31 indicates whether we are
35
+/*
36
+ * Bit usage in the TB flags field: bit 31 indicates whether we are
37
* in 32 or 64 bit mode. The meaning of the other bits depends on that.
38
* We put flags which are shared between 32 and 64 bit mode at the top
39
* of the word, and flags which apply to only one mode at the bottom.
40
+ *
41
+ * Unless otherwise noted, these bits are cached in env->hflags.
42
*/
43
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
44
FIELD(TBFLAG_ANY, MMUIDX, 28, 3)
45
FIELD(TBFLAG_ANY, SS_ACTIVE, 27, 1)
46
-FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
47
+FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1) /* Not cached. */
48
/* Target EL if we take a floating-point-disabled exception */
49
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
50
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
51
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
52
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
53
54
/* Bit usage when in AArch32 state: */
55
-FIELD(TBFLAG_A32, THUMB, 0, 1)
56
-FIELD(TBFLAG_A32, VECLEN, 1, 3)
57
-FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)
58
+FIELD(TBFLAG_A32, THUMB, 0, 1) /* Not cached. */
59
+FIELD(TBFLAG_A32, VECLEN, 1, 3) /* Not cached. */
60
+FIELD(TBFLAG_A32, VECSTRIDE, 4, 2) /* Not cached. */
61
/*
62
* We store the bottom two bits of the CPAR as TB flags and handle
63
* checks on the other bits at runtime. This shares the same bits as
64
* VECSTRIDE, which is OK as no XScale CPU has VFP.
65
+ * Not cached, because VECLEN+VECSTRIDE are not cached.
66
*/
67
FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
68
/*
69
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
70
* the same thing as the current security state of the processor!
71
*/
72
FIELD(TBFLAG_A32, NS, 6, 1)
73
-FIELD(TBFLAG_A32, VFPEN, 7, 1)
74
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8)
75
+FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Not cached. */
76
+FIELD(TBFLAG_A32, CONDEXEC, 8, 8) /* Not cached. */
77
FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
78
/* For M profile only, set if FPCCR.LSPACT is set */
79
-FIELD(TBFLAG_A32, LSPACT, 18, 1)
80
+FIELD(TBFLAG_A32, LSPACT, 18, 1) /* Not cached. */
81
/* For M profile only, set if we must create a new FP context */
82
-FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1)
83
+FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1) /* Not cached. */
84
/* For M profile only, set if FPCCR.S does not match current security state */
85
-FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1)
86
+FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1) /* Not cached. */
87
/* For M profile only, Handler (ie not Thread) mode */
88
FIELD(TBFLAG_A32, HANDLER, 21, 1)
89
/* For M profile only, whether we should generate stack-limit checks */
90
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
91
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
92
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
93
FIELD(TBFLAG_A64, BT, 9, 1)
94
-FIELD(TBFLAG_A64, BTYPE, 10, 2)
95
+FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */
96
FIELD(TBFLAG_A64, TBID, 12, 2)
97
98
static inline bool bswap_code(bool sctlr_b)
99
diff --git a/target/arm/helper.c b/target/arm/helper.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/target/arm/helper.c
102
+++ b/target/arm/helper.c
103
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
104
}
105
#endif
106
107
+static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
108
+ ARMMMUIdx mmu_idx, uint32_t flags)
109
+{
110
+ flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
111
+ flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
112
+ arm_to_core_mmu_idx(mmu_idx));
113
+
114
+ if (arm_cpu_data_is_big_endian(env)) {
115
+ flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
116
+ }
117
+ if (arm_singlestep_active(env)) {
118
+ flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
119
+ }
120
+ return flags;
121
+}
122
+
123
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
124
target_ulong *cs_base, uint32_t *pflags)
125
{
126
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
127
}
128
}
129
130
- flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
131
+ flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
132
133
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
134
* states defined in the ARM ARM for software singlestep:
135
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
136
* 0 x Inactive (the TB flag for SS is always 0)
137
* 1 0 Active-pending
138
* 1 1 Active-not-pending
139
+ * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
140
*/
141
- if (arm_singlestep_active(env)) {
142
- flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
143
+ if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE)) {
144
if (is_a64(env)) {
145
if (env->pstate & PSTATE_SS) {
146
flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
147
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
148
}
149
}
150
}
151
- if (arm_cpu_data_is_big_endian(env)) {
152
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
153
- }
154
- flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
155
156
if (arm_v7m_is_handler_mode(env)) {
157
flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
158
--
159
2.20.1
160
161
diff view generated by jsdifflib
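
rebuild_hflags_common() above composes the cached flag word purely with FIELD_DP32()
deposits into named bit ranges (for example SS_ACTIVE at bit 27 and MMUIDX at bits
28..30), and cpu_get_tb_cpu_state() reads them back with FIELD_EX32(). A
self-contained sketch of that deposit/extract idiom follows; demo_dp32() and
demo_ex32() are local stand-ins, not QEMU's registerfields.h macros.

#include <stdint.h>
#include <stdio.h>

/* Deposit or extract a 'len'-bit field starting at bit 'shift'. */
static uint32_t demo_dp32(uint32_t word, unsigned shift, unsigned len,
                          uint32_t val)
{
    uint32_t mask = ((1u << len) - 1) << shift;
    return (word & ~mask) | ((val << shift) & mask);
}

static uint32_t demo_ex32(uint32_t word, unsigned shift, unsigned len)
{
    return (word >> shift) & ((1u << len) - 1);
}

int main(void)
{
    uint32_t flags = 0;

    flags = demo_dp32(flags, 27, 1, 1);   /* a 1-bit SS_ACTIVE-like field */
    flags = demo_dp32(flags, 28, 3, 5);   /* a 3-bit MMUIDX-like field */

    printf("flags=0x%08x field@27=%u field@28=%u\n",
           flags, demo_ex32(flags, 27, 1), demo_ex32(flags, 28, 3));
    return 0;
}
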
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Create a function to compute the values of the TBFLAG_A64 bits
4
that will be cached. For now, the env->hflags variable is not
5
used, and the results are fed back to cpu_get_tb_cpu_state.
6
7
Note that not all BTI related flags are cached, so we have to
8
test the BTI feature twice -- once for those bits moved out to
9
rebuild_hflags_a64 and once for those bits that remain in
10
cpu_get_tb_cpu_state.
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20191023150057.25731-3-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
target/arm/helper.c | 131 +++++++++++++++++++++++---------------------
18
1 file changed, 69 insertions(+), 62 deletions(-)
19
20
diff --git a/target/arm/helper.c b/target/arm/helper.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.c
23
+++ b/target/arm/helper.c
24
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
25
return flags;
26
}
27
28
+static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
29
+ ARMMMUIdx mmu_idx)
30
+{
31
+ ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
32
+ ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
33
+ uint32_t flags = 0;
34
+ uint64_t sctlr;
35
+ int tbii, tbid;
36
+
37
+ flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
38
+
39
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
40
+ if (regime_el(env, stage1) < 2) {
41
+ ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
42
+ tbid = (p1.tbi << 1) | p0.tbi;
43
+ tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
44
+ } else {
45
+ tbid = p0.tbi;
46
+ tbii = tbid & !p0.tbid;
47
+ }
48
+
49
+ flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
50
+ flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
51
+
52
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
53
+ int sve_el = sve_exception_el(env, el);
54
+ uint32_t zcr_len;
55
+
56
+ /*
57
+ * If SVE is disabled, but FP is enabled,
58
+ * then the effective len is 0.
59
+ */
60
+ if (sve_el != 0 && fp_el == 0) {
61
+ zcr_len = 0;
62
+ } else {
63
+ zcr_len = sve_zcr_len_for_el(env, el);
64
+ }
65
+ flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
66
+ flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
67
+ }
68
+
69
+ sctlr = arm_sctlr(env, el);
70
+
71
+ if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
72
+ /*
73
+ * In order to save space in flags, we record only whether
74
+ * pauth is "inactive", meaning all insns are implemented as
75
+ * a nop, or "active" when some action must be performed.
76
+ * The decision of which action to take is left to a helper.
77
+ */
78
+ if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
79
+ flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
80
+ }
81
+ }
82
+
83
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
84
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
85
+ if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
86
+ flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
87
+ }
88
+ }
89
+
90
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
91
+}
92
+
93
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
94
target_ulong *cs_base, uint32_t *pflags)
95
{
96
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
97
uint32_t flags = 0;
98
99
if (is_a64(env)) {
100
- ARMCPU *cpu = env_archcpu(env);
101
- uint64_t sctlr;
102
-
103
*pc = env->pc;
104
- flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
105
-
106
- /* Get control bits for tagged addresses. */
107
- {
108
- ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
109
- ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
110
- int tbii, tbid;
111
-
112
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
113
- if (regime_el(env, stage1) < 2) {
114
- ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
115
- tbid = (p1.tbi << 1) | p0.tbi;
116
- tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
117
- } else {
118
- tbid = p0.tbi;
119
- tbii = tbid & !p0.tbid;
120
- }
121
-
122
- flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
123
- flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
124
- }
125
-
126
- if (cpu_isar_feature(aa64_sve, cpu)) {
127
- int sve_el = sve_exception_el(env, current_el);
128
- uint32_t zcr_len;
129
-
130
- /* If SVE is disabled, but FP is enabled,
131
- * then the effective len is 0.
132
- */
133
- if (sve_el != 0 && fp_el == 0) {
134
- zcr_len = 0;
135
- } else {
136
- zcr_len = sve_zcr_len_for_el(env, current_el);
137
- }
138
- flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
139
- flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
140
- }
141
-
142
- sctlr = arm_sctlr(env, current_el);
143
-
144
- if (cpu_isar_feature(aa64_pauth, cpu)) {
145
- /*
146
- * In order to save space in flags, we record only whether
147
- * pauth is "inactive", meaning all insns are implemented as
148
- * a nop, or "active" when some action must be performed.
149
- * The decision of which action to take is left to a helper.
150
- */
151
- if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
152
- flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
153
- }
154
- }
155
-
156
- if (cpu_isar_feature(aa64_bti, cpu)) {
157
- /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
158
- if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
159
- flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
160
- }
161
+ flags = rebuild_hflags_a64(env, current_el, fp_el, mmu_idx);
162
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
163
flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
164
}
165
} else {
166
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
167
flags = FIELD_DP32(flags, TBFLAG_A32,
168
XSCALE_CPAR, env->cp15.c15_cpar);
169
}
170
- }
171
172
- flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
173
+ flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
174
+ }
175
176
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
177
* states defined in the ARM ARM for software singlestep:
178
--
179
2.20.1
180
181
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Create a function to compute the values of the TBFLAG_A32 bits
4
that will be cached, and are used by all profiles.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20191023150057.25731-4-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper.c | 16 +++++++++++-----
12
1 file changed, 11 insertions(+), 5 deletions(-)
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
19
return flags;
20
}
21
22
+static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
23
+ ARMMMUIdx mmu_idx, uint32_t flags)
24
+{
25
+ flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
26
+ flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
27
+
28
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
29
+}
30
+
31
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
32
ARMMMUIdx mmu_idx)
33
{
34
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
35
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
36
int current_el = arm_current_el(env);
37
int fp_el = fp_exception_el(env, current_el);
38
- uint32_t flags = 0;
39
+ uint32_t flags;
40
41
if (is_a64(env)) {
42
*pc = env->pc;
43
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
44
}
45
} else {
46
*pc = env->regs[15];
47
+ flags = rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
48
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
49
flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
50
flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
51
flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
52
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
53
- flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
54
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
55
|| arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
56
flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
57
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
58
flags = FIELD_DP32(flags, TBFLAG_A32,
59
XSCALE_CPAR, env->cp15.c15_cpar);
60
}
61
-
62
- flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
63
}
64
65
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
66
--
67
2.20.1
68
69
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
These are the SVE equivalents to kvm_arch_get/put_fpsimd. Note, the
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
swabbing is different than it is for fpsmid because the vector format
5
is a little-endian stream of words.
6
7
Signed-off-by: Andrew Jones <drjones@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Eric Auger <eric.auger@redhat.com>
5
Message-id: 20230110082508.24038-4-philmd@linaro.org
10
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
11
Message-id: 20191024121808.9612-6-drjones@redhat.com
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
7
---
14
target/arm/kvm64.c | 185 ++++++++++++++++++++++++++++++++++++++-------
8
include/hw/i2c/arm_sbcon_i2c.h | 3 +--
15
1 file changed, 156 insertions(+), 29 deletions(-)
9
hw/arm/realview.c | 2 +-
10
hw/arm/versatilepb.c | 2 +-
11
hw/arm/vexpress.c | 2 +-
12
hw/i2c/versatile_i2c.c | 4 ++--
13
5 files changed, 6 insertions(+), 7 deletions(-)
16
14
17
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
15
diff --git a/include/hw/i2c/arm_sbcon_i2c.h b/include/hw/i2c/arm_sbcon_i2c.h
18
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/kvm64.c
17
--- a/include/hw/i2c/arm_sbcon_i2c.h
20
+++ b/target/arm/kvm64.c
18
+++ b/include/hw/i2c/arm_sbcon_i2c.h
21
@@ -XXX,XX +XXX,XX @@ int kvm_arch_destroy_vcpu(CPUState *cs)
19
@@ -XXX,XX +XXX,XX @@
22
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
20
#include "hw/i2c/bitbang_i2c.h"
23
{
21
#include "qom/object.h"
24
/* Return true if the regidx is a register we should synchronize
22
25
- * via the cpreg_tuples array (ie is not a core reg we sync by
23
-#define TYPE_VERSATILE_I2C "versatile_i2c"
26
- * hand in kvm_arch_get/put_registers())
24
-#define TYPE_ARM_SBCON_I2C TYPE_VERSATILE_I2C
27
+ * via the cpreg_tuples array (ie is not a core or sve reg that
25
+#define TYPE_ARM_SBCON_I2C "versatile_i2c"
28
+ * we sync by hand in kvm_arch_get/put_registers())
26
29
*/
27
typedef struct ArmSbconI2CState ArmSbconI2CState;
30
switch (regidx & KVM_REG_ARM_COPROC_MASK) {
28
DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, ARM_SBCON_I2C, TYPE_ARM_SBCON_I2C)
31
case KVM_REG_ARM_CORE:
29
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
32
+ case KVM_REG_ARM64_SVE:
30
index XXXXXXX..XXXXXXX 100644
33
return false;
31
--- a/hw/arm/realview.c
34
default:
32
+++ b/hw/arm/realview.c
35
return true;
33
@@ -XXX,XX +XXX,XX @@ static void realview_init(MachineState *machine,
36
@@ -XXX,XX +XXX,XX @@ int kvm_arm_cpreg_level(uint64_t regidx)
37
38
static int kvm_arch_put_fpsimd(CPUState *cs)
39
{
40
- ARMCPU *cpu = ARM_CPU(cs);
41
- CPUARMState *env = &cpu->env;
42
+ CPUARMState *env = &ARM_CPU(cs)->env;
43
struct kvm_one_reg reg;
44
- uint32_t fpr;
45
int i, ret;
46
47
for (i = 0; i < 32; i++) {
48
@@ -XXX,XX +XXX,XX @@ static int kvm_arch_put_fpsimd(CPUState *cs)
49
}
34
}
50
}
35
}
51
36
52
- reg.addr = (uintptr_t)(&fpr);
37
- dev = sysbus_create_simple(TYPE_VERSATILE_I2C, 0x10002000, NULL);
53
- fpr = vfp_get_fpsr(env);
38
+ dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, 0x10002000, NULL);
54
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
39
i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
55
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
40
i2c_slave_create_simple(i2c, "ds1338", 0x68);
56
- if (ret) {
41
57
- return ret;
42
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
58
+ return 0;
43
index XXXXXXX..XXXXXXX 100644
59
+}
44
--- a/hw/arm/versatilepb.c
60
+
45
+++ b/hw/arm/versatilepb.c
61
+/*
46
@@ -XXX,XX +XXX,XX @@ static void versatile_init(MachineState *machine, int board_id)
62
+ * SVE registers are encoded in KVM's memory in an endianness-invariant format.
47
/* Add PL031 Real Time Clock. */
63
+ * The byte at offset i from the start of the in-memory representation contains
48
sysbus_create_simple("pl031", 0x101e8000, pic[10]);
64
+ * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
49
65
+ * lowest offsets are stored in the lowest memory addresses, then that nearly
50
- dev = sysbus_create_simple(TYPE_VERSATILE_I2C, 0x10002000, NULL);
66
+ * matches QEMU's representation, which is to use an array of host-endian
51
+ dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, 0x10002000, NULL);
67
+ * uint64_t's, where the lower offsets are at the lower indices. To complete
52
i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
68
+ * the translation we just need to byte swap the uint64_t's on big-endian hosts.
53
i2c_slave_create_simple(i2c, "ds1338", 0x68);
69
+ */
54
70
+static uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
55
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
71
+{
56
index XXXXXXX..XXXXXXX 100644
72
+#ifdef HOST_WORDS_BIGENDIAN
57
--- a/hw/arm/vexpress.c
73
+ int i;
58
+++ b/hw/arm/vexpress.c
74
+
59
@@ -XXX,XX +XXX,XX @@ static void vexpress_common_init(MachineState *machine)
75
+ for (i = 0; i < nr; ++i) {
60
sysbus_create_simple("sp804", map[VE_TIMER01], pic[2]);
76
+ dst[i] = bswap64(src[i]);
61
sysbus_create_simple("sp804", map[VE_TIMER23], pic[3]);
77
}
62
78
63
- dev = sysbus_create_simple(TYPE_VERSATILE_I2C, map[VE_SERIALDVI], NULL);
79
- reg.addr = (uintptr_t)(&fpr);
64
+ dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, map[VE_SERIALDVI], NULL);
80
- fpr = vfp_get_fpcr(env);
65
i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
81
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
66
i2c_slave_create_simple(i2c, "sii9022", 0x39);
82
+ return dst;
67
83
+#else
68
diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c
84
+ return src;
69
index XXXXXXX..XXXXXXX 100644
85
+#endif
70
--- a/hw/i2c/versatile_i2c.c
86
+}
71
+++ b/hw/i2c/versatile_i2c.c
87
+
72
@@ -XXX,XX +XXX,XX @@
88
+/*
73
89
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
74
typedef ArmSbconI2CState VersatileI2CState;
90
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
75
DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, VERSATILE_I2C,
91
+ * code the slice index to zero for now as it's unlikely we'll need more than
76
- TYPE_VERSATILE_I2C)
92
+ * one slice for quite some time.
77
+ TYPE_ARM_SBCON_I2C)
93
+ */
78
94
+static int kvm_arch_put_sve(CPUState *cs)
79
95
+{
80
96
+ ARMCPU *cpu = ARM_CPU(cs);
81
@@ -XXX,XX +XXX,XX @@ static void versatile_i2c_init(Object *obj)
97
+ CPUARMState *env = &cpu->env;
98
+ uint64_t tmp[ARM_MAX_VQ * 2];
99
+ uint64_t *r;
100
+ struct kvm_one_reg reg;
101
+ int n, ret;
102
+
103
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
104
+ r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
105
+ reg.addr = (uintptr_t)r;
106
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
107
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
108
+ if (ret) {
109
+ return ret;
110
+ }
111
+ }
112
+
113
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
114
+ r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
115
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
116
+ reg.addr = (uintptr_t)r;
117
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
118
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
119
+ if (ret) {
120
+ return ret;
121
+ }
122
+ }
123
+
124
+ r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
125
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
126
+ reg.addr = (uintptr_t)r;
127
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
128
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
129
if (ret) {
130
return ret;
131
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
132
{
133
struct kvm_one_reg reg;
134
uint64_t val;
135
+ uint32_t fpr;
136
int i, ret;
137
unsigned int el;
138
139
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
140
}
141
}
142
143
- ret = kvm_arch_put_fpsimd(cs);
144
+ if (cpu_isar_feature(aa64_sve, cpu)) {
145
+ ret = kvm_arch_put_sve(cs);
146
+ } else {
147
+ ret = kvm_arch_put_fpsimd(cs);
148
+ }
149
+ if (ret) {
150
+ return ret;
151
+ }
152
+
153
+ reg.addr = (uintptr_t)(&fpr);
154
+ fpr = vfp_get_fpsr(env);
155
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
156
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
157
+ if (ret) {
158
+ return ret;
159
+ }
160
+
161
+ reg.addr = (uintptr_t)(&fpr);
162
+ fpr = vfp_get_fpcr(env);
163
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
164
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
165
if (ret) {
166
return ret;
167
}
168
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
169
170
static int kvm_arch_get_fpsimd(CPUState *cs)
171
{
172
- ARMCPU *cpu = ARM_CPU(cs);
173
- CPUARMState *env = &cpu->env;
174
+ CPUARMState *env = &ARM_CPU(cs)->env;
175
struct kvm_one_reg reg;
176
- uint32_t fpr;
177
int i, ret;
178
179
for (i = 0; i < 32; i++) {
180
@@ -XXX,XX +XXX,XX @@ static int kvm_arch_get_fpsimd(CPUState *cs)
181
}
182
}
183
184
- reg.addr = (uintptr_t)(&fpr);
185
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
186
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
187
- if (ret) {
188
- return ret;
189
- }
190
- vfp_set_fpsr(env, fpr);
191
+ return 0;
192
+}
193
194
- reg.addr = (uintptr_t)(&fpr);
195
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
196
+/*
197
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
198
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
199
+ * code the slice index to zero for now as it's unlikely we'll need more than
200
+ * one slice for quite some time.
201
+ */
202
+static int kvm_arch_get_sve(CPUState *cs)
203
+{
204
+ ARMCPU *cpu = ARM_CPU(cs);
205
+ CPUARMState *env = &cpu->env;
206
+ struct kvm_one_reg reg;
207
+ uint64_t *r;
208
+ int n, ret;
209
+
210
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
211
+ r = &env->vfp.zregs[n].d[0];
212
+ reg.addr = (uintptr_t)r;
213
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
214
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
215
+ if (ret) {
216
+ return ret;
217
+ }
218
+ sve_bswap64(r, r, cpu->sve_max_vq * 2);
219
+ }
220
+
221
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
222
+ r = &env->vfp.pregs[n].p[0];
223
+ reg.addr = (uintptr_t)r;
224
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
225
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
226
+ if (ret) {
227
+ return ret;
228
+ }
229
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
230
+ }
231
+
232
+ r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
233
+ reg.addr = (uintptr_t)r;
234
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
235
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
236
if (ret) {
237
return ret;
238
}
239
- vfp_set_fpcr(env, fpr);
240
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
241
242
return 0;
243
}
82
}
244
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
83
245
struct kvm_one_reg reg;
84
static const TypeInfo versatile_i2c_info = {
246
uint64_t val;
85
- .name = TYPE_VERSATILE_I2C,
247
unsigned int el;
86
+ .name = TYPE_ARM_SBCON_I2C,
248
+ uint32_t fpr;
87
.parent = TYPE_SYS_BUS_DEVICE,
249
int i, ret;
88
.instance_size = sizeof(ArmSbconI2CState),
250
89
.instance_init = versatile_i2c_init,
251
ARMCPU *cpu = ARM_CPU(cs);
252
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
253
env->spsr = env->banked_spsr[i];
254
}
255
256
- ret = kvm_arch_get_fpsimd(cs);
257
+ if (cpu_isar_feature(aa64_sve, cpu)) {
258
+ ret = kvm_arch_get_sve(cs);
259
+ } else {
260
+ ret = kvm_arch_get_fpsimd(cs);
261
+ }
262
if (ret) {
263
return ret;
264
}
265
266
+ reg.addr = (uintptr_t)(&fpr);
267
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
268
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
269
+ if (ret) {
270
+ return ret;
271
+ }
272
+ vfp_set_fpsr(env, fpr);
273
+
274
+ reg.addr = (uintptr_t)(&fpr);
275
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
276
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
277
+ if (ret) {
278
+ return ret;
279
+ }
280
+ vfp_set_fpcr(env, fpr);
281
+
282
ret = kvm_get_vcpu_events(cpu);
283
if (ret) {
284
return ret;
285
--
90
--
286
2.20.1
91
2.34.1
287
92
288
93
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
Since 97a28b0eeac14 ("target/arm: Allow VFP and Neon to be disabled via
3
ARM_SBCON_I2C() macro and ArmSbconI2CState typedef are
4
a CPU property") we can disable the 'max' cpu model's VFP and neon
4
already declared via the QOM DECLARE_INSTANCE_CHECKER()
5
features, but there's no way to disable SVE. Add the 'sve=on|off'
5
macro in "hw/i2c/arm_sbcon_i2c.h". Drop the VERSATILE_I2C
6
property to give it that flexibility. We also rename
6
declarations from versatile_i2c.c.
7
cpu_max_get/set_sve_vq to cpu_max_get/set_sve_max_vq in order for them
8
to follow the typical *_get/set_<property-name> pattern.
9
7
10
Signed-off-by: Andrew Jones <drjones@redhat.com>
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Eric Auger <eric.auger@redhat.com>
10
Message-id: 20230110082508.24038-5-philmd@linaro.org
13
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
14
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
15
Message-id: 20191024121808.9612-4-drjones@redhat.com
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
12
---
18
target/arm/cpu.c | 3 ++-
13
hw/i2c/versatile_i2c.c | 7 +------
19
target/arm/cpu64.c | 52 ++++++++++++++++++++++++++++++++++------
14
1 file changed, 1 insertion(+), 6 deletions(-)
20
target/arm/monitor.c | 2 +-
21
tests/arm-cpu-features.c | 1 +
22
4 files changed, 49 insertions(+), 9 deletions(-)
23
15
24
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c
25
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/cpu.c
18
--- a/hw/i2c/versatile_i2c.c
27
+++ b/target/arm/cpu.c
19
+++ b/hw/i2c/versatile_i2c.c
28
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
20
@@ -XXX,XX +XXX,XX @@
29
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
21
#include "qemu/module.h"
30
env->cp15.cptr_el[3] |= CPTR_EZ;
22
#include "qom/object.h"
31
/* with maximum vector length */
23
32
- env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
24
-typedef ArmSbconI2CState VersatileI2CState;
33
+ env->vfp.zcr_el[1] = cpu_isar_feature(aa64_sve, cpu) ?
25
-DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, VERSATILE_I2C,
34
+ cpu->sve_max_vq - 1 : 0;
26
- TYPE_ARM_SBCON_I2C)
35
env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
27
-
36
env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
28
-
37
/*
29
38
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
30
REG32(CONTROL_GET, 0)
39
index XXXXXXX..XXXXXXX 100644
31
REG32(CONTROL_SET, 0)
40
--- a/target/arm/cpu64.c
32
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps versatile_i2c_ops = {
41
+++ b/target/arm/cpu64.c
33
static void versatile_i2c_init(Object *obj)
42
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
43
define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
44
}
45
46
-static void cpu_max_get_sve_vq(Object *obj, Visitor *v, const char *name,
47
- void *opaque, Error **errp)
48
+static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
49
+ void *opaque, Error **errp)
50
{
34
{
51
ARMCPU *cpu = ARM_CPU(obj);
35
DeviceState *dev = DEVICE(obj);
52
- visit_type_uint32(v, name, &cpu->sve_max_vq, errp);
36
- ArmSbconI2CState *s = VERSATILE_I2C(obj);
53
+ uint32_t value;
37
+ ArmSbconI2CState *s = ARM_SBCON_I2C(obj);
54
+
38
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
55
+ /* All vector lengths are disabled when SVE is off. */
39
I2CBus *bus;
56
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
57
+ value = 0;
58
+ } else {
59
+ value = cpu->sve_max_vq;
60
+ }
61
+ visit_type_uint32(v, name, &value, errp);
62
}
63
64
-static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
65
- void *opaque, Error **errp)
66
+static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
67
+ void *opaque, Error **errp)
68
{
69
ARMCPU *cpu = ARM_CPU(obj);
70
Error *err = NULL;
71
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
72
error_propagate(errp, err);
73
}
74
75
+static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
76
+ void *opaque, Error **errp)
77
+{
78
+ ARMCPU *cpu = ARM_CPU(obj);
79
+ bool value = cpu_isar_feature(aa64_sve, cpu);
80
+
81
+ visit_type_bool(v, name, &value, errp);
82
+}
83
+
84
+static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
85
+ void *opaque, Error **errp)
86
+{
87
+ ARMCPU *cpu = ARM_CPU(obj);
88
+ Error *err = NULL;
89
+ bool value;
90
+ uint64_t t;
91
+
92
+ visit_type_bool(v, name, &value, &err);
93
+ if (err) {
94
+ error_propagate(errp, err);
95
+ return;
96
+ }
97
+
98
+ t = cpu->isar.id_aa64pfr0;
99
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
100
+ cpu->isar.id_aa64pfr0 = t;
101
+}
102
+
103
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
104
* otherwise, a CPU with as many features enabled as our emulation supports.
105
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
106
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
107
#endif
108
109
cpu->sve_max_vq = ARM_MAX_VQ;
110
- object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_vq,
111
- cpu_max_set_sve_vq, NULL, NULL, &error_fatal);
112
+ object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
113
+ cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
114
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
115
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
116
}
117
}
118
119
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/target/arm/monitor.c
122
+++ b/target/arm/monitor.c
123
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
124
* then the order that considers those dependencies must be used.
125
*/
126
static const char *cpu_model_advertised_features[] = {
127
- "aarch64", "pmu",
128
+ "aarch64", "pmu", "sve",
129
NULL
130
};
131
132
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/tests/arm-cpu-features.c
135
+++ b/tests/arm-cpu-features.c
136
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion(const void *data)
137
138
if (g_str_equal(qtest_get_arch(), "aarch64")) {
139
assert_has_feature(qts, "max", "aarch64");
140
+ assert_has_feature(qts, "max", "sve");
141
assert_has_feature(qts, "cortex-a57", "pmu");
142
assert_has_feature(qts, "cortex-a57", "aarch64");
Switch the fsl_etsec code away from bottom-half based ptimers to
the new transaction-based ptimer API. This just requires adding
begin/commit calls around the various places that modify the ptimer
state, and using the new ptimer_init() function to create the timer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20191017132122.4402-2-peter.maydell@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/net/fsl_etsec/etsec.h | 1 -
hw/net/fsl_etsec/etsec.c | 9 +++++----
2 files changed, 5 insertions(+), 5 deletions(-)

From: Philippe Mathieu-Daudé <philmd@linaro.org>

This device model started with the Versatile board, named
TYPE_VERSATILE_I2C, then ended up renamed TYPE_ARM_SBCON_I2C
as per the official "ARM SBCon two-wire serial bus interface"
description from:
https://developer.arm.com/documentation/dui0440/b/programmer-s-reference/two-wire-serial-bus-interface--sbcon

Use the latter name as a better description.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230110082508.24038-6-philmd@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
MAINTAINERS | 1 +
hw/i2c/{versatile_i2c.c => arm_sbcon_i2c.c} | 24 ++++++++++-----------
hw/arm/Kconfig | 4 ++--
hw/i2c/Kconfig | 2 +-
hw/i2c/meson.build | 2 +-
5 files changed, 17 insertions(+), 16 deletions(-)
rename hw/i2c/{versatile_i2c.c => arm_sbcon_i2c.c} (81%)
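For reference, a minimal sketch of the transaction-based ptimer pattern the message above describes, using a hypothetical device state 's' and callback name; the calls themselves (ptimer_init(), ptimer_transaction_begin()/commit(), ptimer_set_freq(), ptimer_stop(), ptimer_set_count(), ptimer_run()) are the ones used in the etsec hunks below.

    /* Timer expiry callback (etsec_timer_hit in this device). */
    static void my_timer_hit(void *opaque)
    {
        /* do the periodic work here */
    }

    /* Creation: no QEMUBH any more, the callback is passed directly. */
    s->ptimer = ptimer_init(my_timer_hit, s, PTIMER_POLICY_DEFAULT);
    ptimer_transaction_begin(s->ptimer);
    ptimer_set_freq(s->ptimer, 100);
    ptimer_transaction_commit(s->ptimer);

    /* Any later state change is bracketed the same way. */
    ptimer_transaction_begin(s->ptimer);
    ptimer_stop(s->ptimer);
    ptimer_set_count(s->ptimer, 1);
    ptimer_run(s->ptimer, 1);      /* second argument: one-shot */
    ptimer_transaction_commit(s->ptimer);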
diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h
24
diff --git a/MAINTAINERS b/MAINTAINERS
18
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/net/fsl_etsec/etsec.h
26
--- a/MAINTAINERS
20
+++ b/hw/net/fsl_etsec/etsec.h
27
+++ b/MAINTAINERS
21
@@ -XXX,XX +XXX,XX @@ typedef struct eTSEC {
28
@@ -XXX,XX +XXX,XX @@ M: Peter Maydell <peter.maydell@linaro.org>
22
uint16_t phy_control;
29
L: qemu-arm@nongnu.org
23
30
S: Maintained
24
/* Polling */
31
F: hw/*/versatile*
25
- QEMUBH *bh;
32
+F: hw/i2c/arm_sbcon_i2c.c
26
struct ptimer_state *ptimer;
33
F: include/hw/i2c/arm_sbcon_i2c.h
27
34
F: hw/misc/arm_sysctl.c
28
/* Whether we should flush the rx queue when buffer becomes available. */
35
F: docs/system/arm/versatile.rst
29
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
36
diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/arm_sbcon_i2c.c
37
similarity index 81%
38
rename from hw/i2c/versatile_i2c.c
39
rename to hw/i2c/arm_sbcon_i2c.c
30
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/net/fsl_etsec/etsec.c
41
--- a/hw/i2c/versatile_i2c.c
32
+++ b/hw/net/fsl_etsec/etsec.c
42
+++ b/hw/i2c/arm_sbcon_i2c.c
33
@@ -XXX,XX +XXX,XX @@
43
@@ -XXX,XX +XXX,XX @@ REG32(CONTROL_CLR, 4)
34
#include "etsec.h"
44
#define SCL BIT(0)
35
#include "registers.h"
45
#define SDA BIT(1)
36
#include "qemu/log.h"
46
37
-#include "qemu/main-loop.h"
47
-static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
38
#include "qemu/module.h"
48
+static uint64_t arm_sbcon_i2c_read(void *opaque, hwaddr offset,
39
49
unsigned size)
40
/* #define HEX_DUMP */
50
{
41
@@ -XXX,XX +XXX,XX @@ static void write_dmactrl(eTSEC *etsec,
51
ArmSbconI2CState *s = opaque;
42
52
@@ -XXX,XX +XXX,XX @@ static uint64_t versatile_i2c_read(void *opaque, hwaddr offset,
43
if (!(value & DMACTRL_WOP)) {
44
/* Start polling */
45
+ ptimer_transaction_begin(etsec->ptimer);
46
ptimer_stop(etsec->ptimer);
47
ptimer_set_count(etsec->ptimer, 1);
48
ptimer_run(etsec->ptimer, 1);
49
+ ptimer_transaction_commit(etsec->ptimer);
50
}
53
}
51
}
54
}
52
55
53
@@ -XXX,XX +XXX,XX @@ static void etsec_realize(DeviceState *dev, Error **errp)
56
-static void versatile_i2c_write(void *opaque, hwaddr offset,
54
object_get_typename(OBJECT(dev)), dev->id, etsec);
57
+static void arm_sbcon_i2c_write(void *opaque, hwaddr offset,
55
qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a);
58
uint64_t value, unsigned size)
56
59
{
57
-
60
ArmSbconI2CState *s = opaque;
58
- etsec->bh = qemu_bh_new(etsec_timer_hit, etsec);
61
@@ -XXX,XX +XXX,XX @@ static void versatile_i2c_write(void *opaque, hwaddr offset,
59
- etsec->ptimer = ptimer_init_with_bh(etsec->bh, PTIMER_POLICY_DEFAULT);
62
s->in = bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SDA, (s->out & SDA) != 0);
60
+ etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT);
61
+ ptimer_transaction_begin(etsec->ptimer);
62
ptimer_set_freq(etsec->ptimer, 100);
63
+ ptimer_transaction_commit(etsec->ptimer);
64
}
63
}
65
64
66
static void etsec_instance_init(Object *obj)
65
-static const MemoryRegionOps versatile_i2c_ops = {
66
- .read = versatile_i2c_read,
67
- .write = versatile_i2c_write,
68
+static const MemoryRegionOps arm_sbcon_i2c_ops = {
69
+ .read = arm_sbcon_i2c_read,
70
+ .write = arm_sbcon_i2c_write,
71
.endianness = DEVICE_NATIVE_ENDIAN,
72
};
73
74
-static void versatile_i2c_init(Object *obj)
75
+static void arm_sbcon_i2c_init(Object *obj)
76
{
77
DeviceState *dev = DEVICE(obj);
78
ArmSbconI2CState *s = ARM_SBCON_I2C(obj);
79
@@ -XXX,XX +XXX,XX @@ static void versatile_i2c_init(Object *obj)
80
81
bus = i2c_init_bus(dev, "i2c");
82
bitbang_i2c_init(&s->bitbang, bus);
83
- memory_region_init_io(&s->iomem, obj, &versatile_i2c_ops, s,
84
+ memory_region_init_io(&s->iomem, obj, &arm_sbcon_i2c_ops, s,
85
"arm_sbcon_i2c", 0x1000);
86
sysbus_init_mmio(sbd, &s->iomem);
87
}
88
89
-static const TypeInfo versatile_i2c_info = {
90
+static const TypeInfo arm_sbcon_i2c_info = {
91
.name = TYPE_ARM_SBCON_I2C,
92
.parent = TYPE_SYS_BUS_DEVICE,
93
.instance_size = sizeof(ArmSbconI2CState),
94
- .instance_init = versatile_i2c_init,
95
+ .instance_init = arm_sbcon_i2c_init,
96
};
97
98
-static void versatile_i2c_register_types(void)
99
+static void arm_sbcon_i2c_register_types(void)
100
{
101
- type_register_static(&versatile_i2c_info);
102
+ type_register_static(&arm_sbcon_i2c_info);
103
}
104
105
-type_init(versatile_i2c_register_types)
106
+type_init(arm_sbcon_i2c_register_types)
107
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
108
index XXXXXXX..XXXXXXX 100644
109
--- a/hw/arm/Kconfig
110
+++ b/hw/arm/Kconfig
111
@@ -XXX,XX +XXX,XX @@ config REALVIEW
112
select PL110
113
select PL181 # display
114
select PL310 # cache controller
115
- select VERSATILE_I2C
116
+ select ARM_SBCON_I2C
117
select DS1338 # I2C RTC+NVRAM
118
select USB_OHCI
119
120
@@ -XXX,XX +XXX,XX @@ config MPS2
121
select SPLIT_IRQ
122
select UNIMP
123
select CMSDK_APB_WATCHDOG
124
- select VERSATILE_I2C
125
+ select ARM_SBCON_I2C
126
127
config FSL_IMX7
128
bool
129
diff --git a/hw/i2c/Kconfig b/hw/i2c/Kconfig
130
index XXXXXXX..XXXXXXX 100644
131
--- a/hw/i2c/Kconfig
132
+++ b/hw/i2c/Kconfig
133
@@ -XXX,XX +XXX,XX @@ config SMBUS_EEPROM
134
bool
135
select SMBUS
136
137
-config VERSATILE_I2C
138
+config ARM_SBCON_I2C
139
bool
140
select BITBANG_I2C
141
142
diff --git a/hw/i2c/meson.build b/hw/i2c/meson.build
143
index XXXXXXX..XXXXXXX 100644
144
--- a/hw/i2c/meson.build
145
+++ b/hw/i2c/meson.build
146
@@ -XXX,XX +XXX,XX @@ i2c_ss.add(when: 'CONFIG_ALLWINNER_I2C', if_true: files('allwinner-i2c.c'))
147
i2c_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('microbit_i2c.c'))
148
i2c_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_smbus.c'))
149
i2c_ss.add(when: 'CONFIG_SMBUS_EEPROM', if_true: files('smbus_eeprom.c'))
150
-i2c_ss.add(when: 'CONFIG_VERSATILE_I2C', if_true: files('versatile_i2c.c'))
151
+i2c_ss.add(when: 'CONFIG_ARM_SBCON_I2C', if_true: files('arm_sbcon_i2c.c'))
152
i2c_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_i2c.c'))
153
i2c_ss.add(when: 'CONFIG_PPC4XX', if_true: files('ppc4xx_i2c.c'))
154
i2c_ss.add(when: 'CONFIG_PCA954X', if_true: files('i2c_mux_pca954x.c'))
From: Richard Henderson <richard.henderson@linaro.org>

Continue setting, but not relying upon, env->hflags.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 13 +++++++++++--
target/arm/translate.c | 28 +++++++++++++++++++++++-----
2 files changed, 34 insertions(+), 7 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230112102436.1913-2-philmd@linaro.org
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
[PMD: Split patch in multiple tiny steps]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 24 +++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
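The rewritten MSR-immediate path in the new translate-a64.c hunk below folds the PSTATE.SM/ZA comparison into a couple of bit operations on crm. A standalone sketch of that trick, with a hypothetical helper name and a worked example in the comment:

    /*
     * crm bit 0: requested value (applied to every selected bit)
     * crm bit 1: select SM, crm bit 2: select ZA
     * Returns nonzero if at least one selected bit would change.
     */
    static int svcr_msr_changes(int pstate_sm, int pstate_za, int crm)
    {
        int old = pstate_sm | (pstate_za << 1);
        int new = (crm & 1) * 3;        /* 0b00 or 0b11 */
        int msk = (crm >> 1) & 3;       /* which of SM/ZA are selected */

        return (old ^ new) & msk;
    }

    /*
     * Example: SM=1, ZA=0, and an SMSTOP SM (write value 0 with only SM
     * selected): old=0b01, new=0b00, msk=0b01, so (old^new)&msk != 0 and
     * the SM helper is generated; issuing the same write again would find
     * no change and skip the helper call.
     */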
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate-a64.c
17
--- a/target/arm/translate-a64.c
17
+++ b/target/arm/translate-a64.c
18
+++ b/target/arm/translate-a64.c
18
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
19
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
19
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
20
goto do_unallocated;
20
/* I/O operations must end the TB here (whether read or write) */
21
}
21
s->base.is_jmp = DISAS_UPDATE;
22
if (sme_access_check(s)) {
22
- } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
23
- bool i = crm & 1;
23
- /* We default to ending the TB on a coprocessor register write,
24
- bool changed = false;
24
+ }
25
+ int old = s->pstate_sm | (s->pstate_za << 1);
25
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
26
+ int new = (crm & 1) * 3;
26
+ /*
27
+ int msk = (crm >> 1) & 3;
27
+ * A write to any coprocessor regiser that ends a TB
28
28
+ * must rebuild the hflags for the next TB.
29
- if ((crm & 2) && i != s->pstate_sm) {
29
+ */
30
- gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
30
+ TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
31
- changed = true;
31
+ gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
32
- }
32
+ tcg_temp_free_i32(tcg_el);
33
- if ((crm & 4) && i != s->pstate_za) {
33
+ /*
34
- gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
34
+ * We default to ending the TB on a coprocessor register write,
35
- changed = true;
35
* but allow this to be suppressed by the register definition
36
- }
36
* (usually only necessary to work around guest bugs).
37
- if (changed) {
37
*/
38
+ if ((old ^ new) & msk) {
38
diff --git a/target/arm/translate.c b/target/arm/translate.c
39
+ /* At least one bit changes. */
39
index XXXXXXX..XXXXXXX 100644
40
+ bool i = crm & 1;
40
--- a/target/arm/translate.c
41
+++ b/target/arm/translate.c
42
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
43
ri = get_arm_cp_reginfo(s->cp_regs,
44
ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
45
if (ri) {
46
+ bool need_exit_tb;
47
+
41
+
48
/* Check access permissions */
42
+ if ((crm & 2) && i != s->pstate_sm) {
49
if (!cp_access_ok(s->current_el, ri, isread)) {
43
+ gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
50
return 1;
44
+ }
51
@@ -XXX,XX +XXX,XX @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
45
+ if ((crm & 4) && i != s->pstate_za) {
52
}
46
+ gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
53
}
47
+ }
54
48
gen_rebuild_hflags(s);
55
- if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
49
} else {
56
- /* I/O operations must end the TB here (whether read or write) */
50
s->base.is_jmp = DISAS_NEXT;
57
- gen_lookup_tb(s);
58
- } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
59
- /* We default to ending the TB on a coprocessor register write,
60
+ /* I/O operations must end the TB here (whether read or write) */
61
+ need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
62
+ (ri->type & ARM_CP_IO));
63
+
64
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
65
+ /*
66
+ * A write to any coprocessor regiser that ends a TB
67
+ * must rebuild the hflags for the next TB.
68
+ */
69
+ TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
70
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
71
+ gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
72
+ } else {
73
+ gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
74
+ }
75
+ tcg_temp_free_i32(tcg_el);
76
+ /*
77
+ * We default to ending the TB on a coprocessor register write,
78
* but allow this to be suppressed by the register definition
79
* (usually only necessary to work around guest bugs).
80
*/
81
+ need_exit_tb = true;
82
+ }
83
+ if (need_exit_tb) {
84
gen_lookup_tb(s);
85
}
86
87
--
51
--
88
2.20.1
52
2.34.1
89
53
90
54
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Continue setting, but not relying upon, env->hflags.
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20191023150057.25731-19-richard.henderson@linaro.org
4
Reviewed-by: Fabiano Rosas <farosas@suse.de>
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-id: 20230112102436.1913-3-philmd@linaro.org
7
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
8
[PMD: Split patch in multiple tiny steps]
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
11
---
10
target/arm/op_helper.c | 3 +++
12
target/arm/sme_helper.c | 2 ++
11
1 file changed, 3 insertions(+)
13
target/arm/translate-a64.c | 1 -
14
2 files changed, 2 insertions(+), 1 deletion(-)
12
15
13
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
16
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
14
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/op_helper.c
18
--- a/target/arm/sme_helper.c
16
+++ b/target/arm/op_helper.c
19
+++ b/target/arm/sme_helper.c
17
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
20
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
18
void HELPER(setend)(CPUARMState *env)
21
}
19
{
22
env->svcr ^= R_SVCR_SM_MASK;
20
env->uncached_cpsr ^= CPSR_E;
23
arm_reset_sve_state(env);
21
+ arm_rebuild_hflags(env);
24
+ arm_rebuild_hflags(env);
22
}
25
}
23
26
24
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
27
void helper_set_pstate_za(CPUARMState *env, uint32_t i)
25
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(cpsr_read)(CPUARMState *env)
28
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
26
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
29
if (i) {
27
{
30
memset(env->zarray, 0, sizeof(env->zarray));
28
cpsr_write(env, val, mask, CPSRWriteByInstr);
31
}
29
+ /* TODO: Not all cpsr bits are relevant to hflags. */
30
+ arm_rebuild_hflags(env);
32
+ arm_rebuild_hflags(env);
31
}
33
}
32
34
33
/* Write the CPSR for a 32-bit exception return */
35
void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
36
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-a64.c
39
+++ b/target/arm/translate-a64.c
40
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
41
if ((crm & 4) && i != s->pstate_za) {
42
gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
43
}
44
- gen_rebuild_hflags(s);
45
} else {
46
s->base.is_jmp = DISAS_NEXT;
47
}
34
--
48
--
35
2.20.1
49
2.34.1
36
50
37
51
1
From: Andrew Jones <drjones@redhat.com>

Allow cpu 'host' to enable SVE when it's available, unless the
user chooses to disable it with the added 'sve=off' cpu property.
Also give the user the ability to select vector lengths with the
sve<N> properties. We don't adopt 'max' cpu's other sve property,
sve-max-vq, because that property is difficult to use with KVM.
That property assumes all vector lengths in the range from 1 up
to and including the specified maximum length are supported, but
there may be optional lengths not supported by the host in that
range. With KVM one must be more specific when enabling vector
lengths.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Message-id: 20191024121808.9612-10-drjones@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 2 ++
target/arm/cpu.c | 3 +++
target/arm/cpu64.c | 33 +++++++++++++++++----------------
target/arm/kvm64.c | 14 +++++++++++++-
tests/arm-cpu-features.c | 23 +++++++++++------------
docs/arm-cpu-features.rst | 19 ++++++++++++-------
6 files changed, 58 insertions(+), 36 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230112102436.1913-4-philmd@linaro.org
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
[PMD: Split patch in multiple tiny steps]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 1 +
linux-user/aarch64/cpu_loop.c | 2 +-
linux-user/aarch64/signal.c | 2 +-
target/arm/helper.c | 8 ++++++++
target/arm/sme_helper.c | 4 ++--
5 files changed, 13 insertions(+), 4 deletions(-)
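To make the sve-max-vq limitation concrete, here is a small hypothetical sketch (not QEMU code) of a host vector-length set with a hole in it; per-length sve<N> properties can describe such a host exactly, while a single maximum cannot.

    #include <stdbool.h>
    #include <stdint.h>

    /* One bit per vector quadword count: bit (vq - 1) set => vq supported. */
    static bool can_enable_vq(uint64_t vq_map, unsigned vq)
    {
        return vq_map & (1ull << (vq - 1));
    }

    static void example(void)
    {
        /* A host implementing 128-, 256- and 512-bit SVE but not 384-bit. */
        uint64_t host_vq_map = (1ull << 0) | (1ull << 1) | (1ull << 3);

        /*
         * sve-max-vq=4 would implicitly claim vq 1..4, including the
         * missing 384-bit length (vq 3), so it cannot describe this host.
         * Explicit sve128=on,sve256=on,sve512=on names exactly the set
         * the host reports.
         */
        bool has_384 = can_enable_vq(host_vq_map, 3);   /* false */
        bool has_512 = can_enable_vq(host_vq_map, 4);   /* true */
        (void)has_384;
        (void)has_512;
    }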
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
30
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
32
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
33
@@ -XXX,XX +XXX,XX @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
23
@@ -XXX,XX +XXX,XX @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
34
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
24
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
35
void aarch64_sve_change_el(CPUARMState *env, int old_el,
25
void aarch64_sve_change_el(CPUARMState *env, int old_el,
36
int new_el, bool el0_a64);
26
int new_el, bool el0_a64);
37
+void aarch64_add_sve_properties(Object *obj);
27
+void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask);
38
#else
28
void arm_reset_sve_state(CPUARMState *env);
39
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
29
40
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
30
/*
41
int n, bool a)
31
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
42
{ }
43
+static inline void aarch64_add_sve_properties(Object *obj) { }
44
#endif
45
46
#if !defined(CONFIG_TCG)
47
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
48
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/cpu.c
33
--- a/linux-user/aarch64/cpu_loop.c
50
+++ b/target/arm/cpu.c
34
+++ b/linux-user/aarch64/cpu_loop.c
51
@@ -XXX,XX +XXX,XX @@ static void arm_host_initfn(Object *obj)
35
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
52
ARMCPU *cpu = ARM_CPU(obj);
36
* On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
53
37
* PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
54
kvm_arm_set_cpu_features_from_host(cpu);
38
*/
55
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
39
+ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
56
+ aarch64_add_sve_properties(obj);
40
if (FIELD_EX64(env->svcr, SVCR, SM)) {
57
+ }
41
- env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
58
arm_cpu_post_init(obj);
42
arm_rebuild_hflags(env);
43
arm_reset_sve_state(env);
44
}
45
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/linux-user/aarch64/signal.c
48
+++ b/linux-user/aarch64/signal.c
49
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
50
* Invoke the signal handler with both SM and ZA disabled.
51
* When clearing SM, ResetSVEState, per SMSTOP.
52
*/
53
+ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
54
if (FIELD_EX64(env->svcr, SVCR, SM)) {
55
arm_reset_sve_state(env);
56
}
57
if (env->svcr) {
58
- env->svcr = 0;
59
arm_rebuild_hflags(env);
60
}
61
62
diff --git a/target/arm/helper.c b/target/arm/helper.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/arm/helper.c
65
+++ b/target/arm/helper.c
66
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
67
return CP_ACCESS_OK;
59
}
68
}
60
69
61
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
70
+void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/cpu64.c
64
+++ b/target/arm/cpu64.c
65
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
66
cpu->isar.id_aa64pfr0 = t;
67
}
68
69
+void aarch64_add_sve_properties(Object *obj)
70
+{
71
+{
71
+ uint32_t vq;
72
+ uint64_t change = (env->svcr ^ new) & mask;
72
+
73
+
73
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
74
+ env->svcr ^= change;
74
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
75
+
76
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
77
+ char name[8];
78
+ sprintf(name, "sve%d", vq * 128);
79
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
80
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
81
+ }
82
+}
75
+}
83
+
76
+
84
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
77
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
85
* otherwise, a CPU with as many features enabled as our emulation supports.
78
uint64_t value)
86
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
87
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
88
static void aarch64_max_initfn(Object *obj)
89
{
79
{
90
ARMCPU *cpu = ARM_CPU(obj);
80
helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
91
- uint32_t vq;
81
helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
92
- uint64_t t;
82
+ aarch64_set_svcr(env, value, -1);
93
83
arm_rebuild_hflags(env);
94
if (kvm_enabled()) {
84
}
95
kvm_arm_set_cpu_features_from_host(cpu);
85
96
- if (kvm_arm_sve_supported(CPU(cpu))) {
86
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
97
- t = cpu->isar.id_aa64pfr0;
87
index XXXXXXX..XXXXXXX 100644
98
- t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
88
--- a/target/arm/sme_helper.c
99
- cpu->isar.id_aa64pfr0 = t;
89
+++ b/target/arm/sme_helper.c
100
- }
90
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
101
} else {
91
if (i == FIELD_EX64(env->svcr, SVCR, SM)) {
102
+ uint64_t t;
92
return;
103
uint32_t u;
104
aarch64_a57_initfn(obj);
105
106
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
107
#endif
108
}
93
}
109
94
- env->svcr ^= R_SVCR_SM_MASK;
110
- object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
95
+ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
111
- cpu_arm_set_sve, NULL, NULL, &error_fatal);
96
arm_reset_sve_state(env);
112
+ aarch64_add_sve_properties(obj);
97
arm_rebuild_hflags(env);
113
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
114
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
115
-
116
- for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
117
- char name[8];
118
- sprintf(name, "sve%d", vq * 128);
119
- object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
120
- cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
121
- }
122
}
98
}
123
99
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
124
struct ARMCPUInfo {
100
if (i == FIELD_EX64(env->svcr, SVCR, ZA)) {
125
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
101
return;
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/kvm64.c
128
+++ b/target/arm/kvm64.c
129
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
130
* and then query that CPU for the relevant ID registers.
131
*/
132
int fdarray[3];
133
+ bool sve_supported;
134
uint64_t features = 0;
135
+ uint64_t t;
136
int err;
137
138
/* Old kernels may not know about the PREFERRED_TARGET ioctl: however
139
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
140
ARM64_SYS_REG(3, 0, 0, 3, 2));
141
}
102
}
142
103
- env->svcr ^= R_SVCR_ZA_MASK;
143
+ sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
104
+ aarch64_set_svcr(env, 0, R_SVCR_ZA_MASK);
144
+
145
kvm_arm_destroy_scratch_host_vcpu(fdarray);
146
147
if (err < 0) {
148
return false;
149
}
150
151
- /* We can assume any KVM supporting CPU is at least a v8
152
+ /* Add feature bits that can't appear until after VCPU init. */
153
+ if (sve_supported) {
154
+ t = ahcf->isar.id_aa64pfr0;
155
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
156
+ ahcf->isar.id_aa64pfr0 = t;
157
+ }
158
+
159
+ /*
160
+ * We can assume any KVM supporting CPU is at least a v8
161
* with VFPv4+Neon; this in turn implies most of the other
162
* feature bits.
163
*/
164
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tests/arm-cpu-features.c
167
+++ b/tests/arm-cpu-features.c
168
@@ -XXX,XX +XXX,XX @@ static void sve_tests_sve_off_kvm(const void *data)
169
{
170
QTestState *qts;
171
172
- qts = qtest_init(MACHINE "-accel kvm -cpu max,sve=off");
173
+ qts = qtest_init(MACHINE "-accel kvm -cpu host,sve=off");
174
105
175
/*
106
/*
176
* We don't know if this host supports SVE so we don't
107
* ResetSMEState.
177
@@ -XXX,XX +XXX,XX @@ static void sve_tests_sve_off_kvm(const void *data)
178
* and that using sve<N>=off to explicitly disable vector
179
* lengths is OK too.
180
*/
181
- assert_sve_vls(qts, "max", 0, NULL);
182
- assert_sve_vls(qts, "max", 0, "{ 'sve128': false }");
183
+ assert_sve_vls(qts, "host", 0, NULL);
184
+ assert_sve_vls(qts, "host", 0, "{ 'sve128': false }");
185
186
qtest_quit(qts);
187
}
188
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
189
"We cannot guarantee the CPU type 'cortex-a15' works "
190
"with KVM on this host", NULL);
191
192
- assert_has_feature(qts, "max", "sve");
193
- resp = do_query_no_props(qts, "max");
194
+ assert_has_feature(qts, "host", "sve");
195
+ resp = do_query_no_props(qts, "host");
196
kvm_supports_sve = resp_get_feature(resp, "sve");
197
vls = resp_get_sve_vls(resp);
198
qobject_unref(resp);
199
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
200
sprintf(max_name, "sve%d", max_vq * 128);
201
202
/* Enabling a supported length is of course fine. */
203
- assert_sve_vls(qts, "max", vls, "{ %s: true }", max_name);
204
+ assert_sve_vls(qts, "host", vls, "{ %s: true }", max_name);
205
206
/* Get the next supported length smaller than max-vq. */
207
vq = 64 - __builtin_clzll(vls & ~BIT_ULL(max_vq - 1));
208
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
209
* We have at least one length smaller than max-vq,
210
* so we can disable max-vq.
211
*/
212
- assert_sve_vls(qts, "max", (vls & ~BIT_ULL(max_vq - 1)),
213
+ assert_sve_vls(qts, "host", (vls & ~BIT_ULL(max_vq - 1)),
214
"{ %s: false }", max_name);
215
216
/*
217
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
218
*/
219
sprintf(name, "sve%d", vq * 128);
220
error = g_strdup_printf("cannot disable %s", name);
221
- assert_error(qts, "max", error,
222
+ assert_error(qts, "host", error,
223
"{ %s: true, %s: false }",
224
max_name, name);
225
g_free(error);
226
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
227
vq = __builtin_ffsll(vls);
228
sprintf(name, "sve%d", vq * 128);
229
error = g_strdup_printf("cannot disable %s", name);
230
- assert_error(qts, "max", error, "{ %s: false }", name);
231
+ assert_error(qts, "host", error, "{ %s: false }", name);
232
g_free(error);
233
234
/* Get an unsupported length. */
235
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
236
if (vq <= SVE_MAX_VQ) {
237
sprintf(name, "sve%d", vq * 128);
238
error = g_strdup_printf("cannot enable %s", name);
239
- assert_error(qts, "max", error, "{ %s: true }", name);
240
+ assert_error(qts, "host", error, "{ %s: true }", name);
241
g_free(error);
242
}
243
} else {
244
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
245
} else {
246
assert_has_not_feature(qts, "host", "aarch64");
247
assert_has_not_feature(qts, "host", "pmu");
248
-
249
- assert_has_not_feature(qts, "max", "sve");
250
+ assert_has_not_feature(qts, "host", "sve");
251
}
252
253
qtest_quit(qts);
254
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
255
index XXXXXXX..XXXXXXX 100644
256
--- a/docs/arm-cpu-features.rst
257
+++ b/docs/arm-cpu-features.rst
258
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Examples
259
260
$ qemu-system-aarch64 -M virt -cpu max
261
262
- 3) Only enable the 128-bit vector length::
263
+ 3) When KVM is enabled, implicitly enable all host CPU supported vector
264
+ lengths with the `host` CPU type::
265
+
266
+ $ qemu-system-aarch64 -M virt,accel=kvm -cpu host
267
+
268
+ 4) Only enable the 128-bit vector length::
269
270
$ qemu-system-aarch64 -M virt -cpu max,sve128=on
271
272
- 4) Disable the 512-bit vector length and all larger vector lengths,
273
+ 5) Disable the 512-bit vector length and all larger vector lengths,
274
since 512 is a power-of-two. This results in all the smaller,
275
uninitialized lengths (128, 256, and 384) defaulting to enabled::
276
277
$ qemu-system-aarch64 -M virt -cpu max,sve512=off
278
279
- 5) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
280
+ 6) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
281
282
$ qemu-system-aarch64 -M virt -cpu max,sve128=on,sve256=on,sve512=on
283
284
- 6) The same as (5), but since the 128-bit and 256-bit vector
285
+ 7) The same as (6), but since the 128-bit and 256-bit vector
286
lengths are required for the 512-bit vector length to be enabled,
287
then allow them to be auto-enabled::
288
289
$ qemu-system-aarch64 -M virt -cpu max,sve512=on
290
291
- 7) Do the same as (6), but by first disabling SVE and then re-enabling it::
292
+ 8) Do the same as (7), but by first disabling SVE and then re-enabling it::
293
294
$ qemu-system-aarch64 -M virt -cpu max,sve=off,sve512=on,sve=on
295
296
- 8) Force errors regarding the last vector length::
297
+ 9) Force errors regarding the last vector length::
298
299
$ qemu-system-aarch64 -M virt -cpu max,sve128=off
300
$ qemu-system-aarch64 -M virt -cpu max,sve=off,sve128=off,sve=on
301
@@ -XXX,XX +XXX,XX @@ The examples in "SVE CPU Property Examples" exhibit many ways to select
302
vector lengths which developers may find useful in order to avoid overly
303
verbose command lines. However, the recommended way to select vector
304
lengths is to explicitly enable each desired length. Therefore only
305
-example's (1), (3), and (5) exhibit recommended uses of the properties.
306
+example's (1), (4), and (6) exhibit recommended uses of the properties.
307
308
--
108
--
309
2.20.1
109
2.34.1
310
110
311
111
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This function assumes nothing about the current state of the cpu,
3
Move arm_reset_sve_state() calls to aarch64_set_svcr().
4
and writes the computed value to env->hflags.
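A condensed view of that contract, re-assembled from the helper.c hunk below: the function derives everything it needs from env and only writes the cached value, so it is safe to call from any point where CPU state has changed outside generated code.

    static uint32_t rebuild_hflags_internal(CPUARMState *env)
    {
        int el = arm_current_el(env);
        int fp_el = fp_exception_el(env, el);
        ARMMMUIdx mmu_idx = arm_mmu_idx(env);

        if (is_a64(env)) {
            return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
        } else if (arm_feature(env, ARM_FEATURE_M)) {
            return rebuild_hflags_m32(env, fp_el, mmu_idx);
        } else {
            return rebuild_hflags_a32(env, fp_el, mmu_idx);
        }
    }

    void arm_rebuild_hflags(CPUARMState *env)
    {
        env->hflags = rebuild_hflags_internal(env);
    }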
5
4
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20191023150057.25731-13-richard.henderson@linaro.org
6
Reviewed-by: Fabiano Rosas <farosas@suse.de>
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-id: 20230112102436.1913-5-philmd@linaro.org
9
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
10
[PMD: Split patch in multiple tiny steps]
11
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
13
---
11
target/arm/cpu.h | 6 ++++++
14
target/arm/cpu.h | 1 -
12
target/arm/helper.c | 30 ++++++++++++++++++++++--------
15
linux-user/aarch64/cpu_loop.c | 1 -
13
2 files changed, 28 insertions(+), 8 deletions(-)
16
linux-user/aarch64/signal.c | 8 +-------
17
target/arm/helper.c | 13 +++++++++++++
18
target/arm/sme_helper.c | 10 ----------
19
5 files changed, 14 insertions(+), 19 deletions(-)
14
20
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
23
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
25
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
20
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void
26
void aarch64_sve_change_el(CPUARMState *env, int old_el,
21
*opaque);
27
int new_el, bool el0_a64);
22
28
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask);
23
+/**
29
-void arm_reset_sve_state(CPUARMState *env);
24
+ * arm_rebuild_hflags:
30
25
+ * Rebuild the cached TBFLAGS for arbitrary changed processor state.
31
/*
26
+ */
32
* SVE registers are encoded in KVM's memory in an endianness-invariant format.
27
+void arm_rebuild_hflags(CPUARMState *env);
33
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
28
+
34
index XXXXXXX..XXXXXXX 100644
29
/**
35
--- a/linux-user/aarch64/cpu_loop.c
30
* aa32_vfp_dreg:
36
+++ b/linux-user/aarch64/cpu_loop.c
31
* Return a pointer to the Dn register within env in 32-bit mode.
37
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
38
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
39
if (FIELD_EX64(env->svcr, SVCR, SM)) {
40
arm_rebuild_hflags(env);
41
- arm_reset_sve_state(env);
42
}
43
ret = do_syscall(env,
44
env->xregs[8],
45
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/linux-user/aarch64/signal.c
48
+++ b/linux-user/aarch64/signal.c
49
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
50
env->btype = 2;
51
}
52
53
- /*
54
- * Invoke the signal handler with both SM and ZA disabled.
55
- * When clearing SM, ResetSVEState, per SMSTOP.
56
- */
57
+ /* Invoke the signal handler with both SM and ZA disabled. */
58
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
59
- if (FIELD_EX64(env->svcr, SVCR, SM)) {
60
- arm_reset_sve_state(env);
61
- }
62
if (env->svcr) {
63
arm_rebuild_hflags(env);
64
}
32
diff --git a/target/arm/helper.c b/target/arm/helper.c
65
diff --git a/target/arm/helper.c b/target/arm/helper.c
33
index XXXXXXX..XXXXXXX 100644
66
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/helper.c
67
--- a/target/arm/helper.c
35
+++ b/target/arm/helper.c
68
+++ b/target/arm/helper.c
36
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
69
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
37
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
70
return CP_ACCESS_OK;
38
}
71
}
39
72
40
+static uint32_t rebuild_hflags_internal(CPUARMState *env)
73
+/* ResetSVEState */
74
+static void arm_reset_sve_state(CPUARMState *env)
41
+{
75
+{
42
+ int el = arm_current_el(env);
76
+ memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
43
+ int fp_el = fp_exception_el(env, el);
77
+ /* Recall that FFR is stored as pregs[16]. */
44
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
78
+ memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
45
+
79
+ vfp_set_fpcr(env, 0x0800009f);
46
+ if (is_a64(env)) {
47
+ return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
48
+ } else if (arm_feature(env, ARM_FEATURE_M)) {
49
+ return rebuild_hflags_m32(env, fp_el, mmu_idx);
50
+ } else {
51
+ return rebuild_hflags_a32(env, fp_el, mmu_idx);
52
+ }
53
+}
80
+}
54
+
81
+
55
+void arm_rebuild_hflags(CPUARMState *env)
82
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
56
+{
83
{
57
+ env->hflags = rebuild_hflags_internal(env);
84
uint64_t change = (env->svcr ^ new) & mask;
58
+}
85
86
env->svcr ^= change;
59
+
87
+
60
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
88
+ if (change & R_SVCR_SM_MASK) {
61
target_ulong *cs_base, uint32_t *pflags)
89
+ arm_reset_sve_state(env);
90
+ }
91
}
92
93
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
94
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/sme_helper.c
97
+++ b/target/arm/sme_helper.c
98
@@ -XXX,XX +XXX,XX @@
99
#include "vec_internal.h"
100
#include "sve_ldst_internal.h"
101
102
-/* ResetSVEState */
103
-void arm_reset_sve_state(CPUARMState *env)
104
-{
105
- memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
106
- /* Recall that FFR is stored as pregs[16]. */
107
- memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
108
- vfp_set_fpcr(env, 0x0800009f);
109
-}
110
-
111
void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
62
{
112
{
63
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
113
if (i == FIELD_EX64(env->svcr, SVCR, SM)) {
64
- int current_el = arm_current_el(env);
114
return;
65
- int fp_el = fp_exception_el(env, current_el);
115
}
66
uint32_t flags, pstate_for_ss;
116
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
67
117
- arm_reset_sve_state(env);
68
+ flags = rebuild_hflags_internal(env);
118
arm_rebuild_hflags(env);
69
+
119
}
70
if (is_a64(env)) {
120
71
*pc = env->pc;
72
- flags = rebuild_hflags_a64(env, current_el, fp_el, mmu_idx);
73
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
74
flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
75
}
76
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
77
*pc = env->regs[15];
78
79
if (arm_feature(env, ARM_FEATURE_M)) {
80
- flags = rebuild_hflags_m32(env, fp_el, mmu_idx);
81
-
82
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
83
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
84
!= env->v7m.secure) {
85
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
86
flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
87
}
88
} else {
89
- flags = rebuild_hflags_a32(env, fp_el, mmu_idx);
90
-
91
/*
92
* Note that XSCALE_CPAR shares bits with VECSTRIDE.
93
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
94
--
121
--
95
2.20.1
122
2.34.1
96
123
97
124
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Create a function to compute the values of the TBFLAG_ANY bits
4
that will be cached, and are used by A-profile.
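A condensed view of that helper as added in the helper.c hunk below; it packs the A-profile-only TBFLAG_ANY bits once, so the per-mode rebuild functions no longer need to re-test ARM_FEATURE_M.

    /* Condensed from the hunks below; illustrative only. */
    static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
    {
        int flags = 0;

        flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                           arm_debug_target_el(env));
        return flags;
    }

On the other side of this diff, the aarch64_set_svcr() hunk zeroes the ZA storage with the condition change & new & R_SVCR_ZA_MASK, i.e. only on a 0->1 transition of PSTATE.ZA; a tiny truth-table sketch (hypothetical helper, not part of the patch):

    /*
     * old  new  change(=old^new)  change & new
     *  0    0         0                0      -> nothing to do
     *  1    1         0                0      -> nothing to do
     *  1    0         1                0      -> disable: storage becomes
     *                                            inaccessible, no need to zero
     *  0    1         1                1      -> enable: zero the ZA array
     */
    static bool za_needs_zeroing(uint64_t old_svcr, uint64_t new_svcr)
    {
        uint64_t change = (old_svcr ^ new_svcr) & R_SVCR_ZA_MASK;

        return change & new_svcr;
    }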
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20191023150057.25731-9-richard.henderson@linaro.org
4
Reviewed-by: Fabiano Rosas <farosas@suse.de>
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-id: 20230112102436.1913-6-philmd@linaro.org
7
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
8
[PMD: Split patch in multiple tiny steps]
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
target/arm/helper.c | 20 ++++++++++++--------
12
target/arm/helper.c | 12 ++++++++++++
12
1 file changed, 12 insertions(+), 8 deletions(-)
13
target/arm/sme_helper.c | 12 ------------
14
2 files changed, 12 insertions(+), 12 deletions(-)
13
15
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
18
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
19
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
20
@@ -XXX,XX +XXX,XX @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
19
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
21
if (change & R_SVCR_SM_MASK) {
22
arm_reset_sve_state(env);
23
}
24
+
25
+ /*
26
+ * ResetSMEState.
27
+ *
28
+ * SetPSTATE_ZA zeros on enable and disable. We can zero this only
29
+ * on enable: while disabled, the storage is inaccessible and the
30
+ * value does not matter. We're not saving the storage in vmstate
31
+ * when disabled either.
32
+ */
33
+ if (change & new & R_SVCR_ZA_MASK) {
34
+ memset(env->zarray, 0, sizeof(env->zarray));
35
+ }
20
}
36
}
21
37
22
+static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
38
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
23
+{
39
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
24
+ int flags = 0;
40
index XXXXXXX..XXXXXXX 100644
25
+
41
--- a/target/arm/sme_helper.c
26
+ flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
42
+++ b/target/arm/sme_helper.c
27
+ arm_debug_target_el(env));
43
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
28
+ return flags;
44
return;
29
+}
45
}
30
+
46
aarch64_set_svcr(env, 0, R_SVCR_ZA_MASK);
31
static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
47
-
32
ARMMMUIdx mmu_idx)
48
- /*
33
{
49
- * ResetSMEState.
34
- return rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
50
- *
35
+ uint32_t flags = rebuild_hflags_aprofile(env);
51
- * SetPSTATE_ZA zeros on enable and disable. We can zero this only
36
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
52
- * on enable: while disabled, the storage is inaccessible and the
53
- * value does not matter. We're not saving the storage in vmstate
54
- * when disabled either.
55
- */
56
- if (i) {
57
- memset(env->zarray, 0, sizeof(env->zarray));
58
- }
59
arm_rebuild_hflags(env);
37
}
60
}
38
61
39
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
40
ARMMMUIdx mmu_idx)
41
{
42
+ uint32_t flags = rebuild_hflags_aprofile(env);
43
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
44
ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
45
- uint32_t flags = 0;
46
uint64_t sctlr;
47
int tbii, tbid;
48
49
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
50
}
51
}
52
53
- if (!arm_feature(env, ARM_FEATURE_M)) {
54
- int target_el = arm_debug_target_el(env);
55
-
56
- flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
57
- }
58
-
59
*pflags = flags;
60
*cs_base = 0;
61
}
62
--
62
--
63
2.20.1
63
2.34.1
64
64
65
65
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Begin setting, but not relying upon, env->hflags.
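One example of a call site added by this patch, condensed from the machine.c hunk below: the cached flags are not migrated, so they are recomputed from the freshly loaded register state before any code runs on the destination.

    static int cpu_post_load(void *opaque, int version_id)
    {
        ARMCPU *cpu = opaque;

        /* ... existing register fix-ups elided ... */

        /* hflags are not in the migration stream; rebuild them here. */
        arm_rebuild_hflags(&cpu->env);

        return 0;
    }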
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20191023150057.25731-17-richard.henderson@linaro.org
4
Reviewed-by: Fabiano Rosas <farosas@suse.de>
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-id: 20230112102436.1913-7-philmd@linaro.org
7
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
8
[PMD: Split patch in multiple tiny steps]
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
11
---
10
linux-user/syscall.c | 1 +
12
linux-user/aarch64/cpu_loop.c | 8 +-------
11
target/arm/cpu.c | 1 +
13
linux-user/aarch64/signal.c | 3 ---
12
target/arm/helper-a64.c | 3 +++
14
target/arm/helper.c | 6 +++++-
13
target/arm/helper.c | 2 ++
15
target/arm/sme_helper.c | 8 --------
14
target/arm/machine.c | 1 +
16
4 files changed, 6 insertions(+), 19 deletions(-)
15
target/arm/op_helper.c | 1 +
16
6 files changed, 9 insertions(+)
17
17
18
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
18
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
19
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
20
--- a/linux-user/syscall.c
20
--- a/linux-user/aarch64/cpu_loop.c
21
+++ b/linux-user/syscall.c
21
+++ b/linux-user/aarch64/cpu_loop.c
22
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
22
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
23
aarch64_sve_narrow_vq(env, vq);
23
24
}
24
switch (trapnr) {
25
env->vfp.zcr_el[1] = vq - 1;
25
case EXCP_SWI:
26
+ arm_rebuild_hflags(env);
26
- /*
27
ret = vq * 16;
27
- * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
28
}
28
- * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
29
return ret;
29
- */
30
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
30
+ /* On syscall, PSTATE.ZA is preserved, PSTATE.SM is cleared. */
31
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
32
- if (FIELD_EX64(env->svcr, SVCR, SM)) {
33
- arm_rebuild_hflags(env);
34
- }
35
ret = do_syscall(env,
36
env->xregs[8],
37
env->xregs[0],
38
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
31
index XXXXXXX..XXXXXXX 100644
39
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/cpu.c
40
--- a/linux-user/aarch64/signal.c
33
+++ b/target/arm/cpu.c
41
+++ b/linux-user/aarch64/signal.c
34
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
42
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
35
43
36
hw_breakpoint_update_all(cpu);
44
/* Invoke the signal handler with both SM and ZA disabled. */
37
hw_watchpoint_update_all(cpu);
45
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
38
+ arm_rebuild_hflags(env);
46
- if (env->svcr) {
39
}
47
- arm_rebuild_hflags(env);
40
48
- }
41
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
49
42
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
50
if (info) {
43
index XXXXXXX..XXXXXXX 100644
51
tswap_siginfo(&frame->info, info);
44
--- a/target/arm/helper-a64.c
45
+++ b/target/arm/helper-a64.c
46
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
47
} else {
48
env->regs[15] = new_pc & ~0x3;
49
}
50
+ helper_rebuild_hflags_a32(env, new_el);
51
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
52
"AArch32 EL%d PC 0x%" PRIx32 "\n",
53
cur_el, new_el, env->regs[15]);
54
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
55
}
56
aarch64_restore_sp(env, new_el);
57
env->pc = new_pc;
58
+ helper_rebuild_hflags_a64(env, new_el);
59
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
60
"AArch64 EL%d PC 0x%" PRIx64 "\n",
61
cur_el, new_el, env->pc);
62
}
63
+
64
/*
65
* Note that cur_el can never be 0. If new_el is 0, then
66
* el0_a64 is return_to_aa64, else el0_a64 is ignored.
67
diff --git a/target/arm/helper.c b/target/arm/helper.c
52
diff --git a/target/arm/helper.c b/target/arm/helper.c
68
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/helper.c
54
--- a/target/arm/helper.c
70
+++ b/target/arm/helper.c
55
+++ b/target/arm/helper.c
71
@@ -XXX,XX +XXX,XX @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
56
@@ -XXX,XX +XXX,XX @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
72
env->regs[14] = env->regs[15] + offset;
57
{
58
uint64_t change = (env->svcr ^ new) & mask;
59
60
+ if (change == 0) {
61
+ return;
62
+ }
63
env->svcr ^= change;
64
65
if (change & R_SVCR_SM_MASK) {
66
@@ -XXX,XX +XXX,XX @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
67
if (change & new & R_SVCR_ZA_MASK) {
68
memset(env->zarray, 0, sizeof(env->zarray));
73
}
69
}
74
env->regs[15] = newpc;
70
+
75
+ arm_rebuild_hflags(env);
71
+ arm_rebuild_hflags(env);
76
}
72
}
77
73
78
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
74
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
79
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
75
@@ -XXX,XX +XXX,XX @@ static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
80
pstate_write(env, PSTATE_DAIF | new_mode);
76
helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
81
env->aarch64 = 1;
77
helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
82
aarch64_restore_sp(env, new_el);
78
aarch64_set_svcr(env, value, -1);
83
+ helper_rebuild_hflags_a64(env, new_el);
79
- arm_rebuild_hflags(env);
84
80
}
85
env->pc = addr;
81
86
82
static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
87
diff --git a/target/arm/machine.c b/target/arm/machine.c
83
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
88
index XXXXXXX..XXXXXXX 100644
84
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/machine.c
85
--- a/target/arm/sme_helper.c
90
+++ b/target/arm/machine.c
86
+++ b/target/arm/sme_helper.c
91
@@ -XXX,XX +XXX,XX @@ static int cpu_post_load(void *opaque, int version_id)
87
@@ -XXX,XX +XXX,XX @@
92
if (!kvm_enabled()) {
88
93
pmu_op_finish(&cpu->env);
89
void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
94
}
90
{
95
+ arm_rebuild_hflags(&cpu->env);
91
- if (i == FIELD_EX64(env->svcr, SVCR, SM)) {
96
92
- return;
97
return 0;
93
- }
94
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
95
- arm_rebuild_hflags(env);
98
}
96
}
99
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
97
100
index XXXXXXX..XXXXXXX 100644
98
void helper_set_pstate_za(CPUARMState *env, uint32_t i)
101
--- a/target/arm/op_helper.c
99
{
102
+++ b/target/arm/op_helper.c
100
- if (i == FIELD_EX64(env->svcr, SVCR, ZA)) {
103
@@ -XXX,XX +XXX,XX @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
101
- return;
104
* state. Do the masking now.
102
- }
105
*/
103
aarch64_set_svcr(env, 0, R_SVCR_ZA_MASK);
106
env->regs[15] &= (env->thumb ? ~1 : ~3);
104
- arm_rebuild_hflags(env);
107
+ arm_rebuild_hflags(env);
105
}
108
106
109
qemu_mutex_lock_iothread();
107
void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
110
arm_call_el_change_hook(env_archcpu(env));
111
--
108
--
112
2.20.1
109
2.34.1
113
110
114
111
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Set TBFLAG_ANY.BE_DATA in rebuild_hflags_common_32 and
3
Unify the two helper_set_pstate_{sm,za} in this function.
4
rebuild_hflags_a64 instead of rebuild_hflags_common, where we do
4
Do not call helper_* functions from svcr_write.
5
not need to re-test is_a64() nor re-compute the various inputs.
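For readers unfamiliar with the BE32 modelling referred to in the cpu.h hunk below: in system emulation QEMU keeps data accesses little-endian and instead adjusts the low address bits with a size-dependent XOR (word-invariant big-endianness), while user-mode BE32 and CPSR.E select byte-swapped data accesses. The helper below is purely illustrative (not a QEMU function) and assumes aligned accesses no wider than 32 bits.

    #include <stdint.h>

    /*
     * Word-invariant big-endian addressing on a 32-bit bus:
     * the byte at BE address A is found at LE address A ^ 3,
     * a halfword at A ^ 2, and an aligned word needs no change.
     */
    static uint32_t be32_adjust_addr(uint32_t addr, unsigned size_in_bytes)
    {
        switch (size_in_bytes) {
        case 1:
            return addr ^ 3;
        case 2:
            return addr ^ 2;
        default:
            return addr;    /* aligned 4-byte accesses are unchanged */
        }
    }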
6
5
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20191023150057.25731-5-richard.henderson@linaro.org
7
Reviewed-by: Fabiano Rosas <farosas@suse.de>
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Message-id: 20230112102436.1913-8-philmd@linaro.org
10
Message-Id: <20230112004322.161330-1-richard.henderson@linaro.org>
11
[PMD: Split patch in multiple tiny steps]
12
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
14
---
12
target/arm/cpu.h | 49 +++++++++++++++++++++++++++------------------
15
target/arm/helper-sme.h | 3 +--
13
target/arm/helper.c | 16 +++++++++++----
16
target/arm/helper.c | 2 --
14
2 files changed, 42 insertions(+), 23 deletions(-)
17
target/arm/sme_helper.c | 9 ++-------
18
target/arm/translate-a64.c | 10 ++--------
19
4 files changed, 5 insertions(+), 19 deletions(-)
15
20
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
17
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
23
--- a/target/arm/helper-sme.h
19
+++ b/target/arm/cpu.h
24
+++ b/target/arm/helper-sme.h
20
@@ -XXX,XX +XXX,XX @@ static inline uint64_t arm_sctlr(CPUARMState *env, int el)
25
@@ -XXX,XX +XXX,XX @@
21
}
26
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
22
}
27
*/
23
28
24
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
29
-DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
25
+ bool sctlr_b)
30
-DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
26
+{
31
+DEF_HELPER_FLAGS_3(set_svcr, TCG_CALL_NO_RWG, void, env, i32, i32)
27
+#ifdef CONFIG_USER_ONLY
32
28
+ /*
33
DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
29
+ * In system mode, BE32 is modelled in line with the
30
+ * architecture (as word-invariant big-endianness), where loads
31
+ * and stores are done little endian but from addresses which
32
+ * are adjusted by XORing with the appropriate constant. So the
33
+ * endianness to use for the raw data access is not affected by
34
+ * SCTLR.B.
35
+ * In user mode, however, we model BE32 as byte-invariant
36
+ * big-endianness (because user-only code cannot tell the
37
+ * difference), and so we need to use a data access endianness
38
+ * that depends on SCTLR.B.
39
+ */
40
+ if (sctlr_b) {
41
+ return true;
42
+ }
43
+#endif
44
+ /* In 32bit endianness is determined by looking at CPSR's E bit */
45
+ return env->uncached_cpsr & CPSR_E;
46
+}
47
+
48
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
49
+{
50
+ return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
51
+}
52
53
/* Return true if the processor is in big-endian mode. */
54
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
55
{
56
- /* In 32bit endianness is determined by looking at CPSR's E bit */
57
if (!is_a64(env)) {
58
- return
59
-#ifdef CONFIG_USER_ONLY
60
- /* In system mode, BE32 is modelled in line with the
61
- * architecture (as word-invariant big-endianness), where loads
62
- * and stores are done little endian but from addresses which
63
- * are adjusted by XORing with the appropriate constant. So the
64
- * endianness to use for the raw data access is not affected by
65
- * SCTLR.B.
66
- * In user mode, however, we model BE32 as byte-invariant
67
- * big-endianness (because user-only code cannot tell the
68
- * difference), and so we need to use a data access endianness
69
- * that depends on SCTLR.B.
70
- */
71
- arm_sctlr_b(env) ||
72
-#endif
73
- ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
74
+ return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
75
} else {
76
int cur_el = arm_current_el(env);
77
uint64_t sctlr = arm_sctlr(env, cur_el);
78
-
79
- return (sctlr & (cur_el ? SCTLR_EE : SCTLR_E0E)) != 0;
80
+ return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
81
}
82
}
83
34
84
diff --git a/target/arm/helper.c b/target/arm/helper.c
35
diff --git a/target/arm/helper.c b/target/arm/helper.c
85
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
86
--- a/target/arm/helper.c
37
--- a/target/arm/helper.c
87
+++ b/target/arm/helper.c
38
+++ b/target/arm/helper.c
88
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
39
@@ -XXX,XX +XXX,XX @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
89
flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
40
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
90
arm_to_core_mmu_idx(mmu_idx));
41
uint64_t value)
91
92
- if (arm_cpu_data_is_big_endian(env)) {
93
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
94
- }
95
if (arm_singlestep_active(env)) {
96
flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
97
}
98
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
99
static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
100
ARMMMUIdx mmu_idx, uint32_t flags)
101
{
42
{
102
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
43
- helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
103
+ bool sctlr_b = arm_sctlr_b(env);
44
- helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
104
+
45
aarch64_set_svcr(env, value, -1);
105
+ if (sctlr_b) {
46
}
106
+ flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
47
107
+ }
48
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
108
+ if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
49
index XXXXXXX..XXXXXXX 100644
109
+ flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
50
--- a/target/arm/sme_helper.c
110
+ }
51
+++ b/target/arm/sme_helper.c
111
flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
52
@@ -XXX,XX +XXX,XX @@
112
53
#include "vec_internal.h"
113
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
54
#include "sve_ldst_internal.h"
114
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
55
115
56
-void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
116
sctlr = arm_sctlr(env, el);
57
+void helper_set_svcr(CPUARMState *env, uint32_t val, uint32_t mask)
117
58
{
118
+ if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
59
- aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
119
+ flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
60
-}
120
+ }
61
-
121
+
62
-void helper_set_pstate_za(CPUARMState *env, uint32_t i)
122
if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
63
-{
123
/*
64
- aarch64_set_svcr(env, 0, R_SVCR_ZA_MASK);
124
* In order to save space in flags, we record only whether
65
+ aarch64_set_svcr(env, val, mask);
66
}
67
68
void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
69
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-a64.c
72
+++ b/target/arm/translate-a64.c
73
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
74
75
if ((old ^ new) & msk) {
76
/* At least one bit changes. */
77
- bool i = crm & 1;
78
-
79
- if ((crm & 2) && i != s->pstate_sm) {
80
- gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
81
- }
82
- if ((crm & 4) && i != s->pstate_za) {
83
- gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
84
- }
85
+ gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
86
+ tcg_constant_i32(msk));
87
} else {
88
s->base.is_jmp = DISAS_NEXT;
89
}
125
--
90
--
126
2.20.1
91
2.34.1
127
92
128
93
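A note on the new helper_set_svcr(env, value, mask) interface introduced above: the diff does not show the body of aarch64_set_svcr(), so the following standalone C sketch only illustrates the usual value/mask update idiom that such an interface suggests, assuming SVCR.SM sits in bit 0 and SVCR.ZA in bit 1 (matching R_SVCR_SM_MASK and R_SVCR_ZA_MASK in the patch). It is an illustration, not QEMU code:

    #include <assert.h>
    #include <stdint.h>

    /* Generic masked update: only bits set in 'mask' are taken from 'value'. */
    static uint64_t masked_update(uint64_t old, uint64_t value, uint64_t mask)
    {
        return (old & ~mask) | (value & mask);
    }

    int main(void)
    {
        uint64_t svcr = 0;                /* pretend SVCR: SM = bit 0, ZA = bit 1 */

        svcr = masked_update(svcr, 3, 3); /* set both SM and ZA */
        assert(svcr == 3);

        svcr = masked_update(svcr, 0, 1); /* clear only SM, leave ZA untouched */
        assert(svcr == 2);
        return 0;
    }

This matches how the translator passes both the new value and the affected mask to gen_helper_set_svcr() in the hunk above.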
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Create a function to compute the values of the TBFLAG_A32 bits
that will be cached, and are used by M-profile.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 45 ++++++++++++++++++++++++++++---------------
1 file changed, 30 insertions(+), 15 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

+static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ uint32_t flags = 0;
+
+ if (arm_v7m_is_handler_mode(env)) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
+ }
+
+ /*
+ * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
+ * is suppressing them because the requested execution priority
+ * is less than 0.
+ */
+ if (arm_feature(env, ARM_FEATURE_V8) &&
+ !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
+ (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
ARMMMUIdx mmu_idx)
{
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
} else {
*pc = env->regs[15];
- flags = rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ flags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+ } else {
+ flags = rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
+ }
+
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}

- if (arm_v7m_is_handler_mode(env)) {
- flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
- }
-
- /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
- * suppressing them because the requested execution priority is less than 0.
- */
- if (arm_feature(env, ARM_FEATURE_V8) &&
- arm_feature(env, ARM_FEATURE_M) &&
- !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
- flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
- }
-
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
--
2.20.1
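For readers following the rebuild_hflags work: these patches rely on QEMU's FIELD_DP32()/FIELD_EX32() macros (from include/hw/registerfields.h) to deposit and extract named bit fields in the cached flags word. A minimal standalone sketch of the same deposit/extract idea follows; the field names and bit positions are invented for the example and are not the real TBFLAG layout:

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented fields: a 1-bit field at bit 5 and a 2-bit field at bits 6..7. */
    #define EXAMPLE_THUMB_SHIFT   5
    #define EXAMPLE_THUMB_MASK    (1u << EXAMPLE_THUMB_SHIFT)
    #define EXAMPLE_VECLEN_SHIFT  6
    #define EXAMPLE_VECLEN_MASK   (3u << EXAMPLE_VECLEN_SHIFT)

    /* What a FIELD_DP32(flags, REG, FIELD, val) style deposit expands to. */
    static uint32_t field_dp32(uint32_t flags, uint32_t mask,
                               unsigned shift, uint32_t val)
    {
        return (flags & ~mask) | ((val << shift) & mask);
    }

    /* What a FIELD_EX32(flags, REG, FIELD) style extract expands to. */
    static uint32_t field_ex32(uint32_t flags, uint32_t mask, unsigned shift)
    {
        return (flags & mask) >> shift;
    }

    int main(void)
    {
        uint32_t flags = 0;

        flags = field_dp32(flags, EXAMPLE_THUMB_MASK, EXAMPLE_THUMB_SHIFT, 1);
        flags = field_dp32(flags, EXAMPLE_VECLEN_MASK, EXAMPLE_VECLEN_SHIFT, 2);

        assert(field_ex32(flags, EXAMPLE_THUMB_MASK, EXAMPLE_THUMB_SHIFT) == 1);
        assert(field_ex32(flags, EXAMPLE_VECLEN_MASK, EXAMPLE_VECLEN_SHIFT) == 2);

        printf("flags = 0x%" PRIx32 "\n", flags);  /* prints flags = 0xa0 */
        return 0;
    }

Caching these packed flags once in env->hflags, instead of recomputing every field per translation block, is the point of the series.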
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Hoist the computation of some TBFLAG_A32 bits that only apply to
3
Conversion to probe_access_full missed applying the page offset.
4
M-profile under a single test for ARM_FEATURE_M.
5
4
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Fixes: b8967ddf ("target/arm: Use probe_access_full for MTE")
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1416
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20191023150057.25731-7-richard.henderson@linaro.org
8
Message-id: 20230114031213.2970349-1-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
target/arm/helper.c | 49 +++++++++++++++++++++------------------------
12
target/arm/mte_helper.c | 2 +-
12
1 file changed, 23 insertions(+), 26 deletions(-)
13
1 file changed, 1 insertion(+), 1 deletion(-)
13
14
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
--- a/target/arm/mte_helper.c
17
+++ b/target/arm/helper.c
18
+++ b/target/arm/mte_helper.c
18
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
19
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
19
20
* Remember these values across the second lookup below,
20
if (arm_feature(env, ARM_FEATURE_M)) {
21
* which may invalidate this pointer via tlb resize.
21
flags = rebuild_hflags_m32(env, fp_el, mmu_idx);
22
*/
22
+
23
- ptr_paddr = full->phys_addr;
23
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
24
+ ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
24
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
25
attrs = full->attrs;
25
+ != env->v7m.secure) {
26
full = NULL;
26
+ flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
27
+ }
28
+
29
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
30
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
31
+ (env->v7m.secure &&
32
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
33
+ /*
34
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
35
+ * active FP context; we must create a new FP context before
36
+ * executing any FP insn.
37
+ */
38
+ flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
39
+ }
40
+
41
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
42
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
43
+ flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
44
+ }
45
} else {
46
flags = rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
47
}
48
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
49
}
50
}
51
52
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
53
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
54
- flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
55
- }
56
-
57
- if (arm_feature(env, ARM_FEATURE_M) &&
58
- (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
59
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
60
- (env->v7m.secure &&
61
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
62
- /*
63
- * ASPEN is set, but FPCA/SFPA indicate that there is no active
64
- * FP context; we must create a new FP context before executing
65
- * any FP insn.
66
- */
67
- flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
68
- }
69
-
70
- if (arm_feature(env, ARM_FEATURE_M)) {
71
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
72
-
73
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
74
- flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
75
- }
76
- }
77
-
78
if (!arm_feature(env, ARM_FEATURE_M)) {
79
int target_el = arm_debug_target_el(env);
80
27
81
--
28
--
82
2.20.1
29
2.34.1
83
84
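The MTE fix above is ordinary page-base-plus-offset arithmetic: full->phys_addr taken from the TLB entry is page-aligned, so the low bits of the original pointer must be OR-ed back in. A tiny standalone sketch of that composition (the addresses and the 4K page size are made up for the example; this is not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_BITS 12                              /* assume 4K pages */
    #define EXAMPLE_PAGE_MASK (~(uint64_t)0 << EXAMPLE_PAGE_BITS)

    int main(void)
    {
        uint64_t ptr       = 0x0000aaaadeadbeefULL;  /* guest virtual address */
        uint64_t page_phys = 0x40086000ULL;          /* page-aligned walk result */

        /* Keep the page frame from the walk, re-attach the in-page offset. */
        uint64_t ptr_paddr = page_phys | (ptr & ~EXAMPLE_PAGE_MASK);

        printf("0x%" PRIx64 "\n", ptr_paddr);        /* prints 0x40086eef */
        return 0;
    }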
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Currently a trivial wrapper for rebuild_hflags_common_32.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

+static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
+}
+
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
ARMMMUIdx mmu_idx)
{
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
}
} else {
- flags = rebuild_hflags_common_32(env, fp_el, mmu_idx, 0);
+ flags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

We do not need to compute any of these values for M-profile.
Further, XSCALE_CPAR overlaps VECSTRIDE so obviously the two
sets must be mutually exclusive.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
} else {
flags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+
+ /*
+ * Note that XSCALE_CPAR shares bits with VECSTRIDE.
+ * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
+ */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ flags = FIELD_DP32(flags, TBFLAG_A32,
+ XSCALE_CPAR, env->cp15.c15_cpar);
+ } else {
+ flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
+ env->vfp.vec_len);
+ flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
+ env->vfp.vec_stride);
+ }
}

flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
- flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
- flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
|| arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
}
- /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- flags = FIELD_DP32(flags, TBFLAG_A32,
- XSCALE_CPAR, env->cp15.c15_cpar);
- }
}

/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Hoist the variable load for PSTATE into the existing test vs is_a64.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
int current_el = arm_current_el(env);
int fp_el = fp_exception_el(env, current_el);
- uint32_t flags;
+ uint32_t flags, pstate_for_ss;

if (is_a64(env)) {
*pc = env->pc;
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
}
+ pstate_for_ss = env->pstate;
} else {
*pc = env->regs[15];

@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|| arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
}
+ pstate_for_ss = env->uncached_cpsr;
}

- /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
* 0 x Inactive (the TB flag for SS is always 0)
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
* 1 1 Active-not-pending
* SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
*/
- if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE)) {
- if (is_a64(env)) {
- if (env->pstate & PSTATE_SS) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
- }
- } else {
- if (env->uncached_cpsr & PSTATE_SS) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
- }
- }
+ if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
+ (pstate_for_ss & PSTATE_SS)) {
+ flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
}

*pflags = flags;
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Avoid calling arm_current_el() twice.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/internals.h | 9 +++++++++
target/arm/helper.c | 12 +++++++-----
2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_virq(ARMCPU *cpu);
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);

+/**
+ * arm_mmu_idx_el:
+ * @env: The cpu environment
+ * @el: The EL to use.
+ *
+ * Return the full ARMMMUIdx for the translation regime for EL.
+ */
+ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
+
/**
* arm_mmu_idx:
* @env: The cpu environment
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
}
#endif

-ARMMMUIdx arm_mmu_idx(CPUARMState *env)
+ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
- int el;
-
if (arm_feature(env, ARM_FEATURE_M)) {
return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
}

- el = arm_current_el(env);
if (el < 2 && arm_is_secure_below_el3(env)) {
return ARMMMUIdx_S1SE0 + el;
} else {
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
}
}

+ARMMMUIdx arm_mmu_idx(CPUARMState *env)
+{
+ return arm_mmu_idx_el(env, arm_current_el(env));
+}
+
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
return arm_to_core_mmu_idx(arm_mmu_idx(env));
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
int el = arm_current_el(env);
int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

if (is_a64(env)) {
return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
--
2.20.1
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

By performing this store early, we avoid having to save and restore
the register holding the address around any function calls.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
{
uint32_t flags, pstate_for_ss;

+ *cs_base = 0;
flags = rebuild_hflags_internal(env);

if (is_a64(env)) {
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}

*pflags = flags;
- *cs_base = 0;
}

#ifdef TARGET_AARCH64
--
2.20.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Continue setting, but not relying upon, env->hflags.
3
During the conversion, the test against get_phys_addr_lpae got inverted,
4
meaning that successful translations went to the 'failed' label.
4
5
6
Cc: qemu-stable@nongnu.org
7
Fixes: f3639a64f60 ("target/arm: Use softmmu tlbs for page table walking")
8
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1417
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20191023150057.25731-23-richard.henderson@linaro.org
10
Message-id: 20230114054605.2977022-1-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
13
---
10
linux-user/aarch64/cpu_loop.c | 1 +
14
target/arm/ptw.c | 4 ++--
11
1 file changed, 1 insertion(+)
15
1 file changed, 2 insertions(+), 2 deletions(-)
12
16
13
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
17
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/linux-user/aarch64/cpu_loop.c
19
--- a/target/arm/ptw.c
16
+++ b/linux-user/aarch64/cpu_loop.c
20
+++ b/target/arm/ptw.c
17
@@ -XXX,XX +XXX,XX @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
21
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
18
for (i = 1; i < 4; ++i) {
22
};
19
env->cp15.sctlr_el[i] |= SCTLR_EE;
23
GetPhysAddrResult s2 = { };
20
}
24
21
+ arm_rebuild_hflags(env);
25
- if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
22
#endif
26
- false, &s2, fi)) {
23
27
+ if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
24
if (cpu_isar_feature(aa64_pauth, cpu)) {
28
+ false, &s2, fi)) {
29
goto fail;
30
}
31
ptw->out_phys = s2.f.phys_addr;
25
--
32
--
26
2.20.1
33
2.34.1
27
28
1
Switch the grlib_gptimer code away from bottom-half based ptimers to
1
In v7m_exception_taken(), for v8M we set the EXC_RETURN.ES bit if
2
the new transaction-based ptimer API. This just requires adding
2
either the exception targets Secure or if the CPU doesn't implement
3
begin/commit calls around the various places that modify the ptimer
3
the Security Extension. This is incorrect: the v8M Arm ARM specifies
4
state, and using the new ptimer_init() function to create the timer.
4
that the ES bit should be RES0 if the Security Extension is not
5
implemented, and the pseudocode agrees.
5
6
7
Remove the incorrect condition, so that we leave the ES bit 0
8
if the Security Extension isn't implemented.
9
10
This doesn't have any guest-visible effects for our current set of
11
emulated CPUs, because all our v8M CPUs implement the Security
12
Extension; but it's worth fixing in case we add a v8M CPU without
13
the extension in future.
14
15
Reported-by: Igor Kotrasinski <i.kotrasinsk@samsung.com>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20191021134357.14266-3-peter.maydell@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
18
---
12
hw/timer/grlib_gptimer.c | 28 ++++++++++++++++++++++++----
19
target/arm/m_helper.c | 2 +-
13
1 file changed, 24 insertions(+), 4 deletions(-)
20
1 file changed, 1 insertion(+), 1 deletion(-)
14
21
15
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
22
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
16
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/timer/grlib_gptimer.c
24
--- a/target/arm/m_helper.c
18
+++ b/hw/timer/grlib_gptimer.c
25
+++ b/target/arm/m_helper.c
19
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
20
#include "hw/irq.h"
27
}
21
#include "hw/ptimer.h"
28
22
#include "hw/qdev-properties.h"
29
lr &= ~R_V7M_EXCRET_ES_MASK;
23
-#include "qemu/main-loop.h"
30
- if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
24
#include "qemu/module.h"
31
+ if (targets_secure) {
25
32
lr |= R_V7M_EXCRET_ES_MASK;
26
#include "trace.h"
33
}
27
@@ -XXX,XX +XXX,XX @@ typedef struct GPTimer GPTimer;
34
lr &= ~R_V7M_EXCRET_SPSEL_MASK;
28
typedef struct GPTimerUnit GPTimerUnit;
29
30
struct GPTimer {
31
- QEMUBH *bh;
32
struct ptimer_state *ptimer;
33
34
qemu_irq irq;
35
@@ -XXX,XX +XXX,XX @@ struct GPTimerUnit {
36
uint32_t config;
37
};
38
39
+static void grlib_gptimer_tx_begin(GPTimer *timer)
40
+{
41
+ ptimer_transaction_begin(timer->ptimer);
42
+}
43
+
44
+static void grlib_gptimer_tx_commit(GPTimer *timer)
45
+{
46
+ ptimer_transaction_commit(timer->ptimer);
47
+}
48
+
49
+/* Must be called within grlib_gptimer_tx_begin/commit block */
50
static void grlib_gptimer_enable(GPTimer *timer)
51
{
52
assert(timer != NULL);
53
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_enable(GPTimer *timer)
54
ptimer_run(timer->ptimer, 1);
55
}
56
57
+/* Must be called within grlib_gptimer_tx_begin/commit block */
58
static void grlib_gptimer_restart(GPTimer *timer)
59
{
60
assert(timer != NULL);
61
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_set_scaler(GPTimerUnit *unit, uint32_t scaler)
62
trace_grlib_gptimer_set_scaler(scaler, value);
63
64
for (i = 0; i < unit->nr_timers; i++) {
65
+ ptimer_transaction_begin(unit->timers[i].ptimer);
66
ptimer_set_freq(unit->timers[i].ptimer, value);
67
+ ptimer_transaction_commit(unit->timers[i].ptimer);
68
}
69
}
70
71
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_write(void *opaque, hwaddr addr,
72
switch (timer_addr) {
73
case COUNTER_OFFSET:
74
trace_grlib_gptimer_writel(id, addr, value);
75
+ grlib_gptimer_tx_begin(&unit->timers[id]);
76
unit->timers[id].counter = value;
77
grlib_gptimer_enable(&unit->timers[id]);
78
+ grlib_gptimer_tx_commit(&unit->timers[id]);
79
return;
80
81
case COUNTER_RELOAD_OFFSET:
82
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_write(void *opaque, hwaddr addr,
83
/* gptimer_restart calls gptimer_enable, so if "enable" and "load"
84
bits are present, we just have to call restart. */
85
86
+ grlib_gptimer_tx_begin(&unit->timers[id]);
87
if (value & GPTIMER_LOAD) {
88
grlib_gptimer_restart(&unit->timers[id]);
89
} else if (value & GPTIMER_ENABLE) {
90
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_write(void *opaque, hwaddr addr,
91
value &= ~(GPTIMER_LOAD & GPTIMER_DEBUG_HALT);
92
93
unit->timers[id].config = value;
94
+ grlib_gptimer_tx_commit(&unit->timers[id]);
95
return;
96
97
default:
98
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_reset(DeviceState *d)
99
timer->counter = 0;
100
timer->reload = 0;
101
timer->config = 0;
102
+ ptimer_transaction_begin(timer->ptimer);
103
ptimer_stop(timer->ptimer);
104
ptimer_set_count(timer->ptimer, 0);
105
ptimer_set_freq(timer->ptimer, unit->freq_hz);
106
+ ptimer_transaction_commit(timer->ptimer);
107
}
108
}
109
110
@@ -XXX,XX +XXX,XX @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp)
111
GPTimer *timer = &unit->timers[i];
112
113
timer->unit = unit;
114
- timer->bh = qemu_bh_new(grlib_gptimer_hit, timer);
115
- timer->ptimer = ptimer_init_with_bh(timer->bh, PTIMER_POLICY_DEFAULT);
116
+ timer->ptimer = ptimer_init(grlib_gptimer_hit, timer,
117
+ PTIMER_POLICY_DEFAULT);
118
timer->id = i;
119
120
/* One IRQ line for each timer */
121
sysbus_init_irq(sbd, &timer->irq);
122
123
+ ptimer_transaction_begin(timer->ptimer);
124
ptimer_set_freq(timer->ptimer, unit->freq_hz);
125
+ ptimer_transaction_commit(timer->ptimer);
126
}
127
128
memory_region_init_io(&unit->iomem, OBJECT(unit), &grlib_gptimer_ops,
129
--
35
--
130
2.20.1
36
2.34.1
131
132
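To summarise the transaction-based ptimer conversion pattern used in the grlib_gptimer patch above: creation switches from ptimer_init_with_bh() to ptimer_init() with a callback, and every modification of the ptimer state is bracketed by begin/commit calls. A minimal sketch follows, using only the hw/ptimer.h calls that appear in the diff; the device type and callback names are invented for the illustration and this is not a drop-in device model:

    #include "qemu/osdep.h"
    #include "hw/ptimer.h"

    /* Hypothetical device state, for illustration only. */
    typedef struct ExampleTimer {
        struct ptimer_state *ptimer;
        uint32_t freq_hz;
    } ExampleTimer;

    static void example_timer_hit(void *opaque)
    {
        /* raise the IRQ, reload the counter, etc. */
    }

    static void example_timer_realize(ExampleTimer *t)
    {
        /* New-style creation: a callback + opaque instead of a QEMUBH. */
        t->ptimer = ptimer_init(example_timer_hit, t, PTIMER_POLICY_DEFAULT);

        /* Every change to ptimer state sits inside a begin/commit pair. */
        ptimer_transaction_begin(t->ptimer);
        ptimer_set_freq(t->ptimer, t->freq_hz);
        ptimer_run(t->ptimer, 1);    /* one-shot, as in the grlib code above */
        ptimer_transaction_commit(t->ptimer);
    }

The same begin/commit bracketing is what the patch adds around the register write, reset and realize paths.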
1
From: Andrew Jones <drjones@redhat.com>
1
From: Evgeny Iakovlev <eiakovlev@linux.microsoft.com>
2
2
3
Introduce cpu properties to give fine control over SVE vector lengths.
3
The architecture does not define any functionality for the CLAIM tag bits.
4
We introduce a property for each valid length up to the current
4
So we will just keep the raw bits, as per spec.
5
maximum supported, which is 2048-bits. The properties are named, e.g.
6
sve128, sve256, sve384, sve512, ..., where the number is the number of
7
bits. See the updates to docs/arm-cpu-features.rst for a description
8
of the semantics and for example uses.
9
5
10
Note, as sve-max-vq is still present and we'd like to be able to
6
Signed-off-by: Evgeny Iakovlev <eiakovlev@linux.microsoft.com>
11
support qmp_query_cpu_model_expansion with guests launched with e.g.
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
-cpu max,sve-max-vq=8 on their command lines, we do allow
13
sve-max-vq and sve<N> properties to be provided at the same time, but
14
this is not recommended, and is why sve-max-vq is not mentioned in the
15
document. If sve-max-vq is provided then it enables all lengths smaller
16
than and including the max and disables all lengths larger. It also has
17
the side-effect that no larger lengths may be enabled and that the max
18
itself cannot be disabled. Smaller non-power-of-two lengths may,
19
however, be disabled, e.g. -cpu max,sve-max-vq=4,sve384=off provides a
20
guest the vector lengths 128, 256, and 512 bits.
21
22
This patch has been co-authored with Richard Henderson, who reworked
23
the target/arm/cpu64.c changes in order to push all the validation and
24
auto-enabling/disabling steps into the finalizer, resulting in a nice
25
LOC reduction.
26
27
Signed-off-by: Andrew Jones <drjones@redhat.com>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Reviewed-by: Eric Auger <eric.auger@redhat.com>
9
Message-id: 20230120155929.32384-2-eiakovlev@linux.microsoft.com
30
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
31
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
32
Message-id: 20191024121808.9612-5-drjones@redhat.com
33
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
---
11
---
35
include/qemu/bitops.h | 1 +
12
target/arm/cpu.h | 1 +
36
target/arm/cpu.h | 19 ++++
13
target/arm/debug_helper.c | 33 +++++++++++++++++++++++++++++++++
37
target/arm/cpu.c | 19 ++++
14
2 files changed, 34 insertions(+)
38
target/arm/cpu64.c | 192 ++++++++++++++++++++++++++++++++++++-
39
target/arm/helper.c | 10 +-
40
target/arm/monitor.c | 12 +++
41
tests/arm-cpu-features.c | 194 ++++++++++++++++++++++++++++++++++++++
42
docs/arm-cpu-features.rst | 168 +++++++++++++++++++++++++++++++--
43
8 files changed, 606 insertions(+), 9 deletions(-)
44
15
45
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
46
index XXXXXXX..XXXXXXX 100644
47
--- a/include/qemu/bitops.h
48
+++ b/include/qemu/bitops.h
49
@@ -XXX,XX +XXX,XX @@
50
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
51
52
#define BIT(nr) (1UL << (nr))
53
+#define BIT_ULL(nr) (1ULL << (nr))
54
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
55
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
56
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
57
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
58
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/cpu.h
18
--- a/target/arm/cpu.h
60
+++ b/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
61
@@ -XXX,XX +XXX,XX @@ typedef struct {
20
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
62
21
uint64_t dbgbcr[16]; /* breakpoint control registers */
63
#ifdef TARGET_AARCH64
22
uint64_t dbgwvr[16]; /* watchpoint value registers */
64
# define ARM_MAX_VQ 16
23
uint64_t dbgwcr[16]; /* watchpoint control registers */
65
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
24
+ uint64_t dbgclaim; /* DBGCLAIM bits */
66
+uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq);
25
uint64_t mdscr_el1;
67
#else
26
uint64_t oslsr_el1; /* OS Lock Status */
68
# define ARM_MAX_VQ 1
27
uint64_t osdlr_el1; /* OS DoubleLock status */
69
+static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
28
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
70
+static inline uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq)
29
index XXXXXXX..XXXXXXX 100644
71
+{ return 0; }
30
--- a/target/arm/debug_helper.c
72
#endif
31
+++ b/target/arm/debug_helper.c
73
32
@@ -XXX,XX +XXX,XX @@ static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
74
typedef struct ARMVectorReg {
33
}
75
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
76
77
/* Used to set the maximum vector length the cpu will support. */
78
uint32_t sve_max_vq;
79
+
80
+ /*
81
+ * In sve_vq_map each set bit is a supported vector length of
82
+ * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
83
+ * length in quadwords.
84
+ *
85
+ * While processing properties during initialization, corresponding
86
+ * sve_vq_init bits are set for bits in sve_vq_map that have been
87
+ * set by properties.
88
+ */
89
+ DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ);
90
+ DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ);
91
};
92
93
void arm_cpu_post_init(Object *obj);
94
@@ -XXX,XX +XXX,XX @@ static inline int arm_feature(CPUARMState *env, int feature)
95
return (env->features & (1ULL << feature)) != 0;
96
}
34
}
97
35
98
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
36
+static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
99
+
37
+ uint64_t value)
100
#if !defined(CONFIG_USER_ONLY)
101
/* Return true if exception levels below EL3 are in secure state,
102
* or would be following an exception return to that level.
103
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
104
index XXXXXXX..XXXXXXX 100644
105
--- a/target/arm/cpu.c
106
+++ b/target/arm/cpu.c
107
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_finalizefn(Object *obj)
108
#endif
109
}
110
111
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
112
+{
38
+{
113
+ Error *local_err = NULL;
39
+ env->cp15.dbgclaim |= (value & 0xFF);
114
+
115
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
116
+ arm_cpu_sve_finalize(cpu, &local_err);
117
+ if (local_err != NULL) {
118
+ error_propagate(errp, local_err);
119
+ return;
120
+ }
121
+ }
122
+}
40
+}
123
+
41
+
124
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
42
+static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
125
{
126
CPUState *cs = CPU(dev);
127
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
128
return;
129
}
130
131
+ arm_cpu_finalize_features(cpu, &local_err);
132
+ if (local_err != NULL) {
133
+ error_propagate(errp, local_err);
134
+ return;
135
+ }
136
+
137
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
138
cpu->has_vfp != cpu->has_neon) {
139
/*
140
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/target/arm/cpu64.c
143
+++ b/target/arm/cpu64.c
144
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
145
define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
146
}
147
148
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
149
+{
43
+{
150
+ /*
44
+ /* CLAIM bits are RAO */
151
+ * If any vector lengths are explicitly enabled with sve<N> properties,
45
+ return 0xFF;
152
+ * then all other lengths are implicitly disabled. If sve-max-vq is
153
+ * specified then it is the same as explicitly enabling all lengths
154
+ * up to and including the specified maximum, which means all larger
155
+ * lengths will be implicitly disabled. If no sve<N> properties
156
+ * are enabled and sve-max-vq is not specified, then all lengths not
157
+ * explicitly disabled will be enabled. Additionally, all power-of-two
158
+ * vector lengths less than the maximum enabled length will be
159
+ * automatically enabled and all vector lengths larger than the largest
160
+ * disabled power-of-two vector length will be automatically disabled.
161
+ * Errors are generated if the user provided input that interferes with
162
+ * any of the above. Finally, if SVE is not disabled, then at least one
163
+ * vector length must be enabled.
164
+ */
165
+ DECLARE_BITMAP(tmp, ARM_MAX_VQ);
166
+ uint32_t vq, max_vq = 0;
167
+
168
+ /*
169
+ * Process explicit sve<N> properties.
170
+ * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
171
+ * Check first for any sve<N> enabled.
172
+ */
173
+ if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
174
+ max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;
175
+
176
+ if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
177
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
178
+ error_append_hint(errp, "sve%d is larger than the maximum vector "
179
+ "length, sve-max-vq=%d (%d bits)\n",
180
+ max_vq * 128, cpu->sve_max_vq,
181
+ cpu->sve_max_vq * 128);
182
+ return;
183
+ }
184
+
185
+ /* Propagate enabled bits down through required powers-of-two. */
186
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
187
+ if (!test_bit(vq - 1, cpu->sve_vq_init)) {
188
+ set_bit(vq - 1, cpu->sve_vq_map);
189
+ }
190
+ }
191
+ } else if (cpu->sve_max_vq == 0) {
192
+ /*
193
+ * No explicit bits enabled, and no implicit bits from sve-max-vq.
194
+ */
195
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
196
+ /* SVE is disabled and so are all vector lengths. Good. */
197
+ return;
198
+ }
199
+
200
+ /* Disabling a power-of-two disables all larger lengths. */
201
+ if (test_bit(0, cpu->sve_vq_init)) {
202
+ error_setg(errp, "cannot disable sve128");
203
+ error_append_hint(errp, "Disabling sve128 results in all vector "
204
+ "lengths being disabled.\n");
205
+ error_append_hint(errp, "With SVE enabled, at least one vector "
206
+ "length must be enabled.\n");
207
+ return;
208
+ }
209
+ for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
210
+ if (test_bit(vq - 1, cpu->sve_vq_init)) {
211
+ break;
212
+ }
213
+ }
214
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
215
+
216
+ bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
217
+ max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
218
+ }
219
+
220
+ /*
221
+ * Process the sve-max-vq property.
222
+ * Note that we know from the above that no bit above
223
+ * sve-max-vq is currently set.
224
+ */
225
+ if (cpu->sve_max_vq != 0) {
226
+ max_vq = cpu->sve_max_vq;
227
+
228
+ if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
229
+ test_bit(max_vq - 1, cpu->sve_vq_init)) {
230
+ error_setg(errp, "cannot disable sve%d", max_vq * 128);
231
+ error_append_hint(errp, "The maximum vector length must be "
232
+ "enabled, sve-max-vq=%d (%d bits)\n",
233
+ max_vq, max_vq * 128);
234
+ return;
235
+ }
236
+
237
+ /* Set all bits not explicitly set within sve-max-vq. */
238
+ bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
239
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
240
+ }
241
+
242
+ /*
243
+ * We should know what max-vq is now. Also, as we're done
244
+ * manipulating sve-vq-map, we ensure any bits above max-vq
245
+ * are clear, just in case anybody looks.
246
+ */
247
+ assert(max_vq != 0);
248
+ bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
249
+
250
+ /* Ensure all required powers-of-two are enabled. */
251
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
252
+ if (!test_bit(vq - 1, cpu->sve_vq_map)) {
253
+ error_setg(errp, "cannot disable sve%d", vq * 128);
254
+ error_append_hint(errp, "sve%d is required as it "
255
+ "is a power-of-two length smaller than "
256
+ "the maximum, sve%d\n",
257
+ vq * 128, max_vq * 128);
258
+ return;
259
+ }
260
+ }
261
+
262
+ /*
263
+ * Now that we validated all our vector lengths, the only question
264
+ * left to answer is if we even want SVE at all.
265
+ */
266
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
267
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
268
+ error_append_hint(errp, "SVE must be enabled to enable vector "
269
+ "lengths.\n");
270
+ error_append_hint(errp, "Add sve=on to the CPU property list.\n");
271
+ return;
272
+ }
273
+
274
+ /* From now on sve_max_vq is the actual maximum supported length. */
275
+ cpu->sve_max_vq = max_vq;
276
+}
46
+}
277
+
47
+
278
+uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq)
48
+static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
49
+ uint64_t value)
279
+{
50
+{
280
+ uint32_t bitnum;
51
+ env->cp15.dbgclaim &= ~(value & 0xFF);
281
+
282
+ /*
283
+ * We allow vq == ARM_MAX_VQ + 1 to be input because the caller may want
284
+ * to find the maximum vq enabled, which may be ARM_MAX_VQ, but this
285
+ * function always returns the next smaller than the input.
286
+ */
287
+ assert(vq && vq <= ARM_MAX_VQ + 1);
288
+
289
+ bitnum = find_last_bit(cpu->sve_vq_map, vq - 1);
290
+ return bitnum == vq - 1 ? 0 : bitnum + 1;
291
+}
52
+}
292
+
53
+
293
static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
54
static const ARMCPRegInfo debug_cp_reginfo[] = {
294
void *opaque, Error **errp)
55
/*
295
{
56
* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
296
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
57
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
297
error_propagate(errp, err);
58
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
298
}
59
.access = PL1_RW, .accessfn = access_tda,
299
60
.type = ARM_CP_NOP },
300
+static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
61
+ /*
301
+ void *opaque, Error **errp)
62
+ * Dummy DBGCLAIM registers.
302
+{
63
+ * "The architecture does not define any functionality for the CLAIM tag bits.",
303
+ ARMCPU *cpu = ARM_CPU(obj);
64
+ * so we only keep the raw bits
304
+ uint32_t vq = atoi(&name[3]) / 128;
65
+ */
305
+ bool value;
66
+ { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
306
+
67
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
307
+ /* All vector lengths are disabled when SVE is off. */
68
+ .type = ARM_CP_ALIAS,
308
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
69
+ .access = PL1_RW, .accessfn = access_tda,
309
+ value = false;
70
+ .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
310
+ } else {
71
+ { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
311
+ value = test_bit(vq - 1, cpu->sve_vq_map);
72
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
312
+ }
73
+ .access = PL1_RW, .accessfn = access_tda,
313
+ visit_type_bool(v, name, &value, errp);
74
+ .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
314
+}
75
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
315
+
316
+static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
317
+ void *opaque, Error **errp)
318
+{
319
+ ARMCPU *cpu = ARM_CPU(obj);
320
+ uint32_t vq = atoi(&name[3]) / 128;
321
+ Error *err = NULL;
322
+ bool value;
323
+
324
+ visit_type_bool(v, name, &value, &err);
325
+ if (err) {
326
+ error_propagate(errp, err);
327
+ return;
328
+ }
329
+
330
+ if (value) {
331
+ set_bit(vq - 1, cpu->sve_vq_map);
332
+ } else {
333
+ clear_bit(vq - 1, cpu->sve_vq_map);
334
+ }
335
+ set_bit(vq - 1, cpu->sve_vq_init);
336
+}
337
+
338
static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
339
void *opaque, Error **errp)
340
{
341
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
342
static void aarch64_max_initfn(Object *obj)
343
{
344
ARMCPU *cpu = ARM_CPU(obj);
345
+ uint32_t vq;
346
347
if (kvm_enabled()) {
348
kvm_arm_set_cpu_features_from_host(cpu);
349
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
350
cpu->dcz_blocksize = 7; /* 512 bytes */
351
#endif
352
353
- cpu->sve_max_vq = ARM_MAX_VQ;
354
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
355
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
356
object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
357
cpu_arm_set_sve, NULL, NULL, &error_fatal);
358
+
359
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
360
+ char name[8];
361
+ sprintf(name, "sve%d", vq * 128);
362
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
363
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
364
+ }
365
}
366
}
367
368
diff --git a/target/arm/helper.c b/target/arm/helper.c
369
index XXXXXXX..XXXXXXX 100644
370
--- a/target/arm/helper.c
371
+++ b/target/arm/helper.c
372
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
373
return 0;
374
}
375
376
+static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
377
+{
378
+ uint32_t start_vq = (start_len & 0xf) + 1;
379
+
380
+ return arm_cpu_vq_map_next_smaller(cpu, start_vq + 1) - 1;
381
+}
382
+
383
/*
384
* Given that SVE is enabled, return the vector length for EL.
385
*/
386
@@ -XXX,XX +XXX,XX @@ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
387
if (arm_feature(env, ARM_FEATURE_EL3)) {
388
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
389
}
390
- return zcr_len;
391
+
392
+ return sve_zcr_get_valid_len(cpu, zcr_len);
393
}
394
395
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
396
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
397
index XXXXXXX..XXXXXXX 100644
398
--- a/target/arm/monitor.c
399
+++ b/target/arm/monitor.c
400
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
401
return head;
402
}
403
404
+QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
405
+
406
/*
407
* These are cpu model features we want to advertise. The order here
408
* matters as this is the order in which qmp_query_cpu_model_expansion
409
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
410
*/
411
static const char *cpu_model_advertised_features[] = {
412
"aarch64", "pmu", "sve",
413
+ "sve128", "sve256", "sve384", "sve512",
414
+ "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
415
+ "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
416
NULL
417
};
76
};
418
77
419
@@ -XXX,XX +XXX,XX @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
78
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
420
if (!err) {
421
visit_check_struct(visitor, &err);
422
}
423
+ if (!err) {
424
+ arm_cpu_finalize_features(ARM_CPU(obj), &err);
425
+ }
426
visit_end_struct(visitor, NULL);
427
visit_free(visitor);
428
if (err) {
429
@@ -XXX,XX +XXX,XX @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
430
error_propagate(errp, err);
431
return NULL;
432
}
433
+ } else {
434
+ Error *err = NULL;
435
+ arm_cpu_finalize_features(ARM_CPU(obj), &err);
436
+ assert(err == NULL);
437
}
438
439
expansion_info = g_new0(CpuModelExpansionInfo, 1);
440
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
441
index XXXXXXX..XXXXXXX 100644
442
--- a/tests/arm-cpu-features.c
443
+++ b/tests/arm-cpu-features.c
444
@@ -XXX,XX +XXX,XX @@
445
* See the COPYING file in the top-level directory.
446
*/
447
#include "qemu/osdep.h"
448
+#include "qemu/bitops.h"
449
#include "libqtest.h"
450
#include "qapi/qmp/qdict.h"
451
#include "qapi/qmp/qjson.h"
452
453
+/*
454
+ * We expect the SVE max-vq to be 16. Also it must be <= 64
455
+ * for our test code, otherwise 'vls' can't just be a uint64_t.
456
+ */
457
+#define SVE_MAX_VQ 16
458
+
459
#define MACHINE "-machine virt,gic-version=max "
460
#define QUERY_HEAD "{ 'execute': 'query-cpu-model-expansion', " \
461
"'arguments': { 'type': 'full', "
462
@@ -XXX,XX +XXX,XX @@ static void assert_bad_props(QTestState *qts, const char *cpu_type)
463
qobject_unref(resp);
464
}
465
466
+static uint64_t resp_get_sve_vls(QDict *resp)
467
+{
468
+ QDict *props;
469
+ const QDictEntry *e;
470
+ uint64_t vls = 0;
471
+ int n = 0;
472
+
473
+ g_assert(resp);
474
+ g_assert(resp_has_props(resp));
475
+
476
+ props = resp_get_props(resp);
477
+
478
+ for (e = qdict_first(props); e; e = qdict_next(props, e)) {
479
+ if (strlen(e->key) > 3 && !strncmp(e->key, "sve", 3) &&
480
+ g_ascii_isdigit(e->key[3])) {
481
+ char *endptr;
482
+ int bits;
483
+
484
+ bits = g_ascii_strtoll(&e->key[3], &endptr, 10);
485
+ if (!bits || *endptr != '\0') {
486
+ continue;
487
+ }
488
+
489
+ if (qdict_get_bool(props, e->key)) {
490
+ vls |= BIT_ULL((bits / 128) - 1);
491
+ }
492
+ ++n;
493
+ }
494
+ }
495
+
496
+ g_assert(n == SVE_MAX_VQ);
497
+
498
+ return vls;
499
+}
500
+
501
+#define assert_sve_vls(qts, cpu_type, expected_vls, fmt, ...) \
502
+({ \
503
+ QDict *_resp = do_query(qts, cpu_type, fmt, ##__VA_ARGS__); \
504
+ g_assert(_resp); \
505
+ g_assert(resp_has_props(_resp)); \
506
+ g_assert(resp_get_sve_vls(_resp) == expected_vls); \
507
+ qobject_unref(_resp); \
508
+})
509
+
510
+static void sve_tests_default(QTestState *qts, const char *cpu_type)
511
+{
512
+ /*
513
+ * With no sve-max-vq or sve<N> properties on the command line
514
+ * the default is to have all vector lengths enabled. This also
515
+ * tests that 'sve' is 'on' by default.
516
+ */
517
+ assert_sve_vls(qts, cpu_type, BIT_ULL(SVE_MAX_VQ) - 1, NULL);
518
+
519
+ /* With SVE off, all vector lengths should also be off. */
520
+ assert_sve_vls(qts, cpu_type, 0, "{ 'sve': false }");
521
+
522
+ /* With SVE on, we must have at least one vector length enabled. */
523
+ assert_error(qts, cpu_type, "cannot disable sve128", "{ 'sve128': false }");
524
+
525
+ /* Basic enable/disable tests. */
526
+ assert_sve_vls(qts, cpu_type, 0x7, "{ 'sve384': true }");
527
+ assert_sve_vls(qts, cpu_type, ((BIT_ULL(SVE_MAX_VQ) - 1) & ~BIT_ULL(2)),
528
+ "{ 'sve384': false }");
529
+
530
+ /*
531
+ * ---------------------------------------------------------------------
532
+ * power-of-two(vq) all-power- can can
533
+ * of-two(< vq) enable disable
534
+ * ---------------------------------------------------------------------
535
+ * vq < max_vq no MUST* yes yes
536
+ * vq < max_vq yes MUST* yes no
537
+ * ---------------------------------------------------------------------
538
+ * vq == max_vq n/a MUST* yes** yes**
539
+ * ---------------------------------------------------------------------
540
+ * vq > max_vq n/a no no yes
541
+ * vq > max_vq n/a yes yes yes
542
+ * ---------------------------------------------------------------------
543
+ *
544
+ * [*] "MUST" means this requirement must already be satisfied,
545
+ * otherwise 'max_vq' couldn't itself be enabled.
546
+ *
547
+ * [**] Not testable with the QMP interface, only with the command line.
548
+ */
549
+
550
+ /* max_vq := 8 */
551
+ assert_sve_vls(qts, cpu_type, 0x8b, "{ 'sve1024': true }");
552
+
553
+ /* max_vq := 8, vq < max_vq, !power-of-two(vq) */
554
+ assert_sve_vls(qts, cpu_type, 0x8f,
555
+ "{ 'sve1024': true, 'sve384': true }");
556
+ assert_sve_vls(qts, cpu_type, 0x8b,
557
+ "{ 'sve1024': true, 'sve384': false }");
558
+
559
+ /* max_vq := 8, vq < max_vq, power-of-two(vq) */
560
+ assert_sve_vls(qts, cpu_type, 0x8b,
561
+ "{ 'sve1024': true, 'sve256': true }");
562
+ assert_error(qts, cpu_type, "cannot disable sve256",
563
+ "{ 'sve1024': true, 'sve256': false }");
564
+
565
+ /* max_vq := 3, vq > max_vq, !all-power-of-two(< vq) */
566
+ assert_error(qts, cpu_type, "cannot disable sve512",
567
+ "{ 'sve384': true, 'sve512': false, 'sve640': true }");
568
+
569
+ /*
570
+ * We can disable power-of-two vector lengths when all larger lengths
571
+ * are also disabled. We only need to disable the power-of-two length,
572
+ * as all non-enabled larger lengths will then be auto-disabled.
573
+ */
574
+ assert_sve_vls(qts, cpu_type, 0x7, "{ 'sve512': false }");
575
+
576
+ /* max_vq := 3, vq > max_vq, all-power-of-two(< vq) */
577
+ assert_sve_vls(qts, cpu_type, 0x1f,
578
+ "{ 'sve384': true, 'sve512': true, 'sve640': true }");
579
+ assert_sve_vls(qts, cpu_type, 0xf,
580
+ "{ 'sve384': true, 'sve512': true, 'sve640': false }");
581
+}
582
+
583
+static void sve_tests_sve_max_vq_8(const void *data)
584
+{
585
+ QTestState *qts;
586
+
587
+ qts = qtest_init(MACHINE "-cpu max,sve-max-vq=8");
588
+
589
+ assert_sve_vls(qts, "max", BIT_ULL(8) - 1, NULL);
590
+
591
+ /*
592
+ * Disabling the max-vq set by sve-max-vq is not allowed, but
593
+ * of course enabling it is OK.
594
+ */
595
+ assert_error(qts, "max", "cannot disable sve1024", "{ 'sve1024': false }");
596
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve1024': true }");
597
+
598
+ /*
599
+ * Enabling anything larger than max-vq set by sve-max-vq is not
600
+ * allowed, but of course disabling everything larger is OK.
601
+ */
602
+ assert_error(qts, "max", "cannot enable sve1152", "{ 'sve1152': true }");
603
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve1152': false }");
604
+
605
+ /*
606
+ * We can enable/disable non power-of-two lengths smaller than the
607
+ * max-vq set by sve-max-vq, but, while we can enable power-of-two
608
+ * lengths, we can't disable them.
609
+ */
610
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve384': true }");
611
+ assert_sve_vls(qts, "max", 0xfb, "{ 'sve384': false }");
612
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve256': true }");
613
+ assert_error(qts, "max", "cannot disable sve256", "{ 'sve256': false }");
614
+
615
+ qtest_quit(qts);
616
+}
617
+
618
+static void sve_tests_sve_off(const void *data)
619
+{
620
+ QTestState *qts;
621
+
622
+ qts = qtest_init(MACHINE "-cpu max,sve=off");
623
+
624
+ /* SVE is off, so the map should be empty. */
625
+ assert_sve_vls(qts, "max", 0, NULL);
626
+
627
+ /* The map stays empty even if we turn lengths off. */
628
+ assert_sve_vls(qts, "max", 0, "{ 'sve128': false }");
629
+
630
+ /* It's an error to enable lengths when SVE is off. */
631
+ assert_error(qts, "max", "cannot enable sve128", "{ 'sve128': true }");
632
+
633
+ /* With SVE re-enabled we should get all vector lengths enabled. */
634
+ assert_sve_vls(qts, "max", BIT_ULL(SVE_MAX_VQ) - 1, "{ 'sve': true }");
635
+
636
+ /* Or enable SVE with just specific vector lengths. */
637
+ assert_sve_vls(qts, "max", 0x3,
638
+ "{ 'sve': true, 'sve128': true, 'sve256': true }");
639
+
640
+ qtest_quit(qts);
641
+}
642
+
643
 static void test_query_cpu_model_expansion(const void *data)
 {
     QTestState *qts;
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion(const void *data)
     if (g_str_equal(qtest_get_arch(), "aarch64")) {
         assert_has_feature(qts, "max", "aarch64");
         assert_has_feature(qts, "max", "sve");
+        assert_has_feature(qts, "max", "sve128");
         assert_has_feature(qts, "cortex-a57", "pmu");
         assert_has_feature(qts, "cortex-a57", "aarch64");

+        sve_tests_default(qts, "max");
+
         /* Test that features that depend on KVM generate errors without. */
         assert_error(qts, "max",
                      "'aarch64' feature cannot be disabled "
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
     qtest_add_data_func("/arm/query-cpu-model-expansion",
                         NULL, test_query_cpu_model_expansion);

+    if (g_str_equal(qtest_get_arch(), "aarch64")) {
+        qtest_add_data_func("/arm/max/query-cpu-model-expansion/sve-max-vq-8",
+                            NULL, sve_tests_sve_max_vq_8);
+        qtest_add_data_func("/arm/max/query-cpu-model-expansion/sve-off",
+                            NULL, sve_tests_sve_off);
+    }
+
     if (kvm_available) {
         qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
                             NULL, test_query_cpu_model_expansion_kvm);
673
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/arm-cpu-features.rst
+++ b/docs/arm-cpu-features.rst
@@ -XXX,XX +XXX,XX @@ block in the script for usage) is used to issue the QMP commands.
   (QEMU) query-cpu-model-expansion type=full model={"name":"max"}
   { "return": {
     "model": { "name": "max", "props": {
-      "pmu": true, "aarch64": true
+      "sve1664": true, "pmu": true, "sve1792": true, "sve1920": true,
+      "sve128": true, "aarch64": true, "sve1024": true, "sve": true,
+      "sve640": true, "sve768": true, "sve1408": true, "sve256": true,
+      "sve1152": true, "sve512": true, "sve384": true, "sve1536": true,
+      "sve896": true, "sve1280": true, "sve2048": true
     }}}}

-We see that the `max` CPU type has the `pmu` and `aarch64` CPU features.
-We also see that the CPU features are enabled, as they are all `true`.
+We see that the `max` CPU type has the `pmu`, `aarch64`, `sve`, and many
+`sve<N>` CPU features. We also see that all the CPU features are
+enabled, as they are all `true`. (The `sve<N>` CPU features are all
+optional SVE vector lengths (see "SVE CPU Properties"). While with TCG
+all SVE vector lengths can be supported, when KVM is in use it's more
+likely that only a few lengths will be supported, if SVE is supported at
+all.)

 (2) Let's try to disable the PMU::

   (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"pmu":false}}
   { "return": {
     "model": { "name": "max", "props": {
-      "pmu": false, "aarch64": true
+      "sve1664": true, "pmu": false, "sve1792": true, "sve1920": true,
+      "sve128": true, "aarch64": true, "sve1024": true, "sve": true,
+      "sve640": true, "sve768": true, "sve1408": true, "sve256": true,
+      "sve1152": true, "sve512": true, "sve384": true, "sve1536": true,
+      "sve896": true, "sve1280": true, "sve2048": true
     }}}}

 We see it worked, as `pmu` is now `false`.
@@ -XXX,XX +XXX,XX @@ We see it worked, as `pmu` is now `false`.
 It looks like this feature is limited to a configuration we do not
 currently have.

-(4) Let's try probing CPU features for the Cortex-A15 CPU type::
+(4) Let's disable `sve` and see what happens to all the optional SVE
+    vector lengths::
+
+  (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"sve":false}}
+  { "return": {
+    "model": { "name": "max", "props": {
+      "sve1664": false, "pmu": true, "sve1792": false, "sve1920": false,
+      "sve128": false, "aarch64": true, "sve1024": false, "sve": false,
+      "sve640": false, "sve768": false, "sve1408": false, "sve256": false,
+      "sve1152": false, "sve512": false, "sve384": false, "sve1536": false,
+      "sve896": false, "sve1280": false, "sve2048": false
+    }}}}
+
+As expected they are now all `false`.
+
+(5) Let's try probing CPU features for the Cortex-A15 CPU type::

   (QEMU) query-cpu-model-expansion type=full model={"name":"cortex-a15"}
   {"return": {"model": {"name": "cortex-a15", "props": {"pmu": true}}}}
@@ -XXX,XX +XXX,XX @@ After determining which CPU features are available and supported for a
 given CPU type, then they may be selectively enabled or disabled on the
 QEMU command line with that CPU type::

-  $ qemu-system-aarch64 -M virt -cpu max,pmu=off
+  $ qemu-system-aarch64 -M virt -cpu max,pmu=off,sve=on,sve128=on,sve256=on

-The example above disables the PMU for the `max` CPU type.
+The example above disables the PMU and enables the first two SVE vector
+lengths for the `max` CPU type. Note, the `sve=on` isn't actually
+necessary, because, as we observed above with our probe of the `max` CPU
+type, `sve` is already on by default. Also, based on our probe of
+defaults, it would seem we need to disable many SVE vector lengths, rather
+than only enabling the two we want. This isn't the case, because, as
+disabling many SVE vector lengths would be quite verbose, the `sve<N>` CPU
+properties have special semantics (see "SVE CPU Property Parsing
+Semantics").
+
+SVE CPU Properties
+==================
+
+There are two types of SVE CPU properties: `sve` and `sve<N>`. The first
+is used to enable or disable the entire SVE feature, just as the `pmu`
+CPU property completely enables or disables the PMU. The second type
+is used to enable or disable specific vector lengths, where `N` is the
+number of bits of the length. The `sve<N>` CPU properties have special
+dependencies and constraints, see "SVE CPU Property Dependencies and
+Constraints" below. Additionally, as we want all supported vector lengths
+to be enabled by default, then, in order to avoid overly verbose command
+lines (command lines full of `sve<N>=off`, for all `N` not wanted), we
+provide the parsing semantics listed in "SVE CPU Property Parsing
+Semantics".
+
+SVE CPU Property Dependencies and Constraints
+---------------------------------------------
+
+  1) At least one vector length must be enabled when `sve` is enabled.
+
+  2) If a vector length `N` is enabled, then all power-of-two vector
+     lengths smaller than `N` must also be enabled. E.g. if `sve512`
+     is enabled, then the 128-bit and 256-bit vector lengths must also
+     be enabled.
+
+SVE CPU Property Parsing Semantics
+----------------------------------
+
+  1) If SVE is disabled (`sve=off`), then which SVE vector lengths
+     are enabled or disabled is irrelevant to the guest, as the entire
+     SVE feature is disabled and that disables all vector lengths for
+     the guest. However QEMU will still track any `sve<N>` CPU
+     properties provided by the user. If later an `sve=on` is provided,
+     then the guest will get only the enabled lengths. If no `sve=on`
+     is provided and there are explicitly enabled vector lengths, then
+     an error is generated.
+
+  2) If SVE is enabled (`sve=on`), but no `sve<N>` CPU properties are
+     provided, then all supported vector lengths are enabled, including
+     the non-power-of-two lengths.
+
+  3) If SVE is enabled, then an error is generated when attempting to
+     disable the last enabled vector length (see constraint (1) of "SVE
+     CPU Property Dependencies and Constraints").
+
+  4) If one or more vector lengths have been explicitly enabled and at
+     least one of the dependency lengths of the maximum enabled length
+     has been explicitly disabled, then an error is generated (see
+     constraint (2) of "SVE CPU Property Dependencies and Constraints").
+
+  5) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`
+     CPU properties are set `on`, then the specified vector lengths are
+     disabled but the default for any unspecified lengths remains enabled.
+     Disabling a power-of-two vector length also disables all vector
+     lengths larger than the power-of-two length (see constraint (2) of
+     "SVE CPU Property Dependencies and Constraints").
+
+  6) If one or more `sve<N>` CPU properties are set to `on`, then they
+     are enabled and all unspecified lengths default to disabled, except
+     for the required lengths per constraint (2) of "SVE CPU Property
+     Dependencies and Constraints", which will even be auto-enabled if
+     they were not explicitly enabled.
+
+  7) If SVE was disabled (`sve=off`), allowing all vector lengths to be
+     explicitly disabled (i.e. avoiding the error specified in (3) of
+     "SVE CPU Property Parsing Semantics"), then if later an `sve=on` is
+     provided an error will be generated. To avoid this error, one must
+     enable at least one vector length prior to enabling SVE.
+
+SVE CPU Property Examples
+-------------------------
+
+  1) Disable SVE::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve=off
+
+  2) Implicitly enable all vector lengths for the `max` CPU type::
+
+     $ qemu-system-aarch64 -M virt -cpu max
+
+  3) Only enable the 128-bit vector length::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve128=on
+
+  4) Disable the 512-bit vector length and all larger vector lengths,
+     since 512 is a power-of-two. This results in all the smaller,
+     uninitialized lengths (128, 256, and 384) defaulting to enabled::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve512=off
+
+  5) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve128=on,sve256=on,sve512=on
+
+  6) The same as (5), but since the 128-bit and 256-bit vector
+     lengths are required for the 512-bit vector length to be enabled,
+     then allow them to be auto-enabled::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve512=on
+
+  7) Do the same as (6), but by first disabling SVE and then re-enabling it::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve=off,sve512=on,sve=on
+
+  8) Force errors regarding the last vector length::
+
+     $ qemu-system-aarch64 -M virt -cpu max,sve128=off
+     $ qemu-system-aarch64 -M virt -cpu max,sve=off,sve128=off,sve=on
+
+SVE CPU Property Recommendations
+--------------------------------
+
+The examples in "SVE CPU Property Examples" exhibit many ways to select
+vector lengths which developers may find useful in order to avoid overly
+verbose command lines. However, the recommended way to select vector
+lengths is to explicitly enable each desired length. Therefore only
+examples (1), (3), and (5) exhibit recommended uses of the properties.

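(Editor's note, not part of the patch above: parsing rule (6) is the one that tends to surprise users, so here is roughly what a probe with only `sve512` enabled would be expected to return for the `max` CPU under TCG. The property list below is abridged and hand-written, so treat it as the expected shape rather than verbatim QEMU output; the point is that `sve128` and `sve256` are auto-enabled while every other vector length ends up disabled.)

  (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"sve512":true}}
  { "return": {
    "model": { "name": "max", "props": {
      "sve": true, "sve128": true, "sve256": true, "sve512": true,
      "sve384": false, "sve640": false, ...all remaining "sve<N>": false...,
      "pmu": true, "aarch64": true
    }}}}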
--
2.20.1

--
2.34.1
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Evgeny Iakovlev <eiakovlev@linux.microsoft.com>
2
2
3
This file creates the BCM2836/BCM2837 blocks.
3
QEMU doesn't implement the Debug Communication Channel, nor the rest
4
The biggest differences from the BCM2838 we are going to add are
4
of the external debug interface. However, Microsoft Hyper-V tries to
5
the base addresses of the interrupt controller and the peripherals.
5
access some of those registers during an EL2 context switch.
6
Add these addresses to the BCM283XInfo structure to make this
7
block more modular. Remove the MCORE_OFFSET offset as it is
8
not useful and rather confusing.
9
6
10
Reviewed-by: Esteban Bosse <estebanbosse@gmail.com>
7
Since there is no architectural way to not advertise support for external
11
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
debug, provide RAZ/WI stubs for OSDTRRX_EL1, OSDTRTX_EL1 and OSECCR_EL1
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
registers in the same way the rest of the DCC is currently done. Do account
13
Message-id: 20191019234715.25750-6-f4bug@amsat.org
10
for access traps though with access_tda.
11
12
Signed-off-by: Evgeny Iakovlev <eiakovlev@linux.microsoft.com>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Message-id: 20230120155929.32384-3-eiakovlev@linux.microsoft.com
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
---
16
hw/arm/bcm2836.c | 18 +++++++++---------
17
target/arm/debug_helper.c | 21 +++++++++++++++++++++
17
1 file changed, 9 insertions(+), 9 deletions(-)
18
1 file changed, 21 insertions(+)
18
19
19
diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c
20
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
20
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/bcm2836.c
22
--- a/target/arm/debug_helper.c
22
+++ b/hw/arm/bcm2836.c
23
+++ b/target/arm/debug_helper.c
23
@@ -XXX,XX +XXX,XX @@
24
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
24
#include "hw/arm/raspi_platform.h"
25
.opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
25
#include "hw/sysbus.h"
26
.access = PL0_R, .accessfn = access_tda,
26
27
.type = ARM_CP_CONST, .resetvalue = 0 },
27
-/* Peripheral base address seen by the CPU */
28
+ /*
28
-#define BCM2836_PERI_BASE 0x3F000000
29
+ * OSDTRRX_EL1/OSDTRTX_EL1 are used for save and restore of DBGDTRRX_EL0.
29
-
30
+ * It is a component of the Debug Communications Channel, which is not implemented.
30
-/* "QA7" (Pi2) interrupt controller and mailboxes etc. */
31
+ */
31
-#define BCM2836_CONTROL_BASE 0x40000000
32
+ { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
32
-
33
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
33
struct BCM283XInfo {
34
+ .access = PL1_RW, .accessfn = access_tda,
34
const char *name;
35
+ .type = ARM_CP_CONST, .resetvalue = 0 },
35
const char *cpu_type;
36
+ { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
36
+ hwaddr peri_base; /* Peripheral base address seen by the CPU */
37
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
37
+ hwaddr ctrl_base; /* Interrupt controller and mailboxes etc. */
38
+ .access = PL1_RW, .accessfn = access_tda,
38
int clusterid;
39
+ .type = ARM_CP_CONST, .resetvalue = 0 },
39
};
40
+ /*
40
41
+ * OSECCR_EL1 provides a mechanism for an operating system
41
@@ -XXX,XX +XXX,XX @@ static const BCM283XInfo bcm283x_socs[] = {
42
+ * to access the contents of EDECCR. EDECCR is not implemented though,
42
{
43
+ * as is the rest of external device mechanism.
43
.name = TYPE_BCM2836,
44
+ */
44
.cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"),
45
+ { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
45
+ .peri_base = 0x3f000000,
46
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
46
+ .ctrl_base = 0x40000000,
47
+ .access = PL1_RW, .accessfn = access_tda,
47
.clusterid = 0xf,
48
+ .type = ARM_CP_CONST, .resetvalue = 0 },
48
},
49
/*
49
#ifdef TARGET_AARCH64
50
* DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
50
{
51
* it is unlikely a guest will care.
51
.name = TYPE_BCM2837,
52
.cpu_type = ARM_CPU_TYPE_NAME("cortex-a53"),
53
+ .peri_base = 0x3f000000,
54
+ .ctrl_base = 0x40000000,
55
.clusterid = 0x0,
56
},
57
#endif
58
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
59
}
60
61
sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->peripherals), 0,
62
- BCM2836_PERI_BASE, 1);
63
+ info->peri_base, 1);
64
65
/* bcm2836 interrupt controller (and mailboxes, etc.) */
66
object_property_set_bool(OBJECT(&s->control), true, "realized", &err);
67
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
68
return;
69
}
70
71
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->control), 0, BCM2836_CONTROL_BASE);
72
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->control), 0, info->ctrl_base);
73
74
sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0,
75
qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-irq", 0));
76
@@ -XXX,XX +XXX,XX @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
77
78
/* set periphbase/CBAR value for CPU-local registers */
79
object_property_set_int(OBJECT(&s->cpus[n]),
80
- BCM2836_PERI_BASE + MSYNC_OFFSET,
81
+ info->peri_base,
82
"reset-cbar", &err);
83
if (err) {
84
error_propagate(errp, err);
85
--
52
--
86
2.20.1
53
2.34.1
87
88
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Continue setting, but not relying upon, env->hflags.
3
Move the ri == NULL case to the top of the function and return.
4
This allows the else to be removed and the code unindented.
4
5
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20191023150057.25731-20-richard.henderson@linaro.org
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Message-id: 20230106194451.1213153-2-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
11
---
9
target/arm/helper.c | 10 ++++++++++
12
target/arm/translate.c | 406 ++++++++++++++++++++---------------------
10
1 file changed, 10 insertions(+)
13
1 file changed, 203 insertions(+), 203 deletions(-)
11
14
12
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper.c
17
--- a/target/arm/translate.c
15
+++ b/target/arm/helper.c
18
+++ b/target/arm/translate.c
16
@@ -XXX,XX +XXX,XX @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
19
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
17
/* ??? Lots of these bits are not implemented. */
20
bool isread, int rt, int rt2)
18
/* This may enable/disable the MMU, so do a TLB flush. */
21
{
19
tlb_flush(CPU(cpu));
22
const ARMCPRegInfo *ri;
20
+
23
+ bool need_exit_tb;
21
+ if (ri->type & ARM_CP_SUPPRESS_TB_END) {
24
22
+ /*
25
ri = get_arm_cp_reginfo(s->cp_regs,
23
+ * Normally we would always end the TB on an SCTLR write; see the
26
ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
24
+ * comment in ARMCPRegInfo sctlr initialization below for why Xscale
27
- if (ri) {
25
+ * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
28
- bool need_exit_tb;
26
+ * of hflags from the translator, so do it here.
29
27
+ */
30
- /* Check access permissions */
28
+ arm_rebuild_hflags(env);
31
- if (!cp_access_ok(s->current_el, ri, isread)) {
32
- unallocated_encoding(s);
33
- return;
34
- }
35
-
36
- if (s->hstr_active || ri->accessfn ||
37
- (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
38
- /* Emit code to perform further access permissions checks at
39
- * runtime; this may result in an exception.
40
- * Note that on XScale all cp0..c13 registers do an access check
41
- * call in order to handle c15_cpar.
42
- */
43
- uint32_t syndrome;
44
-
45
- /* Note that since we are an implementation which takes an
46
- * exception on a trapped conditional instruction only if the
47
- * instruction passes its condition code check, we can take
48
- * advantage of the clause in the ARM ARM that allows us to set
49
- * the COND field in the instruction to 0xE in all cases.
50
- * We could fish the actual condition out of the insn (ARM)
51
- * or the condexec bits (Thumb) but it isn't necessary.
52
- */
53
- switch (cpnum) {
54
- case 14:
55
- if (is64) {
56
- syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
57
- isread, false);
58
- } else {
59
- syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
60
- rt, isread, false);
61
- }
62
- break;
63
- case 15:
64
- if (is64) {
65
- syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
66
- isread, false);
67
- } else {
68
- syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
69
- rt, isread, false);
70
- }
71
- break;
72
- default:
73
- /* ARMv8 defines that only coprocessors 14 and 15 exist,
74
- * so this can only happen if this is an ARMv7 or earlier CPU,
75
- * in which case the syndrome information won't actually be
76
- * guest visible.
77
- */
78
- assert(!arm_dc_feature(s, ARM_FEATURE_V8));
79
- syndrome = syn_uncategorized();
80
- break;
81
- }
82
-
83
- gen_set_condexec(s);
84
- gen_update_pc(s, 0);
85
- gen_helper_access_check_cp_reg(cpu_env,
86
- tcg_constant_ptr(ri),
87
- tcg_constant_i32(syndrome),
88
- tcg_constant_i32(isread));
89
- } else if (ri->type & ARM_CP_RAISES_EXC) {
90
- /*
91
- * The readfn or writefn might raise an exception;
92
- * synchronize the CPU state in case it does.
93
- */
94
- gen_set_condexec(s);
95
- gen_update_pc(s, 0);
96
- }
97
-
98
- /* Handle special cases first */
99
- switch (ri->type & ARM_CP_SPECIAL_MASK) {
100
- case 0:
101
- break;
102
- case ARM_CP_NOP:
103
- return;
104
- case ARM_CP_WFI:
105
- if (isread) {
106
- unallocated_encoding(s);
107
- return;
108
- }
109
- gen_update_pc(s, curr_insn_len(s));
110
- s->base.is_jmp = DISAS_WFI;
111
- return;
112
- default:
113
- g_assert_not_reached();
114
- }
115
-
116
- if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
117
- gen_io_start();
118
- }
119
-
120
- if (isread) {
121
- /* Read */
122
- if (is64) {
123
- TCGv_i64 tmp64;
124
- TCGv_i32 tmp;
125
- if (ri->type & ARM_CP_CONST) {
126
- tmp64 = tcg_constant_i64(ri->resetvalue);
127
- } else if (ri->readfn) {
128
- tmp64 = tcg_temp_new_i64();
129
- gen_helper_get_cp_reg64(tmp64, cpu_env,
130
- tcg_constant_ptr(ri));
131
- } else {
132
- tmp64 = tcg_temp_new_i64();
133
- tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
134
- }
135
- tmp = tcg_temp_new_i32();
136
- tcg_gen_extrl_i64_i32(tmp, tmp64);
137
- store_reg(s, rt, tmp);
138
- tmp = tcg_temp_new_i32();
139
- tcg_gen_extrh_i64_i32(tmp, tmp64);
140
- tcg_temp_free_i64(tmp64);
141
- store_reg(s, rt2, tmp);
142
- } else {
143
- TCGv_i32 tmp;
144
- if (ri->type & ARM_CP_CONST) {
145
- tmp = tcg_constant_i32(ri->resetvalue);
146
- } else if (ri->readfn) {
147
- tmp = tcg_temp_new_i32();
148
- gen_helper_get_cp_reg(tmp, cpu_env, tcg_constant_ptr(ri));
149
- } else {
150
- tmp = load_cpu_offset(ri->fieldoffset);
151
- }
152
- if (rt == 15) {
153
- /* Destination register of r15 for 32 bit loads sets
154
- * the condition codes from the high 4 bits of the value
155
- */
156
- gen_set_nzcv(tmp);
157
- tcg_temp_free_i32(tmp);
158
- } else {
159
- store_reg(s, rt, tmp);
160
- }
161
- }
162
+ if (!ri) {
163
+ /*
164
+ * Unknown register; this might be a guest error or a QEMU
165
+ * unimplemented feature.
166
+ */
167
+ if (is64) {
168
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
169
+ "64 bit system register cp:%d opc1: %d crm:%d "
170
+ "(%s)\n",
171
+ isread ? "read" : "write", cpnum, opc1, crm,
172
+ s->ns ? "non-secure" : "secure");
173
} else {
174
- /* Write */
175
- if (ri->type & ARM_CP_CONST) {
176
- /* If not forbidden by access permissions, treat as WI */
177
- return;
178
- }
179
-
180
- if (is64) {
181
- TCGv_i32 tmplo, tmphi;
182
- TCGv_i64 tmp64 = tcg_temp_new_i64();
183
- tmplo = load_reg(s, rt);
184
- tmphi = load_reg(s, rt2);
185
- tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
186
- tcg_temp_free_i32(tmplo);
187
- tcg_temp_free_i32(tmphi);
188
- if (ri->writefn) {
189
- gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri),
190
- tmp64);
191
- } else {
192
- tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
193
- }
194
- tcg_temp_free_i64(tmp64);
195
- } else {
196
- TCGv_i32 tmp = load_reg(s, rt);
197
- if (ri->writefn) {
198
- gen_helper_set_cp_reg(cpu_env, tcg_constant_ptr(ri), tmp);
199
- tcg_temp_free_i32(tmp);
200
- } else {
201
- store_cpu_offset(tmp, ri->fieldoffset, 4);
202
- }
203
- }
204
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
205
+ "system register cp:%d opc1:%d crn:%d crm:%d "
206
+ "opc2:%d (%s)\n",
207
+ isread ? "read" : "write", cpnum, opc1, crn,
208
+ crm, opc2, s->ns ? "non-secure" : "secure");
209
}
210
-
211
- /* I/O operations must end the TB here (whether read or write) */
212
- need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
213
- (ri->type & ARM_CP_IO));
214
-
215
- if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
216
- /*
217
- * A write to any coprocessor register that ends a TB
218
- * must rebuild the hflags for the next TB.
219
- */
220
- gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
221
- /*
222
- * We default to ending the TB on a coprocessor register write,
223
- * but allow this to be suppressed by the register definition
224
- * (usually only necessary to work around guest bugs).
225
- */
226
- need_exit_tb = true;
227
- }
228
- if (need_exit_tb) {
229
- gen_lookup_tb(s);
230
- }
231
-
232
+ unallocated_encoding(s);
233
return;
234
}
235
236
- /* Unknown register; this might be a guest error or a QEMU
237
- * unimplemented feature.
238
- */
239
- if (is64) {
240
- qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
241
- "64 bit system register cp:%d opc1: %d crm:%d "
242
- "(%s)\n",
243
- isread ? "read" : "write", cpnum, opc1, crm,
244
- s->ns ? "non-secure" : "secure");
245
- } else {
246
- qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
247
- "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
248
- "(%s)\n",
249
- isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
250
- s->ns ? "non-secure" : "secure");
251
+ /* Check access permissions */
252
+ if (!cp_access_ok(s->current_el, ri, isread)) {
253
+ unallocated_encoding(s);
254
+ return;
255
}
256
257
- unallocated_encoding(s);
258
- return;
259
+ if (s->hstr_active || ri->accessfn ||
260
+ (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
261
+ /*
262
+ * Emit code to perform further access permissions checks at
263
+ * runtime; this may result in an exception.
264
+ * Note that on XScale all cp0..c13 registers do an access check
265
+ * call in order to handle c15_cpar.
266
+ */
267
+ uint32_t syndrome;
268
+
269
+ /*
270
+ * Note that since we are an implementation which takes an
271
+ * exception on a trapped conditional instruction only if the
272
+ * instruction passes its condition code check, we can take
273
+ * advantage of the clause in the ARM ARM that allows us to set
274
+ * the COND field in the instruction to 0xE in all cases.
275
+ * We could fish the actual condition out of the insn (ARM)
276
+ * or the condexec bits (Thumb) but it isn't necessary.
277
+ */
278
+ switch (cpnum) {
279
+ case 14:
280
+ if (is64) {
281
+ syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
282
+ isread, false);
283
+ } else {
284
+ syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
285
+ rt, isread, false);
286
+ }
287
+ break;
288
+ case 15:
289
+ if (is64) {
290
+ syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
291
+ isread, false);
292
+ } else {
293
+ syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
294
+ rt, isread, false);
295
+ }
296
+ break;
297
+ default:
298
+ /*
299
+ * ARMv8 defines that only coprocessors 14 and 15 exist,
300
+ * so this can only happen if this is an ARMv7 or earlier CPU,
301
+ * in which case the syndrome information won't actually be
302
+ * guest visible.
303
+ */
304
+ assert(!arm_dc_feature(s, ARM_FEATURE_V8));
305
+ syndrome = syn_uncategorized();
306
+ break;
307
+ }
308
+
309
+ gen_set_condexec(s);
310
+ gen_update_pc(s, 0);
311
+ gen_helper_access_check_cp_reg(cpu_env,
312
+ tcg_constant_ptr(ri),
313
+ tcg_constant_i32(syndrome),
314
+ tcg_constant_i32(isread));
315
+ } else if (ri->type & ARM_CP_RAISES_EXC) {
316
+ /*
317
+ * The readfn or writefn might raise an exception;
318
+ * synchronize the CPU state in case it does.
319
+ */
320
+ gen_set_condexec(s);
321
+ gen_update_pc(s, 0);
322
+ }
323
+
324
+ /* Handle special cases first */
325
+ switch (ri->type & ARM_CP_SPECIAL_MASK) {
326
+ case 0:
327
+ break;
328
+ case ARM_CP_NOP:
329
+ return;
330
+ case ARM_CP_WFI:
331
+ if (isread) {
332
+ unallocated_encoding(s);
333
+ return;
334
+ }
335
+ gen_update_pc(s, curr_insn_len(s));
336
+ s->base.is_jmp = DISAS_WFI;
337
+ return;
338
+ default:
339
+ g_assert_not_reached();
340
+ }
341
+
342
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
343
+ gen_io_start();
344
+ }
345
+
346
+ if (isread) {
347
+ /* Read */
348
+ if (is64) {
349
+ TCGv_i64 tmp64;
350
+ TCGv_i32 tmp;
351
+ if (ri->type & ARM_CP_CONST) {
352
+ tmp64 = tcg_constant_i64(ri->resetvalue);
353
+ } else if (ri->readfn) {
354
+ tmp64 = tcg_temp_new_i64();
355
+ gen_helper_get_cp_reg64(tmp64, cpu_env,
356
+ tcg_constant_ptr(ri));
357
+ } else {
358
+ tmp64 = tcg_temp_new_i64();
359
+ tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
360
+ }
361
+ tmp = tcg_temp_new_i32();
362
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
363
+ store_reg(s, rt, tmp);
364
+ tmp = tcg_temp_new_i32();
365
+ tcg_gen_extrh_i64_i32(tmp, tmp64);
366
+ tcg_temp_free_i64(tmp64);
367
+ store_reg(s, rt2, tmp);
368
+ } else {
369
+ TCGv_i32 tmp;
370
+ if (ri->type & ARM_CP_CONST) {
371
+ tmp = tcg_constant_i32(ri->resetvalue);
372
+ } else if (ri->readfn) {
373
+ tmp = tcg_temp_new_i32();
374
+ gen_helper_get_cp_reg(tmp, cpu_env, tcg_constant_ptr(ri));
375
+ } else {
376
+ tmp = load_cpu_offset(ri->fieldoffset);
377
+ }
378
+ if (rt == 15) {
379
+ /* Destination register of r15 for 32 bit loads sets
380
+ * the condition codes from the high 4 bits of the value
381
+ */
382
+ gen_set_nzcv(tmp);
383
+ tcg_temp_free_i32(tmp);
384
+ } else {
385
+ store_reg(s, rt, tmp);
386
+ }
387
+ }
388
+ } else {
389
+ /* Write */
390
+ if (ri->type & ARM_CP_CONST) {
391
+ /* If not forbidden by access permissions, treat as WI */
392
+ return;
393
+ }
394
+
395
+ if (is64) {
396
+ TCGv_i32 tmplo, tmphi;
397
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
398
+ tmplo = load_reg(s, rt);
399
+ tmphi = load_reg(s, rt2);
400
+ tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
401
+ tcg_temp_free_i32(tmplo);
402
+ tcg_temp_free_i32(tmphi);
403
+ if (ri->writefn) {
404
+ gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tmp64);
405
+ } else {
406
+ tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
407
+ }
408
+ tcg_temp_free_i64(tmp64);
409
+ } else {
410
+ TCGv_i32 tmp = load_reg(s, rt);
411
+ if (ri->writefn) {
412
+ gen_helper_set_cp_reg(cpu_env, tcg_constant_ptr(ri), tmp);
413
+ tcg_temp_free_i32(tmp);
414
+ } else {
415
+ store_cpu_offset(tmp, ri->fieldoffset, 4);
416
+ }
417
+ }
418
+ }
419
+
420
+ /* I/O operations must end the TB here (whether read or write) */
421
+ need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
422
+ (ri->type & ARM_CP_IO));
423
+
424
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
425
+ /*
426
+ * A write to any coprocessor register that ends a TB
427
+ * must rebuild the hflags for the next TB.
428
+ */
429
+ gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
430
+ /*
431
+ * We default to ending the TB on a coprocessor register write,
432
+ * but allow this to be suppressed by the register definition
433
+ * (usually only necessary to work around guest bugs).
434
+ */
435
+ need_exit_tb = true;
436
+ }
437
+ if (need_exit_tb) {
438
+ gen_lookup_tb(s);
29
+ }
439
+ }
30
}
440
}
31
441
32
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
442
/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
33
--
443
--
34
2.20.1
444
2.34.1
35
445
36
446
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
These functions are given the mode and EL state of the CPU
3
Do not encode the pointer as a constant in the opcode stream.
4
and write the computed value to env->hflags.
4
This pointer is specific to the cpu that first generated the
5
translation, which runs into problems with both hot-pluggable
6
cpus and user-only threads, as cpus are removed. It's also a
7
potential correctness issue in the theoretical case of a
8
slightly-heterogenous system, because if CPU 0 generates a
9
TB and then CPU 1 executes it, CPU 1 will end up using CPU 0's
10
hash table, which might have a wrong set of registers in it.
11
(All our current systems are either completely homogenous,
12
M-profile, or have CPUs sufficiently different that they
13
wouldn't be sharing TBs anyway because the differences would
14
show up in the TB flags, so the correctness issue is only
15
theoretical, not practical.)
5
16
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
17
Perform the lookup in either helper_access_check_cp_reg,
18
or a new helper_lookup_cp_reg.
19
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
20
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20191023150057.25731-16-richard.henderson@linaro.org
21
Message-id: 20230106194451.1213153-3-richard.henderson@linaro.org
22
[PMM: added note in commit message about correctness issue]
23
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
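
(Editor's note, not part of the series: a minimal, self-contained sketch of the idea behind the "look up ARMCPRegInfo at runtime" change described above, i.e. record only an integer key at translate time and resolve it against the executing CPU's own register table at run time. Every name in the sketch is an invented stand-in; the real code uses ENCODE_CP_REG/ENCODE_AA64_CP_REG, ARMCPRegInfo, get_arm_cp_reginfo and a per-CPU hash table rather than the linear scan shown here.)

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t key;        /* encoded (cp, crn, crm, opc1, opc2) coordinates */
    const char *name;
    uint64_t resetvalue;
} RegInfo;

/* Pack the coprocessor register coordinates into one 32-bit key. */
static uint32_t reg_key(int cp, int crn, int crm, int opc1, int opc2)
{
    return (uint32_t)((cp << 16) | (crn << 12) | (crm << 8) | (opc1 << 4) | opc2);
}

/* Stand-in for the per-CPU lookup the helper performs at run time. */
static const RegInfo *cpu_lookup_reg(const RegInfo *regs, int n, uint32_t key)
{
    for (int i = 0; i < n; i++) {
        if (regs[i].key == key) {
            return &regs[i];
        }
    }
    return NULL;
}

int main(void)
{
    const RegInfo regs[] = {
        { reg_key(15, 0, 0, 0, 0), "MIDR", 0x410fd083 },
    };

    /* "Translate time": only the key is baked into the generated code. */
    uint32_t key = reg_key(15, 0, 0, 0, 0);

    /* "Run time": whichever CPU executes the code resolves the key itself,
     * so no host pointer from another CPU's table is ever reused. */
    const RegInfo *ri = cpu_lookup_reg(regs, 1, key);
    assert(ri != NULL);
    printf("%s reset value: 0x%" PRIx64 "\n", ri->name, ri->resetvalue);
    return 0;
}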
10
---
25
---
11
target/arm/helper.h | 4 ++++
26
target/arm/helper.h | 11 +++++----
12
target/arm/helper.c | 24 ++++++++++++++++++++++++
27
target/arm/translate.h | 7 ++++++
13
2 files changed, 28 insertions(+)
28
target/arm/op_helper.c | 27 ++++++++++++++------
29
target/arm/translate-a64.c | 49 ++++++++++++++++++++++---------------
30
target/arm/translate.c | 50 +++++++++++++++++++++++++-------------
31
5 files changed, 95 insertions(+), 49 deletions(-)
14
32
15
diff --git a/target/arm/helper.h b/target/arm/helper.h
33
diff --git a/target/arm/helper.h b/target/arm/helper.h
16
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.h
35
--- a/target/arm/helper.h
18
+++ b/target/arm/helper.h
36
+++ b/target/arm/helper.h
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
37
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(v8m_stackcheck, void, env, i32)
20
DEF_HELPER_2(get_user_reg, i32, env, i32)
38
21
DEF_HELPER_3(set_user_reg, void, env, i32, i32)
39
DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
22
40
23
+DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
41
-DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
24
+DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
42
-DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
25
+DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
43
-DEF_HELPER_2(get_cp_reg, i32, env, ptr)
26
+
44
-DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
27
DEF_HELPER_1(vfp_get_fpscr, i32, env)
45
-DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
28
DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
46
+DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
29
47
+DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
30
diff --git a/target/arm/helper.c b/target/arm/helper.c
48
+DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
49
+DEF_HELPER_2(get_cp_reg, i32, env, cptr)
50
+DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
51
+DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
52
53
DEF_HELPER_2(get_r13_banked, i32, env, i32)
54
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
55
diff --git a/target/arm/translate.h b/target/arm/translate.h
31
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/helper.c
57
--- a/target/arm/translate.h
33
+++ b/target/arm/helper.c
58
+++ b/target/arm/translate.h
34
@@ -XXX,XX +XXX,XX @@ void arm_rebuild_hflags(CPUARMState *env)
59
@@ -XXX,XX +XXX,XX @@ static inline void set_disas_label(DisasContext *s, DisasLabel l)
35
env->hflags = rebuild_hflags_internal(env);
60
s->pc_save = l.pc_save;
36
}
61
}
37
62
38
+void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
63
+static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
39
+{
64
+{
40
+ int fp_el = fp_exception_el(env, el);
65
+ TCGv_ptr ret = tcg_temp_new_ptr();
41
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
66
+ gen_helper_lookup_cp_reg(ret, cpu_env, tcg_constant_i32(key));
42
+
67
+ return ret;
43
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
44
+}
68
+}
45
+
69
+
46
+void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
70
/*
71
* Helpers for implementing sets of trans_* functions.
72
* Defer the implementation of NAME to FUNC, with optional extra arguments.
73
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/op_helper.c
76
+++ b/target/arm/op_helper.c
77
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
78
}
79
}
80
81
-void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
82
- uint32_t isread)
83
+const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
84
+ uint32_t syndrome, uint32_t isread)
85
{
86
ARMCPU *cpu = env_archcpu(env);
87
- const ARMCPRegInfo *ri = rip;
88
+ const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
89
CPAccessResult res = CP_ACCESS_OK;
90
int target_el;
91
92
+ assert(ri != NULL);
93
+
94
if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
95
&& extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
96
res = CP_ACCESS_TRAP;
97
@@ -XXX,XX +XXX,XX @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
98
res = ri->accessfn(env, ri, isread);
99
}
100
if (likely(res == CP_ACCESS_OK)) {
101
- return;
102
+ return ri;
103
}
104
105
fail:
106
@@ -XXX,XX +XXX,XX @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
107
raise_exception(env, EXCP_UDEF, syndrome, target_el);
108
}
109
110
-void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
111
+const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
47
+{
112
+{
48
+ int fp_el = fp_exception_el(env, el);
113
+ ARMCPU *cpu = env_archcpu(env);
49
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
114
+ const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
50
+
115
+
51
+ env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
116
+ assert(ri != NULL);
117
+ return ri;
52
+}
118
+}
53
+
119
+
54
+void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
120
+void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
55
+{
121
{
56
+ int fp_el = fp_exception_el(env, el);
122
const ARMCPRegInfo *ri = rip;
57
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
123
58
+
124
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
59
+ env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
125
}
60
+}
126
}
61
+
127
62
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
128
-uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
63
target_ulong *cs_base, uint32_t *pflags)
129
+uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
64
{
130
{
131
const ARMCPRegInfo *ri = rip;
132
uint32_t res;
133
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
134
return res;
135
}
136
137
-void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
138
+void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
139
{
140
const ARMCPRegInfo *ri = rip;
141
142
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
143
}
144
}
145
146
-uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
147
+uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
148
{
149
const ARMCPRegInfo *ri = rip;
150
uint64_t res;
151
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/target/arm/translate-a64.c
154
+++ b/target/arm/translate-a64.c
155
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
156
unsigned int op0, unsigned int op1, unsigned int op2,
157
unsigned int crn, unsigned int crm, unsigned int rt)
158
{
159
- const ARMCPRegInfo *ri;
160
+ uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
161
+ crn, crm, op0, op1, op2);
162
+ const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
163
+ TCGv_ptr tcg_ri = NULL;
164
TCGv_i64 tcg_rt;
165
166
- ri = get_arm_cp_reginfo(s->cp_regs,
167
- ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
168
- crn, crm, op0, op1, op2));
169
-
170
if (!ri) {
171
/* Unknown register; this might be a guest error or a QEMU
172
* unimplemented feature.
173
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
174
175
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
176
gen_a64_update_pc(s, 0);
177
- gen_helper_access_check_cp_reg(cpu_env,
178
- tcg_constant_ptr(ri),
179
+ tcg_ri = tcg_temp_new_ptr();
180
+ gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
181
+ tcg_constant_i32(key),
182
tcg_constant_i32(syndrome),
183
tcg_constant_i32(isread));
184
} else if (ri->type & ARM_CP_RAISES_EXC) {
185
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
186
case 0:
187
break;
188
case ARM_CP_NOP:
189
- return;
190
+ goto exit;
191
case ARM_CP_NZCV:
192
tcg_rt = cpu_reg(s, rt);
193
if (isread) {
194
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
195
} else {
196
gen_set_nzcv(tcg_rt);
197
}
198
- return;
199
+ goto exit;
200
case ARM_CP_CURRENTEL:
201
/* Reads as current EL value from pstate, which is
202
* guaranteed to be constant by the tb flags.
203
*/
204
tcg_rt = cpu_reg(s, rt);
205
tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
206
- return;
207
+ goto exit;
208
case ARM_CP_DC_ZVA:
209
/* Writes clear the aligned block of memory which rt points into. */
210
if (s->mte_active[0]) {
211
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
212
tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
213
}
214
gen_helper_dc_zva(cpu_env, tcg_rt);
215
- return;
216
+ goto exit;
217
case ARM_CP_DC_GVA:
218
{
219
TCGv_i64 clean_addr, tag;
220
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
221
tcg_temp_free_i64(tag);
222
}
223
}
224
- return;
225
+ goto exit;
226
case ARM_CP_DC_GZVA:
227
{
228
TCGv_i64 clean_addr, tag;
229
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
230
tcg_temp_free_i64(tag);
231
}
232
}
233
- return;
234
+ goto exit;
235
default:
236
g_assert_not_reached();
237
}
238
if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
239
- return;
240
+ goto exit;
241
} else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
242
- return;
243
+ goto exit;
244
} else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
245
- return;
246
+ goto exit;
247
}
248
249
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
250
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
251
if (ri->type & ARM_CP_CONST) {
252
tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
253
} else if (ri->readfn) {
254
- gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri));
255
+ if (!tcg_ri) {
256
+ tcg_ri = gen_lookup_cp_reg(key);
257
+ }
258
+ gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
259
} else {
260
tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
261
}
262
} else {
263
if (ri->type & ARM_CP_CONST) {
264
/* If not forbidden by access permissions, treat as WI */
265
- return;
266
+ goto exit;
267
} else if (ri->writefn) {
268
- gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt);
269
+ if (!tcg_ri) {
270
+ tcg_ri = gen_lookup_cp_reg(key);
271
+ }
272
+ gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
273
} else {
274
tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
275
}
276
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
277
*/
278
s->base.is_jmp = DISAS_UPDATE_EXIT;
279
}
280
+
281
+ exit:
282
+ if (tcg_ri) {
283
+ tcg_temp_free_ptr(tcg_ri);
284
+ }
285
}
286
287
/* System
288
diff --git a/target/arm/translate.c b/target/arm/translate.c
289
index XXXXXXX..XXXXXXX 100644
290
--- a/target/arm/translate.c
291
+++ b/target/arm/translate.c
292
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
293
int opc1, int crn, int crm, int opc2,
294
bool isread, int rt, int rt2)
295
{
296
- const ARMCPRegInfo *ri;
297
+ uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
298
+ const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
299
+ TCGv_ptr tcg_ri = NULL;
300
bool need_exit_tb;
301
302
- ri = get_arm_cp_reginfo(s->cp_regs,
303
- ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
304
-
305
if (!ri) {
306
/*
307
* Unknown register; this might be a guest error or a QEMU
308
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
309
310
gen_set_condexec(s);
311
gen_update_pc(s, 0);
312
- gen_helper_access_check_cp_reg(cpu_env,
313
- tcg_constant_ptr(ri),
314
+ tcg_ri = tcg_temp_new_ptr();
315
+ gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
316
+ tcg_constant_i32(key),
317
tcg_constant_i32(syndrome),
318
tcg_constant_i32(isread));
319
} else if (ri->type & ARM_CP_RAISES_EXC) {
320
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
321
case 0:
322
break;
323
case ARM_CP_NOP:
324
- return;
325
+ goto exit;
326
case ARM_CP_WFI:
327
if (isread) {
328
unallocated_encoding(s);
329
- return;
330
+ } else {
331
+ gen_update_pc(s, curr_insn_len(s));
332
+ s->base.is_jmp = DISAS_WFI;
333
}
334
- gen_update_pc(s, curr_insn_len(s));
335
- s->base.is_jmp = DISAS_WFI;
336
- return;
337
+ goto exit;
338
default:
339
g_assert_not_reached();
340
}
341
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
342
if (ri->type & ARM_CP_CONST) {
343
tmp64 = tcg_constant_i64(ri->resetvalue);
344
} else if (ri->readfn) {
345
+ if (!tcg_ri) {
346
+ tcg_ri = gen_lookup_cp_reg(key);
347
+ }
348
tmp64 = tcg_temp_new_i64();
349
- gen_helper_get_cp_reg64(tmp64, cpu_env,
350
- tcg_constant_ptr(ri));
351
+ gen_helper_get_cp_reg64(tmp64, cpu_env, tcg_ri);
352
} else {
353
tmp64 = tcg_temp_new_i64();
354
tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
355
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
356
if (ri->type & ARM_CP_CONST) {
357
tmp = tcg_constant_i32(ri->resetvalue);
358
} else if (ri->readfn) {
359
+ if (!tcg_ri) {
360
+ tcg_ri = gen_lookup_cp_reg(key);
361
+ }
362
tmp = tcg_temp_new_i32();
363
- gen_helper_get_cp_reg(tmp, cpu_env, tcg_constant_ptr(ri));
364
+ gen_helper_get_cp_reg(tmp, cpu_env, tcg_ri);
365
} else {
366
tmp = load_cpu_offset(ri->fieldoffset);
367
}
368
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
369
/* Write */
370
if (ri->type & ARM_CP_CONST) {
371
/* If not forbidden by access permissions, treat as WI */
372
- return;
373
+ goto exit;
374
}
375
376
if (is64) {
377
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
378
tcg_temp_free_i32(tmplo);
379
tcg_temp_free_i32(tmphi);
380
if (ri->writefn) {
381
- gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tmp64);
382
+ if (!tcg_ri) {
383
+ tcg_ri = gen_lookup_cp_reg(key);
384
+ }
385
+ gen_helper_set_cp_reg64(cpu_env, tcg_ri, tmp64);
386
} else {
387
tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
388
}
389
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
390
} else {
391
TCGv_i32 tmp = load_reg(s, rt);
392
if (ri->writefn) {
393
- gen_helper_set_cp_reg(cpu_env, tcg_constant_ptr(ri), tmp);
394
+ if (!tcg_ri) {
395
+ tcg_ri = gen_lookup_cp_reg(key);
396
+ }
397
+ gen_helper_set_cp_reg(cpu_env, tcg_ri, tmp);
398
tcg_temp_free_i32(tmp);
399
} else {
400
store_cpu_offset(tmp, ri->fieldoffset, 4);
401
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
402
if (need_exit_tb) {
403
gen_lookup_tb(s);
404
}
405
+
406
+ exit:
407
+ if (tcg_ri) {
408
+ tcg_temp_free_ptr(tcg_ri);
409
+ }
410
}
411
412
/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
65
--
413
--
66
2.20.1
414
2.34.1
67
68
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Continue setting, but not relying upon, env->hflags.
4
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20191023150057.25731-22-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
hw/intc/armv7m_nvic.c | 22 +++++++++++++---------
11
1 file changed, 13 insertions(+), 9 deletions(-)
12
13
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/armv7m_nvic.c
16
+++ b/hw/intc/armv7m_nvic.c
17
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
18
}
19
}
20
nvic_irq_update(s);
21
- return MEMTX_OK;
22
+ goto exit_ok;
23
case 0x200 ... 0x23f: /* NVIC Set pend */
24
/* the special logic in armv7m_nvic_set_pending()
25
* is not needed since IRQs are never escalated
26
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
27
}
28
}
29
nvic_irq_update(s);
30
- return MEMTX_OK;
31
+ goto exit_ok;
32
case 0x300 ... 0x33f: /* NVIC Active */
33
- return MEMTX_OK; /* R/O */
34
+ goto exit_ok; /* R/O */
35
case 0x400 ... 0x5ef: /* NVIC Priority */
36
startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
37
38
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
39
}
40
}
41
nvic_irq_update(s);
42
- return MEMTX_OK;
43
+ goto exit_ok;
44
case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
45
if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
46
- return MEMTX_OK;
47
+ goto exit_ok;
48
}
49
/* fall through */
50
case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
51
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
52
set_prio(s, hdlidx, sbank, newprio);
53
}
54
nvic_irq_update(s);
55
- return MEMTX_OK;
56
+ goto exit_ok;
57
case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
58
if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
59
- return MEMTX_OK;
60
+ goto exit_ok;
61
}
62
/* All bits are W1C, so construct 32 bit value with 0s in
63
* the parts not written by the access size
64
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
65
*/
66
s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
67
}
68
- return MEMTX_OK;
69
+ goto exit_ok;
70
}
71
if (size == 4) {
72
nvic_writel(s, offset, value, attrs);
73
- return MEMTX_OK;
74
+ goto exit_ok;
75
}
76
qemu_log_mask(LOG_GUEST_ERROR,
77
"NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
78
/* This is UNPREDICTABLE; treat as RAZ/WI */
79
+
80
+ exit_ok:
81
+ /* Ensure any changes made are reflected in the cached hflags. */
82
+ arm_rebuild_hflags(&s->cpu->env);
83
return MEMTX_OK;
84
}
85
86
--
87
2.20.1
88
89
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Continue setting, but not relying upon, env->hflags.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20191023150057.25731-24-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
linux-user/arm/cpu_loop.c | 1 +
11
1 file changed, 1 insertion(+)
12
13
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/linux-user/arm/cpu_loop.c
16
+++ b/linux-user/arm/cpu_loop.c
17
@@ -XXX,XX +XXX,XX @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
18
} else {
19
env->cp15.sctlr_el[1] |= SCTLR_B;
20
}
21
+ arm_rebuild_hflags(env);
22
#endif
23
24
ts->stack_base = info->start_stack;
25
--
26
2.20.1
27
28
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This is the payoff.
4
5
From perf record -g data of ubuntu 18 boot and shutdown:
6
7
BEFORE:
8
9
- 23.02% 2.82% qemu-system-aar [.] helper_lookup_tb_ptr
10
- 20.22% helper_lookup_tb_ptr
11
+ 10.05% tb_htable_lookup
12
- 9.13% cpu_get_tb_cpu_state
13
3.20% aa64_va_parameters_both
14
0.55% fp_exception_el
15
16
- 11.66% 4.74% qemu-system-aar [.] cpu_get_tb_cpu_state
17
- 6.96% cpu_get_tb_cpu_state
18
3.63% aa64_va_parameters_both
19
0.60% fp_exception_el
20
0.53% sve_exception_el
21
22
AFTER:
23
24
- 16.40% 3.40% qemu-system-aar [.] helper_lookup_tb_ptr
25
- 13.03% helper_lookup_tb_ptr
26
+ 11.19% tb_htable_lookup
27
0.55% cpu_get_tb_cpu_state
28
29
0.98% 0.71% qemu-system-aar [.] cpu_get_tb_cpu_state
30
31
0.87% 0.24% qemu-system-aar [.] rebuild_hflags_a64
32
33
Before, helper_lookup_tb_ptr is the second hottest function in the
34
application, consuming almost a quarter of the runtime. Within the
35
entire execution, cpu_get_tb_cpu_state consumes about 12%.
36
37
After, helper_lookup_tb_ptr has dropped to the fourth hottest function,
38
with consumption dropping to a sixth of the runtime. Within the
39
entire execution, cpu_get_tb_cpu_state has dropped below 1%, and the
40
supporting function to rebuild hflags also consumes about 1%.
41
42
Assertions are retained for --enable-debug-tcg.
43
44
Tested-by: Alex Bennée <alex.bennee@linaro.org>
45
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
46
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
47
Message-id: 20191023150057.25731-25-richard.henderson@linaro.org
48
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
49
---
50
target/arm/helper.c | 9 ++++++---
51
1 file changed, 6 insertions(+), 3 deletions(-)
52
53
diff --git a/target/arm/helper.c b/target/arm/helper.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/helper.c
56
+++ b/target/arm/helper.c
57
@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
58
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
59
target_ulong *cs_base, uint32_t *pflags)
60
{
61
- uint32_t flags, pstate_for_ss;
62
+ uint32_t flags = env->hflags;
63
+ uint32_t pstate_for_ss;
64
65
*cs_base = 0;
66
- flags = rebuild_hflags_internal(env);
67
+#ifdef CONFIG_DEBUG_TCG
68
+ assert(flags == rebuild_hflags_internal(env));
69
+#endif
70
71
- if (is_a64(env)) {
72
+ if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
73
*pc = env->pc;
74
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
75
flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
76
--
77
2.20.1
78
79
Deleted patch
1
Switch the xilinx_timer code away from bottom-half based ptimers to
2
the new transaction-based ptimer API. This just requires adding
3
begin/commit calls around the various places that modify the ptimer
4
state, and using the new ptimer_init() function to create the timer.
5
1
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20191017132122.4402-3-peter.maydell@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
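The same mechanical conversion recurs in the slavio_timer, mcf5206 and
milkymist-sysctl patches later in this queue. As a hedged sketch of the
before/after shape (MyDeviceState, my_timer_hit and freq_hz are
illustrative names, not taken from any of these files; the ptimer calls
are the ones used by the diffs themselves):

static void example_ptimer_setup(MyDeviceState *s)
{
    /* Old style (what these patches remove): the callback was deferred
     * through a QEMUBH:
     *     s->bh = qemu_bh_new(my_timer_hit, s);
     *     s->ptimer = ptimer_init_with_bh(s->bh, PTIMER_POLICY_DEFAULT);
     *     ptimer_set_freq(s->ptimer, s->freq_hz);
     */

    /* New style: the callback is passed directly to ptimer_init(), and
     * every group of state-modifying calls sits inside a transaction,
     * taking effect only when the transaction is committed. */
    s->ptimer = ptimer_init(my_timer_hit, s, PTIMER_POLICY_DEFAULT);
    ptimer_transaction_begin(s->ptimer);
    ptimer_set_freq(s->ptimer, s->freq_hz);
    ptimer_transaction_commit(s->ptimer);
}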
13
hw/timer/xilinx_timer.c | 13 ++++++++-----
14
1 file changed, 8 insertions(+), 5 deletions(-)
15
16
diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/timer/xilinx_timer.c
19
+++ b/hw/timer/xilinx_timer.c
20
@@ -XXX,XX +XXX,XX @@
21
#include "hw/ptimer.h"
22
#include "hw/qdev-properties.h"
23
#include "qemu/log.h"
24
-#include "qemu/main-loop.h"
25
#include "qemu/module.h"
26
27
#define D(x)
28
@@ -XXX,XX +XXX,XX @@
29
30
struct xlx_timer
31
{
32
- QEMUBH *bh;
33
ptimer_state *ptimer;
34
void *parent;
35
int nr; /* for debug. */
36
@@ -XXX,XX +XXX,XX @@ timer_read(void *opaque, hwaddr addr, unsigned int size)
37
return r;
38
}
39
40
+/* Must be called inside ptimer transaction block */
41
static void timer_enable(struct xlx_timer *xt)
42
{
43
uint64_t count;
44
@@ -XXX,XX +XXX,XX @@ timer_write(void *opaque, hwaddr addr,
45
value &= ~TCSR_TINT;
46
47
xt->regs[addr] = value & 0x7ff;
48
- if (value & TCSR_ENT)
49
+ if (value & TCSR_ENT) {
50
+ ptimer_transaction_begin(xt->ptimer);
51
timer_enable(xt);
52
+ ptimer_transaction_commit(xt->ptimer);
53
+ }
54
break;
55
56
default:
57
@@ -XXX,XX +XXX,XX @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
58
59
xt->parent = t;
60
xt->nr = i;
61
- xt->bh = qemu_bh_new(timer_hit, xt);
62
- xt->ptimer = ptimer_init_with_bh(xt->bh, PTIMER_POLICY_DEFAULT);
63
+ xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_DEFAULT);
64
+ ptimer_transaction_begin(xt->ptimer);
65
ptimer_set_freq(xt->ptimer, t->freq_hz);
66
+ ptimer_transaction_commit(xt->ptimer);
67
}
68
69
memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, "xlnx.xps-timer",
70
--
71
2.20.1
72
73
Deleted patch
1
In the slavio timer device, the ptimer TimerContext::timer is
2
always created by slavio_timer_init(), so there's no need to
3
check it for NULL; remove the single unneeded NULL check.
4
1
5
This will be useful to avoid compiler/Coverity errors when
6
a subsequent change adds a use of t->timer before the location
7
where we currently do the NULL check.
8
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20191021134357.14266-2-peter.maydell@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
15
hw/timer/slavio_timer.c | 12 +++++-------
16
1 file changed, 5 insertions(+), 7 deletions(-)
17
18
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/timer/slavio_timer.c
21
+++ b/hw/timer/slavio_timer.c
22
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
23
// set limit, reset counter
24
qemu_irq_lower(t->irq);
25
t->limit = val & TIMER_MAX_COUNT32;
26
- if (t->timer) {
27
- if (t->limit == 0) { /* free-run */
28
- ptimer_set_limit(t->timer,
29
- LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 1);
30
- } else {
31
- ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 1);
32
- }
33
+ if (t->limit == 0) { /* free-run */
34
+ ptimer_set_limit(t->timer,
35
+ LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 1);
36
+ } else {
37
+ ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 1);
38
}
39
}
40
break;
41
--
42
2.20.1
43
44
Deleted patch
1
Switch the slavio_timer code away from bottom-half based ptimers to
2
the new transaction-based ptimer API. This just requires adding
3
begin/commit calls around the various places that modify the ptimer
4
state, and using the new ptimer_init() function to create the timer.
5
1
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20191021134357.14266-4-peter.maydell@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
hw/timer/slavio_timer.c | 20 ++++++++++++++++----
13
1 file changed, 16 insertions(+), 4 deletions(-)
14
15
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/timer/slavio_timer.c
18
+++ b/hw/timer/slavio_timer.c
19
@@ -XXX,XX +XXX,XX @@
20
#include "hw/sysbus.h"
21
#include "migration/vmstate.h"
22
#include "trace.h"
23
-#include "qemu/main-loop.h"
24
#include "qemu/module.h"
25
26
/*
27
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
28
saddr = addr >> 2;
29
switch (saddr) {
30
case TIMER_LIMIT:
31
+ ptimer_transaction_begin(t->timer);
32
if (slavio_timer_is_user(tc)) {
33
uint64_t count;
34
35
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
36
ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 1);
37
}
38
}
39
+ ptimer_transaction_commit(t->timer);
40
break;
41
case TIMER_COUNTER:
42
if (slavio_timer_is_user(tc)) {
43
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
44
t->reached = 0;
45
count = ((uint64_t)t->counthigh) << 32 | t->count;
46
trace_slavio_timer_mem_writel_limit(timer_index, count);
47
+ ptimer_transaction_begin(t->timer);
48
ptimer_set_count(t->timer, LIMIT_TO_PERIODS(t->limit - count));
49
+ ptimer_transaction_commit(t->timer);
50
} else {
51
trace_slavio_timer_mem_writel_counter_invalid();
52
}
53
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
54
case TIMER_COUNTER_NORST:
55
// set limit without resetting counter
56
t->limit = val & TIMER_MAX_COUNT32;
57
+ ptimer_transaction_begin(t->timer);
58
if (t->limit == 0) { /* free-run */
59
ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 0);
60
} else {
61
ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 0);
62
}
63
+ ptimer_transaction_commit(t->timer);
64
break;
65
case TIMER_STATUS:
66
+ ptimer_transaction_begin(t->timer);
67
if (slavio_timer_is_user(tc)) {
68
// start/stop user counter
69
if (val & 1) {
70
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
71
}
72
}
73
t->run = val & 1;
74
+ ptimer_transaction_commit(t->timer);
75
break;
76
case TIMER_MODE:
77
if (timer_index == 0) {
78
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
79
unsigned int processor = 1 << i;
80
CPUTimerState *curr_timer = &s->cputimer[i + 1];
81
82
+ ptimer_transaction_begin(curr_timer->timer);
83
// check for a change in timer mode for this processor
84
if ((val & processor) != (s->cputimer_mode & processor)) {
85
if (val & processor) { // counter -> user timer
86
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_mem_writel(void *opaque, hwaddr addr,
87
trace_slavio_timer_mem_writel_mode_counter(timer_index);
88
}
89
}
90
+ ptimer_transaction_commit(curr_timer->timer);
91
}
92
} else {
93
trace_slavio_timer_mem_writel_mode_invalid();
94
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_reset(DeviceState *d)
95
curr_timer->count = 0;
96
curr_timer->reached = 0;
97
if (i <= s->num_cpus) {
98
+ ptimer_transaction_begin(curr_timer->timer);
99
ptimer_set_limit(curr_timer->timer,
100
LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 1);
101
ptimer_run(curr_timer->timer, 0);
102
curr_timer->run = 1;
103
+ ptimer_transaction_commit(curr_timer->timer);
104
}
105
}
106
s->cputimer_mode = 0;
107
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_init(Object *obj)
108
{
109
SLAVIO_TIMERState *s = SLAVIO_TIMER(obj);
110
SysBusDevice *dev = SYS_BUS_DEVICE(obj);
111
- QEMUBH *bh;
112
unsigned int i;
113
TimerContext *tc;
114
115
@@ -XXX,XX +XXX,XX @@ static void slavio_timer_init(Object *obj)
116
tc->s = s;
117
tc->timer_index = i;
118
119
- bh = qemu_bh_new(slavio_timer_irq, tc);
120
- s->cputimer[i].timer = ptimer_init_with_bh(bh, PTIMER_POLICY_DEFAULT);
121
+ s->cputimer[i].timer = ptimer_init(slavio_timer_irq, tc,
122
+ PTIMER_POLICY_DEFAULT);
123
+ ptimer_transaction_begin(s->cputimer[i].timer);
124
ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD);
125
+ ptimer_transaction_commit(s->cputimer[i].timer);
126
127
size = i == 0 ? SYS_TIMER_SIZE : CPU_TIMER_SIZE;
128
snprintf(timer_name, sizeof(timer_name), "timer-%i", i);
129
--
130
2.20.1
131
132
Deleted patch
1
Switch the mcf5206 code away from bottom-half based ptimers to
2
the new transaction-based ptimer API. This just requires adding
3
begin/commit calls around the various places that modify the ptimer
4
state, and using the new ptimer_init() function to create the timer.
5
1
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Thomas Huth <thuth@redhat.com>
8
Message-id: 20191021140600.10725-1-peter.maydell@linaro.org
9
---
10
hw/m68k/mcf5206.c | 15 +++++++++------
11
1 file changed, 9 insertions(+), 6 deletions(-)
12
13
diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/m68k/mcf5206.c
16
+++ b/hw/m68k/mcf5206.c
17
@@ -XXX,XX +XXX,XX @@
18
19
#include "qemu/osdep.h"
20
#include "qemu/error-report.h"
21
-#include "qemu/main-loop.h"
22
#include "cpu.h"
23
#include "hw/hw.h"
24
#include "hw/irq.h"
25
@@ -XXX,XX +XXX,XX @@ static void m5206_timer_recalibrate(m5206_timer_state *s)
26
int prescale;
27
int mode;
28
29
+ ptimer_transaction_begin(s->timer);
30
ptimer_stop(s->timer);
31
32
- if ((s->tmr & TMR_RST) == 0)
33
- return;
34
+ if ((s->tmr & TMR_RST) == 0) {
35
+ goto exit;
36
+ }
37
38
prescale = (s->tmr >> 8) + 1;
39
mode = (s->tmr >> 1) & 3;
40
@@ -XXX,XX +XXX,XX @@ static void m5206_timer_recalibrate(m5206_timer_state *s)
41
ptimer_set_limit(s->timer, s->trr, 0);
42
43
ptimer_run(s->timer, 0);
44
+exit:
45
+ ptimer_transaction_commit(s->timer);
46
}
47
48
static void m5206_timer_trigger(void *opaque)
49
@@ -XXX,XX +XXX,XX @@ static void m5206_timer_write(m5206_timer_state *s, uint32_t addr, uint32_t val)
50
s->tcr = val;
51
break;
52
case 0xc:
53
+ ptimer_transaction_begin(s->timer);
54
ptimer_set_count(s->timer, val);
55
+ ptimer_transaction_commit(s->timer);
56
break;
57
case 0x11:
58
s->ter &= ~val;
59
@@ -XXX,XX +XXX,XX @@ static void m5206_timer_write(m5206_timer_state *s, uint32_t addr, uint32_t val)
60
static m5206_timer_state *m5206_timer_init(qemu_irq irq)
61
{
62
m5206_timer_state *s;
63
- QEMUBH *bh;
64
65
s = g_new0(m5206_timer_state, 1);
66
- bh = qemu_bh_new(m5206_timer_trigger, s);
67
- s->timer = ptimer_init_with_bh(bh, PTIMER_POLICY_DEFAULT);
68
+ s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_DEFAULT);
69
s->irq = irq;
70
m5206_timer_reset(s);
71
return s;
72
--
73
2.20.1
74
75
Deleted patch
1
Switch the milkymist-sysctl code away from bottom-half based
2
ptimers to the new transaction-based ptimer API. This just requires
3
adding begin/commit calls around the various places that modify the
4
ptimer state, and using the new ptimer_init() function to create the
5
timer.
6
1
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20191021141040.11007-1-peter.maydell@linaro.org
10
---
11
hw/timer/milkymist-sysctl.c | 25 ++++++++++++++++++-------
12
1 file changed, 18 insertions(+), 7 deletions(-)
13
14
diff --git a/hw/timer/milkymist-sysctl.c b/hw/timer/milkymist-sysctl.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/timer/milkymist-sysctl.c
17
+++ b/hw/timer/milkymist-sysctl.c
18
@@ -XXX,XX +XXX,XX @@
19
#include "hw/ptimer.h"
20
#include "hw/qdev-properties.h"
21
#include "qemu/error-report.h"
22
-#include "qemu/main-loop.h"
23
#include "qemu/module.h"
24
25
enum {
26
@@ -XXX,XX +XXX,XX @@ struct MilkymistSysctlState {
27
28
MemoryRegion regs_region;
29
30
- QEMUBH *bh0;
31
- QEMUBH *bh1;
32
ptimer_state *ptimer0;
33
ptimer_state *ptimer1;
34
35
@@ -XXX,XX +XXX,XX @@ static void sysctl_write(void *opaque, hwaddr addr, uint64_t value,
36
s->regs[addr] = value;
37
break;
38
case R_TIMER0_COMPARE:
39
+ ptimer_transaction_begin(s->ptimer0);
40
ptimer_set_limit(s->ptimer0, value, 0);
41
s->regs[addr] = value;
42
+ ptimer_transaction_commit(s->ptimer0);
43
break;
44
case R_TIMER1_COMPARE:
45
+ ptimer_transaction_begin(s->ptimer1);
46
ptimer_set_limit(s->ptimer1, value, 0);
47
s->regs[addr] = value;
48
+ ptimer_transaction_commit(s->ptimer1);
49
break;
50
case R_TIMER0_CONTROL:
51
+ ptimer_transaction_begin(s->ptimer0);
52
s->regs[addr] = value;
53
if (s->regs[R_TIMER0_CONTROL] & CTRL_ENABLE) {
54
trace_milkymist_sysctl_start_timer0();
55
@@ -XXX,XX +XXX,XX @@ static void sysctl_write(void *opaque, hwaddr addr, uint64_t value,
56
trace_milkymist_sysctl_stop_timer0();
57
ptimer_stop(s->ptimer0);
58
}
59
+ ptimer_transaction_commit(s->ptimer0);
60
break;
61
case R_TIMER1_CONTROL:
62
+ ptimer_transaction_begin(s->ptimer1);
63
s->regs[addr] = value;
64
if (s->regs[R_TIMER1_CONTROL] & CTRL_ENABLE) {
65
trace_milkymist_sysctl_start_timer1();
66
@@ -XXX,XX +XXX,XX @@ static void sysctl_write(void *opaque, hwaddr addr, uint64_t value,
67
trace_milkymist_sysctl_stop_timer1();
68
ptimer_stop(s->ptimer1);
69
}
70
+ ptimer_transaction_commit(s->ptimer1);
71
break;
72
case R_ICAP:
73
sysctl_icap_write(s, value);
74
@@ -XXX,XX +XXX,XX @@ static void milkymist_sysctl_reset(DeviceState *d)
75
s->regs[i] = 0;
76
}
77
78
+ ptimer_transaction_begin(s->ptimer0);
79
ptimer_stop(s->ptimer0);
80
+ ptimer_transaction_commit(s->ptimer0);
81
+ ptimer_transaction_begin(s->ptimer1);
82
ptimer_stop(s->ptimer1);
83
+ ptimer_transaction_commit(s->ptimer1);
84
85
/* defaults */
86
s->regs[R_ICAP] = ICAP_READY;
87
@@ -XXX,XX +XXX,XX @@ static void milkymist_sysctl_realize(DeviceState *dev, Error **errp)
88
{
89
MilkymistSysctlState *s = MILKYMIST_SYSCTL(dev);
90
91
- s->bh0 = qemu_bh_new(timer0_hit, s);
92
- s->bh1 = qemu_bh_new(timer1_hit, s);
93
- s->ptimer0 = ptimer_init_with_bh(s->bh0, PTIMER_POLICY_DEFAULT);
94
- s->ptimer1 = ptimer_init_with_bh(s->bh1, PTIMER_POLICY_DEFAULT);
95
+ s->ptimer0 = ptimer_init(timer0_hit, s, PTIMER_POLICY_DEFAULT);
96
+ s->ptimer1 = ptimer_init(timer1_hit, s, PTIMER_POLICY_DEFAULT);
97
98
+ ptimer_transaction_begin(s->ptimer0);
99
ptimer_set_freq(s->ptimer0, s->freq_hz);
100
+ ptimer_transaction_commit(s->ptimer0);
101
+ ptimer_transaction_begin(s->ptimer1);
102
ptimer_set_freq(s->ptimer1, s->freq_hz);
103
+ ptimer_transaction_commit(s->ptimer1);
104
}
105
106
static const VMStateDescription vmstate_milkymist_sysctl = {
107
--
108
2.20.1
109
110
Deleted patch
1
From: Andrew Jones <drjones@redhat.com>
2
1
3
Add support for the query-cpu-model-expansion QMP command to Arm. We
4
do this selectively, only exposing CPU properties which represent
5
optional CPU features which the user may want to enable/disable.
6
Additionally we restrict the list of queryable cpu models to 'max',
7
'host', or the current type when KVM is in use. And, finally, we only
8
implement expansion type 'full', as Arm does not yet have a "base"
9
CPU type. More details and example queries are described in a new
10
document (docs/arm-cpu-features.rst).
11
12
Note, certainly more features may be added to the list of advertised
13
features, e.g. 'vfp' and 'neon'. The only requirement is that we can
14
detect invalid configurations and emit failures at QMP query time.
15
For 'vfp' and 'neon' this will require some refactoring to share a
16
validation function between the QMP query and the CPU realize
17
functions.
18
19
Signed-off-by: Andrew Jones <drjones@redhat.com>
20
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
21
Reviewed-by: Eric Auger <eric.auger@redhat.com>
22
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
23
Message-id: 20191024121808.9612-2-drjones@redhat.com
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
---
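For anyone driving QMP directly rather than through qmp-shell, the
wire-format equivalent of example (2) in the new document (disabling the
PMU on the 'max' CPU) would look roughly like this; the reply shape
matches the doc example:

{ "execute": "query-cpu-model-expansion",
  "arguments": { "type": "full",
                 "model": { "name": "max", "props": { "pmu": false } } } }

{ "return": { "model": { "name": "max",
                         "props": { "pmu": false, "aarch64": true } } } }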
26
qapi/machine-target.json | 6 +-
27
target/arm/monitor.c | 146 ++++++++++++++++++++++++++++++++++++++
28
docs/arm-cpu-features.rst | 137 +++++++++++++++++++++++++++++++++++
29
3 files changed, 286 insertions(+), 3 deletions(-)
30
create mode 100644 docs/arm-cpu-features.rst
31
32
diff --git a/qapi/machine-target.json b/qapi/machine-target.json
33
index XXXXXXX..XXXXXXX 100644
34
--- a/qapi/machine-target.json
35
+++ b/qapi/machine-target.json
36
@@ -XXX,XX +XXX,XX @@
37
##
38
{ 'struct': 'CpuModelExpansionInfo',
39
'data': { 'model': 'CpuModelInfo' },
40
- 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' }
41
+ 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' }
42
43
##
44
# @query-cpu-model-expansion:
45
@@ -XXX,XX +XXX,XX @@
46
# query-cpu-model-expansion while using these is not advised.
47
#
48
# Some architectures may not support all expansion types. s390x supports
49
-# "full" and "static".
50
+# "full" and "static". Arm only supports "full".
51
#
52
# Returns: a CpuModelExpansionInfo. Returns an error if expanding CPU models is
53
# not supported, if the model cannot be expanded, if the model contains
54
@@ -XXX,XX +XXX,XX @@
55
'data': { 'type': 'CpuModelExpansionType',
56
'model': 'CpuModelInfo' },
57
'returns': 'CpuModelExpansionInfo',
58
- 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' }
59
+ 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' }
60
61
##
62
# @CpuDefinitionInfo:
63
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/monitor.c
66
+++ b/target/arm/monitor.c
67
@@ -XXX,XX +XXX,XX @@
68
*/
69
70
#include "qemu/osdep.h"
71
+#include "hw/boards.h"
72
#include "kvm_arm.h"
73
+#include "qapi/error.h"
74
+#include "qapi/visitor.h"
75
+#include "qapi/qobject-input-visitor.h"
76
+#include "qapi/qapi-commands-machine-target.h"
77
#include "qapi/qapi-commands-misc-target.h"
78
+#include "qapi/qmp/qerror.h"
79
+#include "qapi/qmp/qdict.h"
80
+#include "qom/qom-qobject.h"
81
82
static GICCapability *gic_cap_new(int version)
83
{
84
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
85
86
return head;
87
}
88
+
89
+/*
90
+ * These are cpu model features we want to advertise. The order here
91
+ * matters as this is the order in which qmp_query_cpu_model_expansion
92
+ * will attempt to set them. If there are dependencies between features,
93
+ * then the order that considers those dependencies must be used.
94
+ */
95
+static const char *cpu_model_advertised_features[] = {
96
+ "aarch64", "pmu",
97
+ NULL
98
+};
99
+
100
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
101
+ CpuModelInfo *model,
102
+ Error **errp)
103
+{
104
+ CpuModelExpansionInfo *expansion_info;
105
+ const QDict *qdict_in = NULL;
106
+ QDict *qdict_out;
107
+ ObjectClass *oc;
108
+ Object *obj;
109
+ const char *name;
110
+ int i;
111
+
112
+ if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
113
+ error_setg(errp, "The requested expansion type is not supported");
114
+ return NULL;
115
+ }
116
+
117
+ if (!kvm_enabled() && !strcmp(model->name, "host")) {
118
+ error_setg(errp, "The CPU type '%s' requires KVM", model->name);
119
+ return NULL;
120
+ }
121
+
122
+ oc = cpu_class_by_name(TYPE_ARM_CPU, model->name);
123
+ if (!oc) {
124
+ error_setg(errp, "The CPU type '%s' is not a recognized ARM CPU type",
125
+ model->name);
126
+ return NULL;
127
+ }
128
+
129
+ if (kvm_enabled()) {
130
+ const char *cpu_type = current_machine->cpu_type;
131
+ int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
132
+ bool supported = false;
133
+
134
+ if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) {
135
+ /* These are kvmarm's recommended cpu types */
136
+ supported = true;
137
+ } else if (strlen(model->name) == len &&
138
+ !strncmp(model->name, cpu_type, len)) {
139
+ /* KVM is enabled and we're using this type, so it works. */
140
+ supported = true;
141
+ }
142
+ if (!supported) {
143
+ error_setg(errp, "We cannot guarantee the CPU type '%s' works "
144
+ "with KVM on this host", model->name);
145
+ return NULL;
146
+ }
147
+ }
148
+
149
+ if (model->props) {
150
+ qdict_in = qobject_to(QDict, model->props);
151
+ if (!qdict_in) {
152
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
153
+ return NULL;
154
+ }
155
+ }
156
+
157
+ obj = object_new(object_class_get_name(oc));
158
+
159
+ if (qdict_in) {
160
+ Visitor *visitor;
161
+ Error *err = NULL;
162
+
163
+ visitor = qobject_input_visitor_new(model->props);
164
+ visit_start_struct(visitor, NULL, NULL, 0, &err);
165
+ if (err) {
166
+ visit_free(visitor);
167
+ object_unref(obj);
168
+ error_propagate(errp, err);
169
+ return NULL;
170
+ }
171
+
172
+ i = 0;
173
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
174
+ if (qdict_get(qdict_in, name)) {
175
+ object_property_set(obj, visitor, name, &err);
176
+ if (err) {
177
+ break;
178
+ }
179
+ }
180
+ }
181
+
182
+ if (!err) {
183
+ visit_check_struct(visitor, &err);
184
+ }
185
+ visit_end_struct(visitor, NULL);
186
+ visit_free(visitor);
187
+ if (err) {
188
+ object_unref(obj);
189
+ error_propagate(errp, err);
190
+ return NULL;
191
+ }
192
+ }
193
+
194
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
195
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
196
+ expansion_info->model->name = g_strdup(model->name);
197
+
198
+ qdict_out = qdict_new();
199
+
200
+ i = 0;
201
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
202
+ ObjectProperty *prop = object_property_find(obj, name, NULL);
203
+ if (prop) {
204
+ Error *err = NULL;
205
+ QObject *value;
206
+
207
+ assert(prop->get);
208
+ value = object_property_get_qobject(obj, name, &err);
209
+ assert(!err);
210
+
211
+ qdict_put_obj(qdict_out, name, value);
212
+ }
213
+ }
214
+
215
+ if (!qdict_size(qdict_out)) {
216
+ qobject_unref(qdict_out);
217
+ } else {
218
+ expansion_info->model->props = QOBJECT(qdict_out);
219
+ expansion_info->model->has_props = true;
220
+ }
221
+
222
+ object_unref(obj);
223
+
224
+ return expansion_info;
225
+}
226
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
227
new file mode 100644
228
index XXXXXXX..XXXXXXX
229
--- /dev/null
230
+++ b/docs/arm-cpu-features.rst
231
@@ -XXX,XX +XXX,XX @@
232
+================
233
+ARM CPU Features
234
+================
235
+
236
+Examples of probing and using ARM CPU features
237
+
238
+Introduction
239
+============
240
+
241
+CPU features are optional features that a CPU of supporting type may
242
+choose to implement or not. In QEMU, optional CPU features have
243
+corresponding boolean CPU proprieties that, when enabled, indicate
244
+that the feature is implemented, and, conversely, when disabled,
245
+indicate that it is not implemented. An example of an ARM CPU feature
246
+is the Performance Monitoring Unit (PMU). CPU types such as the
247
+Cortex-A15 and the Cortex-A57, which respectively implement ARM
248
+architecture reference manuals ARMv7-A and ARMv8-A, may both optionally
249
+implement PMUs. For example, if a user wants to use a Cortex-A15 without
250
+a PMU, then the `-cpu` parameter should contain `pmu=off` on the QEMU
251
+command line, i.e. `-cpu cortex-a15,pmu=off`.
252
+
253
+As not all CPU types support all optional CPU features, then whether or
254
+not a CPU property exists depends on the CPU type. For example, CPUs
255
+that implement the ARMv8-A architecture reference manual may optionally
256
+support the AArch32 CPU feature, which may be enabled by disabling the
257
+`aarch64` CPU property. A CPU type such as the Cortex-A15, which does
258
+not implement ARMv8-A, will not have the `aarch64` CPU property.
259
+
260
+QEMU's support may be limited for some CPU features, only partially
261
+supporting the feature or only supporting the feature under certain
262
+configurations. For example, the `aarch64` CPU feature, which, when
263
+disabled, enables the optional AArch32 CPU feature, is only supported
264
+when using the KVM accelerator and when running on a host CPU type that
265
+supports the feature.
266
+
267
+CPU Feature Probing
268
+===================
269
+
270
+Determining which CPU features are available and functional for a given
271
+CPU type is possible with the `query-cpu-model-expansion` QMP command.
272
+Below are some examples where `scripts/qmp/qmp-shell` (see the top comment
273
+block in the script for usage) is used to issue the QMP commands.
274
+
275
+(1) Determine which CPU features are available for the `max` CPU type
276
+ (Note, we started QEMU with qemu-system-aarch64, so `max` is
277
+ implementing the ARMv8-A reference manual in this case)::
278
+
279
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max"}
280
+ { "return": {
281
+ "model": { "name": "max", "props": {
282
+ "pmu": true, "aarch64": true
283
+ }}}}
284
+
285
+We see that the `max` CPU type has the `pmu` and `aarch64` CPU features.
286
+We also see that the CPU features are enabled, as they are all `true`.
287
+
288
+(2) Let's try to disable the PMU::
289
+
290
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"pmu":false}}
291
+ { "return": {
292
+ "model": { "name": "max", "props": {
293
+ "pmu": false, "aarch64": true
294
+ }}}}
295
+
296
+We see it worked, as `pmu` is now `false`.
297
+
298
+(3) Let's try to disable `aarch64`, which enables the AArch32 CPU feature::
299
+
300
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"aarch64":false}}
301
+ {"error": {
302
+ "class": "GenericError", "desc":
303
+ "'aarch64' feature cannot be disabled unless KVM is enabled and 32-bit EL1 is supported"
304
+ }}
305
+
306
+It looks like this feature is limited to a configuration we do not
307
+currently have.
308
+
309
+(4) Let's try probing CPU features for the Cortex-A15 CPU type::
310
+
311
+ (QEMU) query-cpu-model-expansion type=full model={"name":"cortex-a15"}
312
+ {"return": {"model": {"name": "cortex-a15", "props": {"pmu": true}}}}
313
+
314
+Only the `pmu` CPU feature is available.
315
+
316
+A note about CPU feature dependencies
317
+-------------------------------------
318
+
319
+It's possible for features to have dependencies on other features. I.e.
320
+it may be possible to change one feature at a time without error, but
321
+when attempting to change all features at once an error could occur
322
+depending on the order they are processed. It's also possible changing
323
+all at once doesn't generate an error, because a feature's dependencies
324
+are satisfied with other features, but the same feature cannot be changed
325
+independently without error. For these reasons callers should always
326
+attempt to make their desired changes all at once in order to ensure the
327
+collection is valid.
328
+
329
+A note about CPU models and KVM
330
+-------------------------------
331
+
332
+Named CPU models generally do not work with KVM. There are a few cases
333
+that do work, e.g. using the named CPU model `cortex-a57` with KVM on a
334
+seattle host, but mostly if KVM is enabled the `host` CPU type must be
335
+used. This means the guest is provided all the same CPU features as the
336
+host CPU type has. And, for this reason, the `host` CPU type should
337
+enable all CPU features that the host has by default. Indeed it's even
338
+a bit strange to allow disabling CPU features that the host has when using
339
+the `host` CPU type, but in the absence of CPU models it's the best we can
340
+do if we want to launch guests without all the host's CPU features enabled.
341
+
342
+Enabling KVM also affects the `query-cpu-model-expansion` QMP command. The
343
+affect is not only limited to specific features, as pointed out in example
344
+(3) of "CPU Feature Probing", but also to which CPU types may be expanded.
345
+When KVM is enabled, only the `max`, `host`, and current CPU type may be
346
+expanded. This restriction is necessary as it's not possible to know all
347
+CPU types that may work with KVM, but it does impose a small risk of users
348
+experiencing unexpected errors. For example on a seattle, as mentioned
349
+above, the `cortex-a57` CPU type is also valid when KVM is enabled.
350
+Therefore a user could use the `host` CPU type for the current type, but
351
+then attempt to query `cortex-a57`, however that query will fail with our
352
+restrictions. This shouldn't be an issue though as management layers and
353
+users have been preferring the `host` CPU type for use with KVM for quite
354
+some time. Additionally, if the KVM-enabled QEMU instance running on a
355
+seattle host is using the `cortex-a57` CPU type, then querying `cortex-a57`
356
+will work.
357
+
358
+Using CPU Features
359
+==================
360
+
361
+After determining which CPU features are available and supported for a
362
+given CPU type, then they may be selectively enabled or disabled on the
363
+QEMU command line with that CPU type::
364
+
365
+ $ qemu-system-aarch64 -M virt -cpu max,pmu=off
366
+
367
+The example above disables the PMU for the `max` CPU type.
368
+
369
--
370
2.20.1
371
372
Deleted patch
1
From: Andrew Jones <drjones@redhat.com>
2
1
3
Now that Arm CPUs have advertised features, let's add tests to ensure
4
we maintain their expected availability with and without KVM.
5
6
Signed-off-by: Andrew Jones <drjones@redhat.com>
7
Reviewed-by: Eric Auger <eric.auger@redhat.com>
8
Message-id: 20191024121808.9612-3-drjones@redhat.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
tests/Makefile.include | 5 +-
12
tests/arm-cpu-features.c | 240 +++++++++++++++++++++++++++++++++++++++
13
2 files changed, 244 insertions(+), 1 deletion(-)
14
create mode 100644 tests/arm-cpu-features.c
15
16
diff --git a/tests/Makefile.include b/tests/Makefile.include
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/Makefile.include
19
+++ b/tests/Makefile.include
20
@@ -XXX,XX +XXX,XX @@ check-qtest-sparc64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
21
check-qtest-sparc64-y += tests/prom-env-test$(EXESUF)
22
check-qtest-sparc64-y += tests/boot-serial-test$(EXESUF)
23
24
+check-qtest-arm-y += tests/arm-cpu-features$(EXESUF)
25
check-qtest-arm-y += tests/microbit-test$(EXESUF)
26
check-qtest-arm-y += tests/m25p80-test$(EXESUF)
27
check-qtest-arm-y += tests/test-arm-mptimer$(EXESUF)
28
@@ -XXX,XX +XXX,XX @@ check-qtest-arm-y += tests/boot-serial-test$(EXESUF)
29
check-qtest-arm-y += tests/hexloader-test$(EXESUF)
30
check-qtest-arm-$(CONFIG_PFLASH_CFI02) += tests/pflash-cfi02-test$(EXESUF)
31
32
-check-qtest-aarch64-y = tests/numa-test$(EXESUF)
33
+check-qtest-aarch64-y += tests/arm-cpu-features$(EXESUF)
34
+check-qtest-aarch64-y += tests/numa-test$(EXESUF)
35
check-qtest-aarch64-y += tests/boot-serial-test$(EXESUF)
36
check-qtest-aarch64-y += tests/migration-test$(EXESUF)
37
# TODO: once aarch64 TCG is fixed on ARM 32 bit host, make test unconditional
38
@@ -XXX,XX +XXX,XX @@ tests/test-qapi-util$(EXESUF): tests/test-qapi-util.o $(test-util-obj-y)
39
tests/numa-test$(EXESUF): tests/numa-test.o
40
tests/vmgenid-test$(EXESUF): tests/vmgenid-test.o tests/boot-sector.o tests/acpi-utils.o
41
tests/cdrom-test$(EXESUF): tests/cdrom-test.o tests/boot-sector.o $(libqos-obj-y)
42
+tests/arm-cpu-features$(EXESUF): tests/arm-cpu-features.o
43
44
tests/migration/stress$(EXESUF): tests/migration/stress.o
45
    $(call quiet-command, $(LINKPROG) -static -O3 $(PTHREAD_LIB) -o $@ $< ,"LINK","$(TARGET_DIR)$@")
46
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
47
new file mode 100644
48
index XXXXXXX..XXXXXXX
49
--- /dev/null
50
+++ b/tests/arm-cpu-features.c
51
@@ -XXX,XX +XXX,XX @@
52
+/*
53
+ * Arm CPU feature test cases
54
+ *
55
+ * Copyright (c) 2019 Red Hat Inc.
56
+ * Authors:
57
+ * Andrew Jones <drjones@redhat.com>
58
+ *
59
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
60
+ * See the COPYING file in the top-level directory.
61
+ */
62
+#include "qemu/osdep.h"
63
+#include "libqtest.h"
64
+#include "qapi/qmp/qdict.h"
65
+#include "qapi/qmp/qjson.h"
66
+
67
+#define MACHINE "-machine virt,gic-version=max "
68
+#define QUERY_HEAD "{ 'execute': 'query-cpu-model-expansion', " \
69
+ "'arguments': { 'type': 'full', "
70
+#define QUERY_TAIL "}}"
71
+
72
+static QDict *do_query_no_props(QTestState *qts, const char *cpu_type)
73
+{
74
+ return qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s }"
75
+ QUERY_TAIL, cpu_type);
76
+}
77
+
78
+static QDict *do_query(QTestState *qts, const char *cpu_type,
79
+ const char *fmt, ...)
80
+{
81
+ QDict *resp;
82
+
83
+ if (fmt) {
84
+ QDict *args;
85
+ va_list ap;
86
+
87
+ va_start(ap, fmt);
88
+ args = qdict_from_vjsonf_nofail(fmt, ap);
89
+ va_end(ap);
90
+
91
+ resp = qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s, "
92
+ "'props': %p }"
93
+ QUERY_TAIL, cpu_type, args);
94
+ } else {
95
+ resp = do_query_no_props(qts, cpu_type);
96
+ }
97
+
98
+ return resp;
99
+}
100
+
101
+static const char *resp_get_error(QDict *resp)
102
+{
103
+ QDict *qdict;
104
+
105
+ g_assert(resp);
106
+
107
+ qdict = qdict_get_qdict(resp, "error");
108
+ if (qdict) {
109
+ return qdict_get_str(qdict, "desc");
110
+ }
111
+ return NULL;
112
+}
113
+
114
+#define assert_error(qts, cpu_type, expected_error, fmt, ...) \
115
+({ \
116
+ QDict *_resp; \
117
+ const char *_error; \
118
+ \
119
+ _resp = do_query(qts, cpu_type, fmt, ##__VA_ARGS__); \
120
+ g_assert(_resp); \
121
+ _error = resp_get_error(_resp); \
122
+ g_assert(_error); \
123
+ g_assert(g_str_equal(_error, expected_error)); \
124
+ qobject_unref(_resp); \
125
+})
126
+
127
+static bool resp_has_props(QDict *resp)
128
+{
129
+ QDict *qdict;
130
+
131
+ g_assert(resp);
132
+
133
+ if (!qdict_haskey(resp, "return")) {
134
+ return false;
135
+ }
136
+ qdict = qdict_get_qdict(resp, "return");
137
+
138
+ if (!qdict_haskey(qdict, "model")) {
139
+ return false;
140
+ }
141
+ qdict = qdict_get_qdict(qdict, "model");
142
+
143
+ return qdict_haskey(qdict, "props");
144
+}
145
+
146
+static QDict *resp_get_props(QDict *resp)
147
+{
148
+ QDict *qdict;
149
+
150
+ g_assert(resp);
151
+ g_assert(resp_has_props(resp));
152
+
153
+ qdict = qdict_get_qdict(resp, "return");
154
+ qdict = qdict_get_qdict(qdict, "model");
155
+ qdict = qdict_get_qdict(qdict, "props");
156
+ return qdict;
157
+}
158
+
159
+#define assert_has_feature(qts, cpu_type, feature) \
160
+({ \
161
+ QDict *_resp = do_query_no_props(qts, cpu_type); \
162
+ g_assert(_resp); \
163
+ g_assert(resp_has_props(_resp)); \
164
+ g_assert(qdict_get(resp_get_props(_resp), feature)); \
165
+ qobject_unref(_resp); \
166
+})
167
+
168
+#define assert_has_not_feature(qts, cpu_type, feature) \
169
+({ \
170
+ QDict *_resp = do_query_no_props(qts, cpu_type); \
171
+ g_assert(_resp); \
172
+ g_assert(!resp_has_props(_resp) || \
173
+ !qdict_get(resp_get_props(_resp), feature)); \
174
+ qobject_unref(_resp); \
175
+})
176
+
177
+static void assert_type_full(QTestState *qts)
178
+{
179
+ const char *error;
180
+ QDict *resp;
181
+
182
+ resp = qtest_qmp(qts, "{ 'execute': 'query-cpu-model-expansion', "
183
+ "'arguments': { 'type': 'static', "
184
+ "'model': { 'name': 'foo' }}}");
185
+ g_assert(resp);
186
+ error = resp_get_error(resp);
187
+ g_assert(error);
188
+ g_assert(g_str_equal(error,
189
+ "The requested expansion type is not supported"));
190
+ qobject_unref(resp);
191
+}
192
+
193
+static void assert_bad_props(QTestState *qts, const char *cpu_type)
194
+{
195
+ const char *error;
196
+ QDict *resp;
197
+
198
+ resp = qtest_qmp(qts, "{ 'execute': 'query-cpu-model-expansion', "
199
+ "'arguments': { 'type': 'full', "
200
+ "'model': { 'name': %s, "
201
+ "'props': false }}}",
202
+ cpu_type);
203
+ g_assert(resp);
204
+ error = resp_get_error(resp);
205
+ g_assert(error);
206
+ g_assert(g_str_equal(error,
207
+ "Invalid parameter type for 'props', expected: dict"));
208
+ qobject_unref(resp);
209
+}
210
+
211
+static void test_query_cpu_model_expansion(const void *data)
212
+{
213
+ QTestState *qts;
214
+
215
+ qts = qtest_init(MACHINE "-cpu max");
216
+
217
+ /* Test common query-cpu-model-expansion input validation */
218
+ assert_type_full(qts);
219
+ assert_bad_props(qts, "max");
220
+ assert_error(qts, "foo", "The CPU type 'foo' is not a recognized "
221
+ "ARM CPU type", NULL);
222
+ assert_error(qts, "max", "Parameter 'not-a-prop' is unexpected",
223
+ "{ 'not-a-prop': false }");
224
+ assert_error(qts, "host", "The CPU type 'host' requires KVM", NULL);
225
+
226
+ /* Test expected feature presence/absence for some cpu types */
227
+ assert_has_feature(qts, "max", "pmu");
228
+ assert_has_feature(qts, "cortex-a15", "pmu");
229
+ assert_has_not_feature(qts, "cortex-a15", "aarch64");
230
+
231
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
232
+ assert_has_feature(qts, "max", "aarch64");
233
+ assert_has_feature(qts, "cortex-a57", "pmu");
234
+ assert_has_feature(qts, "cortex-a57", "aarch64");
235
+
236
+ /* Test that features that depend on KVM generate errors without. */
237
+ assert_error(qts, "max",
238
+ "'aarch64' feature cannot be disabled "
239
+ "unless KVM is enabled and 32-bit EL1 "
240
+ "is supported",
241
+ "{ 'aarch64': false }");
242
+ }
243
+
244
+ qtest_quit(qts);
245
+}
246
+
247
+static void test_query_cpu_model_expansion_kvm(const void *data)
248
+{
249
+ QTestState *qts;
250
+
251
+ qts = qtest_init(MACHINE "-accel kvm -cpu host");
252
+
253
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
254
+ assert_has_feature(qts, "host", "aarch64");
255
+ assert_has_feature(qts, "host", "pmu");
256
+
257
+ assert_error(qts, "cortex-a15",
258
+ "We cannot guarantee the CPU type 'cortex-a15' works "
259
+ "with KVM on this host", NULL);
260
+ } else {
261
+ assert_has_not_feature(qts, "host", "aarch64");
262
+ assert_has_not_feature(qts, "host", "pmu");
263
+ }
264
+
265
+ qtest_quit(qts);
266
+}
267
+
268
+int main(int argc, char **argv)
269
+{
270
+ bool kvm_available = false;
271
+
272
+ if (!access("/dev/kvm", R_OK | W_OK)) {
273
+#if defined(HOST_AARCH64)
274
+ kvm_available = g_str_equal(qtest_get_arch(), "aarch64");
275
+#elif defined(HOST_ARM)
276
+ kvm_available = g_str_equal(qtest_get_arch(), "arm");
277
+#endif
278
+ }
279
+
280
+ g_test_init(&argc, &argv, NULL);
281
+
282
+ qtest_add_data_func("/arm/query-cpu-model-expansion",
283
+ NULL, test_query_cpu_model_expansion);
284
+
285
+ if (kvm_available) {
286
+ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
287
+ NULL, test_query_cpu_model_expansion_kvm);
288
+ }
289
+
290
+ return g_test_run();
291
+}
292
--
293
2.20.1
294
295
Deleted patch
1
From: Andrew Jones <drjones@redhat.com>
2
1
3
kvm_arm_create_scratch_host_vcpu() takes a struct kvm_vcpu_init
4
parameter. Rather than just using it as an output parameter to
5
pass back the preferred target, use it also as an input parameter,
6
allowing a caller to pass a selected target if they wish and to
7
also pass cpu features. If the caller doesn't want to select a
8
target they can pass -1 for the target which indicates they want
9
to use the preferred target and have it passed back like before.
10
11
Signed-off-by: Andrew Jones <drjones@redhat.com>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Eric Auger <eric.auger@redhat.com>
14
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
15
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
16
Message-id: 20191024121808.9612-8-drjones@redhat.com
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
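A hedged sketch of the new calling convention from a caller's point of
view (the function is illustrative and mirrors the kvm64.c hunk below;
kvm_arm_destroy_scratch_host_vcpu() is the existing teardown helper in
target/arm/kvm.c):

static bool example_probe_host(void)
{
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /* target = -1: let the helper query KVM_ARM_PREFERRED_TARGET itself.
     * Bits the caller sets in init.features[] are now preserved and
     * passed through to KVM_ARM_VCPU_INIT instead of being zeroed. */
    struct kvm_vcpu_init init = { .target = -1, };
    int fdarray[3];

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }
    /* ... read ID registers through the scratch vcpu fd, fdarray[2] ... */
    kvm_arm_destroy_scratch_host_vcpu(fdarray);
    return true;
}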
19
target/arm/kvm.c | 20 +++++++++++++++-----
20
target/arm/kvm32.c | 6 +++++-
21
target/arm/kvm64.c | 6 +++++-
22
3 files changed, 25 insertions(+), 7 deletions(-)
23
24
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/kvm.c
27
+++ b/target/arm/kvm.c
28
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
29
int *fdarray,
30
struct kvm_vcpu_init *init)
31
{
32
- int ret, kvmfd = -1, vmfd = -1, cpufd = -1;
33
+ int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
34
35
kvmfd = qemu_open("/dev/kvm", O_RDWR);
36
if (kvmfd < 0) {
37
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
38
goto finish;
39
}
40
41
- ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
42
+ if (init->target == -1) {
43
+ struct kvm_vcpu_init preferred;
44
+
45
+ ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
46
+ if (!ret) {
47
+ init->target = preferred.target;
48
+ }
49
+ }
50
if (ret >= 0) {
51
ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
52
if (ret < 0) {
53
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
54
* creating one kind of guest CPU which is its preferred
55
* CPU type.
56
*/
57
+ struct kvm_vcpu_init try;
58
+
59
while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
60
- init->target = *cpus_to_try++;
61
- memset(init->features, 0, sizeof(init->features));
62
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
63
+ try.target = *cpus_to_try++;
64
+ memcpy(try.features, init->features, sizeof(init->features));
65
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
66
if (ret >= 0) {
67
break;
68
}
69
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
70
if (ret < 0) {
71
goto err;
72
}
73
+ init->target = try.target;
74
} else {
75
/* Treat a NULL cpus_to_try argument the same as an empty
76
* list, which means we will fail the call since this must
77
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/kvm32.c
80
+++ b/target/arm/kvm32.c
81
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
82
QEMU_KVM_ARM_TARGET_CORTEX_A15,
83
QEMU_KVM_ARM_TARGET_NONE
84
};
85
- struct kvm_vcpu_init init;
86
+ /*
87
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
88
+ * to use the preferred target
89
+ */
90
+ struct kvm_vcpu_init init = { .target = -1, };
91
92
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
93
return false;
94
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/kvm64.c
97
+++ b/target/arm/kvm64.c
98
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
99
KVM_ARM_TARGET_CORTEX_A57,
100
QEMU_KVM_ARM_TARGET_NONE
101
};
102
- struct kvm_vcpu_init init;
103
+ /*
104
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
105
+ * to use the preferred target
106
+ */
107
+ struct kvm_vcpu_init init = { .target = -1, };
108
109
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
110
return false;
111
--
112
2.20.1
113
114
Deleted patch
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
1
3
We will soon implement the SYS_timer. This timer is used by Linux
4
in the thermal subsystem, so once available, the subsystem will be
5
enabled and poll the temperature sensors. We need to provide the
6
minimum required to keep Linux booting.
7
8
Add a dummy thermal sensor returning ~25°C based on:
9
https://github.com/raspberrypi/linux/blob/rpi-5.3.y/drivers/thermal/broadcom/bcm2835_thermal.c
10
11
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-id: 20191019234715.25750-2-f4bug@amsat.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
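To make the STAT encoding concrete, a small standalone worked example
(the constants are the ones defined in the new file; the decode step is
roughly what the Linux driver referenced above does with its
offset/slope pair):

#include <stdio.h>

int main(void)
{
    const int offset_c = 412;       /* THERMAL_OFFSET_C */
    const float coeff = -0.538f;    /* THERMAL_COEFF */

    /* Encode 25 degC the way bcm2835_thermal_temp2adc() does: */
    unsigned adc = (unsigned)((25 - offset_c) / coeff);    /* 719 */

    /* Decode it back the way a guest driver would: ~25.2 degC */
    printf("adc=%u -> %.1f degC\n", adc, offset_c + adc * coeff);
    return 0;
}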
16
hw/misc/Makefile.objs | 1 +
17
include/hw/misc/bcm2835_thermal.h | 27 ++++++
18
hw/misc/bcm2835_thermal.c | 135 ++++++++++++++++++++++++++++++
19
3 files changed, 163 insertions(+)
20
create mode 100644 include/hw/misc/bcm2835_thermal.h
21
create mode 100644 hw/misc/bcm2835_thermal.c
22
23
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
24
index XXXXXXX..XXXXXXX 100644
25
--- a/hw/misc/Makefile.objs
26
+++ b/hw/misc/Makefile.objs
27
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_OMAP) += omap_tap.o
28
common-obj-$(CONFIG_RASPI) += bcm2835_mbox.o
29
common-obj-$(CONFIG_RASPI) += bcm2835_property.o
30
common-obj-$(CONFIG_RASPI) += bcm2835_rng.o
31
+common-obj-$(CONFIG_RASPI) += bcm2835_thermal.o
32
common-obj-$(CONFIG_SLAVIO) += slavio_misc.o
33
common-obj-$(CONFIG_ZYNQ) += zynq_slcr.o
34
common-obj-$(CONFIG_ZYNQ) += zynq-xadc.o
35
diff --git a/include/hw/misc/bcm2835_thermal.h b/include/hw/misc/bcm2835_thermal.h
36
new file mode 100644
37
index XXXXXXX..XXXXXXX
38
--- /dev/null
39
+++ b/include/hw/misc/bcm2835_thermal.h
40
@@ -XXX,XX +XXX,XX @@
41
+/*
42
+ * BCM2835 dummy thermal sensor
43
+ *
44
+ * Copyright (C) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
45
+ *
46
+ * SPDX-License-Identifier: GPL-2.0-or-later
47
+ */
48
+
49
+#ifndef HW_MISC_BCM2835_THERMAL_H
50
+#define HW_MISC_BCM2835_THERMAL_H
51
+
52
+#include "hw/sysbus.h"
53
+
54
+#define TYPE_BCM2835_THERMAL "bcm2835-thermal"
55
+
56
+#define BCM2835_THERMAL(obj) \
57
+ OBJECT_CHECK(Bcm2835ThermalState, (obj), TYPE_BCM2835_THERMAL)
58
+
59
+typedef struct {
60
+ /*< private >*/
61
+ SysBusDevice parent_obj;
62
+ /*< public >*/
63
+ MemoryRegion iomem;
64
+ uint32_t ctl;
65
+} Bcm2835ThermalState;
66
+
67
+#endif
68
diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c
69
new file mode 100644
70
index XXXXXXX..XXXXXXX
71
--- /dev/null
72
+++ b/hw/misc/bcm2835_thermal.c
73
@@ -XXX,XX +XXX,XX @@
74
+/*
75
+ * BCM2835 dummy thermal sensor
76
+ *
77
+ * Copyright (C) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
78
+ *
79
+ * SPDX-License-Identifier: GPL-2.0-or-later
80
+ */
81
+
82
+#include "qemu/osdep.h"
83
+#include "qemu/log.h"
84
+#include "qapi/error.h"
85
+#include "hw/misc/bcm2835_thermal.h"
86
+#include "hw/registerfields.h"
87
+#include "migration/vmstate.h"
88
+
89
+REG32(CTL, 0)
90
+FIELD(CTL, POWER_DOWN, 0, 1)
91
+FIELD(CTL, RESET, 1, 1)
92
+FIELD(CTL, BANDGAP_CTRL, 2, 3)
93
+FIELD(CTL, INTERRUPT_ENABLE, 5, 1)
94
+FIELD(CTL, DIRECT, 6, 1)
95
+FIELD(CTL, INTERRUPT_CLEAR, 7, 1)
96
+FIELD(CTL, HOLD, 8, 10)
97
+FIELD(CTL, RESET_DELAY, 18, 8)
98
+FIELD(CTL, REGULATOR_ENABLE, 26, 1)
99
+
100
+REG32(STAT, 4)
101
+FIELD(STAT, DATA, 0, 10)
102
+FIELD(STAT, VALID, 10, 1)
103
+FIELD(STAT, INTERRUPT, 11, 1)
104
+
105
+#define THERMAL_OFFSET_C 412
106
+#define THERMAL_COEFF (-0.538f)
107
+
108
+static uint16_t bcm2835_thermal_temp2adc(int temp_C)
109
+{
110
+ return (temp_C - THERMAL_OFFSET_C) / THERMAL_COEFF;
111
+}
112
+
113
+static uint64_t bcm2835_thermal_read(void *opaque, hwaddr addr, unsigned size)
114
+{
115
+ Bcm2835ThermalState *s = BCM2835_THERMAL(opaque);
116
+ uint32_t val = 0;
117
+
118
+ switch (addr) {
119
+ case A_CTL:
120
+ val = s->ctl;
121
+ break;
122
+ case A_STAT:
123
+ /* Temperature is constantly 25°C. */
124
+ val = FIELD_DP32(bcm2835_thermal_temp2adc(25), STAT, VALID, true);
125
+ break;
126
+ default:
127
+ /* MemoryRegionOps are aligned, so this can not happen. */
128
+ g_assert_not_reached();
129
+ }
130
+ return val;
131
+}
132
+
133
+static void bcm2835_thermal_write(void *opaque, hwaddr addr,
134
+ uint64_t value, unsigned size)
135
+{
136
+ Bcm2835ThermalState *s = BCM2835_THERMAL(opaque);
137
+
138
+ switch (addr) {
139
+ case A_CTL:
140
+ s->ctl = value;
141
+ break;
142
+ case A_STAT:
143
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: write 0x%" PRIx64
144
+ " to 0x%" HWADDR_PRIx "\n",
145
+ __func__, value, addr);
146
+ break;
147
+ default:
148
+ /* MemoryRegionOps are aligned, so this can not happen. */
149
+ g_assert_not_reached();
150
+ }
151
+}
152
+
153
+static const MemoryRegionOps bcm2835_thermal_ops = {
154
+ .read = bcm2835_thermal_read,
155
+ .write = bcm2835_thermal_write,
156
+ .impl.max_access_size = 4,
157
+ .valid.min_access_size = 4,
158
+ .endianness = DEVICE_NATIVE_ENDIAN,
159
+};
160
+
161
+static void bcm2835_thermal_reset(DeviceState *dev)
162
+{
163
+ Bcm2835ThermalState *s = BCM2835_THERMAL(dev);
164
+
165
+ s->ctl = 0;
166
+}
167
+
168
+static void bcm2835_thermal_realize(DeviceState *dev, Error **errp)
169
+{
170
+ Bcm2835ThermalState *s = BCM2835_THERMAL(dev);
171
+
172
+ memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_thermal_ops,
173
+ s, TYPE_BCM2835_THERMAL, 8);
174
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
175
+}
176
+
177
+static const VMStateDescription bcm2835_thermal_vmstate = {
178
+ .name = "bcm2835_thermal",
179
+ .version_id = 1,
180
+ .minimum_version_id = 1,
181
+ .fields = (VMStateField[]) {
182
+ VMSTATE_UINT32(ctl, Bcm2835ThermalState),
183
+ VMSTATE_END_OF_LIST()
184
+ }
185
+};
186
+
187
+static void bcm2835_thermal_class_init(ObjectClass *klass, void *data)
188
+{
189
+ DeviceClass *dc = DEVICE_CLASS(klass);
190
+
191
+ dc->realize = bcm2835_thermal_realize;
192
+ dc->reset = bcm2835_thermal_reset;
193
+ dc->vmsd = &bcm2835_thermal_vmstate;
194
+}
195
+
196
+static const TypeInfo bcm2835_thermal_info = {
197
+ .name = TYPE_BCM2835_THERMAL,
198
+ .parent = TYPE_SYS_BUS_DEVICE,
199
+ .instance_size = sizeof(Bcm2835ThermalState),
200
+ .class_init = bcm2835_thermal_class_init,
201
+};
202
+
203
+static void bcm2835_thermal_register_types(void)
204
+{
205
+ type_register_static(&bcm2835_thermal_info);
206
+}
207
+
208
+type_init(bcm2835_thermal_register_types)
209
--
210
2.20.1
211
212
Deleted patch
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
1
3
Map the thermal sensor in the BCM2835 block.
4
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Message-id: 20191019234715.25750-3-f4bug@amsat.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
include/hw/arm/bcm2835_peripherals.h | 2 ++
11
include/hw/arm/raspi_platform.h | 1 +
12
hw/arm/bcm2835_peripherals.c | 13 +++++++++++++
13
3 files changed, 16 insertions(+)
14
15
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/arm/bcm2835_peripherals.h
18
+++ b/include/hw/arm/bcm2835_peripherals.h
19
@@ -XXX,XX +XXX,XX @@
20
#include "hw/misc/bcm2835_property.h"
21
#include "hw/misc/bcm2835_rng.h"
22
#include "hw/misc/bcm2835_mbox.h"
23
+#include "hw/misc/bcm2835_thermal.h"
24
#include "hw/sd/sdhci.h"
25
#include "hw/sd/bcm2835_sdhost.h"
26
#include "hw/gpio/bcm2835_gpio.h"
27
@@ -XXX,XX +XXX,XX @@ typedef struct BCM2835PeripheralState {
28
SDHCIState sdhci;
29
BCM2835SDHostState sdhost;
30
BCM2835GpioState gpio;
31
+ Bcm2835ThermalState thermal;
32
UnimplementedDeviceState i2s;
33
UnimplementedDeviceState spi[1];
34
UnimplementedDeviceState i2c[3];
35
diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/include/hw/arm/raspi_platform.h
38
+++ b/include/hw/arm/raspi_platform.h
39
@@ -XXX,XX +XXX,XX @@
40
#define SPI0_OFFSET 0x204000
41
#define BSC0_OFFSET 0x205000 /* BSC0 I2C/TWI */
42
#define OTP_OFFSET 0x20f000
43
+#define THERMAL_OFFSET 0x212000
44
#define BSC_SL_OFFSET 0x214000 /* SPI slave */
45
#define AUX_OFFSET 0x215000 /* AUX: UART1/SPI1/SPI2 */
46
#define EMMC1_OFFSET 0x300000
47
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/hw/arm/bcm2835_peripherals.c
50
+++ b/hw/arm/bcm2835_peripherals.c
51
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
52
object_property_add_const_link(OBJECT(&s->dma), "dma-mr",
53
OBJECT(&s->gpu_bus_mr), &error_abort);
54
55
+ /* Thermal */
56
+ sysbus_init_child_obj(obj, "thermal", &s->thermal, sizeof(s->thermal),
57
+ TYPE_BCM2835_THERMAL);
58
+
59
/* GPIO */
60
sysbus_init_child_obj(obj, "gpio", &s->gpio, sizeof(s->gpio),
61
TYPE_BCM2835_GPIO);
62
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
63
INTERRUPT_DMA0 + n));
64
}
65
66
+ /* THERMAL */
67
+ object_property_set_bool(OBJECT(&s->thermal), true, "realized", &err);
68
+ if (err) {
69
+ error_propagate(errp, err);
70
+ return;
71
+ }
72
+ memory_region_add_subregion(&s->peri_mr, THERMAL_OFFSET,
73
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->thermal), 0));
74
+
75
/* GPIO */
76
object_property_set_bool(OBJECT(&s->gpio), true, "realized", &err);
77
if (err) {
78
--
79
2.20.1
80
81
Deleted patch
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
1
3
Add the 64-bit free running timer. Do not model the COMPARE register
4
(no IRQ generated).
5
This timer is used by Linux kernel and recently U-Boot:
6
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/clocksource/bcm2835_timer.c?h=v3.7
7
https://github.com/u-boot/u-boot/blob/v2019.07/include/configs/rpi.h#L19
8
9
Datasheet used:
10
https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf
11
12
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-id: 20191019234715.25750-4-f4bug@amsat.org
15
[PMM: squashed in switch to using memset in reset]
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
18
hw/timer/Makefile.objs | 1 +
19
include/hw/timer/bcm2835_systmr.h | 33 ++++++
20
hw/timer/bcm2835_systmr.c | 163 ++++++++++++++++++++++++++++++
21
hw/timer/trace-events | 5 +
22
4 files changed, 202 insertions(+)
23
create mode 100644 include/hw/timer/bcm2835_systmr.h
24
create mode 100644 hw/timer/bcm2835_systmr.c
25
26
diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs
27
index XXXXXXX..XXXXXXX 100644
28
--- a/hw/timer/Makefile.objs
29
+++ b/hw/timer/Makefile.objs
30
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_SUN4V_RTC) += sun4v-rtc.o
31
common-obj-$(CONFIG_CMSDK_APB_TIMER) += cmsdk-apb-timer.o
32
common-obj-$(CONFIG_CMSDK_APB_DUALTIMER) += cmsdk-apb-dualtimer.o
33
common-obj-$(CONFIG_MSF2) += mss-timer.o
34
+common-obj-$(CONFIG_RASPI) += bcm2835_systmr.o
35
diff --git a/include/hw/timer/bcm2835_systmr.h b/include/hw/timer/bcm2835_systmr.h
36
new file mode 100644
37
index XXXXXXX..XXXXXXX
38
--- /dev/null
39
+++ b/include/hw/timer/bcm2835_systmr.h
40
@@ -XXX,XX +XXX,XX @@
41
+/*
42
+ * BCM2835 SYS timer emulation
43
+ *
44
+ * Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
45
+ *
46
+ * SPDX-License-Identifier: GPL-2.0-or-later
47
+ */
48
+
49
+#ifndef BCM2835_SYSTIMER_H
50
+#define BCM2835_SYSTIMER_H
51
+
52
+#include "hw/sysbus.h"
53
+#include "hw/irq.h"
54
+
55
+#define TYPE_BCM2835_SYSTIMER "bcm2835-sys-timer"
56
+#define BCM2835_SYSTIMER(obj) \
57
+ OBJECT_CHECK(BCM2835SystemTimerState, (obj), TYPE_BCM2835_SYSTIMER)
58
+
59
+typedef struct {
60
+ /*< private >*/
61
+ SysBusDevice parent_obj;
62
+
63
+ /*< public >*/
64
+ MemoryRegion iomem;
65
+ qemu_irq irq;
66
+
67
+ struct {
68
+ uint32_t status;
69
+ uint32_t compare[4];
70
+ } reg;
71
+} BCM2835SystemTimerState;
72
+
73
+#endif
74
diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c
75
new file mode 100644
76
index XXXXXXX..XXXXXXX
77
--- /dev/null
78
+++ b/hw/timer/bcm2835_systmr.c
79
@@ -XXX,XX +XXX,XX @@
80
+/*
81
+ * BCM2835 SYS timer emulation
82
+ *
83
+ * Copyright (C) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
84
+ *
85
+ * SPDX-License-Identifier: GPL-2.0-or-later
86
+ *
87
+ * Datasheet: BCM2835 ARM Peripherals (C6357-M-1398)
88
+ * https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf
89
+ *
90
+ * Only the free running 64-bit counter is implemented.
91
+ * The 4 COMPARE registers and the interrupt are not implemented.
92
+ */
93
+
94
+#include "qemu/osdep.h"
95
+#include "qemu/log.h"
96
+#include "qemu/timer.h"
97
+#include "hw/timer/bcm2835_systmr.h"
98
+#include "hw/registerfields.h"
99
+#include "migration/vmstate.h"
100
+#include "trace.h"
101
+
102
+REG32(CTRL_STATUS, 0x00)
103
+REG32(COUNTER_LOW, 0x04)
104
+REG32(COUNTER_HIGH, 0x08)
105
+REG32(COMPARE0, 0x0c)
106
+REG32(COMPARE1, 0x10)
107
+REG32(COMPARE2, 0x14)
108
+REG32(COMPARE3, 0x18)
109
+
110
+static void bcm2835_systmr_update_irq(BCM2835SystemTimerState *s)
111
+{
112
+ bool enable = !!s->reg.status;
113
+
114
+ trace_bcm2835_systmr_irq(enable);
115
+ qemu_set_irq(s->irq, enable);
116
+}
117
+
118
+static void bcm2835_systmr_update_compare(BCM2835SystemTimerState *s,
119
+ unsigned timer_index)
120
+{
121
+ /* TODO for now, since neither Linux nor U-Boot use these timers. */
122
+ qemu_log_mask(LOG_UNIMP, "COMPARE register %u not implemented\n",
123
+ timer_index);
124
+}
125
+
+static uint64_t bcm2835_systmr_read(void *opaque, hwaddr offset,
+                                    unsigned size)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(opaque);
+    uint64_t r = 0;
+
+    switch (offset) {
+    case A_CTRL_STATUS:
+        r = s->reg.status;
+        break;
+    case A_COMPARE0 ... A_COMPARE3:
+        r = s->reg.compare[(offset - A_COMPARE0) >> 2];
+        break;
+    case A_COUNTER_LOW:
+    case A_COUNTER_HIGH:
+        /* Free running counter at 1MHz */
+        r = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
+        r >>= 8 * (offset - A_COUNTER_LOW);
+        r &= UINT32_MAX;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        break;
+    }
+    trace_bcm2835_systmr_read(offset, r);
+
+    return r;
+}
+
+static void bcm2835_systmr_write(void *opaque, hwaddr offset,
+                                 uint64_t value, unsigned size)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(opaque);
+
+    trace_bcm2835_systmr_write(offset, value);
+    switch (offset) {
+    case A_CTRL_STATUS:
+        s->reg.status &= ~value; /* Ack */
+        bcm2835_systmr_update_irq(s);
+        break;
+    case A_COMPARE0 ... A_COMPARE3:
+        s->reg.compare[(offset - A_COMPARE0) >> 2] = value;
+        bcm2835_systmr_update_compare(s, (offset - A_COMPARE0) >> 2);
+        break;
+    case A_COUNTER_LOW:
+    case A_COUNTER_HIGH:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: read-only ofs 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps bcm2835_systmr_ops = {
+    .read = bcm2835_systmr_read,
+    .write = bcm2835_systmr_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static void bcm2835_systmr_reset(DeviceState *dev)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(dev);
+
+    memset(&s->reg, 0, sizeof(s->reg));
+}
+
+static void bcm2835_systmr_realize(DeviceState *dev, Error **errp)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(dev);
+
+    memory_region_init_io(&s->iomem, OBJECT(dev), &bcm2835_systmr_ops,
+                          s, "bcm2835-sys-timer", 0x20);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+}
+
+static const VMStateDescription bcm2835_systmr_vmstate = {
+    .name = "bcm2835_sys_timer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(reg.status, BCM2835SystemTimerState),
+        VMSTATE_UINT32_ARRAY(reg.compare, BCM2835SystemTimerState, 4),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void bcm2835_systmr_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = bcm2835_systmr_realize;
+    dc->reset = bcm2835_systmr_reset;
+    dc->vmsd = &bcm2835_systmr_vmstate;
+}
+
+static const TypeInfo bcm2835_systmr_info = {
+    .name = TYPE_BCM2835_SYSTIMER,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(BCM2835SystemTimerState),
+    .class_init = bcm2835_systmr_class_init,
+};
+
+static void bcm2835_systmr_register_types(void)
+{
+    type_register_static(&bcm2835_systmr_info);
+}
+
+type_init(bcm2835_systmr_register_types);
diff --git a/hw/timer/trace-events b/hw/timer/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/trace-events
+++ b/hw/timer/trace-events
@@ -XXX,XX +XXX,XX @@ pl031_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
 pl031_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
 pl031_alarm_raised(void) "alarm raised"
 pl031_set_alarm(uint32_t ticks) "alarm set for %u ticks"
+
+# bcm2835_systmr.c
+bcm2835_systmr_irq(bool enable) "timer irq state %u"
+bcm2835_systmr_read(uint64_t offset, uint64_t data) "timer read: offset 0x%" PRIx64 " data 0x%" PRIx64
+bcm2835_systmr_write(uint64_t offset, uint64_t data) "timer write: offset 0x%" PRIx64 " data 0x%" PRIx64
--
2.20.1
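
For context, a minimal bare-metal sketch (not part of this series) of how a guest
reads the 64-bit free-running counter that the new model exposes through the two
32-bit COUNTER_LOW/COUNTER_HIGH registers. The 0x3f003000 base is an assumption
matching the raspi2/raspi3 peripheral window; the original BCM2835 would use
0x20003000 instead:

    #include <stdint.h>

    #define ST_BASE        0x3f003000u                               /* assumed system timer base */
    #define ST_COUNTER_LO  (*(volatile uint32_t *)(ST_BASE + 0x04))  /* COUNTER_LOW  */
    #define ST_COUNTER_HI  (*(volatile uint32_t *)(ST_BASE + 0x08))  /* COUNTER_HIGH */

    static uint64_t st_read_counter64(void)
    {
        uint32_t hi, lo;

        /* Re-read the high word in case the low word wrapped between reads. */
        do {
            hi = ST_COUNTER_HI;
            lo = ST_COUNTER_LO;
        } while (hi != ST_COUNTER_HI);

        return ((uint64_t)hi << 32) | lo;
    }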
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Connect the recently added SYS_timer.
Now U-Boot no longer hangs polling a free-running counter
stuck at 0.
This timer is also used by the Linux kernel thermal subsystem.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20191019234715.25750-5-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/bcm2835_peripherals.h |  3 ++-
 hw/arm/bcm2835_peripherals.c         | 17 ++++++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/sd/sdhci.h"
 #include "hw/sd/bcm2835_sdhost.h"
 #include "hw/gpio/bcm2835_gpio.h"
+#include "hw/timer/bcm2835_systmr.h"
 #include "hw/misc/unimp.h"
 
 #define TYPE_BCM2835_PERIPHERALS "bcm2835-peripherals"
@@ -XXX,XX +XXX,XX @@ typedef struct BCM2835PeripheralState {
     MemoryRegion ram_alias[4];
     qemu_irq irq, fiq;
 
-    UnimplementedDeviceState systmr;
+    BCM2835SystemTimerState systmr;
     UnimplementedDeviceState armtmr;
     UnimplementedDeviceState cprman;
     UnimplementedDeviceState a2w;
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
     /* Interrupt Controller */
     sysbus_init_child_obj(obj, "ic", &s->ic, sizeof(s->ic), TYPE_BCM2835_IC);
 
+    /* SYS Timer */
+    sysbus_init_child_obj(obj, "systimer", &s->systmr, sizeof(s->systmr),
+                          TYPE_BCM2835_SYSTIMER);
+
     /* UART0 */
     sysbus_init_child_obj(obj, "uart0", &s->uart0, sizeof(s->uart0),
                           TYPE_PL011);
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
         sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ic), 0));
     sysbus_pass_irq(SYS_BUS_DEVICE(s), SYS_BUS_DEVICE(&s->ic));
 
+    /* Sys Timer */
+    object_property_set_bool(OBJECT(&s->systmr), true, "realized", &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    memory_region_add_subregion(&s->peri_mr, ST_OFFSET,
+                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systmr), 0));
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systmr), 0,
+        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_ARM_IRQ,
+                               INTERRUPT_ARM_TIMER));
+
     /* UART0 */
     qdev_prop_set_chr(DEVICE(&s->uart0), "chardev", serial_hd(0));
     object_property_set_bool(OBJECT(&s->uart0), true, "realized", &err);
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
     }
 
     create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
-    create_unimp(s, &s->systmr, "bcm2835-systimer", ST_OFFSET, 0x20);
     create_unimp(s, &s->cprman, "bcm2835-cprman", CPRMAN_OFFSET, 0x1000);
     create_unimp(s, &s->a2w, "bcm2835-a2w", A2W_OFFSET, 0x1000);
     create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
--
2.20.1
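
To illustrate the U-Boot hang this fixes: firmware delay loops on these SoCs
typically spin on COUNTER_LOW, roughly as in the sketch below (a rough
illustration, not actual U-Boot code; the base address again assumes the
raspi2/raspi3 peripheral window). With the register previously backed by an
unimplemented-device stub reading as constant 0, such a loop never terminated;
with the free-running counter it completes after the requested time.

    #include <stdint.h>

    #define ST_BASE        0x3f003000u                               /* assumed system timer base */
    #define ST_COUNTER_LO  (*(volatile uint32_t *)(ST_BASE + 0x04))  /* 1 MHz counter, low word */

    /* Busy-wait for roughly 'usec' microseconds using the 1 MHz counter. */
    static void delay_us(uint32_t usec)
    {
        uint32_t start = ST_COUNTER_LO;

        /* Unsigned subtraction handles the 32-bit wrap-around. */
        while ((uint32_t)(ST_COUNTER_LO - start) < usec) {
            /* spin */
        }
    }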