Only thing for Arm for rc1 is RTH's fix for the KVM SVE probe code.

-- PMM

The following changes since commit 4e06b3fc1b5e1ec03f22190eabe56891dc9c2236:

  Merge tag 'pull-hex-20220731' of https://github.com/quic/qemu into staging (2022-07-31 21:38:54 -0700)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220801

for you to fetch changes up to 5265d24c981dfdda8d29b44f7e84a514da75eedc:

  target/arm: Move sve probe inside kvm >= 4.15 branch (2022-08-01 16:21:18 +0100)

----------------------------------------------------------------
target-arm queue:
 * Fix KVM SVE ID register probe code

----------------------------------------------------------------
Richard Henderson (3):
      target/arm: Use kvm_arm_sve_supported in kvm_arm_get_host_cpu_features
      target/arm: Set KVM_ARM_VCPU_SVE while probing the host
      target/arm: Move sve probe inside kvm >= 4.15 branch

 target/arm/kvm64.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
From: Richard Henderson <richard.henderson@linaro.org>

Indication for support for SVE will not depend on whether we
perform the query on the main kvm_state or the temp vcpu.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
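As an aside for readers (not part of the patch): KVM_CAP_ARM_SVE is a
host-global capability, so asking about it via the /dev/kvm fd QEMU
already holds in kvm_state (which is what the kvm_arm_sve_supported()
helper checks) and asking via one of the scratch VM's fds give the
same answer. A minimal standalone sketch, assuming a Linux host with
<linux/kvm.h>; error handling is kept to a minimum:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);   /* scratch VM fd */
        if (vm < 0) {
            perror("KVM_CREATE_VM");
            return 1;
        }

        /* The same capability, queried through two different fds. */
        int via_dev_kvm = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
        int via_vm_fd = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

        printf("SVE via /dev/kvm: %d, via VM fd: %d\n", via_dev_kvm, via_vm_fd);
        close(vm);
        close(kvm);
        return 0;
    }
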
 target/arm/kvm64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         }
     }
 
-    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
+    sve_supported = kvm_arm_sve_supported();
 
     /* Add feature bits that can't appear until after VCPU init. */
     if (sve_supported) {
-- 
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Because we weren't setting this flag, our probe of ID_AA64ZFR0
was always returning zero.  This also obviates the adjustment
of ID_AA64PFR0, which had sanitized the SVE field.

The effects of the bug are not visible, because the only thing that
ID_AA64ZFR0 is used for within qemu at present is tcg translation.
The other tests for SVE within KVM are via ID_AA64PFR0.SVE.

Reported-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
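As an illustration of what "setting this flag" involves (not part of
the patch, and not QEMU code): the scratch vcpu used for the probe has
to be initialised with the KVM_ARM_VCPU_SVE feature bit before
ID_AA64ZFR0_EL1 reads back as anything other than zero. A rough
standalone sketch, assuming an arm64 Linux host with <linux/kvm.h>,
with most error handling omitted:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        struct kvm_vcpu_init init = { 0 };

        /* Ask the kernel which CPU target it prefers for this host. */
        if (ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init) < 0) {
            perror("KVM_ARM_PREFERRED_TARGET");
            return 1;
        }

        /*
         * Without this bit the vcpu has no SVE, so the SVE ID register
         * is RAZ (or, on the buggy kernel range mentioned in the patch
         * below, not exposed at all) and the probe always reads zero.
         */
        if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0) {
            init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
        }

        if (ioctl(vcpu, KVM_ARM_VCPU_INIT, &init) < 0) {
            perror("KVM_ARM_VCPU_INIT");
            return 1;
        }
        puts("scratch vcpu initialised; SVE requested if available");
        return 0;
    }
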
 target/arm/kvm64.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     bool sve_supported;
     bool pmu_supported = false;
     uint64_t features = 0;
-    uint64_t t;
     int err;
 
     /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     struct kvm_vcpu_init init = { .target = -1, };
 
     /*
-     * Ask for Pointer Authentication if supported. We can't play the
-     * SVE trick of synthesising the ID reg as KVM won't tell us
-     * whether we have the architected or IMPDEF version of PAuth, so
-     * we have to use the actual ID regs.
+     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
+     * which is otherwise RAZ.
+     */
+    sve_supported = kvm_arm_sve_supported();
+    if (sve_supported) {
+        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
+    }
+
+    /*
+     * Ask for Pointer Authentication if supported, so that we get
+     * the unsanitized field values for AA64ISAR1_EL1.
      */
     if (kvm_arm_pauth_supported()) {
         init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         }
     }
 
-    sve_supported = kvm_arm_sve_supported();
-
-    /* Add feature bits that can't appear until after VCPU init. */
     if (sve_supported) {
-        t = ahcf->isar.id_aa64pfr0;
-        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
-        ahcf->isar.id_aa64pfr0 = t;
-
         /*
          * There is a range of kernels between kernel commit 73433762fcae
          * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
          * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
-         * SVE support, so we only read it here, rather than together with all
-         * the other ID registers earlier.
+         * SVE support, which resulted in an error rather than RAZ.
+         * So only read the register if we set KVM_ARM_VCPU_SVE above.
          */
         err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                               ARM64_SYS_REG(3, 0, 0, 4, 4));
-- 
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

The test for the IF block indicates no ID registers are exposed, much
less host support for SVE.  Move the SVE probe into the ELSE block.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
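To make the control-flow point concrete (illustration only; the names
below are made up for the example and are not the QEMU functions): the
probe first checks whether the host exposes ID registers at all, and
only the branch where it does can sensibly go on to read
ID_AA64ZFR0_EL1, so the SVE read now lives inside that branch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the real checks; both are hypothetical. */
    static bool id_regs_exposed = true; /* kernel new enough to expose ID regs */
    static bool sve_supported = true;   /* KVM_CAP_ARM_SVE reported by the host */

    static void probe_host_id_regs(void)
    {
        if (!id_regs_exposed) {
            /* IF block: nothing exposed, fall back to default values. */
            puts("old kernel: using default ID register values");
        } else {
            /* ELSE block: read the ID registers from the scratch vcpu... */
            puts("reading ID registers");
            if (sve_supported) {
                /* ...and only here read ID_AA64ZFR0_EL1 (this patch). */
                puts("reading ID_AA64ZFR0_EL1");
            }
        }
    }

    int main(void)
    {
        probe_host_id_regs();
        return 0;
    }
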
 target/arm/kvm64.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
             err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                   ARM64_SYS_REG(3, 3, 9, 12, 0));
         }
-    }
 
-    if (sve_supported) {
-        /*
-         * There is a range of kernels between kernel commit 73433762fcae
-         * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
-         * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
-         * SVE support, which resulted in an error rather than RAZ.
-         * So only read the register if we set KVM_ARM_VCPU_SVE above.
-         */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 4));
+        if (sve_supported) {
+            /*
+             * There is a range of kernels between kernel commit 73433762fcae
+             * and f81cb2c3ad41 which have a bug where the kernel doesn't
+             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
+             * enabled SVE support, which resulted in an error rather than RAZ.
+             * So only read the register if we set KVM_ARM_VCPU_SVE above.
+             */
+            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
+        }
     }
 
     kvm_arm_destroy_scratch_host_vcpu(fdarray);
-- 
2.25.1