The following changes since commit ffd454c67e38cc6df792733ebc5d967eee28ac0d:

  Merge tag 'pull-vfio-20240107' of https://github.com/legoater/qemu into staging (2024-01-08 10:28:42 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 0b2675c473f68f13bc5ca1dd1c43ce421542e7b8:

  Rename "QEMU global mutex" to "BQL" in comments and docs (2024-01-08 10:45:43 -0500)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Philippe Mathieu-Daudé (1):
      iothread: Remove unused Error** argument in aio_context_set_aio_params

Stefan Hajnoczi (5):
      system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
      qemu/main-loop: rename QEMU_IOTHREAD_LOCK_GUARD to BQL_LOCK_GUARD
      qemu/main-loop: rename qemu_cond_wait_iothread() to qemu_cond_wait_bql()
      Replace "iothread lock" with "BQL" in comments
      Rename "QEMU global mutex" to "BQL" in comments and docs

 docs/devel/multi-thread-tcg.rst | 7 +-
 docs/devel/qapi-code-gen.rst | 2 +-
 docs/devel/replay.rst | 2 +-
 docs/devel/reset.rst | 2 +-
 docs/devel/multiple-iothreads.txt | 14 ++--
 hw/display/qxl.h | 2 +-
 include/block/aio-wait.h | 2 +-
 include/block/aio.h | 3 +-
 include/block/blockjob.h | 6 +-
 include/exec/cpu-common.h | 2 +-
 include/exec/memory.h | 4 +-
 include/exec/ramblock.h | 2 +-
 include/io/task.h | 2 +-
 include/migration/register.h | 8 +-
 include/qemu/coroutine-core.h | 2 +-
 include/qemu/coroutine.h | 2 +-
 include/qemu/main-loop.h | 68 ++++++-------
 include/qemu/thread.h | 2 +-
 target/arm/internals.h | 4 +-
 accel/accel-blocker.c | 10 +--
 accel/dummy-cpus.c | 8 +-
 accel/hvf/hvf-accel-ops.c | 4 +-
 accel/kvm/kvm-accel-ops.c | 4 +-
 accel/kvm/kvm-all.c | 22 ++---
 accel/tcg/cpu-exec.c | 26 +++---
 accel/tcg/cputlb.c | 20 ++---
 accel/tcg/tcg-accel-ops-icount.c | 6 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 12 +--
 accel/tcg/tcg-accel-ops-rr.c | 18 ++--
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/translate-all.c | 2 +-
 cpu-common.c | 4 +-
 dump/dump.c | 4 +-
 hw/block/dataplane/virtio-blk.c | 8 +-
 hw/block/virtio-blk.c | 2 +-
 hw/core/cpu-common.c | 6 +-
 hw/display/virtio-gpu.c | 2 +-
 hw/i386/intel_iommu.c | 6 +-
 hw/i386/kvm/xen_evtchn.c | 30 +++----
 hw/i386/kvm/xen_gnttab.c | 2 +-
 hw/i386/kvm/xen_overlay.c | 2 +-
 hw/i386/kvm/xen_xenstore.c | 2 +-
 hw/intc/arm_gicv3_cpuif.c | 2 +-
 hw/intc/s390_flic.c | 18 ++--
 hw/mips/mips_int.c | 2 +-
 hw/misc/edu.c | 4 +-
 hw/misc/imx6_src.c | 2 +-
 hw/misc/imx7_src.c | 2 +-
 hw/net/xen_nic.c | 8 +-
 hw/ppc/pegasos2.c | 2 +-
 hw/ppc/ppc.c | 6 +-
 hw/ppc/spapr.c | 2 +-
 hw/ppc/spapr_events.c | 2 +-
 hw/ppc/spapr_rng.c | 4 +-
 hw/ppc/spapr_softmmu.c | 4 +-
 hw/remote/mpqemu-link.c | 22 ++---
 hw/remote/vfio-user-obj.c | 2 +-
 hw/s390x/s390-skeys.c | 2 +-
 hw/scsi/virtio-scsi-dataplane.c | 6 +-
 iothread.c | 3 +-
 migration/block-dirty-bitmap.c | 14 ++--
 migration/block.c | 38 ++++-----
 migration/colo.c | 62 +++++++-------
 migration/dirtyrate.c | 12 +--
 migration/migration.c | 54 ++++++------
 migration/ram.c | 16 ++--
 net/tap.c | 2 +-
 replay/replay-internal.c | 2 +-
 semihosting/console.c | 8 +-
 stubs/iothread-lock.c | 6 +-
 system/cpu-throttle.c | 6 +-
 system/cpus.c | 55 +++++++------
 system/dirtylimit.c | 4 +-
 system/memory.c | 2 +-
 system/physmem.c | 14 ++--
 system/runstate.c | 2 +-
 system/watchpoint.c | 4 +-
 target/arm/arm-powerctl.c | 14 ++--
 target/arm/helper.c | 6 +-
 target/arm/hvf/hvf.c | 8 +-
 target/arm/kvm.c | 8 +-
 target/arm/ptw.c | 6 +-
 target/arm/tcg/helper-a64.c | 8 +-
 target/arm/tcg/m_helper.c | 6 +-
 target/arm/tcg/op_helper.c | 24 +++---
 target/arm/tcg/psci.c | 2 +-
 target/hppa/int_helper.c | 8 +-
 target/i386/hvf/hvf.c | 6 +-
 target/i386/kvm/hyperv.c | 4 +-
 target/i386/kvm/kvm.c | 28 +++----
 target/i386/kvm/xen-emu.c | 16 ++--
 target/i386/nvmm/nvmm-accel-ops.c | 6 +-
 target/i386/nvmm/nvmm-all.c | 20 ++---
 target/i386/tcg/sysemu/fpu_helper.c | 6 +-
 target/i386/tcg/sysemu/misc_helper.c | 4 +-
 target/i386/whpx/whpx-accel-ops.c | 6 +-
 target/i386/whpx/whpx-all.c | 24 +++---
 target/loongarch/tcg/csr_helper.c | 4 +-
 target/mips/kvm.c | 4 +-
 target/mips/tcg/sysemu/cp0_helper.c | 4 +-
 target/openrisc/sys_helper.c | 16 ++--
 target/ppc/excp_helper.c | 14 ++--
 target/ppc/helper_regs.c | 2 +-
 target/ppc/kvm.c | 4 +-
 target/ppc/misc_helper.c | 8 +-
 target/ppc/timebase_helper.c | 8 +-
 target/riscv/cpu_helper.c | 4 +-
 target/s390x/kvm/kvm.c | 4 +-
 target/s390x/tcg/misc_helper.c | 118 +++++++++++++--------------
 target/sparc/int32_helper.c | 2 +-
 target/sparc/int64_helper.c | 6 +-
 target/sparc/win_helper.c | 20 ++---
 target/xtensa/exc_helper.c | 8 +-
 ui/spice-core.c | 6 +-
 util/aio-posix.c | 3 +-
 util/aio-win32.c | 3 +-
 util/async.c | 2 +-
 util/main-loop.c | 13 ++-
 util/qsp.c | 6 +-
 util/rcu.c | 16 ++--
 audio/coreaudio.m | 8 +-
 memory_ldst.c.inc | 18 ++--
 target/i386/hvf/README.md | 2 +-
 ui/cocoa.m | 56 ++++++-------
 124 files changed, 646 insertions(+), 655 deletions(-)

--
2.43.0
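At a glance, the API renames carried by this series (collected from the
shortlog and commit messages below):

    qemu_mutex_lock_iothread()   -> bql_lock()
    qemu_mutex_unlock_iothread() -> bql_unlock()
    qemu_mutex_iothread_locked() -> bql_locked()
    QEMU_IOTHREAD_LOCK_GUARD     -> BQL_LOCK_GUARD
    qemu_cond_wait_iothread()    -> qemu_cond_wait_bql()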
From: Philippe Mathieu-Daudé <philmd@linaro.org>

aio_context_set_aio_params() doesn't use its undocumented
Error** argument. Remove it to simplify.

Note this removes a use of "unchecked Error**" in
iothread_set_aio_context_params().

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231120171806.19361-1-philmd@linaro.org>
---
 include/block/aio.h | 3 +--
 iothread.c          | 3 +--
 util/aio-posix.c    | 3 +--
 util/aio-win32.c    | 3 +--
 util/main-loop.c    | 5 +----
 5 files changed, 5 insertions(+), 12 deletions(-)

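To see why the errp parameter was dead weight, here is a minimal sketch
(illustrative only, not code from QEMU) of the "unchecked Error**" shape
this patch removes: the callee never writes *errp, so the caller's error
check can never fire.

    /* Hypothetical callee: accepts errp but never sets it. */
    static void set_params(AioContext *ctx, int64_t max_batch, Error **errp)
    {
        ctx->aio_max_batch = max_batch;    /* errp is never written */
    }

    set_params(ctx, batch, errp);
    if (*errp) {    /* dead code: *errp is still NULL here */
        return;
    }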
diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
 * @max_batch: maximum number of requests in a batch, 0 means that the
 * engine will use its default
 */
-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
- Error **errp);
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

/**
 * aio_context_set_thread_pool_params:
diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
}

aio_context_set_aio_params(iothread->ctx,
- iothread->parent_obj.aio_max_batch,
- errp);
+ iothread->parent_obj.aio_max_batch);

aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
base->thread_pool_max, errp);
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
aio_notify(ctx);
}

-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
- Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
/*
* No thread synchronization here, it doesn't matter if an incorrect value
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
}
}

-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
- Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
}
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@ static void main_loop_update_params(EventLoopBase *base, Error **errp)
return;
}

- aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
- if (*errp) {
- return;
- }
+ aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch);

aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
base->thread_pool_max, errp);
--
2.43.0
The Big QEMU Lock (BQL) has many names and they are confusing. The
actual QemuMutex variable is called qemu_global_mutex but it's commonly
referred to as the BQL in discussions and some code comments. The
locking APIs, however, are called qemu_mutex_lock_iothread() and
qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was
split into KVM vcpu threads and the "iothread" (now called the main
loop thread). I have contributed to the confusion myself by introducing
a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the
locking APIs to:
- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches
will rename them. There are also comments and documentation that will be
updated in later patches.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 2 +-
 include/qemu/main-loop.h | 39 +++++----
 include/qemu/thread.h | 2 +-
 accel/accel-blocker.c | 10 +--
 accel/dummy-cpus.c | 8 +-
 accel/hvf/hvf-accel-ops.c | 4 +-
 accel/kvm/kvm-accel-ops.c | 4 +-
 accel/kvm/kvm-all.c | 22 ++---
 accel/tcg/cpu-exec.c | 26 +++---
 accel/tcg/cputlb.c | 16 ++--
 accel/tcg/tcg-accel-ops-icount.c | 4 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 12 +--
 accel/tcg/tcg-accel-ops-rr.c | 14 ++--
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/translate-all.c | 2 +-
 cpu-common.c | 4 +-
 dump/dump.c | 4 +-
 hw/core/cpu-common.c | 6 +-
 hw/i386/intel_iommu.c | 6 +-
 hw/i386/kvm/xen_evtchn.c | 16 ++--
 hw/i386/kvm/xen_overlay.c | 2 +-
 hw/i386/kvm/xen_xenstore.c | 2 +-
 hw/intc/arm_gicv3_cpuif.c | 2 +-
 hw/intc/s390_flic.c | 18 ++--
 hw/misc/edu.c | 4 +-
 hw/misc/imx6_src.c | 2 +-
 hw/misc/imx7_src.c | 2 +-
 hw/net/xen_nic.c | 8 +-
 hw/ppc/pegasos2.c | 2 +-
 hw/ppc/ppc.c | 4 +-
 hw/ppc/spapr.c | 2 +-
 hw/ppc/spapr_rng.c | 4 +-
 hw/ppc/spapr_softmmu.c | 4 +-
 hw/remote/mpqemu-link.c | 20 ++---
 hw/remote/vfio-user-obj.c | 2 +-
 hw/s390x/s390-skeys.c | 2 +-
 migration/block-dirty-bitmap.c | 4 +-
 migration/block.c | 16 ++--
 migration/colo.c | 60 +++++++-------
 migration/dirtyrate.c | 12 +--
 migration/migration.c | 52 ++++++------
 migration/ram.c | 12 +--
 replay/replay-internal.c | 2 +-
 semihosting/console.c | 8 +-
 stubs/iothread-lock.c | 6 +-
 system/cpu-throttle.c | 4 +-
 system/cpus.c | 51 ++++++------
 system/dirtylimit.c | 4 +-
 system/memory.c | 2 +-
 system/physmem.c | 8 +-
 system/runstate.c | 2 +-
 system/watchpoint.c | 4 +-
 target/arm/arm-powerctl.c | 14 ++--
 target/arm/helper.c | 4 +-
 target/arm/hvf/hvf.c | 8 +-
 target/arm/kvm.c | 8 +-
 target/arm/ptw.c | 6 +-
 target/arm/tcg/helper-a64.c | 8 +-
 target/arm/tcg/m_helper.c | 6 +-
 target/arm/tcg/op_helper.c | 24 +++---
 target/arm/tcg/psci.c | 2 +-
 target/hppa/int_helper.c | 8 +-
 target/i386/hvf/hvf.c | 6 +-
 target/i386/kvm/hyperv.c | 4 +-
 target/i386/kvm/kvm.c | 28 +++----
 target/i386/kvm/xen-emu.c | 14 ++--
 target/i386/nvmm/nvmm-accel-ops.c | 4 +-
 target/i386/nvmm/nvmm-all.c | 20 ++---
 target/i386/tcg/sysemu/fpu_helper.c | 6 +-
 target/i386/tcg/sysemu/misc_helper.c | 4 +-
 target/i386/whpx/whpx-accel-ops.c | 4 +-
 target/i386/whpx/whpx-all.c | 24 +++---
 target/loongarch/tcg/csr_helper.c | 4 +-
 target/mips/kvm.c | 4 +-
 target/mips/tcg/sysemu/cp0_helper.c | 4 +-
 target/openrisc/sys_helper.c | 16 ++--
 target/ppc/excp_helper.c | 12 +--
 target/ppc/kvm.c | 4 +-
 target/ppc/misc_helper.c | 8 +-
 target/ppc/timebase_helper.c | 8 +-
 target/s390x/kvm/kvm.c | 4 +-
 target/s390x/tcg/misc_helper.c | 118 +++++++++++++--------------
 target/sparc/int32_helper.c | 2 +-
 target/sparc/int64_helper.c | 6 +-
 target/sparc/win_helper.c | 20 ++---
 target/xtensa/exc_helper.c | 8 +-
 ui/spice-core.c | 4 +-
 util/async.c | 2 +-
 util/main-loop.c | 8 +-
 util/qsp.c | 6 +-
 util/rcu.c | 14 ++--
 audio/coreaudio.m | 4 +-
 memory_ldst.c.inc | 18 ++--
 target/i386/hvf/README.md | 2 +-
 ui/cocoa.m | 50 ++++++------
 95 files changed, 529 insertions(+), 529 deletions(-)

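Before the mechanical diff below, the shape of the rename at a typical
call site (a sketch, not taken from any one file):

    /* before this patch */                /* after this patch */
    qemu_mutex_lock_iothread();            bql_lock();
    ... code that needs the BQL ...        ... code that needs the BQL ...
    qemu_mutex_unlock_iothread();          bql_unlock();

    assert(qemu_mutex_iothread_locked());  assert(bql_locked());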
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@ static inline bool in_aio_context_home_thread(AioContext *ctx)
}

if (ctx == qemu_get_aio_context()) {
- return qemu_mutex_iothread_locked();
+ return bql_locked();
} else {
return false;
}
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -XXX,XX +XXX,XX @@ GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);

/**
- * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ * bql_locked: Return lock status of the Big QEMU Lock (BQL)
*
- * The main loop mutex is the coarsest lock in QEMU, and as such it
+ * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
* must always be taken outside other locks. This function helps
* functions take different paths depending on whether the current
- * thread is running within the main loop mutex.
+ * thread is running within the BQL.
*
* This function should never be used in the block layer, because
* unit tests, block layer tools and qemu-storage-daemon do not
* have a BQL.
* Please instead refer to qemu_in_main_thread().
*/
-bool qemu_mutex_iothread_locked(void);
+bool bql_locked(void);

/**
* qemu_in_main_thread: return whether it's possible to safely access
@@ -XXX,XX +XXX,XX @@ bool qemu_in_main_thread(void);
} while (0)

/**
- * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ * bql_lock: Lock the Big QEMU Lock (BQL).
*
- * This function locks the main loop mutex. The mutex is taken by
+ * This function locks the Big QEMU Lock (BQL). The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be taken
+ * external events (such as with select). The lock should be taken
* by threads other than the main loop thread when calling
* qemu_bh_new(), qemu_set_fd_handler() and basically all other
* functions documented in this file.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * NOTE: tools currently are single-threaded and bql_lock
* is a no-op there.
*/
-#define qemu_mutex_lock_iothread() \
- qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
-void qemu_mutex_lock_iothread_impl(const char *file, int line);
+#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
+void bql_lock_impl(const char *file, int line);

/**
- * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ * bql_unlock: Unlock the Big QEMU Lock (BQL).
*
- * This function unlocks the main loop mutex. The mutex is taken by
+ * This function unlocks the Big QEMU Lock. The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be unlocked
+ * external events (such as with select). The lock should be unlocked
* as soon as possible by threads other than the main loop thread,
* because it prevents the main loop from processing callbacks,
* including timers and bottom halves.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * NOTE: tools currently are single-threaded and bql_unlock
* is a no-op there.
*/
-void qemu_mutex_unlock_iothread(void);
+void bql_unlock(void);

/**
* QEMU_IOTHREAD_LOCK_GUARD
*
- * Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread.
+ * Wrap a block of code in a conditional bql_{lock,unlock}.
*/
typedef struct IOThreadLockAuto IOThreadLockAuto;

static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file,
int line)
{
- if (qemu_mutex_iothread_locked()) {
+ if (bql_locked()) {
return NULL;
}
- qemu_mutex_lock_iothread_impl(file, line);
+ bql_lock_impl(file, line);
/* Anything non-NULL causes the cleanup function to be called */
return (IOThreadLockAuto *)(uintptr_t)1;
}

static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l)
{
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock)
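For context, the guard re-documented in the hunk above is used like this
(a sketch; the function name is hypothetical). It takes the BQL only if
the current thread does not already hold it, and the g_autoptr cleanup
releases the lock at scope exit only when the guard took it:

    static void process_request(void *opaque)   /* hypothetical caller */
    {
        QEMU_IOTHREAD_LOCK_GUARD();
        /* BQL is held here, whether or not the caller held it */
    }   /* unlocked here only if the guard did the locking */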
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
248
index XXXXXXX..XXXXXXX 100644
249
--- a/include/qemu/thread.h
250
+++ b/include/qemu/thread.h
251
@@ -XXX,XX +XXX,XX @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
252
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
253
const char *f, int l);
254
255
-extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
256
+extern QemuMutexLockFunc bql_mutex_lock_func;
257
extern QemuMutexLockFunc qemu_mutex_lock_func;
258
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
259
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
260
diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c
261
index XXXXXXX..XXXXXXX 100644
262
--- a/accel/accel-blocker.c
263
+++ b/accel/accel-blocker.c
264
@@ -XXX,XX +XXX,XX @@ void accel_blocker_init(void)
265
266
void accel_ioctl_begin(void)
267
{
268
- if (likely(qemu_mutex_iothread_locked())) {
269
+ if (likely(bql_locked())) {
270
return;
271
}
272
273
@@ -XXX,XX +XXX,XX @@ void accel_ioctl_begin(void)
274
275
void accel_ioctl_end(void)
276
{
277
- if (likely(qemu_mutex_iothread_locked())) {
278
+ if (likely(bql_locked())) {
279
return;
280
}
281
282
@@ -XXX,XX +XXX,XX @@ void accel_ioctl_end(void)
283
284
void accel_cpu_ioctl_begin(CPUState *cpu)
285
{
286
- if (unlikely(qemu_mutex_iothread_locked())) {
287
+ if (unlikely(bql_locked())) {
288
return;
289
}
290
291
@@ -XXX,XX +XXX,XX @@ void accel_cpu_ioctl_begin(CPUState *cpu)
292
293
void accel_cpu_ioctl_end(CPUState *cpu)
294
{
295
- if (unlikely(qemu_mutex_iothread_locked())) {
296
+ if (unlikely(bql_locked())) {
297
return;
298
}
299
300
@@ -XXX,XX +XXX,XX @@ void accel_ioctl_inhibit_begin(void)
301
* We allow to inhibit only when holding the BQL, so we can identify
302
* when an inhibitor wants to issue an ioctl easily.
303
*/
304
- g_assert(qemu_mutex_iothread_locked());
305
+ g_assert(bql_locked());
306
307
/* Block further invocations of the ioctls outside the BQL. */
308
CPU_FOREACH(cpu) {
309
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
310
index XXXXXXX..XXXXXXX 100644
311
--- a/accel/dummy-cpus.c
312
+++ b/accel/dummy-cpus.c
313
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)
314
315
rcu_register_thread();
316
317
- qemu_mutex_lock_iothread();
318
+ bql_lock();
319
qemu_thread_get_self(cpu->thread);
320
cpu->thread_id = qemu_get_thread_id();
321
cpu->neg.can_do_io = true;
322
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)
323
qemu_guest_random_seed_thread_part2(cpu->random_seed);
324
325
do {
326
- qemu_mutex_unlock_iothread();
327
+ bql_unlock();
328
#ifndef _WIN32
329
do {
330
int sig;
331
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)
332
#else
333
qemu_sem_wait(&cpu->sem);
334
#endif
335
- qemu_mutex_lock_iothread();
336
+ bql_lock();
337
qemu_wait_io_event(cpu);
338
} while (!cpu->unplug);
339
340
- qemu_mutex_unlock_iothread();
341
+ bql_unlock();
342
rcu_unregister_thread();
343
return NULL;
344
}
345
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
346
index XXXXXXX..XXXXXXX 100644
347
--- a/accel/hvf/hvf-accel-ops.c
348
+++ b/accel/hvf/hvf-accel-ops.c
349
@@ -XXX,XX +XXX,XX @@ static void *hvf_cpu_thread_fn(void *arg)
350
351
rcu_register_thread();
352
353
- qemu_mutex_lock_iothread();
354
+ bql_lock();
355
qemu_thread_get_self(cpu->thread);
356
357
cpu->thread_id = qemu_get_thread_id();
358
@@ -XXX,XX +XXX,XX @@ static void *hvf_cpu_thread_fn(void *arg)
359
360
hvf_vcpu_destroy(cpu);
361
cpu_thread_signal_destroyed(cpu);
362
- qemu_mutex_unlock_iothread();
363
+ bql_unlock();
364
rcu_unregister_thread();
365
return NULL;
366
}
367
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
368
index XXXXXXX..XXXXXXX 100644
369
--- a/accel/kvm/kvm-accel-ops.c
370
+++ b/accel/kvm/kvm-accel-ops.c
371
@@ -XXX,XX +XXX,XX @@ static void *kvm_vcpu_thread_fn(void *arg)
372
373
rcu_register_thread();
374
375
- qemu_mutex_lock_iothread();
376
+ bql_lock();
377
qemu_thread_get_self(cpu->thread);
378
cpu->thread_id = qemu_get_thread_id();
379
cpu->neg.can_do_io = true;
380
@@ -XXX,XX +XXX,XX @@ static void *kvm_vcpu_thread_fn(void *arg)
381
382
kvm_destroy_vcpu(cpu);
383
cpu_thread_signal_destroyed(cpu);
384
- qemu_mutex_unlock_iothread();
385
+ bql_unlock();
386
rcu_unregister_thread();
387
return NULL;
388
}
389
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
390
index XXXXXXX..XXXXXXX 100644
391
--- a/accel/kvm/kvm-all.c
392
+++ b/accel/kvm/kvm-all.c
393
@@ -XXX,XX +XXX,XX @@ static void kvm_dirty_ring_flush(void)
394
* should always be with BQL held, serialization is guaranteed.
395
* However, let's be sure of it.
396
*/
397
- assert(qemu_mutex_iothread_locked());
398
+ assert(bql_locked());
399
/*
400
* First make sure to flush the hardware buffers by kicking all
401
* vcpus out in a synchronous way.
402
@@ -XXX,XX +XXX,XX @@ static void *kvm_dirty_ring_reaper_thread(void *data)
403
trace_kvm_dirty_ring_reaper("wakeup");
404
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
405
406
- qemu_mutex_lock_iothread();
407
+ bql_lock();
408
kvm_dirty_ring_reap(s, NULL);
409
- qemu_mutex_unlock_iothread();
410
+ bql_unlock();
411
412
r->reaper_iteration++;
413
}
414
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
415
return EXCP_HLT;
416
}
417
418
- qemu_mutex_unlock_iothread();
419
+ bql_unlock();
420
cpu_exec_start(cpu);
421
422
do {
423
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
424
425
#ifdef KVM_HAVE_MCE_INJECTION
426
if (unlikely(have_sigbus_pending)) {
427
- qemu_mutex_lock_iothread();
428
+ bql_lock();
429
kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
430
pending_sigbus_addr);
431
have_sigbus_pending = false;
432
- qemu_mutex_unlock_iothread();
433
+ bql_unlock();
434
}
435
#endif
436
437
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
438
* still full. Got kicked by KVM_RESET_DIRTY_RINGS.
439
*/
440
trace_kvm_dirty_ring_full(cpu->cpu_index);
441
- qemu_mutex_lock_iothread();
442
+ bql_lock();
443
/*
444
* We throttle vCPU by making it sleep once it exit from kernel
445
* due to dirty ring full. In the dirtylimit scenario, reaping
446
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
447
} else {
448
kvm_dirty_ring_reap(kvm_state, NULL);
449
}
450
- qemu_mutex_unlock_iothread();
451
+ bql_unlock();
452
dirtylimit_vcpu_execute(cpu);
453
ret = 0;
454
break;
455
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
456
break;
457
case KVM_SYSTEM_EVENT_CRASH:
458
kvm_cpu_synchronize_state(cpu);
459
- qemu_mutex_lock_iothread();
460
+ bql_lock();
461
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
462
- qemu_mutex_unlock_iothread();
463
+ bql_unlock();
464
ret = 0;
465
break;
466
default:
467
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
468
} while (ret == 0);
469
470
cpu_exec_end(cpu);
471
- qemu_mutex_lock_iothread();
472
+ bql_lock();
473
474
if (ret < 0) {
475
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
476
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
477
index XXXXXXX..XXXXXXX 100644
478
--- a/accel/tcg/cpu-exec.c
479
+++ b/accel/tcg/cpu-exec.c
480
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
481
tcg_ctx->gen_tb = NULL;
482
}
483
#endif
484
- if (qemu_mutex_iothread_locked()) {
485
- qemu_mutex_unlock_iothread();
486
+ if (bql_locked()) {
487
+ bql_unlock();
488
}
489
assert_no_pages_locked();
490
}
491
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_halt(CPUState *cpu)
492
#if defined(TARGET_I386)
493
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
494
X86CPU *x86_cpu = X86_CPU(cpu);
495
- qemu_mutex_lock_iothread();
496
+ bql_lock();
497
apic_poll_irq(x86_cpu->apic_state);
498
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
499
- qemu_mutex_unlock_iothread();
500
+ bql_unlock();
501
}
502
#endif /* TARGET_I386 */
503
if (!cpu_has_work(cpu)) {
504
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
505
#else
506
if (replay_exception()) {
507
CPUClass *cc = CPU_GET_CLASS(cpu);
508
- qemu_mutex_lock_iothread();
509
+ bql_lock();
510
cc->tcg_ops->do_interrupt(cpu);
511
- qemu_mutex_unlock_iothread();
512
+ bql_unlock();
513
cpu->exception_index = -1;
514
515
if (unlikely(cpu->singlestep_enabled)) {
516
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
517
518
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
519
int interrupt_request;
520
- qemu_mutex_lock_iothread();
521
+ bql_lock();
522
interrupt_request = cpu->interrupt_request;
523
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
524
/* Mask out external interrupts for this step. */
525
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
526
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
527
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
528
cpu->exception_index = EXCP_DEBUG;
529
- qemu_mutex_unlock_iothread();
530
+ bql_unlock();
531
return true;
532
}
533
#if !defined(CONFIG_USER_ONLY)
534
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
535
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
536
cpu->halted = 1;
537
cpu->exception_index = EXCP_HLT;
538
- qemu_mutex_unlock_iothread();
539
+ bql_unlock();
540
return true;
541
}
542
#if defined(TARGET_I386)
543
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
544
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
545
do_cpu_init(x86_cpu);
546
cpu->exception_index = EXCP_HALTED;
547
- qemu_mutex_unlock_iothread();
548
+ bql_unlock();
549
return true;
550
}
551
#else
552
else if (interrupt_request & CPU_INTERRUPT_RESET) {
553
replay_interrupt();
554
cpu_reset(cpu);
555
- qemu_mutex_unlock_iothread();
556
+ bql_unlock();
557
return true;
558
}
559
#endif /* !TARGET_I386 */
560
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
561
*/
562
if (unlikely(cpu->singlestep_enabled)) {
563
cpu->exception_index = EXCP_DEBUG;
564
- qemu_mutex_unlock_iothread();
565
+ bql_unlock();
566
return true;
567
}
568
cpu->exception_index = -1;
569
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
570
}
571
572
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
573
- qemu_mutex_unlock_iothread();
574
+ bql_unlock();
575
}
576
577
/* Finally, check if we need to exit to the main loop. */
578
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
579
index XXXXXXX..XXXXXXX 100644
580
--- a/accel/tcg/cputlb.c
581
+++ b/accel/tcg/cputlb.c
582
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
583
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
584
mr = section->mr;
585
586
- qemu_mutex_lock_iothread();
587
+ bql_lock();
588
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
589
type, ra, mr, mr_offset);
590
- qemu_mutex_unlock_iothread();
591
+ bql_unlock();
592
593
return ret;
594
}
595
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
596
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
597
mr = section->mr;
598
599
- qemu_mutex_lock_iothread();
600
+ bql_lock();
601
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
602
MMU_DATA_LOAD, ra, mr, mr_offset);
603
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
604
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
605
- qemu_mutex_unlock_iothread();
606
+ bql_unlock();
607
608
return int128_make128(b, a);
609
}
610
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
611
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
612
mr = section->mr;
613
614
- qemu_mutex_lock_iothread();
615
+ bql_lock();
616
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
617
ra, mr, mr_offset);
618
- qemu_mutex_unlock_iothread();
619
+ bql_unlock();
620
621
return ret;
622
}
623
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
624
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
625
mr = section->mr;
626
627
- qemu_mutex_lock_iothread();
628
+ bql_lock();
629
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
630
mmu_idx, ra, mr, mr_offset);
631
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
632
size - 8, mmu_idx, ra, mr, mr_offset + 8);
633
- qemu_mutex_unlock_iothread();
634
+ bql_unlock();
635
636
return ret;
637
}
638
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
639
index XXXXXXX..XXXXXXX 100644
640
--- a/accel/tcg/tcg-accel-ops-icount.c
641
+++ b/accel/tcg/tcg-accel-ops-icount.c
642
@@ -XXX,XX +XXX,XX @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
643
* We're called without the iothread lock, so must take it while
644
* we're calling timer handlers.
645
*/
646
- qemu_mutex_lock_iothread();
647
+ bql_lock();
648
icount_notify_aio_contexts();
649
- qemu_mutex_unlock_iothread();
650
+ bql_unlock();
651
}
652
}
653
654
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
655
index XXXXXXX..XXXXXXX 100644
656
--- a/accel/tcg/tcg-accel-ops-mttcg.c
657
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
658
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
659
rcu_add_force_rcu_notifier(&force_rcu.notifier);
660
tcg_register_thread();
661
662
- qemu_mutex_lock_iothread();
663
+ bql_lock();
664
qemu_thread_get_self(cpu->thread);
665
666
cpu->thread_id = qemu_get_thread_id();
667
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
668
do {
669
if (cpu_can_run(cpu)) {
670
int r;
671
- qemu_mutex_unlock_iothread();
672
+ bql_unlock();
673
r = tcg_cpus_exec(cpu);
674
- qemu_mutex_lock_iothread();
675
+ bql_lock();
676
switch (r) {
677
case EXCP_DEBUG:
678
cpu_handle_guest_debug(cpu);
679
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
680
*/
681
break;
682
case EXCP_ATOMIC:
683
- qemu_mutex_unlock_iothread();
684
+ bql_unlock();
685
cpu_exec_step_atomic(cpu);
686
- qemu_mutex_lock_iothread();
687
+ bql_lock();
688
default:
689
/* Ignore everything else? */
690
break;
691
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
692
} while (!cpu->unplug || cpu_can_run(cpu));
693
694
tcg_cpus_destroy(cpu);
695
- qemu_mutex_unlock_iothread();
696
+ bql_unlock();
697
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
698
rcu_unregister_thread();
699
return NULL;
700
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
701
index XXXXXXX..XXXXXXX 100644
702
--- a/accel/tcg/tcg-accel-ops-rr.c
703
+++ b/accel/tcg/tcg-accel-ops-rr.c
704
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
705
rcu_add_force_rcu_notifier(&force_rcu);
706
tcg_register_thread();
707
708
- qemu_mutex_lock_iothread();
709
+ bql_lock();
710
qemu_thread_get_self(cpu->thread);
711
712
cpu->thread_id = qemu_get_thread_id();
713
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
714
/* Only used for icount_enabled() */
715
int64_t cpu_budget = 0;
716
717
- qemu_mutex_unlock_iothread();
718
+ bql_unlock();
719
replay_mutex_lock();
720
- qemu_mutex_lock_iothread();
721
+ bql_lock();
722
723
if (icount_enabled()) {
724
int cpu_count = rr_cpu_count();
725
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
726
if (cpu_can_run(cpu)) {
727
int r;
728
729
- qemu_mutex_unlock_iothread();
730
+ bql_unlock();
731
if (icount_enabled()) {
732
icount_prepare_for_run(cpu, cpu_budget);
733
}
734
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
735
if (icount_enabled()) {
736
icount_process_data(cpu);
737
}
738
- qemu_mutex_lock_iothread();
739
+ bql_lock();
740
741
if (r == EXCP_DEBUG) {
742
cpu_handle_guest_debug(cpu);
743
break;
744
} else if (r == EXCP_ATOMIC) {
745
- qemu_mutex_unlock_iothread();
746
+ bql_unlock();
747
cpu_exec_step_atomic(cpu);
748
- qemu_mutex_lock_iothread();
749
+ bql_lock();
750
break;
751
}
752
} else if (cpu->stop) {
753
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
754
index XXXXXXX..XXXXXXX 100644
755
--- a/accel/tcg/tcg-accel-ops.c
756
+++ b/accel/tcg/tcg-accel-ops.c
757
@@ -XXX,XX +XXX,XX @@ static void tcg_cpu_reset_hold(CPUState *cpu)
758
/* mask must never be zero, except for A20 change call */
759
void tcg_handle_interrupt(CPUState *cpu, int mask)
760
{
761
- g_assert(qemu_mutex_iothread_locked());
762
+ g_assert(bql_locked());
763
764
cpu->interrupt_request |= mask;
765
766
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
767
index XXXXXXX..XXXXXXX 100644
768
--- a/accel/tcg/translate-all.c
769
+++ b/accel/tcg/translate-all.c
770
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
771
772
void cpu_interrupt(CPUState *cpu, int mask)
773
{
774
- g_assert(qemu_mutex_iothread_locked());
775
+ g_assert(bql_locked());
776
cpu->interrupt_request |= mask;
777
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
778
}
779
diff --git a/cpu-common.c b/cpu-common.c
780
index XXXXXXX..XXXXXXX 100644
781
--- a/cpu-common.c
782
+++ b/cpu-common.c
783
@@ -XXX,XX +XXX,XX @@ void process_queued_cpu_work(CPUState *cpu)
784
* BQL, so it goes to sleep; start_exclusive() is sleeping too, so
785
* neither CPU can proceed.
786
*/
787
- qemu_mutex_unlock_iothread();
788
+ bql_unlock();
789
start_exclusive();
790
wi->func(cpu, wi->data);
791
end_exclusive();
792
- qemu_mutex_lock_iothread();
793
+ bql_lock();
794
} else {
795
wi->func(cpu, wi->data);
796
}
797
diff --git a/dump/dump.c b/dump/dump.c
798
index XXXXXXX..XXXXXXX 100644
799
--- a/dump/dump.c
800
+++ b/dump/dump.c
801
@@ -XXX,XX +XXX,XX @@ static int dump_cleanup(DumpState *s)
802
s->guest_note = NULL;
803
if (s->resume) {
804
if (s->detached) {
805
- qemu_mutex_lock_iothread();
806
+ bql_lock();
807
}
808
vm_start();
809
if (s->detached) {
810
- qemu_mutex_unlock_iothread();
811
+ bql_unlock();
812
}
813
}
814
migrate_del_blocker(&dump_migration_blocker);
815
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
816
index XXXXXXX..XXXXXXX 100644
817
--- a/hw/core/cpu-common.c
818
+++ b/hw/core/cpu-common.c
819
@@ -XXX,XX +XXX,XX @@ CPUState *cpu_create(const char *typename)
820
* BQL here if we need to. cpu_interrupt assumes it is held.*/
821
void cpu_reset_interrupt(CPUState *cpu, int mask)
822
{
823
- bool need_lock = !qemu_mutex_iothread_locked();
824
+ bool need_lock = !bql_locked();
825
826
if (need_lock) {
827
- qemu_mutex_lock_iothread();
828
+ bql_lock();
829
}
830
cpu->interrupt_request &= ~mask;
831
if (need_lock) {
832
- qemu_mutex_unlock_iothread();
833
+ bql_unlock();
834
}
835
}
836
837
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
838
index XXXXXXX..XXXXXXX 100644
839
--- a/hw/i386/intel_iommu.c
840
+++ b/hw/i386/intel_iommu.c
841
@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
842
{
843
bool use_iommu, pt;
844
/* Whether we need to take the BQL on our own */
845
- bool take_bql = !qemu_mutex_iothread_locked();
846
+ bool take_bql = !bql_locked();
847
848
assert(as);
849
850
@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
851
* it. We'd better make sure we have had it already, or, take it.
852
*/
853
if (take_bql) {
854
- qemu_mutex_lock_iothread();
855
+ bql_lock();
856
}
857
858
/* Turn off first then on the other */
859
@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
860
}
861
862
if (take_bql) {
863
- qemu_mutex_unlock_iothread();
864
+ bql_unlock();
865
}
866
867
return use_iommu;
868
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
869
index XXXXXXX..XXXXXXX 100644
870
--- a/hw/i386/kvm/xen_evtchn.c
871
+++ b/hw/i386/kvm/xen_evtchn.c
872
@@ -XXX,XX +XXX,XX @@ void xen_evtchn_set_callback_level(int level)
873
* effect immediately. That just leaves interdomain loopback as the case
874
* which uses the BH.
875
*/
876
- if (!qemu_mutex_iothread_locked()) {
877
+ if (!bql_locked()) {
878
qemu_bh_schedule(s->gsi_bh);
879
return;
880
}
881
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_set_callback_param(uint64_t param)
882
* We need the BQL because set_callback_pci_intx() may call into PCI code,
883
* and because we may need to manipulate the old and new GSI levels.
884
*/
885
- assert(qemu_mutex_iothread_locked());
886
+ assert(bql_locked());
887
qemu_mutex_lock(&s->port_lock);
888
889
switch (type) {
890
@@ -XXX,XX +XXX,XX @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
891
XenEvtchnPort *p = &s->port_table[port];
892
893
/* Because it *might* be a PIRQ port */
894
- assert(qemu_mutex_iothread_locked());
895
+ assert(bql_locked());
896
897
switch (p->type) {
898
case EVTCHNSTAT_closed:
899
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_soft_reset(void)
900
return -ENOTSUP;
901
}
902
903
- assert(qemu_mutex_iothread_locked());
904
+ assert(bql_locked());
905
906
qemu_mutex_lock(&s->port_lock);
907
908
@@ -XXX,XX +XXX,XX @@ bool xen_evtchn_set_gsi(int gsi, int level)
909
XenEvtchnState *s = xen_evtchn_singleton;
910
int pirq;
911
912
- assert(qemu_mutex_iothread_locked());
913
+ assert(bql_locked());
914
915
if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
916
return false;
917
@@ -XXX,XX +XXX,XX @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
918
return;
919
}
920
921
- assert(qemu_mutex_iothread_locked());
922
+ assert(bql_locked());
923
924
pirq = msi_pirq_target(addr, data);
925
926
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
927
return 1; /* Not a PIRQ */
928
}
929
930
- assert(qemu_mutex_iothread_locked());
931
+ assert(bql_locked());
932
933
pirq = msi_pirq_target(address, data);
934
if (!pirq || pirq >= s->nr_pirqs) {
935
@@ -XXX,XX +XXX,XX @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
936
return false;
937
}
938
939
- assert(qemu_mutex_iothread_locked());
940
+ assert(bql_locked());
941
942
pirq = msi_pirq_target(address, data);
943
if (!pirq || pirq >= s->nr_pirqs) {
944
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
945
index XXXXXXX..XXXXXXX 100644
946
--- a/hw/i386/kvm/xen_overlay.c
947
+++ b/hw/i386/kvm/xen_overlay.c
948
@@ -XXX,XX +XXX,XX @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
949
return -ENOENT;
950
}
951
952
- assert(qemu_mutex_iothread_locked());
953
+ assert(bql_locked());
954
955
if (s->shinfo_gpa) {
956
/* If removing shinfo page, turn the kernel magic off first */
957
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
958
index XXXXXXX..XXXXXXX 100644
959
--- a/hw/i386/kvm/xen_xenstore.c
960
+++ b/hw/i386/kvm/xen_xenstore.c
961
@@ -XXX,XX +XXX,XX @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
962
{
963
XenXenstoreState *s = opaque;
964
965
- assert(qemu_mutex_iothread_locked());
966
+ assert(bql_locked());
967
968
/*
969
* If there's a response pending, we obviously can't scribble over
970
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
971
index XXXXXXX..XXXXXXX 100644
972
--- a/hw/intc/arm_gicv3_cpuif.c
973
+++ b/hw/intc/arm_gicv3_cpuif.c
974
@@ -XXX,XX +XXX,XX @@ void gicv3_cpuif_update(GICv3CPUState *cs)
975
ARMCPU *cpu = ARM_CPU(cs->cpu);
976
CPUARMState *env = &cpu->env;
977
978
- g_assert(qemu_mutex_iothread_locked());
979
+ g_assert(bql_locked());
980
981
trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
982
cs->hppi.grp, cs->hppi.prio);
983
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
984
index XXXXXXX..XXXXXXX 100644
985
--- a/hw/intc/s390_flic.c
986
+++ b/hw/intc/s390_flic.c
987
@@ -XXX,XX +XXX,XX @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
988
QEMUS390FlicIO *cur, *next;
989
uint8_t isc;
990
991
- g_assert(qemu_mutex_iothread_locked());
992
+ g_assert(bql_locked());
993
if (!(flic->pending & FLIC_PENDING_IO)) {
994
return 0;
995
}
996
@@ -XXX,XX +XXX,XX @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
997
{
998
uint32_t tmp;
999
1000
- g_assert(qemu_mutex_iothread_locked());
1001
+ g_assert(bql_locked());
1002
g_assert(flic->pending & FLIC_PENDING_SERVICE);
1003
tmp = flic->service_param;
1004
flic->service_param = 0;
1005
@@ -XXX,XX +XXX,XX @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
1006
QEMUS390FlicIO *io;
1007
uint8_t isc;
1008
1009
- g_assert(qemu_mutex_iothread_locked());
1010
+ g_assert(bql_locked());
1011
if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
1012
return NULL;
1013
}
1014
@@ -XXX,XX +XXX,XX @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
1015
1016
void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
1017
{
1018
- g_assert(qemu_mutex_iothread_locked());
1019
+ g_assert(bql_locked());
1020
g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
1021
flic->pending &= ~FLIC_PENDING_MCHK_CR;
1022
}
1023
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
1024
{
1025
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
1026
1027
- g_assert(qemu_mutex_iothread_locked());
1028
+ g_assert(bql_locked());
1029
/* multiplexing is good enough for sclp - kvm does it internally as well */
1030
flic->service_param |= parm;
1031
flic->pending |= FLIC_PENDING_SERVICE;
1032
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
1033
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
1034
QEMUS390FlicIO *io;
1035
1036
- g_assert(qemu_mutex_iothread_locked());
1037
+ g_assert(bql_locked());
1038
io = g_new0(QEMUS390FlicIO, 1);
1039
io->id = subchannel_id;
1040
io->nr = subchannel_nr;
1041
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
1042
{
1043
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
1044
1045
- g_assert(qemu_mutex_iothread_locked());
1046
+ g_assert(bql_locked());
1047
flic->pending |= FLIC_PENDING_MCHK_CR;
1048
1049
qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
1050
@@ -XXX,XX +XXX,XX @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
1051
1052
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
1053
{
1054
- g_assert(qemu_mutex_iothread_locked());
1055
+ g_assert(bql_locked());
1056
return !!flic->pending;
1057
}
1058
1059
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_flic_reset(DeviceState *dev)
1060
QEMUS390FlicIO *cur, *next;
1061
int isc;
1062
1063
- g_assert(qemu_mutex_iothread_locked());
1064
+ g_assert(bql_locked());
1065
flic->simm = 0;
1066
flic->nimm = 0;
1067
flic->pending = 0;
1068
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
1069
index XXXXXXX..XXXXXXX 100644
1070
--- a/hw/misc/edu.c
1071
+++ b/hw/misc/edu.c
1072
@@ -XXX,XX +XXX,XX @@ static void *edu_fact_thread(void *opaque)
1073
smp_mb__after_rmw();
1074
1075
if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
1076
- qemu_mutex_lock_iothread();
1077
+ bql_lock();
1078
edu_raise_irq(edu, FACT_IRQ);
1079
- qemu_mutex_unlock_iothread();
1080
+ bql_unlock();
1081
}
1082
}
1083
1084
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
1085
index XXXXXXX..XXXXXXX 100644
1086
--- a/hw/misc/imx6_src.c
1087
+++ b/hw/misc/imx6_src.c
1088
@@ -XXX,XX +XXX,XX @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
1089
struct SRCSCRResetInfo *ri = data.host_ptr;
1090
IMX6SRCState *s = ri->s;
1091
1092
- assert(qemu_mutex_iothread_locked());
1093
+ assert(bql_locked());
1094
1095
s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
1096
DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
1097
diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c
1098
index XXXXXXX..XXXXXXX 100644
1099
--- a/hw/misc/imx7_src.c
1100
+++ b/hw/misc/imx7_src.c
1101
@@ -XXX,XX +XXX,XX @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
1102
struct SRCSCRResetInfo *ri = data.host_ptr;
1103
IMX7SRCState *s = ri->s;
1104
1105
- assert(qemu_mutex_iothread_locked());
1106
+ assert(bql_locked());
1107
1108
s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);
1109
1110
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
1111
index XXXXXXX..XXXXXXX 100644
1112
--- a/hw/net/xen_nic.c
1113
+++ b/hw/net/xen_nic.c
1114
@@ -XXX,XX +XXX,XX @@ static bool net_tx_packets(struct XenNetDev *netdev)
1115
void *page;
1116
void *tmpbuf = NULL;
1117
1118
- assert(qemu_mutex_iothread_locked());
1119
+ assert(bql_locked());
1120
1121
for (;;) {
1122
rc = netdev->tx_ring.req_cons;
1123
@@ -XXX,XX +XXX,XX @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
1124
RING_IDX rc, rp;
1125
void *page;
1126
1127
- assert(qemu_mutex_iothread_locked());
1128
+ assert(bql_locked());
1129
1130
if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) {
1131
return -1;
1132
@@ -XXX,XX +XXX,XX @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp)
1133
XenNetDev *netdev = XEN_NET_DEVICE(xendev);
1134
unsigned int port, rx_copy;
1135
1136
- assert(qemu_mutex_iothread_locked());
1137
+ assert(bql_locked());
1138
1139
if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u",
1140
&netdev->tx_ring_ref) != 1) {
1141
@@ -XXX,XX +XXX,XX @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp)
1142
1143
trace_xen_netdev_disconnect(netdev->dev);
1144
1145
- assert(qemu_mutex_iothread_locked());
1146
+ assert(bql_locked());
1147
1148
netdev->tx_ring.sring = NULL;
1149
netdev->rx_ring.sring = NULL;
1150
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
1151
index XXXXXXX..XXXXXXX 100644
1152
--- a/hw/ppc/pegasos2.c
1153
+++ b/hw/ppc/pegasos2.c
1154
@@ -XXX,XX +XXX,XX @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
1155
CPUPPCState *env = &cpu->env;
1156
1157
/* The TCG path should also be holding the BQL at this point */
1158
- g_assert(qemu_mutex_iothread_locked());
1159
+ g_assert(bql_locked());
1160
1161
if (FIELD_EX64(env->msr, MSR, PR)) {
1162
qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");
1163
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
1164
index XXXXXXX..XXXXXXX 100644
1165
--- a/hw/ppc/ppc.c
1166
+++ b/hw/ppc/ppc.c
1167
@@ -XXX,XX +XXX,XX @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
1168
{
1169
PowerPCCPU *cpu = env_archcpu(env);
1170
1171
- qemu_mutex_lock_iothread();
1172
+ bql_lock();
1173
1174
switch ((val >> 28) & 0x3) {
1175
case 0x0:
1176
@@ -XXX,XX +XXX,XX @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
1177
break;
1178
}
1179
1180
- qemu_mutex_unlock_iothread();
1181
+ bql_unlock();
1182
}
1183
1184
/* PowerPC 40x internal IRQ controller */
1185
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
1186
index XXXXXXX..XXXXXXX 100644
1187
--- a/hw/ppc/spapr.c
1188
+++ b/hw/ppc/spapr.c
1189
@@ -XXX,XX +XXX,XX @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1190
CPUPPCState *env = &cpu->env;
1191
1192
/* The TCG path should also be holding the BQL at this point */
1193
- g_assert(qemu_mutex_iothread_locked());
1194
+ g_assert(bql_locked());
1195
1196
g_assert(!vhyp_cpu_in_nested(cpu));
1197
1198
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
1199
index XXXXXXX..XXXXXXX 100644
1200
--- a/hw/ppc/spapr_rng.c
1201
+++ b/hw/ppc/spapr_rng.c
1202
@@ -XXX,XX +XXX,XX @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr,
1203
while (hrdata.received < 8) {
1204
rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
1205
random_recv, &hrdata);
1206
- qemu_mutex_unlock_iothread();
1207
+ bql_unlock();
1208
qemu_sem_wait(&hrdata.sem);
1209
- qemu_mutex_lock_iothread();
1210
+ bql_lock();
1211
}
1212
1213
qemu_sem_destroy(&hrdata.sem);
1214
diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c
1215
index XXXXXXX..XXXXXXX 100644
1216
--- a/hw/ppc/spapr_softmmu.c
1217
+++ b/hw/ppc/spapr_softmmu.c
1218
@@ -XXX,XX +XXX,XX @@ static void *hpt_prepare_thread(void *opaque)
1219
pending->ret = H_NO_MEM;
1220
}
1221
1222
- qemu_mutex_lock_iothread();
1223
+ bql_lock();
1224
1225
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
1226
/* Ready to go */
1227
@@ -XXX,XX +XXX,XX @@ static void *hpt_prepare_thread(void *opaque)
1228
free_pending_hpt(pending);
1229
}
1230
1231
- qemu_mutex_unlock_iothread();
1232
+ bql_unlock();
1233
return NULL;
1234
}
1235
1236
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
1237
index XXXXXXX..XXXXXXX 100644
1238
--- a/hw/remote/mpqemu-link.c
1239
+++ b/hw/remote/mpqemu-link.c
1240
@@ -XXX,XX +XXX,XX @@
1241
*/
1242
bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
1243
{
1244
- bool iolock = qemu_mutex_iothread_locked();
1245
+ bool drop_bql = bql_locked();
1246
bool iothread = qemu_in_iothread();
1247
struct iovec send[2] = {};
1248
int *fds = NULL;
1249
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
1250
* for IOThread case.
1251
* Also skip lock handling while in a co-routine in the main context.
1252
*/
1253
- if (iolock && !iothread && !qemu_in_coroutine()) {
1254
- qemu_mutex_unlock_iothread();
1255
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1256
+ bql_unlock();
1257
}
1258
1259
if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
1260
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
1261
trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
1262
}
1263
1264
- if (iolock && !iothread && !qemu_in_coroutine()) {
1265
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1266
/* See above comment why skip locking here. */
1267
- qemu_mutex_lock_iothread();
1268
+ bql_lock();
1269
}
1270
1271
return ret;
1272
@@ -XXX,XX +XXX,XX @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
1273
size_t *nfds, Error **errp)
1274
{
1275
struct iovec iov = { .iov_base = buf, .iov_len = len };
1276
- bool iolock = qemu_mutex_iothread_locked();
1277
+ bool drop_bql = bql_locked();
1278
bool iothread = qemu_in_iothread();
1279
int ret = -1;
1280
1281
@@ -XXX,XX +XXX,XX @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
1282
*/
1283
assert(qemu_in_coroutine() || !iothread);
1284
1285
- if (iolock && !iothread && !qemu_in_coroutine()) {
1286
- qemu_mutex_unlock_iothread();
1287
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1288
+ bql_unlock();
1289
}
1290
1291
ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
1292
1293
- if (iolock && !iothread && !qemu_in_coroutine()) {
1294
- qemu_mutex_lock_iothread();
1295
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1296
+ bql_lock();
1297
}
1298
1299
return (ret <= 0) ? ret : iov.iov_len;
1300
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
1301
index XXXXXXX..XXXXXXX 100644
1302
--- a/hw/remote/vfio-user-obj.c
1303
+++ b/hw/remote/vfio-user-obj.c
1304
@@ -XXX,XX +XXX,XX @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
1305
}
1306
1307
if (release_lock) {
1308
- qemu_mutex_unlock_iothread();
1309
+ bql_unlock();
1310
release_lock = false;
1311
}
1312
1313
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c
1314
index XXXXXXX..XXXXXXX 100644
1315
--- a/hw/s390x/s390-skeys.c
1316
+++ b/hw/s390x/s390-skeys.c
1317
@@ -XXX,XX +XXX,XX @@ void qmp_dump_skeys(const char *filename, Error **errp)
1318
goto out;
1319
}
1320
1321
- assert(qemu_mutex_iothread_locked());
1322
+ assert(bql_locked());
1323
guest_phys_blocks_init(&guest_phys_blocks);
1324
guest_phys_blocks_append(&guest_phys_blocks);
1325
1326
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_state_pending(void *opaque,
SaveBitmapState *dbms;
uint64_t pending = 0;

- qemu_mutex_lock_iothread();
+ bql_lock();

QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_state_pending(void *opaque,
pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();

trace_dirty_bitmap_state_pending(pending);

diff --git a/migration/block.c b/migration/block.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
int64_t count;

if (bmds->shared_base) {
- qemu_mutex_lock_iothread();
+ bql_lock();
/* Skip unallocated sectors; intentionally treats failure or
* partial sector as an allocated sector */
while (cur_sector < total_sectors &&
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
}
cur_sector += count >> BDRV_SECTOR_BITS;
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

if (cur_sector >= total_sectors) {
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
* I/O runs in the main loop AioContext (see
* qemu_get_current_aio_context()).
*/
- qemu_mutex_lock_iothread();
+ bql_lock();
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
nr_sectors * BDRV_SECTOR_SIZE);
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
0, blk_mig_read_cb, blk);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

bmds->cur_sector = cur_sector + nr_sectors;
return (bmds->cur_sector >= total_sectors);
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
/* Always called with iothread lock taken for
* simplicity, block_save_complete also calls it.
*/
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = blk_mig_save_dirty_block(f, 1);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
if (ret < 0) {
return ret;
@@ -XXX,XX +XXX,XX @@ static void block_state_pending(void *opaque, uint64_t *must_precopy,
/* Estimate pending number of bytes to send */
uint64_t pending;

- qemu_mutex_lock_iothread();
+ bql_lock();
pending = get_remaining_dirty();
- qemu_mutex_unlock_iothread();
+ bql_unlock();

blk_mig_lock();
pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
diff --git a/migration/colo.c b/migration/colo.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,
qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL);
bioc->usage = 0;

- qemu_mutex_lock_iothread();
+ bql_lock();
if (failover_get_state() != FAILOVER_STATUS_NONE) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
goto out;
}
vm_stop_force_state(RUN_STATE_COLO);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("run", "stop");
/*
* Failover request bh could be called after vm_stop_force_state(),
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,
if (failover_get_state() != FAILOVER_STATUS_NONE) {
goto out;
}
- qemu_mutex_lock_iothread();
+ bql_lock();

replication_do_checkpoint_all(&local_err);
if (local_err) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
goto out;
}

colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
if (local_err) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
goto out;
}
/* Note: device state is saved into buffer */
ret = qemu_save_device_state(fb);

- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (ret < 0) {
goto out;
}
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,

ret = 0;

- qemu_mutex_lock_iothread();
+ bql_lock();
vm_start();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("stop", "run");

out:
@@ -XXX,XX +XXX,XX @@ static void colo_process_checkpoint(MigrationState *s)
fb = qemu_file_new_output(QIO_CHANNEL(bioc));
object_unref(OBJECT(bioc));

- qemu_mutex_lock_iothread();
+ bql_lock();
replication_start_all(REPLICATION_MODE_PRIMARY, &local_err);
if (local_err) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
goto out;
}

vm_start();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("stop", "run");

timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) +
@@ -XXX,XX +XXX,XX @@ out:

void migrate_start_colo_process(MigrationState *s)
{
- qemu_mutex_unlock_iothread();
+ bql_unlock();
qemu_event_init(&s->colo_checkpoint_event, false);
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
colo_checkpoint_notify, s);

qemu_sem_init(&s->colo_exit_sem, 0);
colo_process_checkpoint(s);
- qemu_mutex_lock_iothread();
+ bql_lock();
}

static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
Error *local_err = NULL;
int ret;

- qemu_mutex_lock_iothread();
+ bql_lock();
vm_stop_force_state(RUN_STATE_COLO);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("run", "stop");

/* FIXME: This is unnecessary for periodic checkpoint mode */
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
return;
}

- qemu_mutex_lock_iothread();
+ bql_lock();
cpu_synchronize_all_states();
ret = qemu_loadvm_state_main(mis->from_src_file, mis);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

if (ret < 0) {
error_setg(errp, "Load VM's live state (ram) error");
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
return;
}

- qemu_mutex_lock_iothread();
+ bql_lock();
vmstate_loading = true;
colo_flush_ram_cache();
ret = qemu_load_device_state(fb);
if (ret < 0) {
error_setg(errp, "COLO: load device state failed");
vmstate_loading = false;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}

@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}

@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}
/* Notify all filters of all NIC to do checkpoint */
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}

vmstate_loading = false;
vm_start();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("stop", "run");

if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
@@ -XXX,XX +XXX,XX @@ static void *colo_process_incoming_thread(void *opaque)
fb = qemu_file_new_input(QIO_CHANNEL(bioc));
object_unref(OBJECT(bioc));

- qemu_mutex_lock_iothread();
+ bql_lock();
replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
if (local_err) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
goto out;
}
vm_start();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
trace_colo_vm_state_change("stop", "run");

colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
Error *local_err = NULL;
QemuThread th;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

if (!migration_incoming_colo_enabled()) {
return 0;
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
qemu_coroutine_yield();
mis->colo_incoming_co = NULL;

- qemu_mutex_unlock_iothread();
+ bql_unlock();
/* Wait checkpoint incoming thread exit before free resource */
qemu_thread_join(&th);
- qemu_mutex_lock_iothread();
+ bql_lock();

/* We hold the global iothread lock, so it is safe here */
colo_release_ram_cache();
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -XXX,XX +XXX,XX @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,

void global_dirty_log_change(unsigned int flag, bool start)
{
- qemu_mutex_lock_iothread();
+ bql_lock();
if (start) {
memory_global_dirty_log_start(flag);
} else {
memory_global_dirty_log_stop(flag);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

/*
@@ -XXX,XX +XXX,XX @@ void global_dirty_log_change(unsigned int flag, bool start)
*/
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
- qemu_mutex_lock_iothread();
+ bql_lock();
memory_global_dirty_log_sync(false);
if (one_shot) {
memory_global_dirty_log_stop(flag);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
@@ -XXX,XX +XXX,XX @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
int64_t start_time;
DirtyPageRecord dirty_pages;

- qemu_mutex_lock_iothread();
+ bql_lock();
memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

/*
@@ -XXX,XX +XXX,XX @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
* KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
*/
dirtyrate_manual_reset_protect();
- qemu_mutex_unlock_iothread();
+ bql_unlock();

record_dirtypages_bitmap(&dirty_pages, true);

diff --git a/migration/migration.c b/migration/migration.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -XXX,XX +XXX,XX @@ static void migrate_fd_cleanup(MigrationState *s)
QEMUFile *tmp;

trace_migrate_fd_cleanup();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (s->migration_thread_running) {
qemu_thread_join(&s->thread);
s->migration_thread_running = false;
}
- qemu_mutex_lock_iothread();
+ bql_lock();

multifd_save_cleanup();
qemu_mutex_lock(&s->qemu_file_lock);
@@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms, Error **errp)
}

trace_postcopy_start();
- qemu_mutex_lock_iothread();
+ bql_lock();
trace_postcopy_start_set_run();

migration_downtime_start(ms);
@@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms, Error **errp)

migration_downtime_end(ms);

- qemu_mutex_unlock_iothread();
+ bql_unlock();

if (migrate_postcopy_ram()) {
/*
@@ -XXX,XX +XXX,XX @@ fail:
error_report_err(local_err);
}
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return -1;
}

@@ -XXX,XX +XXX,XX @@ static int migration_maybe_pause(MigrationState *s,
* wait for the 'pause_sem' semaphore.
*/
if (s->state != MIGRATION_STATUS_CANCELLING) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
migrate_set_state(&s->state, *current_active_state,
MIGRATION_STATUS_PRE_SWITCHOVER);
qemu_sem_wait(&s->pause_sem);
migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
new_state);
*current_active_state = new_state;
- qemu_mutex_lock_iothread();
+ bql_lock();
}

return s->state == new_state ? 0 : -EINVAL;
@@ -XXX,XX +XXX,XX @@ static int migration_completion_precopy(MigrationState *s,
{
int ret;

- qemu_mutex_lock_iothread();
+ bql_lock();
migration_downtime_start(s);

s->vm_old_state = runstate_get();
@@ -XXX,XX +XXX,XX @@ static int migration_completion_precopy(MigrationState *s,
ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
s->block_inactive);
out_unlock:
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}

@@ -XXX,XX +XXX,XX @@ static void migration_completion_postcopy(MigrationState *s)
{
trace_migration_completion_postcopy_end();

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_savevm_state_complete_postcopy(s->to_dst_file);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

/*
* Shutdown the postcopy fast path thread. This is only needed when dest
@@ -XXX,XX +XXX,XX @@ static void migration_completion_failed(MigrationState *s,
*/
Error *local_err = NULL;

- qemu_mutex_lock_iothread();
+ bql_lock();
bdrv_activate_all(&local_err);
if (local_err) {
error_report_err(local_err);
} else {
s->block_inactive = false;
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

migrate_set_state(&s->state, current_active_state,
@@ -XXX,XX +XXX,XX @@ static void migration_iteration_finish(MigrationState *s)
/* If we enabled cpu throttling for auto-converge, turn it off. */
cpu_throttle_stop();

- qemu_mutex_lock_iothread();
+ bql_lock();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
migration_calculate_complete(s);
@@ -XXX,XX +XXX,XX @@ static void migration_iteration_finish(MigrationState *s)
break;
}
migrate_fd_cleanup_schedule(s);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

static void bg_migration_iteration_finish(MigrationState *s)
@@ -XXX,XX +XXX,XX @@ static void bg_migration_iteration_finish(MigrationState *s)
*/
ram_write_tracking_stop();

- qemu_mutex_lock_iothread();
+ bql_lock();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
migration_calculate_complete(s);
@@ -XXX,XX +XXX,XX @@ static void bg_migration_iteration_finish(MigrationState *s)
}

migrate_fd_cleanup_schedule(s);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

/*
@@ -XXX,XX +XXX,XX @@ static void *migration_thread(void *opaque)
object_ref(OBJECT(s));
update_iteration_initial_status(s);

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_savevm_state_header(s->to_dst_file);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

/*
* If we opened the return path, we need to make sure dst has it
@@ -XXX,XX +XXX,XX @@ static void *migration_thread(void *opaque)
qemu_savevm_send_colo_enable(s->to_dst_file);
}

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_savevm_state_setup(s->to_dst_file);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
MIGRATION_STATUS_ACTIVE);
@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
ram_write_tracking_prepare();
#endif

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_savevm_state_header(s->to_dst_file);
qemu_savevm_state_setup(s->to_dst_file);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
MIGRATION_STATUS_ACTIVE);
@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
trace_migration_thread_setup_complete();
migration_downtime_start(s);

- qemu_mutex_lock_iothread();
+ bql_lock();

s->vm_old_state = runstate_get();

@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
qemu_bh_schedule(s->vm_start_bh);

- qemu_mutex_unlock_iothread();
+ bql_unlock();

while (migration_is_active(s)) {
MigIterateState iter_state = bg_migration_iteration_run(s);
@@ -XXX,XX +XXX,XX @@ fail:
if (early_fail) {
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

bg_migration_iteration_finish(s);
diff --git a/migration/ram.c b/migration/ram.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -XXX,XX +XXX,XX @@ static int ram_save_setup(QEMUFile *f, void *opaque)
migration_ops = g_malloc0(sizeof(MigrationOps));
migration_ops->ram_save_target_page = ram_save_target_page_legacy;

- qemu_mutex_unlock_iothread();
+ bql_unlock();
ret = multifd_send_sync_main(f);
- qemu_mutex_lock_iothread();
+ bql_lock();
if (ret < 0) {
return ret;
}
@@ -XXX,XX +XXX,XX @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
- qemu_mutex_lock_iothread();
+ bql_lock();
WITH_RCU_READ_LOCK_GUARD() {
migration_bitmap_sync_precopy(rs, false);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
}

@@ -XXX,XX +XXX,XX @@ void colo_incoming_start_dirty_log(void)
{
RAMBlock *block = NULL;
/* For memory_global_dirty_log_start below. */
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_mutex_lock_ramlist();

memory_global_dirty_log_sync(false);
@@ -XXX,XX +XXX,XX @@ void colo_incoming_start_dirty_log(void)
}
ram_state->migration_dirty_pages = 0;
qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

/* It is need to hold the global lock to call this helper */
diff --git a/replay/replay-internal.c b/replay/replay-internal.c
index XXXXXXX..XXXXXXX 100644
--- a/replay/replay-internal.c
+++ b/replay/replay-internal.c
@@ -XXX,XX +XXX,XX @@ void replay_mutex_lock(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
unsigned long id;
- g_assert(!qemu_mutex_iothread_locked());
+ g_assert(!bql_locked());
g_assert(!replay_mutex_locked());
qemu_mutex_lock(&lock);
id = mutex_tail++;
diff --git a/semihosting/console.c b/semihosting/console.c
index XXXXXXX..XXXXXXX 100644
--- a/semihosting/console.c
+++ b/semihosting/console.c
@@ -XXX,XX +XXX,XX @@ static SemihostingConsole console;
static int console_can_read(void *opaque)
{
SemihostingConsole *c = opaque;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
return (int)fifo8_num_free(&c->fifo);
}

@@ -XXX,XX +XXX,XX @@ static void console_wake_up(gpointer data, gpointer user_data)
static void console_read(void *opaque, const uint8_t *buf, int size)
{
SemihostingConsole *c = opaque;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
while (size-- && !fifo8_is_full(&c->fifo)) {
fifo8_push(&c->fifo, *buf++);
}
@@ -XXX,XX +XXX,XX @@ bool qemu_semihosting_console_ready(void)
{
SemihostingConsole *c = &console;

- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
return !fifo8_is_empty(&c->fifo);
}

@@ -XXX,XX +XXX,XX @@ void qemu_semihosting_console_block_until_ready(CPUState *cs)
{
SemihostingConsole *c = &console;

- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());

/* Block if the fifo is completely empty. */
if (fifo8_is_empty(&c->fifo)) {
diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/stubs/iothread-lock.c
+++ b/stubs/iothread-lock.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"

-bool qemu_mutex_iothread_locked(void)
+bool bql_locked(void)
{
return false;
}

-void qemu_mutex_lock_iothread_impl(const char *file, int line)
+void bql_lock_impl(const char *file, int line)
{
}

-void qemu_mutex_unlock_iothread(void)
+void bql_unlock(void)
{
}
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/system/cpu-throttle.c
+++ b/system/cpu-throttle.c
@@ -XXX,XX +XXX,XX @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
qemu_cond_timedwait_iothread(cpu->halt_cond,
sleeptime_ns / SCALE_MS);
} else {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
g_usleep(sleeptime_ns / SCALE_US);
- qemu_mutex_lock_iothread();
+ bql_lock();
}
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
diff --git a/system/cpus.c b/system/cpus.c
index XXXXXXX..XXXXXXX 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -XXX,XX +XXX,XX @@

#endif /* CONFIG_LINUX */

-static QemuMutex qemu_global_mutex;
+/* The Big QEMU Lock (BQL) */
+static QemuMutex bql;

/*
* The chosen accelerator is supposed to register this.
@@ -XXX,XX +XXX,XX @@ void qemu_init_cpu_loop(void)
qemu_init_sigbus();
qemu_cond_init(&qemu_cpu_cond);
qemu_cond_init(&qemu_pause_cond);
- qemu_mutex_init(&qemu_global_mutex);
+ qemu_mutex_init(&bql);

qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
- do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
+ do_run_on_cpu(cpu, func, data, &bql);
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
@@ -XXX,XX +XXX,XX @@ void qemu_wait_io_event(CPUState *cpu)
slept = true;
qemu_plugin_vcpu_idle_cb(cpu);
}
- qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(cpu->halt_cond, &bql);
}
if (slept) {
qemu_plugin_vcpu_resume_cb(cpu);
@@ -XXX,XX +XXX,XX @@ bool qemu_in_vcpu_thread(void)
return current_cpu && qemu_cpu_is_self(current_cpu);
}

-QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)
+QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked)

-bool qemu_mutex_iothread_locked(void)
+bool bql_locked(void)
{
- return get_iothread_locked();
+ return get_bql_locked();
}

bool qemu_in_main_thread(void)
{
- return qemu_mutex_iothread_locked();
+ return bql_locked();
}

/*
* The BQL is taken from so many places that it is worth profiling the
* callers directly, instead of funneling them all through a single function.
*/
-void qemu_mutex_lock_iothread_impl(const char *file, int line)
+void bql_lock_impl(const char *file, int line)
{
- QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);
+ QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func);

- g_assert(!qemu_mutex_iothread_locked());
- bql_lock(&qemu_global_mutex, file, line);
- set_iothread_locked(true);
+ g_assert(!bql_locked());
+ bql_lock_fn(&bql, file, line);
+ set_bql_locked(true);
}

-void qemu_mutex_unlock_iothread(void)
+void bql_unlock(void)
{
- g_assert(qemu_mutex_iothread_locked());
- set_iothread_locked(false);
- qemu_mutex_unlock(&qemu_global_mutex);
+ g_assert(bql_locked());
+ set_bql_locked(false);
+ qemu_mutex_unlock(&bql);
}

void qemu_cond_wait_iothread(QemuCond *cond)
{
- qemu_cond_wait(cond, &qemu_global_mutex);
+ qemu_cond_wait(cond, &bql);
}

void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
- qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
+ qemu_cond_timedwait(cond, &bql, ms);
}

/* signal CPU creation */
@@ -XXX,XX +XXX,XX @@ void pause_all_vcpus(void)
replay_mutex_unlock();

while (!all_vcpus_paused()) {
- qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
+ qemu_cond_wait(&qemu_pause_cond, &bql);
CPU_FOREACH(cpu) {
qemu_cpu_kick(cpu);
}
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();
replay_mutex_lock();
- qemu_mutex_lock_iothread();
+ bql_lock();
}

void cpu_resume(CPUState *cpu)
@@ -XXX,XX +XXX,XX @@ void cpu_remove_sync(CPUState *cpu)
cpu->stop = true;
cpu->unplug = true;
qemu_cpu_kick(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
qemu_thread_join(cpu->thread);
- qemu_mutex_lock_iothread();
+ bql_lock();
}

void cpus_register_accel(const AccelOpsClass *ops)
@@ -XXX,XX +XXX,XX @@ void qemu_init_vcpu(CPUState *cpu)
cpus_accel->create_vcpu_thread(cpu);

while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ qemu_cond_wait(&qemu_cpu_cond, &bql);
}
}

diff --git a/system/dirtylimit.c b/system/dirtylimit.c
index XXXXXXX..XXXXXXX 100644
--- a/system/dirtylimit.c
+++ b/system/dirtylimit.c
@@ -XXX,XX +XXX,XX @@ void vcpu_dirty_rate_stat_stop(void)
{
qatomic_set(&vcpu_dirty_rate_stat->running, 0);
dirtylimit_state_unlock();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
qemu_thread_join(&vcpu_dirty_rate_stat->thread);
- qemu_mutex_lock_iothread();
+ bql_lock();
dirtylimit_state_lock();
}

diff --git a/system/memory.c b/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -XXX,XX +XXX,XX @@ void memory_region_transaction_commit(void)
AddressSpace *as;

assert(memory_region_transaction_depth);
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

--memory_region_transaction_depth;
if (!memory_region_transaction_depth) {
diff --git a/system/physmem.c b/system/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -XXX,XX +XXX,XX @@ bool prepare_mmio_access(MemoryRegion *mr)
{
bool release_lock = false;

- if (!qemu_mutex_iothread_locked()) {
- qemu_mutex_lock_iothread();
+ if (!bql_locked()) {
+ bql_lock();
release_lock = true;
}
if (mr->flush_coalesced_mmio) {
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
}

if (release_lock) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
release_lock = false;
}

@@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
}

if (release_lock) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
release_lock = false;
}

diff --git a/system/runstate.c b/system/runstate.c
index XXXXXXX..XXXXXXX 100644
--- a/system/runstate.c
+++ b/system/runstate.c
@@ -XXX,XX +XXX,XX @@ void qemu_init_subsystems(void)

qemu_init_cpu_list();
qemu_init_cpu_loop();
- qemu_mutex_lock_iothread();
+ bql_lock();

atexit(qemu_run_exit_notifiers);

diff --git a/system/watchpoint.c b/system/watchpoint.c
index XXXXXXX..XXXXXXX 100644
--- a/system/watchpoint.c
+++ b/system/watchpoint.c
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}

diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
g_free(info);

/* Finally set the power status */
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
target_cpu->power_state = PSCI_ON;
}

@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
ARMCPU *target_cpu;
struct CpuOnInfo *info;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
"\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
target_cpu_state->halted = 0;

/* Finally set the power status */
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
target_cpu->power_state = PSCI_ON;
}

@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_on_and_reset(uint64_t cpuid)
CPUState *target_cpu_state;
ARMCPU *target_cpu;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

/* Retrieve the cpu we are powering up */
target_cpu_state = arm_get_cpu_by_id(cpuid);
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
{
ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
target_cpu->power_state = PSCI_OFF;
target_cpu_state->halted = 1;
target_cpu_state->exception_index = EXCP_HLT;
@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_off(uint64_t cpuid)
CPUState *target_cpu_state;
ARMCPU *target_cpu;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

DPRINTF("cpu %" PRId64 "\n", cpuid);

@@ -XXX,XX +XXX,XX @@ int arm_reset_cpu(uint64_t cpuid)
CPUState *target_cpu_state;
ARMCPU *target_cpu;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

DPRINTF("cpu %" PRId64 "\n", cpuid);

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
* VFIQ are masked unless running at EL0 or EL1, and HCR
* can only be written at EL2.
*/
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
arm_cpu_update_virq(cpu);
arm_cpu_update_vfiq(cpu);
arm_cpu_update_vserr(cpu);
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
* BQL needs to be held for any modification of
* cs->interrupt_request.
*/
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());

arm_call_pre_el_change_hook(cpu);

diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
* sleeping.
*/
qatomic_set_mb(&cpu->thread_kicked, false);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
- qemu_mutex_lock_iothread();
+ bql_lock();
}

static void hvf_wfi(CPUState *cpu)
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)

flush_cpu_state(cpu);

- qemu_mutex_unlock_iothread();
+ bql_unlock();
assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));

/* handle VMEXIT */
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t ec = syn_get_ec(syndrome);

ret = 0;
- qemu_mutex_lock_iothread();
+ bql_lock();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
if (run->s.regs.device_irq_level != cpu->device_irq_level) {
switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

- qemu_mutex_lock_iothread();
+ bql_lock();

if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)

/* We also mark unknown levels as processed to not waste cycles */
cpu->device_irq_level = run->s.regs.device_irq_level;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

return MEMTXATTRS_UNSPECIFIED;
@@ -XXX,XX +XXX,XX @@ static bool kvm_arm_handle_debug(ARMCPU *cpu,
env->exception.syndrome = debug_exit->hsr;
env->exception.vaddress = debug_exit->far;
env->exception.target_el = 1;
- qemu_mutex_lock_iothread();
+ bql_lock();
arm_cpu_do_interrupt(cs);
- qemu_mutex_unlock_iothread();
+ bql_unlock();

return false;
}
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
- bool locked = qemu_mutex_iothread_locked();
+ bool locked = bql_locked();
if (!locked) {
- qemu_mutex_lock_iothread();
+ bql_lock();
}
if (ptw->out_be) {
cur_val = ldq_be_p(host);
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
}
}
if (!locked) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
#endif

diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}

- qemu_mutex_lock_iothread();
+ bql_lock();
arm_call_pre_el_change_hook(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();

if (!return_to_aa64) {
env->aarch64 = false;
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
*/
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);

- qemu_mutex_lock_iothread();
+ bql_lock();
arm_call_el_change_hook(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();

return;

diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
bool take_exception;

- /* Take the iothread lock as we are going to touch the NVIC */
- qemu_mutex_lock_iothread();
+ /* Take the BQL as we are going to touch the NVIC */
+ bql_lock();

/* Check the background context had access to the FPU */
if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
take_exception = !stacked_ok &&
armv7m_nvic_can_take_pending_exception(env->nvic);

- qemu_mutex_unlock_iothread();
+ bql_unlock();

if (take_exception) {
raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
uint32_t mask;

- qemu_mutex_lock_iothread();
+ bql_lock();
arm_call_pre_el_change_hook(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();

mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
@@ -XXX,XX +XXX,XX @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
env->regs[15] &= (env->thumb ? ~1 : ~3);
arm_rebuild_hflags(env);

- qemu_mutex_lock_iothread();
+ bql_lock();
arm_call_el_change_hook(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

/* Access to user mode registers from privileged modes. */
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
const ARMCPRegInfo *ri = rip;

if (ri->type & ARM_CP_IO) {
- qemu_mutex_lock_iothread();
+ bql_lock();
ri->writefn(env, ri, value);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
} else {
ri->writefn(env, ri, value);
}
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
uint32_t res;

if (ri->type & ARM_CP_IO) {
- qemu_mutex_lock_iothread();
+ bql_lock();
res = ri->readfn(env, ri);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
} else {
res = ri->readfn(env, ri);
}
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
const ARMCPRegInfo *ri = rip;

if (ri->type & ARM_CP_IO) {
- qemu_mutex_lock_iothread();
+ bql_lock();
ri->writefn(env, ri, value);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
} else {
ri->writefn(env, ri, value);
}
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
uint64_t res;

if (ri->type & ARM_CP_IO) {
- qemu_mutex_lock_iothread();
+ bql_lock();
res = ri->readfn(env, ri);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
} else {
res = ri->readfn(env, ri);
}
diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/psci.c
+++ b/target/arm/tcg/psci.c
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
}
target_cpu = ARM_CPU(target_cpu_state);

- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
ret = target_cpu->power_state;
break;
default:
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -XXX,XX +XXX,XX @@ void hppa_cpu_alarm_timer(void *opaque)
void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val)
{
env->cr[CR_EIRR] &= ~val;
- qemu_mutex_lock_iothread();
+ bql_lock();
eval_interrupt(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
{
env->cr[CR_EIEM] = val;
- qemu_mutex_lock_iothread();
+ bql_lock();
eval_interrupt(env_archcpu(env));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

void hppa_cpu_do_interrupt(CPUState *cs)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
}
vmx_update_tpr(cpu);

- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
- qemu_mutex_lock_iothread();
+ bql_lock();
return EXCP_HLT;
}

@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
rip = rreg(cpu->accel->fd, HV_X86_RIP);
env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);

- qemu_mutex_lock_iothread();
+ bql_lock();

update_apic_tpr(cpu);
current_cpu = cpu;
diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/kvm/hyperv.c
+++ b/target/i386/kvm/hyperv.c
@@ -XXX,XX +XXX,XX @@ void hyperv_x86_synic_update(X86CPU *cpu)

static void async_synic_update(CPUState *cs, run_on_cpu_data data)
{
- qemu_mutex_lock_iothread();
+ bql_lock();
hyperv_x86_synic_update(X86_CPU(cs));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
/* Inject NMI */
if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
DPRINTF("injected SMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
if (ret < 0) {
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}

if (!kvm_pic_in_kernel()) {
- qemu_mutex_lock_iothread();
+ bql_lock();
}

/* Force the VCPU out of its inner loop to process any INIT requests
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
DPRINTF("setting tpr\n");
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}

@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
/* We need to protect the apic state against concurrent accesses from
* different threads in case the userspace irqchip is used. */
if (!kvm_irqchip_in_kernel()) {
- qemu_mutex_lock_iothread();
+ bql_lock();
}
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
if (!kvm_irqchip_in_kernel()) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
return cpu_get_mem_attrs(env);
}
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = kvm_handle_halt(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
break;
case KVM_EXIT_SET_TPR:
ret = 0;
break;
case KVM_EXIT_TPR_ACCESS:
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = kvm_handle_tpr_access(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = kvm_handle_debug(cpu, &run->debug.arch);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
break;
case KVM_EXIT_HYPERV:
ret = kvm_hv_handle_exit(cpu, &run->hyperv);
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -XXX,XX +XXX,XX @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)

/* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */
if (!vi->evtchn_upcall_pending) {
- qemu_mutex_lock_iothread();
+ bql_lock();
/*
* Check again now we have the lock, because it may have been
* asserted in the interim. And we don't want to take the lock
@@ -XXX,XX +XXX,XX @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)
X86_CPU(cs)->env.xen_callback_asserted = false;
xen_evtchn_set_callback_level(0);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}

@@ -XXX,XX +XXX,XX @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu,

switch (hp.index) {
case HVM_PARAM_CALLBACK_IRQ:
- qemu_mutex_lock_iothread();
+ bql_lock();
err = xen_evtchn_set_callback_param(hp.value);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
xen_set_long_mode(exit->u.hcall.longmode);
break;
default:
@@ -XXX,XX +XXX,XX @@ int kvm_xen_soft_reset(void)
CPUState *cpu;
int err;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

trace_kvm_xen_soft_reset();

@@ -XXX,XX +XXX,XX @@ static int schedop_shutdown(CPUState *cs, uint64_t arg)
break;

case SHUTDOWN_soft_reset:
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = kvm_xen_soft_reset();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
break;

default:
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)

rcu_register_thread();

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
current_cpu = cpu;
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)

nvmm_destroy_vcpu(cpu);
cpu_thread_signal_destroyed(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
rcu_unregister_thread();
return NULL;
}
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_pre_run(CPUState *cpu)
uint8_t tpr;
int ret;

- qemu_mutex_lock_iothread();
+ bql_lock();

tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
if (tpr != qcpu->tpr) {
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_pre_run(CPUState *cpu)
}
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

/*
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
tpr = exit->exitstate.cr8;
if (qcpu->tpr != tpr) {
qcpu->tpr = tpr;
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}

@@ -XXX,XX +XXX,XX @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
CPUX86State *env = cpu_env(cpu);
int ret = 0;

- qemu_mutex_lock_iothread();
+ bql_lock();

if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
@@ -XXX,XX +XXX,XX @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
ret = 1;
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();

return ret;
}
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_loop(CPUState *cpu)
return 0;
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();
cpu_exec_start(cpu);

/*
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_loop(CPUState *cpu)
error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
exit->reason, exit->u.inv.hwcode);
nvmm_get_registers(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
ret = -1;
break;
}
} while (ret == 0);

cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();

qatomic_set(&cpu->exit_request, false);

diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/sysemu/fpu_helper.c
+++ b/target/i386/tcg/sysemu/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void x86_register_ferr_irq(qemu_irq irq)
void fpu_check_raise_ferr_irq(CPUX86State *env)
{
if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_irq_raise(ferr_irq);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return;
}
}
@@ -XXX,XX +XXX,XX @@ void cpu_set_ignne(void)
{
CPUX86State *env = &X86_CPU(first_cpu)->env;

- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());

env->hflags2 |= HF2_IGNNE_MASK;
/*
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/sysemu/misc_helper.c
+++ b/target/i386/tcg/sysemu/misc_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
break;
case 8:
if (!(env->hflags2 & HF2_VINTR_MASK)) {
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);

diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)

rcu_register_thread();

- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
current_cpu = cpu;
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)

whpx_destroy_vcpu(cpu);
cpu_thread_signal_destroyed(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
rcu_unregister_thread();
return NULL;
}
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -XXX,XX +XXX,XX @@ static int whpx_first_vcpu_starting(CPUState *cpu)
struct whpx_state *whpx = &whpx_global;
HRESULT hr;

- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());

if (!QTAILQ_EMPTY(&cpu->breakpoints) ||
(whpx->breakpoints.breakpoints &&
@@ -XXX,XX +XXX,XX @@ static int whpx_handle_halt(CPUState *cpu)
CPUX86State *env = cpu_env(cpu);
int ret = 0;

- qemu_mutex_lock_iothread();
+ bql_lock();
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
!(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
@@ -XXX,XX +XXX,XX @@ static int whpx_handle_halt(CPUState *cpu)
cpu->halted = true;
ret = 1;
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();

return ret;
}
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_pre_run(CPUState *cpu)
memset(&new_int, 0, sizeof(new_int));
memset(reg_values, 0, sizeof(reg_values));

- qemu_mutex_lock_iothread();
+ bql_lock();

/* Inject NMI */
if (!vcpu->interruption_pending &&
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_pre_run(CPUState *cpu)
reg_count += 1;
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();
vcpu->ready_for_pic_interrupt = false;

if (reg_count) {
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_post_run(CPUState *cpu)
uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
if (vcpu->tpr != tpr) {
vcpu->tpr = tpr;
- qemu_mutex_lock_iothread();
+ bql_lock();
cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}

vcpu->interruption_pending =
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE;
int ret;

- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());

if (whpx->running_cpus++ == 0) {
/* Insert breakpoints into memory, update exception exit bitmap. */
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
}
}

- qemu_mutex_unlock_iothread();
+ bql_unlock();

if (exclusive_step_mode != WHPX_STEP_NONE) {
start_exclusive();
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
error_report("WHPX: Unexpected VP exit code %d",
vcpu->exit_ctx.ExitReason);
whpx_get_registers(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
- qemu_mutex_unlock_iothread();
+ bql_unlock();
break;
}

@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
cpu_exec_end(cpu);
}

- qemu_mutex_lock_iothread();
+ bql_lock();
current_cpu = cpu;

if (--whpx->running_cpus == 0) {
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/csr_helper.c
+++ b/target/loongarch/tcg/csr_helper.c
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val)
    int64_t old_v = 0;

    if (val & 0x1) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
        loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    return old_v;
}
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
    int r;
    struct kvm_mips_interrupt intr;

-    qemu_mutex_lock_iothread();
+    bql_lock();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
        }
    }

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/sysemu/cp0_helper.c
+++ b/target/mips/tcg/sysemu/cp0_helper.c
@@ -XXX,XX +XXX,XX @@ static inline void mips_vpe_wake(MIPSCPU *c)
     * because there might be other conditions that state that c should
     * be sleeping.
     */
-    qemu_mutex_lock_iothread();
+    bql_lock();
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

static inline void mips_vpe_sleep(MIPSCPU *cpu)
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
        break;
    case TO_SPR(9, 0):  /* PICMR */
        env->picmr = rb;
-        qemu_mutex_lock_iothread();
+        bql_lock();
        if (env->picsr & env->picmr) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        break;
    case TO_SPR(9, 2):  /* PICSR */
        env->picsr &= ~rb;
        break;
    case TO_SPR(10, 0): /* TTMR */
        {
-            qemu_mutex_lock_iothread();
+            bql_lock();
            if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) {
                switch (rb & TTMR_M) {
                case TIMER_NONE:
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
                cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
            }
            cpu_openrisc_timer_update(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
        }
        break;

    case TO_SPR(10, 1): /* TTCR */
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_openrisc_count_set(cpu, rb);
        cpu_openrisc_timer_update(cpu);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        break;
    }
#endif
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
        return env->ttmr;

    case TO_SPR(10, 1): /* TTCR */
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_openrisc_count_update(cpu);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        return cpu_openrisc_count_get(cpu);
    }
#endif
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_msgsnd(target_ulong rb)
        return;
    }

-    qemu_mutex_lock_iothread();
+    bql_lock();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;
@@ -XXX,XX +XXX,XX @@ void helper_msgsnd(target_ulong rb)
            ppc_set_irq(cpu, irq, 1);
        }
    }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

/* Server Processor Control */
@@ -XXX,XX +XXX,XX @@ static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

-    qemu_mutex_lock_iothread();
+    bql_lock();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;
@@ -XXX,XX +XXX,XX @@ static void book3s_msgsnd_common(int pir, int irq)
            ppc_set_irq(cpu, irq, 1);
        }
    }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void helper_book3s_msgsnd(target_ulong rb)
@@ -XXX,XX +XXX,XX @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
    }

    /* Does iothread need to be locked for walking CPU list? */
-    qemu_mutex_lock_iothread();
+    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (ttir == thread_id) {
            ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
            return;
        }
    }
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
    CPUPPCState *env = &cpu->env;
    int ret;

-    qemu_mutex_lock_iothread();
+    bql_lock();

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
        break;
    }

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    return ret;
}
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dpdes(CPUPPCState *env)
        return dpdes;
    }

-    qemu_mutex_lock_iothread();
+    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        CPUPPCState *cenv = &ccpu->env;
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dpdes(CPUPPCState *env)
            dpdes |= (0x1 << thread_id);
        }
    }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

    return dpdes;
}
@@ -XXX,XX +XXX,XX @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
    }

    /* Does iothread need to be locked for walking CPU list? */
-    qemu_mutex_lock_iothread();
+    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
    }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}
#endif /* defined(TARGET_PPC64) */

diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/timebase_helper.c
+++ b/target/ppc/timebase_helper.c
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
    } else {
        int ret;

-        qemu_mutex_lock_iothread();
+        bql_lock();
        ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        if (unlikely(ret != 0)) {
            qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
                          (uint32_t)dcrn, (uint32_t)dcrn);
@@ -XXX,XX +XXX,XX @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    } else {
        int ret;
-        qemu_mutex_lock_iothread();
+        bql_lock();
        ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        if (unlikely(ret != 0)) {
            qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
                          (uint32_t)dcrn, (uint32_t)dcrn);
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

-    qemu_mutex_lock_iothread();
+    bql_lock();

    kvm_cpu_synchronize_state(cs);

@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/misc_helper.c
+++ b/target/s390x/tcg/misc_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(stck)(CPUS390XState *env)
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
-    qemu_mutex_lock_iothread();
+    bql_lock();
    int r = sclp_service_call(env_archcpu(env), r1, r2);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
@@ -XXX,XX +XXX,XX @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
    switch (num) {
    case 0x500:
        /* KVM hypercall */
-        qemu_mutex_lock_iothread();
+        bql_lock();
        r = s390_virtio_hypercall(env);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        break;
    case 0x44:
        /* yield */
@@ -XXX,XX +XXX,XX @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
        break;
    case 0x308:
        /* ipl */
-        qemu_mutex_lock_iothread();
+        bql_lock();
        handle_diag_308(env, r1, r3, GETPC());
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        r = 0;
        break;
    case 0x288:
@@ -XXX,XX +XXX,XX @@ static void update_ckc_timer(CPUS390XState *env)

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */
@@ -XXX,XX +XXX,XX @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

-    qemu_mutex_lock_iothread();
+    bql_lock();
    update_ckc_timer(env);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
        .low = tod_low,
    };

-    qemu_mutex_lock_iothread();
+    bql_lock();
    tdc->set(td, &tod, &error_abort);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    return 0;
}

@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
    int cc;

    /* TODO: needed to inject interrupts - push further down */
-    qemu_mutex_lock_iothread();
+    bql_lock();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

    return cc;
}
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_xsch(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_csch(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_hsch(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_rchp(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_rsch(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_sal(cpu, r1, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

-    qemu_mutex_lock_iothread();
+    bql_lock();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
        return 0;
    }

@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
    }

    g_free(io);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    return 1;
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
-    qemu_mutex_lock_iothread();
+    bql_lock();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}
#endif

@@ -XXX,XX +XXX,XX @@ void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    clp_service_call(cpu, r2, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    pcilg_service_call(cpu, r1, r2, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    pcistg_service_call(cpu, r1, r2, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
@@ -XXX,XX +XXX,XX @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
@@ -XXX,XX +XXX,XX @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
    S390CPU *cpu = env_archcpu(env);
    int r;

-    qemu_mutex_lock_iothread();
+    bql_lock();
    r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
@@ -XXX,XX +XXX,XX @@ void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    rpcit_service_call(cpu, r1, r2, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
@@ -XXX,XX +XXX,XX @@ void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
@@ -XXX,XX +XXX,XX @@ void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
{
    S390CPU *cpu = env_archcpu(env);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}
#endif
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/int32_helper.c
+++ b/target/sparc/int32_helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_check_irqs(CPUSPARCState *env)
    CPUState *cs;

    /* We should be holding the BQL before we mess with IRQs */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

    if (env->pil_in && (env->interrupt_index == 0 ||
                        (env->interrupt_index & ~15) == TT_EXTINT)) {
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_check_irqs(CPUSPARCState *env)
                (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));

    /* We should be holding the BQL before we mess with IRQs */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

    /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
    if (env->ivec_status & 0x20) {
@@ -XXX,XX +XXX,XX @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value)
        env->softint = value;
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
            cpu_check_irqs(env);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
        }
#endif
        return true;
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/win_helper.c
+++ b/target/sparc/win_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
        cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
    } else {
        /* cpu_put_psr may trigger interrupts, hence BQL */
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_put_psr(env, new_psr);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
}

@@ -XXX,XX +XXX,XX @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_check_irqs(env);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
#endif
}
@@ -XXX,XX +XXX,XX @@ void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_check_irqs(env);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
#endif
}
@@ -XXX,XX +XXX,XX @@ void helper_done(CPUSPARCState *env)

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_check_irqs(env);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
#endif
}
@@ -XXX,XX +XXX,XX @@ void helper_retry(CPUSPARCState *env)

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
        cpu_check_irqs(env);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
#endif
}
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
                     (intlevel << PS_INTLEVEL_SHIFT);

-    qemu_mutex_lock_iothread();
+    bql_lock();
    check_interrupts(env);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

    if (env->pending_irq_level) {
        cpu_loop_exit(cpu);
@@ -XXX,XX +XXX,XX @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)

void HELPER(check_interrupts)(CPUXtensaState *env)
{
-    qemu_mutex_lock_iothread();
+    bql_lock();
    check_interrupts(env);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
}

void HELPER(intset)(CPUXtensaState *env, uint32_t v)
diff --git a/ui/spice-core.c b/ui/spice-core.c
index XXXXXXX..XXXXXXX 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
     */
    bool need_lock = !qemu_thread_is_self(&me);
    if (need_lock) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
    }

    if (info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT) {
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
    }

    if (need_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }

    qapi_free_SpiceServerInfo(server);
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *qemu_get_current_aio_context(void)
    if (ctx) {
        return ctx;
    }
-    if (qemu_mutex_iothread_locked()) {
+    if (bql_locked()) {
        /* Possibly in a vCPU thread. */
        return qemu_get_aio_context();
    }
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)

    glib_pollfds_fill(&timeout);

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    replay_mutex_unlock();

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    replay_mutex_lock();
-    qemu_mutex_lock_iothread();
+    bql_lock();

    glib_pollfds_poll();

@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)

    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

-    qemu_mutex_unlock_iothread();
+    bql_unlock();

    replay_mutex_unlock();

@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)

    replay_mutex_lock();

-    qemu_mutex_lock_iothread();
+    bql_lock();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
diff --git a/util/qsp.c b/util/qsp.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -XXX,XX +XXX,XX @@ static const char * const qsp_typenames[] = {
    [QSP_CONDVAR] = "condvar",
};

-QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl;
+QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
@@ -XXX,XX +XXX,XX @@ void qsp_enable(void)
{
    qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
    qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
-    qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
+    qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock);
    qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
    qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
    qatomic_set(&qemu_cond_wait_func, qsp_cond_wait);
@@ -XXX,XX +XXX,XX @@ void qsp_disable(void)
{
    qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
    qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
-    qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
+    qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl);
    qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
    qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
    qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
diff --git a/util/rcu.c b/util/rcu.c
index XXXXXXX..XXXXXXX 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -XXX,XX +XXX,XX @@ static void *call_rcu_thread(void *opaque)

        qatomic_sub(&rcu_call_count, n);
        synchronize_rcu();
-        qemu_mutex_lock_iothread();
+        bql_lock();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
-                qemu_mutex_lock_iothread();
+                bql_lock();
            }

            n--;
            node->func(node);
        }
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    abort();
}
@@ -XXX,XX +XXX,XX @@ static void drain_rcu_callback(struct rcu_head *node)
void drain_call_rcu(void)
{
    struct rcu_drain rcu_drain;
-    bool locked = qemu_mutex_iothread_locked();
+    bool locked = bql_locked();

    memset(&rcu_drain, 0, sizeof(struct rcu_drain));
    qemu_event_init(&rcu_drain.drain_complete_event, false);

    if (locked) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }


@@ -XXX,XX +XXX,XX @@ void drain_call_rcu(void)
    qatomic_dec(&in_drain_call_rcu);

    if (locked) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
    }

}
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
index XXXXXXX..XXXXXXX 100644
--- a/audio/coreaudio.m
+++ b/audio/coreaudio.m
@@ -XXX,XX +XXX,XX @@ static OSStatus handle_voice_change(
{
    coreaudioVoiceOut *core = in_client_data;

-    qemu_mutex_lock_iothread();
+    bql_lock();

    if (core->outputDeviceID) {
        fini_out_device(core);
@@ -XXX,XX +XXX,XX @@ static OSStatus handle_voice_change(
        update_device_playback_state(core);
    }

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    return 0;
}
diff --git a/memory_ldst.c.inc b/memory_ldst.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/memory_ldst.c.inc
+++ b/memory_ldst.c.inc
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
    return val;
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
    return val;
@@ -XXX,XX +XXX,XX @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
    return val;
@@ -XXX,XX +XXX,XX @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
    return val;
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
}
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
}
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
}
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
}
@@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
        *result = r;
    }
    if (release_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    RCU_READ_UNLOCK();
}
diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/README.md
+++ b/target/i386/hvf/README.md
@@ -XXX,XX +XXX,XX @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk

1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.
2. Removal of `apic_page` and hyperv-related functionality.
-3. More relaxed use of `qemu_mutex_lock_iothread`.
+3. More relaxed use of `bql_lock`.
diff --git a/ui/cocoa.m b/ui/cocoa.m
index XXXXXXX..XXXXXXX 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
typedef void (^CodeBlock)(void);
typedef bool (^BoolCodeBlock)(void);

-static void with_iothread_lock(CodeBlock block)
+static void with_bql(CodeBlock block)
{
-    bool locked = qemu_mutex_iothread_locked();
+    bool locked = bql_locked();
    if (!locked) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
    }
    block();
    if (!locked) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
}

-static bool bool_with_iothread_lock(BoolCodeBlock block)
+static bool bool_with_bql(BoolCodeBlock block)
{
-    bool locked = qemu_mutex_iothread_locked();
+    bool locked = bql_locked();
    bool val;

    if (!locked) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
    }
    val = block();
    if (!locked) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
    }
    return val;
}
@@ -XXX,XX +XXX,XX @@ - (void) updateUIInfo
        return;
    }

-    with_iothread_lock(^{
+    with_bql(^{
        [self updateUIInfoLocked];
    });
}
@@ -XXX,XX +XXX,XX @@ - (void) handleMonitorInput:(NSEvent *)event

- (bool) handleEvent:(NSEvent *)event
{
-    return bool_with_iothread_lock(^{
+    return bool_with_bql(^{
        return [self handleEventLocked:event];
    });
}
@@ -XXX,XX +XXX,XX @@ - (QEMUScreen) gscreen {return screen;}
*/
- (void) raiseAllKeys
{
-    with_iothread_lock(^{
+    with_bql(^{
        qkbd_state_lift_all_keys(kbd);
    });
}
@@ -XXX,XX +XXX,XX @@ - (void)applicationWillTerminate:(NSNotification *)aNotification
{
    COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n");

-    with_iothread_lock(^{
+    with_bql(^{
        shutdown_action = SHUTDOWN_ACTION_POWEROFF;
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI);
    });
@@ -XXX,XX +XXX,XX @@ - (void)displayConsole:(id)sender
/* Pause the guest */
- (void)pauseQEMU:(id)sender
{
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_stop(NULL);
    });
    [sender setEnabled: NO];
@@ -XXX,XX +XXX,XX @@ - (void)pauseQEMU:(id)sender
/* Resume running the guest operating system */
- (void)resumeQEMU:(id) sender
{
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_cont(NULL);
    });
    [sender setEnabled: NO];
@@ -XXX,XX +XXX,XX @@ - (void)removePause
/* Restarts QEMU */
- (void)restartQEMU:(id)sender
{
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_system_reset(NULL);
    });
}
@@ -XXX,XX +XXX,XX @@ - (void)restartQEMU:(id)sender
/* Powers down QEMU */
- (void)powerDownQEMU:(id)sender
{
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_system_powerdown(NULL);
    });
}
@@ -XXX,XX +XXX,XX @@ - (void)ejectDeviceMedia:(id)sender
    }

    __block Error *err = NULL;
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding],
                  NULL, false, false, &err);
    });
@@ -XXX,XX +XXX,XX @@ - (void)changeDeviceMedia:(id)sender
    }

    __block Error *err = NULL;
-    with_iothread_lock(^{
+    with_bql(^{
        qmp_blockdev_change_medium([drive cStringUsingEncoding:
                                              NSASCIIStringEncoding],
                                   NULL,
@@ -XXX,XX +XXX,XX @@ - (void)adjustSpeed:(id)sender
    // get the throttle percentage
    throttle_pct = [sender tag];

-    with_iothread_lock(^{
+    with_bql(^{
        cpu_throttle_set(throttle_pct);
    });
    COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%');
@@ -XXX,XX +XXX,XX @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t
        return;
    }

-    with_iothread_lock(^{
+    with_bql(^{
        QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo);
        qemu_event_reset(&cbevent);
        qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT);
@@ -XXX,XX +XXX,XX @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t
        while (info == cbinfo &&
               info->types[QEMU_CLIPBOARD_TYPE_TEXT].available &&
               info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
            qemu_event_wait(&cbevent);
-            qemu_mutex_lock_iothread();
+            bql_lock();
        }

        if (info == cbinfo) {
@@ -XXX,XX +XXX,XX @@ static void cocoa_clipboard_request(QemuClipboardInfo *info,
    int status;

    COCOA_DEBUG("Second thread: calling qemu_default_main()\n");
-    qemu_mutex_lock_iothread();
+    bql_lock();
    status = qemu_default_main();
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n");
    [cbowner release];
    exit(status);
@@ -XXX,XX +XXX,XX @@ static int cocoa_main(void)

    COCOA_DEBUG("Entered %s()\n", __func__);

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
    qemu_thread_create(&thread, "qemu_main", call_qemu_main,
                       NULL, QEMU_THREAD_DETACHED);
--
2.43.0
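The rename above is purely mechanical: bql_lock(), bql_unlock() and bql_locked() are drop-in replacements for qemu_mutex_lock_iothread(), qemu_mutex_unlock_iothread() and qemu_mutex_iothread_locked(), with unchanged semantics. A minimal sketch of the conditional-locking pattern that recurs throughout the patch (the helper function itself is hypothetical, not part of the series):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"

    /* Hypothetical helper: run fn() under the BQL from a context that
     * may or may not already hold it. */
    static void run_under_bql(void (*fn)(void *), void *opaque)
    {
        bool locked = bql_locked();   /* was qemu_mutex_iothread_locked() */

        if (!locked) {
            bql_lock();               /* was qemu_mutex_lock_iothread() */
        }
        fn(opaque);
        if (!locked) {
            bql_unlock();             /* was qemu_mutex_unlock_iothread() */
        }
    }

This is the same shape as the with_bql() block wrapper in ui/cocoa.m above.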
The name "iothread" is overloaded. Use the term Big QEMU Lock (BQL)
instead; it is already widely used and unambiguous.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/main-loop.h  | 19 +++++++++----------
 hw/i386/kvm/xen_evtchn.c  | 14 +++++++-------
 hw/i386/kvm/xen_gnttab.c  |  2 +-
 hw/mips/mips_int.c        |  2 +-
 hw/ppc/ppc.c              |  2 +-
 target/i386/kvm/xen-emu.c |  2 +-
 target/ppc/excp_helper.c  |  2 +-
 target/ppc/helper_regs.c  |  2 +-
 target/riscv/cpu_helper.c |  4 ++--
 9 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -XXX,XX +XXX,XX @@ void bql_lock_impl(const char *file, int line);
void bql_unlock(void);

/**
- * QEMU_IOTHREAD_LOCK_GUARD
+ * BQL_LOCK_GUARD
 *
 * Wrap a block of code in a conditional bql_{lock,unlock}.
 */
-typedef struct IOThreadLockAuto IOThreadLockAuto;
+typedef struct BQLLockAuto BQLLockAuto;

-static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file,
-                                                        int line)
+static inline BQLLockAuto *bql_auto_lock(const char *file, int line)
{
    if (bql_locked()) {
        return NULL;
    }
    bql_lock_impl(file, line);
    /* Anything non-NULL causes the cleanup function to be called */
-    return (IOThreadLockAuto *)(uintptr_t)1;
+    return (BQLLockAuto *)(uintptr_t)1;
}

-static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l)
+static inline void bql_auto_unlock(BQLLockAuto *l)
{
    bql_unlock();
}

-G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock)
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)

-#define QEMU_IOTHREAD_LOCK_GUARD() \
-    g_autoptr(IOThreadLockAuto) _iothread_lock_auto __attribute__((unused)) \
-        = qemu_iothread_auto_lock(__FILE__, __LINE__)
+#define BQL_LOCK_GUARD() \
+    g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \
+        = bql_auto_lock(__FILE__, __LINE__)

/*
 * qemu_cond_wait_iothread: Wait on condition for the main loop mutex
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_reset_op(struct evtchn_reset *reset)
        return -ESRCH;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    return xen_evtchn_soft_reset();
}

@@ -XXX,XX +XXX,XX @@ int xen_evtchn_close_op(struct evtchn_close *close)
        return -EINVAL;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    qemu_mutex_lock(&s->port_lock);

    ret = close_port(s, close->port, &flush_kvm_routes);
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
        return -EINVAL;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    if (s->pirq[pirq->pirq].port) {
        return -EBUSY;
@@ -XXX,XX +XXX,XX @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map)
        return -ENOTSUP;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->port_lock);

    if (map->domid != DOMID_SELF && map->domid != xen_domid) {
@@ -XXX,XX +XXX,XX @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
        return -EINVAL;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    qemu_mutex_lock(&s->port_lock);

    if (!pirq_inuse(s, pirq)) {
@@ -XXX,XX +XXX,XX @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
        return -ENOTSUP;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->port_lock);

    if (!pirq_inuse(s, pirq)) {
@@ -XXX,XX +XXX,XX @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
        return -ENOTSUP;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->port_lock);

    if (!pirq_inuse(s, pirq)) {
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/kvm/xen_gnttab.c
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -XXX,XX +XXX,XX @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
        return -EINVAL;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->gnt_lock);

    xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c
@@ -XXX,XX +XXX,XX @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
        return;
    }

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    if (level) {
        env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -XXX,XX +XXX,XX @@ void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
    unsigned int old_pending;

    /* We may already have the BQL if coming from the reset path */
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    old_pending = env->pending_interrupts;

178
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/i386/kvm/xen-emu.c
181
+++ b/target/i386/kvm/xen-emu.c
182
@@ -XXX,XX +XXX,XX @@ static int xen_set_shared_info(uint64_t gfn)
183
uint64_t gpa = gfn << TARGET_PAGE_BITS;
184
int i, err;
185
186
- QEMU_IOTHREAD_LOCK_GUARD();
187
+ BQL_LOCK_GUARD();
188
189
/*
190
* The xen_overlay device tells KVM about it too, since it had to
191
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -XXX,XX +XXX,XX @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
void ppc_maybe_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    if (ppc_next_unmasked_interrupt(env)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -XXX,XX +XXX,XX @@ void cpu_interrupt_exittb(CPUState *cs)
     * unless running with TCG.
     */
    if (tcg_enabled()) {
-        QEMU_IOTHREAD_LOCK_GUARD();
+        BQL_LOCK_GUARD();
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
    }
}
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_interrupt(CPURISCVState *env)
    uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
    CPUState *cs = env_cpu(env);

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    if (env->virt_enabled) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
    /* No need to update mip for VSTIP */
    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;

-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

--
2.43.0
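BQL_LOCK_GUARD() keeps the semantics of QEMU_IOTHREAD_LOCK_GUARD(): it takes the BQL only when the caller does not already hold it, and the g_autoptr cleanup releases it when the enclosing scope exits. A minimal sketch of the resulting usage (the function below is hypothetical, not taken from the patch):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"

    /* Hypothetical example: early returns are safe because the guard's
     * cleanup runs whenever the scope is left. */
    static int example_op(int port)
    {
        BQL_LOCK_GUARD();       /* no-op if the BQL is already held */

        if (port < 0) {
            return -EINVAL;     /* BQL released here if the guard took it */
        }
        /* ... BQL-protected work ... */
        return 0;               /* ... and here */
    }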
The name "iothread" is overloaded. Use the term Big QEMU Lock (BQL)
instead; it is already widely used and unambiguous.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/main-loop.h          | 10 +++++-----
 accel/tcg/tcg-accel-ops-rr.c      |  4 ++--
 hw/display/virtio-gpu.c           |  2 +-
 hw/ppc/spapr_events.c             |  2 +-
 system/cpu-throttle.c             |  2 +-
 system/cpus.c                     |  4 ++--
 target/i386/nvmm/nvmm-accel-ops.c |  2 +-
 target/i386/whpx/whpx-accel-ops.c |  2 +-
 8 files changed, 14 insertions(+), 14 deletions(-)
19
diff --git a/docs/tools/virtiofsd.rst b/docs/tools/virtiofsd.rst
23
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
20
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
21
--- a/docs/tools/virtiofsd.rst
25
--- a/include/qemu/main-loop.h
22
+++ b/docs/tools/virtiofsd.rst
26
+++ b/include/qemu/main-loop.h
23
@@ -XXX,XX +XXX,XX @@ Options
27
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
24
label. Server will try to set that label on newly created file
28
= bql_auto_lock(__FILE__, __LINE__)
25
atomically wherever possible.
29
26
30
/*
27
+ * killpriv_v2|no_killpriv_v2 -
31
- * qemu_cond_wait_iothread: Wait on condition for the main loop mutex
28
+ Enable/disable ``FUSE_HANDLE_KILLPRIV_V2`` support. KILLPRIV_V2 is enabled
32
+ * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL)
29
+ by default as long as the client supports it. Enabling this option helps
33
*
30
+ with performance in write path.
34
- * This function atomically releases the main loop mutex and causes
31
+
35
+ * This function atomically releases the Big QEMU Lock (BQL) and causes
32
.. option:: --socket-path=PATH
36
* the calling thread to block on the condition.
33
37
*/
34
Listen on vhost-user UNIX domain socket at PATH.
38
-void qemu_cond_wait_iothread(QemuCond *cond);
35
diff --git a/tools/virtiofsd/helper.c b/tools/virtiofsd/helper.c
39
+void qemu_cond_wait_bql(QemuCond *cond);
40
41
/*
42
- * qemu_cond_timedwait_iothread: like the previous, but with timeout
43
+ * qemu_cond_timedwait_bql: like the previous, but with timeout
44
*/
45
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
46
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
47
48
/* internal interfaces */
49
50
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
36
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
37
--- a/tools/virtiofsd/helper.c
52
--- a/accel/tcg/tcg-accel-ops-rr.c
38
+++ b/tools/virtiofsd/helper.c
53
+++ b/accel/tcg/tcg-accel-ops-rr.c
39
@@ -XXX,XX +XXX,XX @@ void fuse_cmdline_help(void)
54
@@ -XXX,XX +XXX,XX @@ static void rr_wait_io_event(void)
40
" -o announce_submounts Announce sub-mount points to the guest\n"
55
41
" -o posix_acl/no_posix_acl Enable/Disable posix_acl. (default: disabled)\n"
56
while (all_cpu_threads_idle()) {
42
" -o security_label/no_security_label Enable/Disable security label. (default: disabled)\n"
57
rr_stop_kick_timer();
43
+ " -o killpriv_v2/no_killpriv_v2\n"
58
- qemu_cond_wait_iothread(first_cpu->halt_cond);
44
+ " Enable/Disable FUSE_HANDLE_KILLPRIV_V2.\n"
59
+ qemu_cond_wait_bql(first_cpu->halt_cond);
45
+ " (default: enabled as long as client supports it)\n"
60
}
46
);
61
62
rr_start_kick_timer();
63
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
64
65
/* wait for initial kick-off after machine start */
66
while (first_cpu->stopped) {
67
- qemu_cond_wait_iothread(first_cpu->halt_cond);
68
+ qemu_cond_wait_bql(first_cpu->halt_cond);
69
70
/* process any pending work */
71
CPU_FOREACH(cpu) {
72
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/hw/display/virtio-gpu.c
75
+++ b/hw/display/virtio-gpu.c
76
@@ -XXX,XX +XXX,XX @@ void virtio_gpu_reset(VirtIODevice *vdev)
77
g->reset_finished = false;
78
qemu_bh_schedule(g->reset_bh);
79
while (!g->reset_finished) {
80
- qemu_cond_wait_iothread(&g->reset_cond);
81
+ qemu_cond_wait_bql(&g->reset_cond);
82
}
83
} else {
84
virtio_gpu_reset_bh(g);
85
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/hw/ppc/spapr_events.c
88
+++ b/hw/ppc/spapr_events.c
89
@@ -XXX,XX +XXX,XX @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
90
}
91
return;
92
}
93
- qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond);
94
+ qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond);
95
if (spapr->fwnmi_machine_check_addr == -1) {
96
/*
97
* If the machine was reset while waiting for the interlock,
98
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/system/cpu-throttle.c
101
+++ b/system/cpu-throttle.c
102
@@ -XXX,XX +XXX,XX @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
103
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
104
while (sleeptime_ns > 0 && !cpu->stop) {
105
if (sleeptime_ns > SCALE_MS) {
106
- qemu_cond_timedwait_iothread(cpu->halt_cond,
107
+ qemu_cond_timedwait_bql(cpu->halt_cond,
108
sleeptime_ns / SCALE_MS);
109
} else {
110
bql_unlock();
111
diff --git a/system/cpus.c b/system/cpus.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/system/cpus.c
114
+++ b/system/cpus.c
115
@@ -XXX,XX +XXX,XX @@ void bql_unlock(void)
116
qemu_mutex_unlock(&bql);
47
}
117
}
48
118
119
-void qemu_cond_wait_iothread(QemuCond *cond)
120
+void qemu_cond_wait_bql(QemuCond *cond)
121
{
122
qemu_cond_wait(cond, &bql);
123
}
124
125
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
126
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms)
127
{
128
qemu_cond_timedwait(cond, &bql, ms);
129
}
130
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/target/i386/nvmm/nvmm-accel-ops.c
133
+++ b/target/i386/nvmm/nvmm-accel-ops.c
134
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
135
}
136
}
137
while (cpu_thread_is_idle(cpu)) {
138
- qemu_cond_wait_iothread(cpu->halt_cond);
139
+ qemu_cond_wait_bql(cpu->halt_cond);
140
}
141
qemu_wait_io_event_common(cpu);
142
} while (!cpu->unplug || cpu_can_run(cpu));
143
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/target/i386/whpx/whpx-accel-ops.c
146
+++ b/target/i386/whpx/whpx-accel-ops.c
147
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)
148
}
149
}
150
while (cpu_thread_is_idle(cpu)) {
151
- qemu_cond_wait_iothread(cpu->halt_cond);
152
+ qemu_cond_wait_bql(cpu->halt_cond);
153
}
154
qemu_wait_io_event_common(cpu);
155
} while (!cpu->unplug || cpu_can_run(cpu));
49
--
156
--
50
2.35.1
157
2.43.0
158
159
diff view generated by jsdifflib
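qemu_cond_wait_bql() keeps the usual condition-variable contract: the BQL is dropped atomically while the thread blocks and re-taken before the call returns, so callers still loop on their predicate. A hypothetical sketch (the flag and condvar below are illustrative, not from the patch):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "qemu/thread.h"

    /* Hypothetical state, assumed to be protected by the BQL. */
    static bool work_done;
    static QemuCond work_done_cond;

    /* Called with the BQL held. */
    static void wait_for_work(void)
    {
        while (!work_done) {
            /* Drops the BQL while blocked, re-takes it on wakeup. */
            qemu_cond_wait_bql(&work_done_cond);
        }
    }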
The term "iothread lock" is obsolete. The APIs use Big QEMU Lock (BQL)
in their names. Update the code comments to use "BQL" instead of
"iothread lock".

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Message-id: 20240102153529.486531-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 docs/devel/reset.rst             |  2 +-
 hw/display/qxl.h                 |  2 +-
 include/exec/cpu-common.h        |  2 +-
 include/exec/memory.h            |  4 ++--
 include/exec/ramblock.h          |  2 +-
 include/migration/register.h     |  8 ++++----
 target/arm/internals.h           |  4 ++--
 accel/tcg/cputlb.c               |  4 ++--
 accel/tcg/tcg-accel-ops-icount.c |  2 +-
 hw/remote/mpqemu-link.c          |  2 +-
 migration/block-dirty-bitmap.c   | 10 +++++-----
 migration/block.c                | 22 +++++++++++-----------
 migration/colo.c                 |  2 +-
 migration/migration.c            |  2 +-
 migration/ram.c                  |  4 ++--
 system/physmem.c                 |  6 +++---
 target/arm/helper.c              |  2 +-
 ui/spice-core.c                  |  2 +-
 util/rcu.c                       |  2 +-
 audio/coreaudio.m                |  4 ++--
 ui/cocoa.m                       |  6 +++---
 21 files changed, 47 insertions(+), 47 deletions(-)
37
diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst
38
index XXXXXXX..XXXXXXX 100644
39
--- a/docs/devel/reset.rst
40
+++ b/docs/devel/reset.rst
41
@@ -XXX,XX +XXX,XX @@ Triggering reset
42
43
This section documents the APIs which "users" of a resettable object should use
44
to control it. All resettable control functions must be called while holding
45
-the iothread lock.
46
+the BQL.
47
48
You can apply a reset to an object using ``resettable_assert_reset()``. You need
49
to call ``resettable_release_reset()`` to release the object from reset. To
50
diff --git a/hw/display/qxl.h b/hw/display/qxl.h
51
index XXXXXXX..XXXXXXX 100644
52
--- a/hw/display/qxl.h
53
+++ b/hw/display/qxl.h
54
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
55
*
56
* Use with care; by the time this function returns, the returned pointer is
57
* not protected by RCU anymore. If the caller is not within an RCU critical
58
- * section and does not hold the iothread lock, it must have other means of
59
+ * section and does not hold the BQL, it must have other means of
60
* protecting the pointer, such as a reference to the region that includes
61
* the incoming ram_addr_t.
62
*
63
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/include/exec/cpu-common.h
66
+++ b/include/exec/cpu-common.h
67
@@ -XXX,XX +XXX,XX @@ RAMBlock *qemu_ram_block_by_name(const char *name);
68
*
69
* By the time this function returns, the returned pointer is not protected
70
* by RCU anymore. If the caller is not within an RCU critical section and
71
- * does not hold the iothread lock, it must have other means of protecting the
72
+ * does not hold the BQL, it must have other means of protecting the
73
* pointer, such as a reference to the memory region that owns the RAMBlock.
74
*/
75
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
76
diff --git a/include/exec/memory.h b/include/exec/memory.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/include/exec/memory.h
79
+++ b/include/exec/memory.h
80
@@ -XXX,XX +XXX,XX @@ int memory_region_get_fd(MemoryRegion *mr);
81
*
82
* Use with care; by the time this function returns, the returned pointer is
83
* not protected by RCU anymore. If the caller is not within an RCU critical
84
- * section and does not hold the iothread lock, it must have other means of
85
+ * section and does not hold the BQL, it must have other means of
86
* protecting the pointer, such as a reference to the region that includes
87
* the incoming ram_addr_t.
88
*
89
@@ -XXX,XX +XXX,XX @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
90
*
91
* Use with care; by the time this function returns, the returned pointer is
92
* not protected by RCU anymore. If the caller is not within an RCU critical
93
- * section and does not hold the iothread lock, it must have other means of
94
+ * section and does not hold the BQL, it must have other means of
95
* protecting the pointer, such as a reference to the region that includes
96
* the incoming ram_addr_t.
97
*
98
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
99
index XXXXXXX..XXXXXXX 100644
100
--- a/include/exec/ramblock.h
101
+++ b/include/exec/ramblock.h
102
@@ -XXX,XX +XXX,XX @@ struct RAMBlock {
103
ram_addr_t max_length;
104
void (*resized)(const char*, uint64_t length, void *host);
105
uint32_t flags;
106
- /* Protected by iothread lock. */
107
+ /* Protected by the BQL. */
108
char idstr[256];
109
/* RCU-enabled, writes protected by the ramlist lock */
110
QLIST_ENTRY(RAMBlock) next;
111
diff --git a/include/migration/register.h b/include/migration/register.h
112
index XXXXXXX..XXXXXXX 100644
113
--- a/include/migration/register.h
114
+++ b/include/migration/register.h
115
@@ -XXX,XX +XXX,XX @@
116
#include "hw/vmstate-if.h"
117
118
typedef struct SaveVMHandlers {
119
- /* This runs inside the iothread lock. */
120
+ /* This runs inside the BQL. */
121
SaveStateHandler *save_state;
122
123
/*
124
@@ -XXX,XX +XXX,XX @@ typedef struct SaveVMHandlers {
125
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
126
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
127
128
- /* This runs both outside and inside the iothread lock. */
129
+ /* This runs both outside and inside the BQL. */
130
bool (*is_active)(void *opaque);
131
bool (*has_postcopy)(void *opaque);
132
133
@@ -XXX,XX +XXX,XX @@ typedef struct SaveVMHandlers {
134
*/
135
bool (*is_active_iterate)(void *opaque);
136
137
- /* This runs outside the iothread lock in the migration case, and
138
+ /* This runs outside the BQL in the migration case, and
139
* within the lock in the savevm case. The callback had better only
140
* use data that is local to the migration thread or protected
141
* by other locks.
142
*/
143
int (*save_live_iterate)(QEMUFile *f, void *opaque);
144
145
- /* This runs outside the iothread lock! */
146
+ /* This runs outside the BQL! */
147
/* Note for save_live_pending:
148
* must_precopy:
149
* - must be migrated in precopy or in stopped state
150
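To make the locking rules above concrete, a hedged sketch of a
save_live_iterate implementation; MyState, my_save_live_iterate and the
payload are illustrative, not from this patch. Since the BQL is not held in
the migration case, the handler only touches data guarded by its own lock:

    typedef struct {
        QemuMutex lock;      /* taken _inside_ the BQL when both are needed */
        uint64_t bytes_sent;
    } MyState;

    static int my_save_live_iterate(QEMUFile *f, void *opaque)
    {
        MyState *s = opaque;

        qemu_mutex_lock(&s->lock);
        qemu_put_be64(f, s->bytes_sent);   /* example payload */
        qemu_mutex_unlock(&s->lock);

        return 1;                          /* 1: no more data to send */
    }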
diff --git a/target/arm/internals.h b/target/arm/internals.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/target/arm/internals.h
153
+++ b/target/arm/internals.h
154
@@ -XXX,XX +XXX,XX @@ static inline const char *aarch32_mode_name(uint32_t psr)
155
*
156
* Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
157
* a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
158
- * Must be called with the iothread lock held.
159
+ * Must be called with the BQL held.
160
*/
161
void arm_cpu_update_virq(ARMCPU *cpu);
162
163
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_virq(ARMCPU *cpu);
164
*
165
* Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
166
* a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
167
- * Must be called with the iothread lock held.
168
+ * Must be called with the BQL held.
169
*/
170
void arm_cpu_update_vfiq(ARMCPU *cpu);
171
172
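For example, a caller reacting to a change on the virtual interrupt inputs
might look like this hedged sketch (the surrounding handler is hypothetical
and takes the BQL only because it does not already hold it):

    bql_lock();
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    bql_unlock();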
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/accel/tcg/cputlb.c
175
+++ b/accel/tcg/cputlb.c
176
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
177
* @size: number of bytes
178
* @mmu_idx: virtual address context
179
* @ra: return address into tcg generated code, or 0
180
- * Context: iothread lock held
181
+ * Context: BQL held
182
*
183
* Load @size bytes from @addr, which is memory-mapped i/o.
184
* The bytes are concatenated in big-endian order with @ret_be.
185
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
186
* @size: number of bytes
187
* @mmu_idx: virtual address context
188
* @ra: return address into tcg generated code, or 0
189
- * Context: iothread lock held
190
+ * Context: BQL held
191
*
192
* Store @size bytes at @addr, which is memory-mapped i/o.
193
* The bytes to store are extracted in little-endian order from @val_le;
194
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
195
index XXXXXXX..XXXXXXX 100644
196
--- a/accel/tcg/tcg-accel-ops-icount.c
197
+++ b/accel/tcg/tcg-accel-ops-icount.c
198
@@ -XXX,XX +XXX,XX @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
199
200
if (cpu->icount_budget == 0) {
201
/*
202
- * We're called without the iothread lock, so must take it while
203
+ * We're called without the BQL, so must take it while
204
* we're calling timer handlers.
205
*/
206
bql_lock();
207
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
208
index XXXXXXX..XXXXXXX 100644
209
--- a/hw/remote/mpqemu-link.c
210
+++ b/hw/remote/mpqemu-link.c
211
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
212
assert(qemu_in_coroutine() || !iothread);
213
214
/*
215
- * Skip unlocking/locking iothread lock when the IOThread is running
216
+ * Skip unlocking/locking the BQL when the IOThread is running
217
* in co-routine context. Co-routine context is asserted above
218
* for IOThread case.
219
* Also skip lock handling while in a co-routine in the main context.
220
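The pattern this comment describes, dropping the BQL around a blocking call
only when it is actually held and we are not in a coroutine, can be sketched
as follows; do_blocking_io() is a stand-in for the channel send/receive,
while bql_locked(), bql_unlock() and bql_lock() are this series' renamed
primitives:

    bool drop_bql = bql_locked() && !qemu_in_coroutine();

    if (drop_bql) {
        bql_unlock();       /* don't stall vCPUs while we block */
    }
    do_blocking_io();
    if (drop_bql) {
        bql_lock();
    }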
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
221
index XXXXXXX..XXXXXXX 100644
222
--- a/migration/block-dirty-bitmap.c
223
+++ b/migration/block-dirty-bitmap.c
224
@@ -XXX,XX +XXX,XX @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
225
g_free(buf);
226
}
227
228
-/* Called with iothread lock taken. */
229
+/* Called with the BQL taken. */
230
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
231
{
232
SaveBitmapState *dbms;
233
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
234
}
235
}
236
237
-/* Called with iothread lock taken. */
238
+/* Called with the BQL taken. */
239
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
240
const char *bs_name, GHashTable *alias_map)
241
{
242
@@ -XXX,XX +XXX,XX @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
243
return 0;
244
}
245
246
-/* Called with iothread lock taken. */
247
+/* Called with the BQL taken. */
248
static int init_dirty_bitmap_migration(DBMSaveState *s)
249
{
250
BlockDriverState *bs;
251
@@ -XXX,XX +XXX,XX @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
252
BlockBackend *blk;
253
GHashTable *alias_map = NULL;
254
255
- /* Runs in the migration thread, but holds the iothread lock */
256
+ /* Runs in the migration thread, but holds the BQL */
257
GLOBAL_STATE_CODE();
258
GRAPH_RDLOCK_GUARD_MAINLOOP();
259
260
@@ -XXX,XX +XXX,XX @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
261
return s->bulk_completed;
262
}
263
264
-/* Called with iothread lock taken. */
265
+/* Called with the BQL taken. */
266
267
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
268
{
269
diff --git a/migration/block.c b/migration/block.c
270
index XXXXXXX..XXXXXXX 100644
271
--- a/migration/block.c
272
+++ b/migration/block.c
273
@@ -XXX,XX +XXX,XX @@ typedef struct BlkMigState {
274
int prev_progress;
275
int bulk_completed;
276
277
- /* Lock must be taken _inside_ the iothread lock. */
278
+ /* Lock must be taken _inside_ the BQL. */
279
QemuMutex lock;
280
} BlkMigState;
281
282
@@ -XXX,XX +XXX,XX @@ static void blk_mig_unlock(void)
283
qemu_mutex_unlock(&block_mig_state.lock);
284
}
285
286
-/* Must run outside of the iothread lock during the bulk phase,
287
+/* Must run outside of the BQL during the bulk phase,
288
* or the VM will stall.
289
*/
290
291
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
292
return (bmds->cur_sector >= total_sectors);
293
}
294
295
-/* Called with iothread lock taken. */
296
+/* Called with the BQL taken. */
297
298
static int set_dirty_tracking(void)
299
{
300
@@ -XXX,XX +XXX,XX @@ fail:
301
return ret;
302
}
303
304
-/* Called with iothread lock taken. */
305
+/* Called with the BQL taken. */
306
307
static void unset_dirty_tracking(void)
308
{
309
@@ -XXX,XX +XXX,XX @@ static void blk_mig_reset_dirty_cursor(void)
310
}
311
}
312
313
-/* Called with iothread lock taken. */
314
+/* Called with the BQL taken. */
315
316
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
317
int is_async)
318
@@ -XXX,XX +XXX,XX @@ error:
319
return ret;
320
}
321
322
-/* Called with iothread lock taken.
323
+/* Called with the BQL taken.
324
*
325
* return value:
326
* 0: too much data for max_downtime
327
@@ -XXX,XX +XXX,XX @@ static int flush_blks(QEMUFile *f)
328
return ret;
329
}
330
331
-/* Called with iothread lock taken. */
332
+/* Called with the BQL taken. */
333
334
static int64_t get_remaining_dirty(void)
335
{
336
@@ -XXX,XX +XXX,XX @@ static int64_t get_remaining_dirty(void)
337
338
339
340
-/* Called with iothread lock taken. */
341
+/* Called with the BQL taken. */
342
static void block_migration_cleanup_bmds(void)
343
{
344
BlkMigDevState *bmds;
345
@@ -XXX,XX +XXX,XX @@ static void block_migration_cleanup_bmds(void)
346
}
347
}
348
349
-/* Called with iothread lock taken. */
350
+/* Called with the BQL taken. */
351
static void block_migration_cleanup(void *opaque)
352
{
353
BlkMigBlock *blk;
354
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
355
}
356
ret = 0;
357
} else {
358
- /* Always called with iothread lock taken for
359
+ /* Always called with the BQL taken for
360
* simplicity, block_save_complete also calls it.
361
*/
362
bql_lock();
363
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
364
return (delta_bytes > 0);
365
}
366
367
-/* Called with iothread lock taken. */
368
+/* Called with the BQL taken. */
369
370
static int block_save_complete(QEMUFile *f, void *opaque)
371
{
372
diff --git a/migration/colo.c b/migration/colo.c
373
index XXXXXXX..XXXXXXX 100644
374
--- a/migration/colo.c
375
+++ b/migration/colo.c
376
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
377
qemu_thread_join(&th);
378
bql_lock();
379
380
- /* We hold the global iothread lock, so it is safe here */
381
+ /* We hold the BQL, so it is safe here */
382
colo_release_ram_cache();
383
384
return 0;
385
diff --git a/migration/migration.c b/migration/migration.c
386
index XXXXXXX..XXXXXXX 100644
387
--- a/migration/migration.c
388
+++ b/migration/migration.c
389
@@ -XXX,XX +XXX,XX @@ fail:
390
391
/**
392
* migration_maybe_pause: Pause if required to by
393
- * migrate_pause_before_switchover called with the iothread locked
394
+ * migrate_pause_before_switchover. Called with the BQL held.
395
* Returns: 0 on success
396
*/
397
static int migration_maybe_pause(MigrationState *s,
398
diff --git a/migration/ram.c b/migration/ram.c
399
index XXXXXXX..XXXXXXX 100644
400
--- a/migration/ram.c
401
+++ b/migration/ram.c
402
@@ -XXX,XX +XXX,XX @@ static void ram_save_cleanup(void *opaque)
403
404
/* We don't use dirty log with background snapshots */
405
if (!migrate_background_snapshot()) {
406
- /* caller have hold iothread lock or is in a bh, so there is
407
+ /* the caller holds the BQL or is in a BH, so there is
408
* no writing race against the migration bitmap
409
*/
410
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
411
@@ -XXX,XX +XXX,XX @@ out:
412
*
413
* Returns zero to indicate success or negative on error
414
*
415
- * Called with iothread lock
416
+ * Called with the BQL held
417
*
418
* @f: QEMUFile where to send the data
419
* @opaque: RAMState pointer
420
diff --git a/system/physmem.c b/system/physmem.c
421
index XXXXXXX..XXXXXXX 100644
422
--- a/system/physmem.c
423
+++ b/system/physmem.c
424
@@ -XXX,XX +XXX,XX @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
425
abort();
426
427
found:
428
- /* It is safe to write mru_block outside the iothread lock. This
429
+ /* It is safe to write mru_block outside the BQL. This
430
* is what happens:
431
*
432
* mru_block = xxx
433
@@ -XXX,XX +XXX,XX @@ int qemu_ram_get_fd(RAMBlock *rb)
434
return rb->fd;
435
}
436
437
-/* Called with iothread lock held. */
438
+/* Called with the BQL held. */
439
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
440
{
441
RAMBlock *block;
442
@@ -XXX,XX +XXX,XX @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
443
}
444
}
445
446
-/* Called with iothread lock held. */
447
+/* Called with the BQL held. */
448
void qemu_ram_unset_idstr(RAMBlock *block)
449
{
450
/* FIXME: arch_init.c assumes that this is not called throughout
451
diff --git a/target/arm/helper.c b/target/arm/helper.c
452
index XXXXXXX..XXXXXXX 100644
453
--- a/target/arm/helper.c
454
+++ b/target/arm/helper.c
455
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
456
* Updates to VI and VF require us to update the status of
457
* virtual interrupts, which are the logical OR of these bits
458
* and the state of the input lines from the GIC. (This requires
459
- * that we have the iothread lock, which is done by marking the
460
+ * that we have the BQL, which is done by marking the
461
* reginfo structs as ARM_CP_IO.)
462
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
463
* possible for it to be taken immediately, because VIRQ and
464
diff --git a/ui/spice-core.c b/ui/spice-core.c
465
index XXXXXXX..XXXXXXX 100644
466
--- a/ui/spice-core.c
467
+++ b/ui/spice-core.c
468
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
469
* not do that. It isn't that easy to fix it in spice and even
470
* when it is fixed we still should cover the already released
471
* spice versions. So detect that we've been called from another
472
- * thread and grab the iothread lock if so before calling qemu
473
+ * thread and grab the BQL if so before calling qemu
474
* functions.
475
*/
476
bool need_lock = !qemu_thread_is_self(&me);
477
diff --git a/util/rcu.c b/util/rcu.c
478
index XXXXXXX..XXXXXXX 100644
479
--- a/util/rcu.c
480
+++ b/util/rcu.c
481
@@ -XXX,XX +XXX,XX @@ static void rcu_init_complete(void)
482
483
qemu_event_init(&rcu_call_ready_event, false);
484
485
- /* The caller is assumed to have iothread lock, so the call_rcu thread
486
+ /* The caller is assumed to hold the BQL, so the call_rcu thread
487
* must have been quiescent even after forking, just recreate it.
488
*/
489
qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
490
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
491
index XXXXXXX..XXXXXXX 100644
492
--- a/audio/coreaudio.m
493
+++ b/audio/coreaudio.m
494
@@ -XXX,XX +XXX,XX @@ static ret_type glue(coreaudio_, name)args_decl \
495
#undef COREAUDIO_WRAPPER_FUNC
496
497
/*
498
- * callback to feed audiooutput buffer. called without iothread lock.
499
+ * callback to feed the audio output buffer; called without the BQL.
500
* allowed to lock "buf_mutex", but disallowed to have any other locks.
501
*/
502
static OSStatus audioDeviceIOProc(
503
@@ -XXX,XX +XXX,XX @@ static void update_device_playback_state(coreaudioVoiceOut *core)
504
}
505
}
506
507
-/* called without iothread lock. */
508
+/* called without the BQL. */
509
static OSStatus handle_voice_change(
510
AudioObjectID in_object_id,
511
UInt32 in_number_addresses,
512
diff --git a/ui/cocoa.m b/ui/cocoa.m
513
index XXXXXXX..XXXXXXX 100644
514
--- a/ui/cocoa.m
515
+++ b/ui/cocoa.m
516
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
517
static QemuClipboardInfo *cbinfo;
518
static QemuEvent cbevent;
519
520
-// Utility functions to run specified code block with iothread lock held
521
+// Utility functions to run a specified code block with the BQL held
522
typedef void (^CodeBlock)(void);
523
typedef bool (^BoolCodeBlock)(void);
524
525
@@ -XXX,XX +XXX,XX @@ - (void) setContentDimensions
526
527
- (void) updateUIInfoLocked
528
{
529
- /* Must be called with the iothread lock, i.e. via updateUIInfo */
530
+ /* Must be called with the BQL held, i.e. via updateUIInfo */
531
NSSize frameSize;
532
QemuUIInfo info;
533
534
@@ -XXX,XX +XXX,XX @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
535
* Create the menu entries which depend on QEMU state (for consoles
536
* and removable devices). These make calls back into QEMU functions,
537
* which is OK because at this point we know that the second thread
538
- * holds the iothread lock and is synchronously waiting for us to
539
+ * holds the BQL and is synchronously waiting for us to
540
* finish.
541
*/
542
add_console_menu_entries();
543
--
544
2.43.0
545
546
1
From: Sakshi Kaushik <sakshikaushik717@gmail.com>
1
The term "QEMU global mutex" is identical to the more widely used Big
2
2
QEMU Lock ("BQL"). Update the code comments and documentation to use
3
Signed-off-by: Sakshi Kaushik <sakshikaushik717@gmail.com>
3
"BQL" instead of "QEMU global mutex".
4
Message-id: 20220406162410.8536-1-sakshikaushik717@gmail.com
4
5
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
[Name the iSCSI URI long option --iscsi-uri instead of --iscsi_uri for
6
Acked-by: Markus Armbruster <armbru@redhat.com>
7
consistency, fix --fd which was rejected due to an outdated
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
--socket-path check, and add missing entries[] terminator.
8
Reviewed-by: Paul Durrant <paul@xen.org>
9
--Stefan]
9
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
10
10
Reviewed-by: Cédric Le Goater <clg@kaod.org>
11
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
12
Message-id: 20240102153529.486531-6-stefanha@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
14
---
13
contrib/vhost-user-scsi/vhost-user-scsi.c | 77 +++++++++++++++--------
15
docs/devel/multi-thread-tcg.rst | 7 +++----
14
1 file changed, 52 insertions(+), 25 deletions(-)
16
docs/devel/qapi-code-gen.rst | 2 +-
15
17
docs/devel/replay.rst | 2 +-
16
diff --git a/contrib/vhost-user-scsi/vhost-user-scsi.c b/contrib/vhost-user-scsi/vhost-user-scsi.c
18
docs/devel/multiple-iothreads.txt | 14 +++++++-------
17
index XXXXXXX..XXXXXXX 100644
19
include/block/blockjob.h | 6 +++---
18
--- a/contrib/vhost-user-scsi/vhost-user-scsi.c
20
include/io/task.h | 2 +-
19
+++ b/contrib/vhost-user-scsi/vhost-user-scsi.c
21
include/qemu/coroutine-core.h | 2 +-
20
@@ -XXX,XX +XXX,XX @@ fail:
22
include/qemu/coroutine.h | 2 +-
21
23
hw/block/dataplane/virtio-blk.c | 8 ++++----
22
/** vhost-user-scsi **/
24
hw/block/virtio-blk.c | 2 +-
23
25
hw/scsi/virtio-scsi-dataplane.c | 6 +++---
24
+static int opt_fdnum = -1;
26
net/tap.c | 2 +-
25
+static char *opt_socket_path;
27
12 files changed, 27 insertions(+), 28 deletions(-)
26
+static gboolean opt_print_caps;
28
27
+static char *iscsi_uri;
29
diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst
28
+
30
index XXXXXXX..XXXXXXX 100644
29
+static GOptionEntry entries[] = {
31
--- a/docs/devel/multi-thread-tcg.rst
30
+ { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
32
+++ b/docs/devel/multi-thread-tcg.rst
31
+ "Print capabilities", NULL },
33
@@ -XXX,XX +XXX,XX @@ instruction. This could be a future optimisation.
32
+ { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
34
Emulated hardware state
33
+ "Use inherited fd socket", "FDNUM" },
35
-----------------------
34
+ { "iscsi-uri", 'i', 0, G_OPTION_ARG_FILENAME, &iscsi_uri,
36
35
+ "iSCSI URI to connect to", "FDNUM" },
37
-Currently thanks to KVM work any access to IO memory is automatically
36
+ { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
38
-protected by the global iothread mutex, also known as the BQL (Big
37
+ "Use UNIX socket path", "PATH" },
39
-QEMU Lock). Any IO region that doesn't use global mutex is expected to
38
+ { NULL, }
40
-do its own locking.
39
+};
41
+Currently, thanks to KVM work, any access to IO memory is automatically protected
40
+
42
+by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected
41
int main(int argc, char **argv)
43
+to do its own locking.
42
{
44
43
VusDev *vdev_scsi = NULL;
45
However IO memory isn't the only way emulated hardware state can be
44
- char *unix_fn = NULL;
46
modified. Some architectures have model specific registers that
45
- char *iscsi_uri = NULL;
47
diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst
46
- int lsock = -1, csock = -1, opt, err = EXIT_SUCCESS;
48
index XXXXXXX..XXXXXXX 100644
47
+ int lsock = -1, csock = -1, err = EXIT_SUCCESS;
49
--- a/docs/devel/qapi-code-gen.rst
48
50
+++ b/docs/devel/qapi-code-gen.rst
49
- while ((opt = getopt(argc, argv, "u:i:")) != -1) {
51
@@ -XXX,XX +XXX,XX @@ blocking the guest and other background operations.
50
- switch (opt) {
52
Coroutine safety can be hard to prove, similar to thread safety. Common
51
- case 'h':
53
pitfalls are:
52
- goto help;
54
53
- case 'u':
55
-- The global mutex isn't held across ``qemu_coroutine_yield()``, so
54
- unix_fn = g_strdup(optarg);
56
+- The BQL isn't held across ``qemu_coroutine_yield()``, so
55
- break;
57
operations that used to assume that they execute atomically may have
56
- case 'i':
58
to be more careful to protect against changes in the global state.
57
- iscsi_uri = g_strdup(optarg);
59
58
- break;
60
diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst
59
- default:
61
index XXXXXXX..XXXXXXX 100644
60
- goto help;
62
--- a/docs/devel/replay.rst
61
- }
63
+++ b/docs/devel/replay.rst
62
+ GError *error = NULL;
64
@@ -XXX,XX +XXX,XX @@ modes.
63
+ GOptionContext *context;
65
Reading and writing requests are created by CPU thread of QEMU. Later these
64
+
66
requests proceed to block layer which creates "bottom halves". Bottom
65
+ context = g_option_context_new(NULL);
67
halves consist of callback and its parameters. They are processed when
66
+ g_option_context_add_main_entries(context, entries, NULL);
68
-main loop locks the global mutex. These locks are not synchronized with
67
+ if (!g_option_context_parse(context, &argc, &argv, &error)) {
69
+main loop locks the BQL. These locks are not synchronized with
68
+ g_printerr("Option parsing failed: %s\n", error->message);
70
replaying process because main loop also processes the events that do not
69
+ exit(EXIT_FAILURE);
71
affect the virtual machine state (like user interaction with monitor).
70
+ }
72
71
+
73
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
72
+ if (opt_print_caps) {
74
index XXXXXXX..XXXXXXX 100644
73
+ g_print("{\n");
75
--- a/docs/devel/multiple-iothreads.txt
74
+ g_print(" \"type\": \"scsi\"\n");
76
+++ b/docs/devel/multiple-iothreads.txt
75
+ g_print("}\n");
77
@@ -XXX,XX +XXX,XX @@ the COPYING file in the top-level directory.
76
+ goto out;
78
79
80
This document explains the IOThread feature and how to write code that runs
81
-outside the QEMU global mutex.
82
+outside the BQL.
83
84
The main loop and IOThreads
85
---------------------------
86
@@ -XXX,XX +XXX,XX @@ scalability bottleneck on hosts with many CPUs. Work can be spread across
87
several IOThreads instead of just one main loop. When set up correctly this
88
can improve I/O latency and reduce jitter seen by the guest.
89
90
-The main loop is also deeply associated with the QEMU global mutex, which is a
91
-scalability bottleneck in itself. vCPU threads and the main loop use the QEMU
92
-global mutex to serialize execution of QEMU code. This mutex is necessary
93
-because a lot of QEMU's code historically was not thread-safe.
94
+The main loop is also deeply associated with the BQL, which is a
95
+scalability bottleneck in itself. vCPU threads and the main loop use the BQL
96
+to serialize execution of QEMU code. This mutex is necessary because a lot of
97
+QEMU's code historically was not thread-safe.
98
99
The fact that all I/O processing is done in a single main loop and that the
100
-QEMU global mutex is contended by all vCPU threads and the main loop explain
101
+BQL is contended by all vCPU threads and the main loop explains
102
why it is desirable to place work into IOThreads.
103
104
The experimental virtio-blk data-plane implementation has been benchmarked and
105
@@ -XXX,XX +XXX,XX @@ There are several old APIs that use the main loop AioContext:
106
107
Since they implicitly work on the main loop they cannot be used in code that
108
runs in an IOThread. They might cause a crash or deadlock if called from an
109
-IOThread since the QEMU global mutex is not held.
110
+IOThread since the BQL is not held.
111
112
Instead, use the AioContext functions directly (see include/block/aio.h):
113
* aio_set_fd_handler() - monitor a file descriptor
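As a concrete illustration of using an AioContext directly, a hedged sketch
that defers work to an IOThread; queue_request() and process_request() are
assumptions, while aio_bh_schedule_oneshot() (include/block/aio.h) and
iothread_get_aio_context() (include/sysemu/iothread.h) are real APIs:

    static void process_request(void *opaque)
    {
        /* Runs in the IOThread's AioContext; the BQL is not held here. */
    }

    static void queue_request(IOThread *iothread, void *req)
    {
        AioContext *ctx = iothread_get_aio_context(iothread);

        aio_bh_schedule_oneshot(ctx, process_request, req);
    }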
114
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
115
index XXXXXXX..XXXXXXX 100644
116
--- a/include/block/blockjob.h
117
+++ b/include/block/blockjob.h
118
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
119
120
/**
121
* Speed that was set with @block_job_set_speed.
122
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
123
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
124
*/
125
int64_t speed;
126
127
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
128
129
/**
130
* Block other operations when block job is running.
131
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
132
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
133
*/
134
Error *blocker;
135
136
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
137
138
/**
139
* BlockDriverStates that are involved in this block job.
140
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
141
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
142
*/
143
GSList *nodes;
144
} BlockJob;
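Fields documented like this are only touched from global-state code; a
hedged sketch of an accessor that asserts the convention (illustrative, not
part of this patch):

    static int64_t my_block_job_speed(BlockJob *job)
    {
        GLOBAL_STATE_CODE();   /* asserts we run under the BQL */
        return job->speed;
    }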
145
diff --git a/include/io/task.h b/include/io/task.h
146
index XXXXXXX..XXXXXXX 100644
147
--- a/include/io/task.h
148
+++ b/include/io/task.h
149
@@ -XXX,XX +XXX,XX @@ typedef void (*QIOTaskWorker)(QIOTask *task,
150
* lookups) to be easily run non-blocking. Reporting the
151
* results in the main thread context means that the caller
152
* typically does not need to be concerned about thread
153
- * safety wrt the QEMU global mutex.
154
+ * safety wrt the BQL.
155
*
156
* For example, the socket_listen() method will block the caller
157
* while DNS lookups take place if given a name, instead of IP
158
diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h
159
index XXXXXXX..XXXXXXX 100644
160
--- a/include/qemu/coroutine-core.h
161
+++ b/include/qemu/coroutine-core.h
162
@@ -XXX,XX +XXX,XX @@
163
* rather than callbacks, for operations that need to give up control while
164
* waiting for events to complete.
165
*
166
- * These functions are re-entrant and may be used outside the global mutex.
167
+ * These functions are re-entrant and may be used outside the BQL.
168
*
169
* Functions that execute in coroutine context cannot be called
170
* directly from normal functions. Use @coroutine_fn to mark such
171
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
172
index XXXXXXX..XXXXXXX 100644
173
--- a/include/qemu/coroutine.h
174
+++ b/include/qemu/coroutine.h
175
@@ -XXX,XX +XXX,XX @@
176
* rather than callbacks, for operations that need to give up control while
177
* waiting for events to complete.
178
*
179
- * These functions are re-entrant and may be used outside the global mutex.
180
+ * These functions are re-entrant and may be used outside the BQL.
181
*
182
* Functions that execute in coroutine context cannot be called
183
* directly from normal functions. Use @coroutine_fn to mark such
184
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
185
index XXXXXXX..XXXXXXX 100644
186
--- a/hw/block/dataplane/virtio-blk.c
187
+++ b/hw/block/dataplane/virtio-blk.c
188
@@ -XXX,XX +XXX,XX @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
77
}
189
}
78
- if (!unix_fn || !iscsi_uri) {
190
}
79
+
191
80
+ if (!iscsi_uri) {
192
-/* Context: QEMU global mutex held */
81
goto help;
193
+/* Context: BQL held */
194
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
195
VirtIOBlockDataPlane **dataplane,
196
Error **errp)
197
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
198
return true;
199
}
200
201
-/* Context: QEMU global mutex held */
202
+/* Context: BQL held */
203
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
204
{
205
VirtIOBlock *vblk;
206
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
207
g_free(s);
208
}
209
210
-/* Context: QEMU global mutex held */
211
+/* Context: BQL held */
212
int virtio_blk_data_plane_start(VirtIODevice *vdev)
213
{
214
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
215
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
216
virtio_queue_host_notifier_read(host_notifier);
217
}
218
219
-/* Context: QEMU global mutex held */
220
+/* Context: BQL held */
221
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
222
{
223
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
224
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/hw/block/virtio-blk.c
227
+++ b/hw/block/virtio-blk.c
228
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_resize(void *opaque)
229
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
230
231
/*
232
- * virtio_notify_config() needs to acquire the global mutex,
233
+ * virtio_notify_config() needs to acquire the BQL,
234
* so it can't be called from an iothread. Instead, schedule
235
* it to be run in the main context BH.
236
*/
237
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/hw/scsi/virtio-scsi-dataplane.c
240
+++ b/hw/scsi/virtio-scsi-dataplane.c
241
@@ -XXX,XX +XXX,XX @@
242
#include "scsi/constants.h"
243
#include "hw/virtio/virtio-bus.h"
244
245
-/* Context: QEMU global mutex held */
246
+/* Context: BQL held */
247
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
248
{
249
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
250
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
82
}
251
}
83
252
}
84
- lsock = unix_sock_new(unix_fn);
253
85
- if (lsock < 0) {
254
-/* Context: QEMU global mutex held */
86
- goto err;
255
+/* Context: BQL held */
87
+ if (opt_socket_path) {
256
int virtio_scsi_dataplane_start(VirtIODevice *vdev)
88
+ lsock = unix_sock_new(opt_socket_path);
257
{
89
+ if (lsock < 0) {
258
int i;
90
+ exit(EXIT_FAILURE);
259
@@ -XXX,XX +XXX,XX @@ fail_guest_notifiers:
91
+ }
260
return -ENOSYS;
92
+ } else if (opt_fdnum < 0) {
261
}
93
+ g_print("%s\n", g_option_context_get_help(context, true, NULL));
262
94
+ exit(EXIT_FAILURE);
263
-/* Context: QEMU global mutex held */
95
+ } else {
264
+/* Context: BQL held */
96
+ lsock = opt_fdnum;
265
void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
97
}
266
{
98
267
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
99
csock = accept(lsock, NULL, NULL);
268
diff --git a/net/tap.c b/net/tap.c
100
@@ -XXX,XX +XXX,XX @@ out:
269
index XXXXXXX..XXXXXXX 100644
101
if (vdev_scsi) {
270
--- a/net/tap.c
102
g_main_loop_unref(vdev_scsi->loop);
271
+++ b/net/tap.c
103
g_free(vdev_scsi);
272
@@ -XXX,XX +XXX,XX @@ static void tap_send(void *opaque)
104
- unlink(unix_fn);
273
105
+ unlink(opt_socket_path);
274
/*
106
}
275
* When the host keeps receiving more packets while tap_send() is
107
if (csock >= 0) {
276
- * running we can hog the QEMU global mutex. Limit the number of
108
close(csock);
277
+ * running we can hog the BQL. Limit the number of
109
@@ -XXX,XX +XXX,XX @@ out:
278
* packets that are processed per tap_send() callback to prevent
110
if (lsock >= 0) {
279
* stalling the guest.
111
close(lsock);
280
*/
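The budget pattern the tap_send() comment describes might look like this
hedged sketch; the constant and the two helpers are illustrative stand-ins,
not net/tap.c's actual code:

    #define PACKETS_PER_CALLBACK 50            /* illustrative budget */

    static void budget_limited_send(TAPState *s)
    {
        int budget = PACKETS_PER_CALLBACK;

        while (budget-- > 0) {
            ssize_t len = tap_read_one_packet(s);   /* stand-in */
            if (len <= 0) {
                break;                  /* no more queued packets */
            }
            deliver_to_guest(s, len);               /* stand-in */
        }
        /* anything left is picked up by the next callback */
    }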
112
}
113
- g_free(unix_fn);
114
+ g_free(opt_socket_path);
115
g_free(iscsi_uri);
116
117
return err;
118
@@ -XXX,XX +XXX,XX @@ err:
119
goto out;
120
121
help:
122
- fprintf(stderr, "Usage: %s [ -u unix_sock_path -i iscsi_uri ] | [ -h ]\n",
123
+ fprintf(stderr, "Usage: %s [ -s socket-path -i iscsi-uri -f fd -p print-capabilities ] | [ -h ]\n",
124
argv[0]);
125
- fprintf(stderr, " -u path to unix socket\n");
126
- fprintf(stderr, " -i iscsi uri for lun 0\n");
127
+ fprintf(stderr, " -s, --socket-path=SOCKET_PATH path to unix socket\n");
128
+ fprintf(stderr, " -i, --iscsi-uri=ISCSI_URI iscsi uri for lun 0\n");
129
+ fprintf(stderr, " -f, --fd=FILE_DESCRIPTOR file-descriptor\n");
130
+ fprintf(stderr, " -p, --print-capabilities=PRINT_CAPABILITIES denotes print-capabilities\n");
131
fprintf(stderr, " -h print help and quit\n");
132
133
goto err;
134
--
281
--
135
2.35.1
282
2.43.0
283
284