The following changes since commit 248b23735645f7cbb503d9be6f5bf825f2a603ab:

  Update version for v2.10.0-rc4 release (2017-08-24 17:34:26 +0100)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 3e4c705212abfe8c9882a00beb2d1466a8a53cec:

  qcow2: allocate cluster_cache/cluster_data on demand (2017-08-30 18:02:10 +0100)

----------------------------------------------------------------

----------------------------------------------------------------

Alberto Garcia (8):
      throttle: Fix wrong variable name in the header documentation
      throttle: Update the throttle_fix_bucket() documentation
      throttle: Make throttle_is_valid() a bit less verbose
      throttle: Remove throttle_fix_bucket() / throttle_unfix_bucket()
      throttle: Make LeakyBucket.avg and LeakyBucket.max integer types
      throttle: Make burst_length 64bit and add range checks
      throttle: Test the valid range of config values
      misc: Remove unused Error variables

Dan Aloni (1):
      nvme: Fix get/set number of queues feature, again

Eduardo Habkost (1):
      oslib-posix: Print errors before aborting on qemu_alloc_stack()

Fred Rolland (1):
      qemu-doc: Add UUID support in initiator name

Stefan Hajnoczi (4):
      scripts: add argparse module for Python 2.6 compatibility
      docker.py: Python 2.6 argparse compatibility
      tests: migration/guestperf Python 2.6 argparse compatibility
      qcow2: allocate cluster_cache/cluster_data on demand

 include/qemu/throttle.h | 8 +-
 block/qcow.c | 12 +-
 block/qcow2-cluster.c | 17 +
 block/qcow2.c | 20 +-
 dump.c | 4 +-
 hw/block/nvme.c | 4 +-
 tests/test-throttle.c | 80 +-
 util/oslib-posix.c | 2 +
 util/throttle.c | 86 +-
 COPYING.PYTHON | 270 ++
 qemu-doc.texi | 5 +-
 scripts/argparse.py | 2406 ++++++++++++++++++++++++++++++++++++
 tests/docker/docker.py | 4 +-
 tests/migration/guestperf/shell.py | 8 +-
 14 files changed, 2831 insertions(+), 95 deletions(-)
 create mode 100644 COPYING.PYTHON
 create mode 100644 scripts/argparse.py

--
2.13.5


The following changes since commit ffd454c67e38cc6df792733ebc5d967eee28ac0d:

  Merge tag 'pull-vfio-20240107' of https://github.com/legoater/qemu into staging (2024-01-08 10:28:42 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 0b2675c473f68f13bc5ca1dd1c43ce421542e7b8:

  Rename "QEMU global mutex" to "BQL" in comments and docs (2024-01-08 10:45:43 -0500)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Philippe Mathieu-Daudé (1):
      iothread: Remove unused Error** argument in aio_context_set_aio_params

Stefan Hajnoczi (5):
      system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
      qemu/main-loop: rename QEMU_IOTHREAD_LOCK_GUARD to BQL_LOCK_GUARD
      qemu/main-loop: rename qemu_cond_wait_iothread() to
            qemu_cond_wait_bql()
      Replace "iothread lock" with "BQL" in comments
      Rename "QEMU global mutex" to "BQL" in comments and docs

 docs/devel/multi-thread-tcg.rst | 7 +-
 docs/devel/qapi-code-gen.rst | 2 +-
 docs/devel/replay.rst | 2 +-
 docs/devel/reset.rst | 2 +-
 docs/devel/multiple-iothreads.txt | 14 ++--
 hw/display/qxl.h | 2 +-
 include/block/aio-wait.h | 2 +-
 include/block/aio.h | 3 +-
 include/block/blockjob.h | 6 +-
 include/exec/cpu-common.h | 2 +-
 include/exec/memory.h | 4 +-
 include/exec/ramblock.h | 2 +-
 include/io/task.h | 2 +-
 include/migration/register.h | 8 +-
 include/qemu/coroutine-core.h | 2 +-
 include/qemu/coroutine.h | 2 +-
 include/qemu/main-loop.h | 68 ++++++++-------
 include/qemu/thread.h | 2 +-
 target/arm/internals.h | 4 +-
 accel/accel-blocker.c | 10 +--
 accel/dummy-cpus.c | 8 +-
 accel/hvf/hvf-accel-ops.c | 4 +-
 accel/kvm/kvm-accel-ops.c | 4 +-
 accel/kvm/kvm-all.c | 22 ++---
 accel/tcg/cpu-exec.c | 26 +++---
 accel/tcg/cputlb.c | 20 ++---
 accel/tcg/tcg-accel-ops-icount.c | 6 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 12 +--
 accel/tcg/tcg-accel-ops-rr.c | 18 ++--
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/translate-all.c | 2 +-
 cpu-common.c | 4 +-
 dump/dump.c | 4 +-
 hw/block/dataplane/virtio-blk.c | 8 +-
 hw/block/virtio-blk.c | 2 +-
 hw/core/cpu-common.c | 6 +-
 hw/display/virtio-gpu.c | 2 +-
 hw/i386/intel_iommu.c | 6 +-
 hw/i386/kvm/xen_evtchn.c | 30 +++----
 hw/i386/kvm/xen_gnttab.c | 2 +-
 hw/i386/kvm/xen_overlay.c | 2 +-
 hw/i386/kvm/xen_xenstore.c | 2 +-
 hw/intc/arm_gicv3_cpuif.c | 2 +-
 hw/intc/s390_flic.c | 18 ++--
 hw/mips/mips_int.c | 2 +-
 hw/misc/edu.c | 4 +-
 hw/misc/imx6_src.c | 2 +-
 hw/misc/imx7_src.c | 2 +-
 hw/net/xen_nic.c | 8 +-
 hw/ppc/pegasos2.c | 2 +-
 hw/ppc/ppc.c | 6 +-
 hw/ppc/spapr.c | 2 +-
 hw/ppc/spapr_events.c | 2 +-
 hw/ppc/spapr_rng.c | 4 +-
 hw/ppc/spapr_softmmu.c | 4 +-
 hw/remote/mpqemu-link.c | 22 ++---
 hw/remote/vfio-user-obj.c | 2 +-
 hw/s390x/s390-skeys.c | 2 +-
 hw/scsi/virtio-scsi-dataplane.c | 6 +-
 iothread.c | 3 +-
 migration/block-dirty-bitmap.c | 14 ++--
 migration/block.c | 38 ++++-----
 migration/colo.c | 62 +++++++-------
 migration/dirtyrate.c | 12 +--
 migration/migration.c | 54 ++++++------
 migration/ram.c | 16 ++--
 net/tap.c | 2 +-
 replay/replay-internal.c | 2 +-
 semihosting/console.c | 8 +-
 stubs/iothread-lock.c | 6 +-
 system/cpu-throttle.c | 6 +-
 system/cpus.c | 55 +++++++------
 system/dirtylimit.c | 4 +-
 system/memory.c | 2 +-
 system/physmem.c | 14 ++--
 system/runstate.c | 2 +-
 system/watchpoint.c | 4 +-
 target/arm/arm-powerctl.c | 14 ++--
 target/arm/helper.c | 6 +-
 target/arm/hvf/hvf.c | 8 +-
 target/arm/kvm.c | 8 +-
 target/arm/ptw.c | 6 +-
 target/arm/tcg/helper-a64.c | 8 +-
 target/arm/tcg/m_helper.c | 6 +-
 target/arm/tcg/op_helper.c | 24 +++---
 target/arm/tcg/psci.c | 2 +-
 target/hppa/int_helper.c | 8 +-
 target/i386/hvf/hvf.c | 6 +-
 target/i386/kvm/hyperv.c | 4 +-
 target/i386/kvm/kvm.c | 28 +++----
 target/i386/kvm/xen-emu.c | 16 ++--
 target/i386/nvmm/nvmm-accel-ops.c | 6 +-
 target/i386/nvmm/nvmm-all.c | 20 ++---
 target/i386/tcg/sysemu/fpu_helper.c | 6 +-
 target/i386/tcg/sysemu/misc_helper.c | 4 +-
 target/i386/whpx/whpx-accel-ops.c | 6 +-
 target/i386/whpx/whpx-all.c | 24 +++---
 target/loongarch/tcg/csr_helper.c | 4 +-
 target/mips/kvm.c | 4 +-
 target/mips/tcg/sysemu/cp0_helper.c | 4 +-
 target/openrisc/sys_helper.c | 16 ++--
 target/ppc/excp_helper.c | 14 ++--
 target/ppc/helper_regs.c | 2 +-
 target/ppc/kvm.c | 4 +-
 target/ppc/misc_helper.c | 8 +-
 target/ppc/timebase_helper.c | 8 +-
 target/riscv/cpu_helper.c | 4 +-
 target/s390x/kvm/kvm.c | 4 +-
 target/s390x/tcg/misc_helper.c | 118 +++++++++++++--------------
 target/sparc/int32_helper.c | 2 +-
 target/sparc/int64_helper.c | 6 +-
 target/sparc/win_helper.c | 20 ++---
 target/xtensa/exc_helper.c | 8 +-
 ui/spice-core.c | 6 +-
 util/aio-posix.c | 3 +-
 util/aio-win32.c | 3 +-
 util/async.c | 2 +-
 util/main-loop.c | 13 ++-
 util/qsp.c | 6 +-
 util/rcu.c | 16 ++--
 audio/coreaudio.m | 8 +-
 memory_ldst.c.inc | 18 ++--
 target/i386/hvf/README.md | 2 +-
 ui/cocoa.m | 56 ++++++-------
 124 files changed, 646 insertions(+), 655 deletions(-)

--
2.43.0

Deleted patch
From: Dan Aloni <dan@kernelim.com>

The number of queues returned by the admin command should:

  1) Only mention the number of non-admin queues.
  2) Be zero-based, meaning that '0 == one non-admin queue',
     '1 == two non-admin queues', and so forth.

Because our `num_queues` means the number of queues _plus_ the admin
queue, the right calculation for the number returned from the admin
command is `num_queues - 2`, combining the two requirements mentioned.

The issue was discovered by reducing num_queues from 64 to 8 and running
a Linux VM with an SMP parameter larger than that (e.g. 22). It tries to
utilize all queues, and therefore fails with an invalid queue number
when trying to queue I/Os on the last queue.

Signed-off-by: Dan Aloni <dan@kernelim.com>
CC: Alex Friedman <alex@e8storage.com>
CC: Keith Busch <keith.busch@intel.com>
CC: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/nvme.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         result = blk_enable_write_cache(n->conf.blk);
         break;
     case NVME_NUMBER_OF_QUEUES:
-        result = cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
+        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
         break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         break;
     case NVME_NUMBER_OF_QUEUES:
         req->cqe.result =
-            cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
+            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
         break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
--
2.13.5

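For illustration, here is a minimal standalone sketch (not QEMU code) of the
0-based encoding this fix produces. The feature result packs the same 0-based
count into the low and high 16 bits; the cpu_to_le32() byte-swapping from the
patch is omitted here:

#include <stdint.h>
#include <stdio.h>

/* num_queues counts the admin queue as well, as in this device model,
 * so num_queues - 2 is "all remaining I/O queues", expressed 0-based. */
static uint32_t nvme_num_queues_result(uint16_t num_queues)
{
    uint16_t n = num_queues - 2;   /* exclude admin queue, then 0-base */
    return (uint32_t)n | ((uint32_t)n << 16);
}

int main(void)
{
    /* 8 queues total -> 7 I/O queues -> 0-based value 6 in both halves */
    printf("0x%08x\n", nvme_num_queues_result(8)); /* prints 0x00060006 */
    return 0;
}
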
Deleted patch
From: Alberto Garcia <berto@igalia.com>

The level of the burst bucket is stored in bkt.burst_level, not
bkt.burst_length.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Message-id: 49aab2711d02f285567f3b3b13a113847af33812.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/throttle.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
  * - The bkt.avg rate does not apply until the bucket is full,
  *   allowing the user to do bursts until then. The I/O limit during
  *   bursts is bkt.max. To enforce this limit we keep an additional
- *   bucket in bkt.burst_length that leaks at a rate of bkt.max units
+ *   bucket in bkt.burst_level that leaks at a rate of bkt.max units
  *   per second.
  *
  * - Because of all of the above, the user can perform I/O at a
--
2.13.5

Deleted patch
From: Alberto Garcia <berto@igalia.com>

The way the throttling algorithm works is that requests start being
throttled once the bucket level exceeds the burst limit. When we get
there the bucket leaks at the level set by the user (bkt->avg), and
that leak rate is what prevents guest I/O from exceeding the desired
limit.

If we don't allow bursts (i.e. bkt->max == 0) then we can start
throttling requests immediately. The problem with keeping the
threshold at 0 is that it only allows one request at a time, and as
soon as there's a bit of I/O from the guest every other request will
be throttled and performance will suffer considerably. That can even
make the guest unable to reach the throttle limit if that limit is
high enough, and that happens regardless of the block scheduler used
by the guest.

Increasing that threshold gives flexibility to the guest, allowing it
to perform short bursts of I/O before being throttled. Increasing the
threshold too much does not make a difference in the long run (because
it's the leak rate that defines the actual throughput) but it does
allow the guest to perform longer initial bursts and exceed the
throttle limit for a short while.

A burst value of bkt->avg / 10 allows the guest to perform 100ms'
worth of I/O at the target rate without being throttled.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 31aae6645f0d1fbf3860fb2b528b757236f0c0a7.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/throttle.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/util/throttle.c b/util/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -XXX,XX +XXX,XX @@ static void throttle_fix_bucket(LeakyBucket *bkt)
     /* zero bucket level */
     bkt->level = bkt->burst_level = 0;

-    /* The following is done to cope with the Linux CFQ block scheduler
-     * which regroup reads and writes by block of 100ms in the guest.
-     * When they are two process one making reads and one making writes cfq
-     * make a pattern looking like the following:
-     * WWWWWWWWWWWRRRRRRRRRRRRRRWWWWWWWWWWWWWwRRRRRRRRRRRRRRRRR
-     * Having a max burst value of 100ms of the average will help smooth the
-     * throttling
-     */
+    /* If bkt->max is 0 we still want to allow short bursts of I/O
+     * from the guest, otherwise every other request will be throttled
+     * and performance will suffer considerably. */
     min = bkt->avg / 10;
     if (bkt->avg && !bkt->max) {
         bkt->max = min;
--
2.13.5

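The 100ms figure can be made concrete with a toy model. The following
standalone sketch (illustrative only, not QEMU's implementation) shows a
bucket whose level grows with each request and leaks at the target rate;
requests start being throttled only once the level exceeds the avg / 10
burst threshold:

#include <stdio.h>

int main(void)
{
    double avg = 100e6;          /* 100 MB/s target rate */
    double threshold = avg / 10; /* implicit burst: 100 ms worth = 10 MB */
    double level = 0;

    for (int i = 0; i < 5; i++) {
        level += 4e6;                      /* a 4 MB request arrives */
        int throttled = level > threshold; /* must wait for the leak? */
        printf("req %d: level=%.0f throttled=%d\n", i, level, throttled);
        level -= avg * 0.01;               /* 10 ms of leaking */
        if (level < 0) {
            level = 0;
        }
    }
    return 0;
}

With a threshold of 0 instead, every request after the first would be
throttled, which is exactly the pathology the commit message describes.
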
Deleted patch
From: Alberto Garcia <berto@igalia.com>

Use a pointer to the bucket instead of repeating cfg->buckets[i] all
the time. This makes the code more concise and will help us expand the
checks later and save a few line breaks.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 763ffc40a26b17d54cf93f5a999e4656049fcf0c.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/throttle.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/util/throttle.c b/util/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
     }

     for (i = 0; i < BUCKETS_COUNT; i++) {
-        if (cfg->buckets[i].avg < 0 ||
-            cfg->buckets[i].max < 0 ||
-            cfg->buckets[i].avg > THROTTLE_VALUE_MAX ||
-            cfg->buckets[i].max > THROTTLE_VALUE_MAX) {
+        LeakyBucket *bkt = &cfg->buckets[i];
+        if (bkt->avg < 0 || bkt->max < 0 ||
+            bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
             error_setg(errp, "bps/iops/max values must be within [0, %lld]",
                        THROTTLE_VALUE_MAX);
             return false;
         }

-        if (!cfg->buckets[i].burst_length) {
+        if (!bkt->burst_length) {
             error_setg(errp, "the burst length cannot be 0");
             return false;
         }

-        if (cfg->buckets[i].burst_length > 1 && !cfg->buckets[i].max) {
+        if (bkt->burst_length > 1 && !bkt->max) {
             error_setg(errp, "burst length set without burst rate");
             return false;
         }

-        if (cfg->buckets[i].max && !cfg->buckets[i].avg) {
+        if (bkt->max && !bkt->avg) {
             error_setg(errp, "bps_max/iops_max require corresponding"
                        " bps/iops values");
             return false;
         }

-        if (cfg->buckets[i].max && cfg->buckets[i].max < cfg->buckets[i].avg) {
+        if (bkt->max && bkt->max < bkt->avg) {
             error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops");
             return false;
         }
--
2.13.5

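The checks being rearranged above encode a small rule set: values must lie
in [0, THROTTLE_VALUE_MAX], burst_length must be non-zero, a burst length
above 1 requires a burst rate (max), max requires avg, and max may not be
lower than avg. A minimal standalone sketch of those rules, reduced to one
bucket (illustrative only; the limit constant mirrors QEMU's but the struct
is a stand-in):

#include <stdbool.h>
#include <stdio.h>

#define THROTTLE_VALUE_MAX 1000000000000000LL

typedef struct { long long avg, max; unsigned burst_length; } Bucket;

/* The same rules as throttle_is_valid(), applied to a single bucket */
static bool bucket_is_valid(const Bucket *b)
{
    if (b->avg < 0 || b->max < 0 ||
        b->avg > THROTTLE_VALUE_MAX || b->max > THROTTLE_VALUE_MAX) {
        return false;   /* outside [0, THROTTLE_VALUE_MAX] */
    }
    if (!b->burst_length) {
        return false;   /* the burst length cannot be 0 */
    }
    if (b->burst_length > 1 && !b->max) {
        return false;   /* burst length set without burst rate */
    }
    if (b->max && !b->avg) {
        return false;   /* bps_max/iops_max need bps/iops */
    }
    if (b->max && b->max < b->avg) {
        return false;   /* burst rate below the base rate */
    }
    return true;
}

int main(void)
{
    Bucket ok  = { .avg = 100, .max = 200, .burst_length = 1 };
    Bucket bad = { .avg = 0,   .max = 200, .burst_length = 1 };
    printf("%d %d\n", bucket_is_valid(&ok), bucket_is_valid(&bad)); /* 1 0 */
    return 0;
}
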
From: Alberto Garcia <berto@igalia.com>

Both the throttling limits set with the throttling.iops-* and
throttling.bps-* options and their QMP equivalents defined in the
BlockIOThrottle struct are integer values.

Those limits are also reported in the BlockDeviceInfo struct and they
are integers there as well.

Therefore there's no reason to store them internally as double and do
the conversion every time we're setting or querying them, so this patch
uses uint64_t for those types. Let's also use an unsigned type because
we don't allow negative values anyway.

LeakyBucket.level and LeakyBucket.burst_level do however remain double
because their value changes depending on the fraction of time elapsed
since the previous I/O operation.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: f29b840422767b5be2c41c2dfdbbbf6c5f8fedf8.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/throttle.h | 4 ++--
 tests/test-throttle.c | 3 ++-
 util/throttle.c | 7 +++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
  */

 typedef struct LeakyBucket {
-    double avg; /* average goal in units per second */
-    double max; /* leaky bucket max burst in units */
+    uint64_t avg; /* average goal in units per second */
+    uint64_t max; /* leaky bucket max burst in units */
     double level; /* bucket level in units */
     double burst_level; /* bucket level in units (for computing bursts) */
     unsigned burst_length; /* max length of the burst period, in seconds */
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-throttle.c
+++ b/tests/test-throttle.c
@@ -XXX,XX +XXX,XX @@ static void test_enabled(void)
     for (i = 0; i < BUCKETS_COUNT; i++) {
         throttle_config_init(&cfg);
         set_cfg_value(false, i, 150);
+        g_assert(throttle_is_valid(&cfg, NULL));
         g_assert(throttle_enabled(&cfg));
     }

     for (i = 0; i < BUCKETS_COUNT; i++) {
         throttle_config_init(&cfg);
         set_cfg_value(false, i, -150);
-        g_assert(!throttle_enabled(&cfg));
+        g_assert(!throttle_is_valid(&cfg, NULL));
diff --git a/util/throttle.c b/util/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
         /* If bkt->max is 0 we still want to allow short bursts of I/O
          * from the guest, otherwise every other request will be throttled
          * and performance will suffer considerably. */
-        bucket_size = bkt->avg / 10;
+        bucket_size = (double) bkt->avg / 10;
         burst_bucket_size = 0;
     } else {
         /* If we have a burst limit then we have to wait until all I/O
          * at burst rate has finished before throttling to bkt->avg */
         bucket_size = bkt->max * bkt->burst_length;
-        burst_bucket_size = bkt->max / 10;
+        burst_bucket_size = (double) bkt->max / 10;
     }

@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)

     for (i = 0; i < BUCKETS_COUNT; i++) {
         LeakyBucket *bkt = &cfg->buckets[i];
-        if (bkt->avg < 0 || bkt->max < 0 ||
-            bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
+        if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
             error_setg(errp, "bps/iops/max values must be within [0, %lld]",
                        THROTTLE_VALUE_MAX);
             return false;
--
2.13.5


From: Philippe Mathieu-Daudé <philmd@linaro.org>

aio_context_set_aio_params() doesn't use its undocumented
Error** argument. Remove it to simplify.

Note this removes a use of "unchecked Error**" in
iothread_set_aio_context_params().

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231120171806.19361-1-philmd@linaro.org>
---
 include/block/aio.h | 3 +--
 iothread.c | 3 +--
 util/aio-posix.c | 3 +--
 util/aio-win32.c | 3 +--
 util/main-loop.c | 5 +----
 5 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
  * @max_batch: maximum number of requests in a batch, 0 means that the
  * engine will use its default
  */
-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
-                                Error **errp);
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

 /**
  * aio_context_set_thread_pool_params:
diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
     }

     aio_context_set_aio_params(iothread->ctx,
-                               iothread->parent_obj.aio_max_batch,
-                               errp);
+                               iothread->parent_obj.aio_max_batch);

     aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
                                        base->thread_pool_max, errp);
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     aio_notify(ctx);
 }

-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
-                                Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
 {
     /*
      * No thread synchronization here, it doesn't matter if an incorrect value
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     }
 }

-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
-                                Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
 {
 }
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@ static void main_loop_update_params(EventLoopBase *base, Error **errp)
         return;
     }

-    aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
-    if (*errp) {
-        return;
-    }
+    aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch);

     aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
                                        base->thread_pool_max, errp);
--
2.43.0

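The (double) casts in the LeakyBucket integer-type patch above matter
because avg and max become unsigned integers: without a cast, bkt->avg / 10
is integer division and the fractional part of the bucket size is silently
lost. A two-line standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t avg = 15;
    double truncated = avg / 10;     /* integer division first: 1.0 */
    double exact = (double)avg / 10; /* promoted division: 1.5 */
    printf("%.1f %.1f\n", truncated, exact);
    return 0;
}
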
From: Alberto Garcia <berto@igalia.com>

The throttling code can change internally the value of bkt->max if it
hasn't been set by the user. The problem with this is that if we want
to retrieve the original value we have to undo this change first. This
is ugly and unnecessary: this patch removes the throttle_fix_bucket()
and throttle_unfix_bucket() functions completely and moves the logic
to throttle_compute_wait().

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Message-id: 5b0b9e1ac6eb208d709eddc7b09e7669a523bff3.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/throttle.c | 62 +++++++++++++++++++++------------------------------------
 1 file changed, 23 insertions(+), 39 deletions(-)

diff --git a/util/throttle.c b/util/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_do_compute_wait(double limit, double extra)
 int64_t throttle_compute_wait(LeakyBucket *bkt)
 {
     double extra; /* the number of extra units blocking the io */
+    double bucket_size; /* I/O before throttling to bkt->avg */
+    double burst_bucket_size; /* Before throttling to bkt->max */

     if (!bkt->avg) {
         return 0;
     }

-    /* If the bucket is full then we have to wait */
-    extra = bkt->level - bkt->max * bkt->burst_length;
+    if (!bkt->max) {
+        /* If bkt->max is 0 we still want to allow short bursts of I/O
+         * from the guest, otherwise every other request will be throttled
+         * and performance will suffer considerably. */
+        bucket_size = bkt->avg / 10;
+        burst_bucket_size = 0;
+    } else {
+        /* If we have a burst limit then we have to wait until all I/O
+         * at burst rate has finished before throttling to bkt->avg */
+        bucket_size = bkt->max * bkt->burst_length;
+        burst_bucket_size = bkt->max / 10;
+    }
+
+    /* If the main bucket is full then we have to wait */
+    extra = bkt->level - bucket_size;
     if (extra > 0) {
         return throttle_do_compute_wait(bkt->avg, extra);
     }

-    /* If the bucket is not full yet we have to make sure that we
-     * fulfill the goal of bkt->max units per second. */
+    /* If the main bucket is not full yet we still have to check the
+     * burst bucket in order to enforce the burst limit */
     if (bkt->burst_length > 1) {
-        /* We use 1/10 of the max value to smooth the throttling.
-         * See throttle_fix_bucket() for more details. */
-        extra = bkt->burst_level - bkt->max / 10;
+        extra = bkt->burst_level - burst_bucket_size;
         if (extra > 0) {
             return throttle_do_compute_wait(bkt->max, extra);
         }
     }

@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)


From: Stefan Hajnoczi <stefanha@redhat.com>

The Big QEMU Lock (BQL) has many names and they are confusing. The
actual QemuMutex variable is called qemu_global_mutex but it's commonly
referred to as the BQL in discussions and some code comments. The
locking APIs, however, are called qemu_mutex_lock_iothread() and
qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was
split into KVM vcpu threads and the "iothread" (now called the main
loop thread). I have contributed to the confusion myself by introducing
a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the
locking APIs to:
- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches
will rename them. There are also comments and documentation that will be
updated in later patches.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 2 +-
 include/qemu/main-loop.h | 39 +++++----
 include/qemu/thread.h | 2 +-
 accel/accel-blocker.c | 10 +--
 accel/dummy-cpus.c | 8 +-
 accel/hvf/hvf-accel-ops.c | 4 +-
 accel/kvm/kvm-accel-ops.c | 4 +-
 accel/kvm/kvm-all.c | 22 ++---
 accel/tcg/cpu-exec.c | 26 +++---
 accel/tcg/cputlb.c | 16 ++--
 accel/tcg/tcg-accel-ops-icount.c | 4 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 12 +--
 accel/tcg/tcg-accel-ops-rr.c | 14 ++--
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/translate-all.c | 2 +-
 cpu-common.c | 4 +-
 dump/dump.c | 4 +-
 hw/core/cpu-common.c | 6 +-
 hw/i386/intel_iommu.c | 6 +-
 hw/i386/kvm/xen_evtchn.c | 16 ++--
 hw/i386/kvm/xen_overlay.c | 2 +-
 hw/i386/kvm/xen_xenstore.c | 2 +-
 hw/intc/arm_gicv3_cpuif.c | 2 +-
 hw/intc/s390_flic.c | 18 ++--
 hw/misc/edu.c | 4 +-
 hw/misc/imx6_src.c | 2 +-
 hw/misc/imx7_src.c | 2 +-
 hw/net/xen_nic.c | 8 +-
 hw/ppc/pegasos2.c | 2 +-
 hw/ppc/ppc.c | 4 +-
 hw/ppc/spapr.c | 2 +-
 hw/ppc/spapr_rng.c | 4 +-
 hw/ppc/spapr_softmmu.c | 4 +-
 hw/remote/mpqemu-link.c | 20 ++---
 hw/remote/vfio-user-obj.c | 2 +-
 hw/s390x/s390-skeys.c | 2 +-
 migration/block-dirty-bitmap.c | 4 +-
 migration/block.c | 16 ++--
 migration/colo.c | 60 +++++++-------
 migration/dirtyrate.c | 12 +--
 migration/migration.c | 52 ++++++------
 migration/ram.c | 12 +--
 replay/replay-internal.c | 2 +-
 semihosting/console.c | 8 +-
 stubs/iothread-lock.c | 6 +-
 system/cpu-throttle.c | 4 +-
 system/cpus.c | 51 ++++++------
 system/dirtylimit.c | 4 +-
 system/memory.c | 2 +-
 system/physmem.c | 8 +-
 system/runstate.c | 2 +-
 system/watchpoint.c | 4 +-
 target/arm/arm-powerctl.c | 14 ++--
 target/arm/helper.c | 4 +-
 target/arm/hvf/hvf.c | 8 +-
 target/arm/kvm.c | 8 +-
 target/arm/ptw.c | 6 +-
 target/arm/tcg/helper-a64.c | 8 +-
 target/arm/tcg/m_helper.c | 6 +-
 target/arm/tcg/op_helper.c | 24 +++---
 target/arm/tcg/psci.c | 2 +-
 target/hppa/int_helper.c | 8 +-
 target/i386/hvf/hvf.c | 6 +-
 target/i386/kvm/hyperv.c | 4 +-
 target/i386/kvm/kvm.c | 28 +++----
 target/i386/kvm/xen-emu.c | 14 ++--
 target/i386/nvmm/nvmm-accel-ops.c | 4 +-
 target/i386/nvmm/nvmm-all.c | 20 ++---
 target/i386/tcg/sysemu/fpu_helper.c | 6 +-
 target/i386/tcg/sysemu/misc_helper.c | 4 +-
 target/i386/whpx/whpx-accel-ops.c | 4 +-
 target/i386/whpx/whpx-all.c | 24 +++---
 target/loongarch/tcg/csr_helper.c | 4 +-
 target/mips/kvm.c | 4 +-
 target/mips/tcg/sysemu/cp0_helper.c | 4 +-
 target/openrisc/sys_helper.c | 16 ++--
 target/ppc/excp_helper.c | 12 +--
 target/ppc/kvm.c | 4 +-
 target/ppc/misc_helper.c | 8 +-
 target/ppc/timebase_helper.c | 8 +-
 target/s390x/kvm/kvm.c | 4 +-
 target/s390x/tcg/misc_helper.c | 118 +++++++++++++--------------
 target/sparc/int32_helper.c | 2 +-
 target/sparc/int64_helper.c | 6 +-
 target/sparc/win_helper.c | 20 ++---
 target/xtensa/exc_helper.c | 8 +-
 ui/spice-core.c | 4 +-
 util/async.c | 2 +-
 util/main-loop.c | 8 +-
 util/qsp.c | 6 +-
 util/rcu.c | 14 ++--
 audio/coreaudio.m | 4 +-
 memory_ldst.c.inc | 18 ++--
 target/i386/hvf/README.md | 2 +-
 ui/cocoa.m | 50 ++++++------
 95 files changed, 529 insertions(+), 529 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@ static inline bool in_aio_context_home_thread(AioContext *ctx)
     }

     if (ctx == qemu_get_aio_context()) {
-        return qemu_mutex_iothread_locked();
+        return bql_locked();
     } else {
         return false;
     }
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -XXX,XX +XXX,XX @@ GSource *iohandler_get_g_source(void);
 AioContext *iohandler_get_aio_context(void);

 /**
- * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ * bql_locked: Return lock status of the Big QEMU Lock (BQL)
  *
- * The main loop mutex is the coarsest lock in QEMU, and as such it
+ * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
  * must always be taken outside other locks. This function helps
  * functions take different paths depending on whether the current
- * thread is running within the main loop mutex.
+ * thread is running within the BQL.
  *
  * This function should never be used in the block layer, because
  * unit tests, block layer tools and qemu-storage-daemon do not
  * have a BQL.
  * Please instead refer to qemu_in_main_thread().
  */
-bool qemu_mutex_iothread_locked(void);
+bool bql_locked(void);

 /**
  * qemu_in_main_thread: return whether it's possible to safely access
@@ -XXX,XX +XXX,XX @@ bool qemu_in_main_thread(void);
 } while (0)

 /**
- * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ * bql_lock: Lock the Big QEMU Lock (BQL).
  *
- * This function locks the main loop mutex. The mutex is taken by
+ * This function locks the Big QEMU Lock (BQL). The lock is taken by
  * main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be taken
+ * external events (such as with select). The lock should be taken
  * by threads other than the main loop thread when calling
  * qemu_bh_new(), qemu_set_fd_handler() and basically all other
  * functions documented in this file.
  *
- * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * NOTE: tools currently are single-threaded and bql_lock
  * is a no-op there.
  */
-#define qemu_mutex_lock_iothread() \
-    qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
-void qemu_mutex_lock_iothread_impl(const char *file, int line);
+#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
+void bql_lock_impl(const char *file, int line);

 /**
- * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ * bql_unlock: Unlock the Big QEMU Lock (BQL).
  *
- * This function unlocks the main loop mutex. The mutex is taken by
+ * This function unlocks the Big QEMU Lock. The lock is taken by
  * main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be unlocked
+ * external events (such as with select). The lock should be unlocked
  * as soon as possible by threads other than the main loop thread,
  * because it prevents the main loop from processing callbacks,
  * including timers and bottom halves.
  *
- * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * NOTE: tools currently are single-threaded and bql_unlock
  * is a no-op there.
  */
-void qemu_mutex_unlock_iothread(void);
+void bql_unlock(void);

 /**
  * QEMU_IOTHREAD_LOCK_GUARD
  *
- * Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread.
+ * Wrap a block of code in a conditional bql_{lock,unlock}.
  */
 typedef struct IOThreadLockAuto IOThreadLockAuto;

 static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file,
                                                         int line)
 {
-    if (qemu_mutex_iothread_locked()) {
+    if (bql_locked()) {
         return NULL;
     }
-    qemu_mutex_lock_iothread_impl(file, line);
+    bql_lock_impl(file, line);
     /* Anything non-NULL causes the cleanup function to be called */
     return (IOThreadLockAuto *)(uintptr_t)1;
 }

 static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l)
 {
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 }

 G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock)
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -XXX,XX +XXX,XX @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
 typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                       const char *f, int l);

-extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
+extern QemuMutexLockFunc bql_mutex_lock_func;
 extern QemuMutexLockFunc qemu_mutex_lock_func;
 extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
 extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/accel-blocker.c
+++ b/accel/accel-blocker.c
@@ -XXX,XX +XXX,XX @@ void accel_blocker_init(void)

 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ void accel_ioctl_begin(void)

 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ void accel_ioctl_end(void)

 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ void accel_cpu_ioctl_begin(CPUState *cpu)

 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

     /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);

     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -XXX,XX +XXX,XX @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void *hvf_cpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
@@ -XXX,XX +XXX,XX @@ static void *hvf_cpu_thread_fn(void *arg)

     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void *kvm_vcpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -XXX,XX +XXX,XX @@ static void *kvm_vcpu_thread_fn(void *arg)

     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -XXX,XX +XXX,XX @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
      * vcpus out in a synchronous way.
@@ -XXX,XX +XXX,XX @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();

         r->reaper_iteration++;
     }
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);

     do {
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)

 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif

@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
              * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
              */
             trace_kvm_dirty_ring_full(cpu->cpu_index);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             /*
              * We throttle vCPU by making it sleep once it exit from kernel
              * due to dirty ring full. In the dirtylimit scenario, reaping
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
             } else {
                 kvm_dirty_ring_reap(kvm_state, NULL);
             }
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             dirtylimit_vcpu_execute(cpu);
             ret = 0;
             break;
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
             break;
         case KVM_SYSTEM_EVENT_CRASH:
             kvm_cpu_synchronize_state(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             ret = 0;
             break;
         default:
@@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);

     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();

     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
         tcg_ctx->gen_tb = NULL;
     }
 #endif
-    if (qemu_mutex_iothread_locked()) {
-        qemu_mutex_unlock_iothread();
+    if (bql_locked()) {
+        bql_unlock();
     }
     assert_no_pages_locked();
 }
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386)
     if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
         X86CPU *x86_cpu = X86_CPU(cpu);
-        qemu_mutex_lock_iothread();
+        bql_lock();
         apic_poll_irq(x86_cpu->apic_state);
         cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 #endif /* TARGET_I386 */
     if (!cpu_has_work(cpu)) {
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
     if (replay_exception()) {
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        qemu_mutex_lock_iothread();
+        bql_lock();
         cc->tcg_ops->do_interrupt(cpu);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         cpu->exception_index = -1;

         if (unlikely(cpu->singlestep_enabled)) {
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,

     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
          */
         if (unlikely(cpu->singlestep_enabled)) {
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
         cpu->exception_index = -1;
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }

         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }

     /* Finally, check if we need to exit to the main loop. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;

-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

     return ret;
 }
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;

-    qemu_mutex_lock_iothread();
+    bql_lock();
     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset);
     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

     return int128_make128(b, a);
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;

-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

     return ret;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;

-    qemu_mutex_lock_iothread();
+    bql_lock();
     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                     mmu_idx, ra, mr, mr_offset);
     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();

     return ret;
 }
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -XXX,XX +XXX,XX @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
          * We're called without the iothread lock, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }

diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
              */
             break;
         case EXCP_ATOMIC:
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             cpu_exec_step_atomic(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
         default:
             /* Ignore everything else? */
             break;
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
     } while (!cpu->unplug || cpu_can_run(cpu));

     tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
         /* Only used for icount_enabled() */
         int64_t cpu_budget = 0;

-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();

         if (icount_enabled()) {
             int cpu_count = rr_cpu_count();
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
             if (cpu_can_run(cpu)) {
                 int r;

-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 if (icount_enabled()) {
                     icount_prepare_for_run(cpu, cpu_budget);
                 }
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
                 if (icount_enabled()) {
                     icount_process_data(cpu);
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();

                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
                 } else if (r == EXCP_ATOMIC) {
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     cpu_exec_step_atomic(cpu);
-                    qemu_mutex_lock_iothread();
+                    bql_lock();
                     break;
                 }
             } else if (cpu->stop) {
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

     cpu->interrupt_request |= mask;

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)

 void cpu_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     cpu->interrupt_request |= mask;
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
diff --git a/cpu-common.c b/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu-common.c
+++ b/cpu-common.c
@@ -XXX,XX +XXX,XX @@ void process_queued_cpu_work(CPUState *cpu)
              * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
              * neither CPU can proceed.
              */
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             start_exclusive();
             wi->func(cpu, wi->data);
             end_exclusive();
-            qemu_mutex_lock_iothread();
+            bql_lock();
         } else {
             wi->func(cpu, wi->data);
         }
diff --git a/dump/dump.c b/dump/dump.c
index XXXXXXX..XXXXXXX 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -XXX,XX +XXX,XX @@ static int dump_cleanup(DumpState *s)
     s->guest_note = NULL;
     if (s->resume) {
         if (s->detached) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
         }
         vm_start();
         if (s->detached) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }
     migrate_del_blocker(&dump_migration_blocker);
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ CPUState *cpu_create(const char *typename)
  * BQL here if we need to. cpu_interrupt assumes it is held.*/
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
-    bool need_lock = !qemu_mutex_iothread_locked();
+    bool need_lock = !bql_locked();

     if (need_lock) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
     cpu->interrupt_request &= ~mask;
     if (need_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
 {
     bool use_iommu, pt;
     /* Whether we need to take the BQL on our own */
-    bool take_bql = !qemu_mutex_iothread_locked();
+    bool take_bql = !bql_locked();

     assert(as);

@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
      * it. We'd better make sure we have had it already, or, take it.
      */
     if (take_bql) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }

     /* Turn off first then on the other */
@@ -XXX,XX +XXX,XX @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
     }

     if (take_bql) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }

     return use_iommu;
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -XXX,XX +XXX,XX @@ void xen_evtchn_set_callback_level(int level)
      * effect immediately. That just leaves interdomain loopback as the case
      * which uses the BH.
      */
-    if (!qemu_mutex_iothread_locked()) {
+    if (!bql_locked()) {
         qemu_bh_schedule(s->gsi_bh);
         return;
     }
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_set_callback_param(uint64_t param)
      * We need the BQL because set_callback_pci_intx() may call into PCI code,
      * and because we may need to manipulate the old and new GSI levels.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     qemu_mutex_lock(&s->port_lock);

     switch (type) {
@@ -XXX,XX +XXX,XX @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
     XenEvtchnPort *p = &s->port_table[port];

     /* Because it *might* be a PIRQ port */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     switch (p->type) {
     case EVTCHNSTAT_closed:
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_soft_reset(void)
         return -ENOTSUP;
     }

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     qemu_mutex_lock(&s->port_lock);

@@ -XXX,XX +XXX,XX @@ bool xen_evtchn_set_gsi(int gsi, int level)
     XenEvtchnState *s = xen_evtchn_singleton;
     int pirq;

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
         return false;
@@ -XXX,XX +XXX,XX @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
         return;
     }

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     pirq = msi_pirq_target(addr, data);

@@ -XXX,XX +XXX,XX @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
         return 1; /* Not a PIRQ */
     }

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -XXX,XX +XXX,XX @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
         return false;
     }

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/kvm/xen_overlay.c
+++ b/hw/i386/kvm/xen_overlay.c
@@ -XXX,XX +XXX,XX @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
         return -ENOENT;
     }

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     if (s->shinfo_gpa) {
         /* If removing shinfo page, turn the kernel magic off first */
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -XXX,XX +XXX,XX @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
 {
     XenXenstoreState *s = opaque;

-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());

     /*
      * If there's a response pending, we obviously can't scribble over
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ void gicv3_cpuif_update(GICv3CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs->cpu);
     CPUARMState *env = &cpu->env;

-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                              cs->hppi.grp, cs->hppi.prio);
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/s390_flic.c
+++ b/hw/intc/s390_flic.c
@@ -XXX,XX +XXX,XX @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
     QEMUS390FlicIO *cur, *next;
     uint8_t isc;

-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & FLIC_PENDING_IO)) {
         return 0;
     }

@@ -XXX,XX +XXX,XX @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
 {
     uint32_t tmp;

-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_SERVICE);
     tmp = flic->service_param;
     flic->service_param = 0;
@@ -XXX,XX +XXX,XX @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
     QEMUS390FlicIO *io;
     uint8_t isc;

-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
         return NULL;
     }
@@ -XXX,XX +XXX,XX @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)

 void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
     flic->pending &= ~FLIC_PENDING_MCHK_CR;
 }

@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
 {
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);

-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     /* multiplexing is good enough for sclp - kvm does it internally as well */
     flic->service_param |= parm;
67
return true;
1031
flic->pending |= FLIC_PENDING_SERVICE;
68
}
1032
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
69
1033
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
70
-/* fix bucket parameters */
1034
QEMUS390FlicIO *io;
71
-static void throttle_fix_bucket(LeakyBucket *bkt)
1035
72
-{
1036
- g_assert(qemu_mutex_iothread_locked());
73
- double min;
1037
+ g_assert(bql_locked());
74
-
1038
io = g_new0(QEMUS390FlicIO, 1);
75
- /* zero bucket level */
1039
io->id = subchannel_id;
76
- bkt->level = bkt->burst_level = 0;
1040
io->nr = subchannel_nr;
77
-
1041
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
78
- /* If bkt->max is 0 we still want to allow short bursts of I/O
1042
{
79
- * from the guest, otherwise every other request will be throttled
1043
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
80
- * and performance will suffer considerably. */
1044
81
- min = bkt->avg / 10;
1045
- g_assert(qemu_mutex_iothread_locked());
82
- if (bkt->avg && !bkt->max) {
1046
+ g_assert(bql_locked());
83
- bkt->max = min;
1047
flic->pending |= FLIC_PENDING_MCHK_CR;
84
- }
1048
85
-}
1049
qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
86
-
1050
@@ -XXX,XX +XXX,XX @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
87
-/* undo internal bucket parameter changes (see throttle_fix_bucket()) */
1051
88
-static void throttle_unfix_bucket(LeakyBucket *bkt)
1052
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
89
-{
1053
{
90
- if (bkt->max < bkt->avg) {
1054
- g_assert(qemu_mutex_iothread_locked());
91
- bkt->max = 0;
1055
+ g_assert(bql_locked());
92
- }
1056
return !!flic->pending;
93
-}
1057
}
94
-
1058
95
/* Used to configure the throttle
1059
@@ -XXX,XX +XXX,XX @@ static void qemu_s390_flic_reset(DeviceState *dev)
96
*
1060
QEMUS390FlicIO *cur, *next;
97
* @ts: the throttle state we are working on
1061
int isc;
98
@@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts,
1062
99
1063
- g_assert(qemu_mutex_iothread_locked());
100
ts->cfg = *cfg;
1064
+ g_assert(bql_locked());
101
1065
flic->simm = 0;
102
+ /* Zero bucket level */
1066
flic->nimm = 0;
103
for (i = 0; i < BUCKETS_COUNT; i++) {
1067
flic->pending = 0;
104
- throttle_fix_bucket(&ts->cfg.buckets[i]);
1068
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
105
+ ts->cfg.buckets[i].level = 0;
1069
index XXXXXXX..XXXXXXX 100644
106
+ ts->cfg.buckets[i].burst_level = 0;
1070
--- a/hw/misc/edu.c
107
}
1071
+++ b/hw/misc/edu.c
108
1072
@@ -XXX,XX +XXX,XX @@ static void *edu_fact_thread(void *opaque)
109
ts->previous_leak = qemu_clock_get_ns(clock_type);
1073
smp_mb__after_rmw();
110
@@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts,
1074
1075
if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
1076
- qemu_mutex_lock_iothread();
1077
+ bql_lock();
1078
edu_raise_irq(edu, FACT_IRQ);
1079
- qemu_mutex_unlock_iothread();
1080
+ bql_unlock();
1081
}
1082
}
1083
1084
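
    [Note] For reference, the old-series throttle lines shown interleaved
    above reassemble into the following wait computation (pieced together
    from the + lines of that patch; a sketch, not a verbatim copy of
    throttle_compute_wait() in util/throttle.c):

    static int64_t compute_wait(LeakyBucket *bkt)
    {
        double bucket_size, burst_bucket_size, extra;

        if (!bkt->max) {
            /* If bkt->max is 0 we still want to allow short bursts of I/O
             * from the guest, otherwise every other request will be
             * throttled and performance will suffer considerably. */
            bucket_size = bkt->avg / 10;
            burst_bucket_size = 0;
        } else {
            /* If we have a burst limit then we have to wait until all I/O
             * at burst rate has finished before throttling to bkt->avg */
            bucket_size = bkt->max * bkt->burst_length;
            burst_bucket_size = bkt->max / 10;
        }

        /* If the main bucket is full then we have to wait */
        extra = bkt->level - bucket_size;
        if (extra > 0) {
            return throttle_do_compute_wait(bkt->avg, extra);
        }

        /* If the main bucket is not full yet we still have to check the
         * burst bucket in order to enforce the burst limit */
        if (bkt->burst_length > 1) {
            extra = bkt->burst_level - burst_bucket_size;
            if (extra > 0) {
                return throttle_do_compute_wait(bkt->max, extra);
            }
        }
        return 0;
    }

    For example, with avg=100, max=2000 and burst_length=2, a backlog of up
    to 4000 units is tolerated before throttling to 100 units/s, and a
    burst backlog of up to 200 units before throttling to 2000 units/s.
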
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
1085
index XXXXXXX..XXXXXXX 100644
1086
--- a/hw/misc/imx6_src.c
1087
+++ b/hw/misc/imx6_src.c
1088
@@ -XXX,XX +XXX,XX @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
1089
struct SRCSCRResetInfo *ri = data.host_ptr;
1090
IMX6SRCState *s = ri->s;
1091
1092
- assert(qemu_mutex_iothread_locked());
1093
+ assert(bql_locked());
1094
1095
s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
1096
DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
1097
diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c
1098
index XXXXXXX..XXXXXXX 100644
1099
--- a/hw/misc/imx7_src.c
1100
+++ b/hw/misc/imx7_src.c
1101
@@ -XXX,XX +XXX,XX @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
1102
struct SRCSCRResetInfo *ri = data.host_ptr;
1103
IMX7SRCState *s = ri->s;
1104
1105
- assert(qemu_mutex_iothread_locked());
1106
+ assert(bql_locked());
1107
1108
s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);
1109
1110
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
1111
index XXXXXXX..XXXXXXX 100644
1112
--- a/hw/net/xen_nic.c
1113
+++ b/hw/net/xen_nic.c
1114
@@ -XXX,XX +XXX,XX @@ static bool net_tx_packets(struct XenNetDev *netdev)
1115
void *page;
1116
void *tmpbuf = NULL;
1117
1118
- assert(qemu_mutex_iothread_locked());
1119
+ assert(bql_locked());
1120
1121
for (;;) {
1122
rc = netdev->tx_ring.req_cons;
1123
@@ -XXX,XX +XXX,XX @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
1124
RING_IDX rc, rp;
1125
void *page;
1126
1127
- assert(qemu_mutex_iothread_locked());
1128
+ assert(bql_locked());
1129
1130
if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) {
1131
return -1;
1132
@@ -XXX,XX +XXX,XX @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp)
1133
XenNetDev *netdev = XEN_NET_DEVICE(xendev);
1134
unsigned int port, rx_copy;
1135
1136
- assert(qemu_mutex_iothread_locked());
1137
+ assert(bql_locked());
1138
1139
if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u",
1140
&netdev->tx_ring_ref) != 1) {
1141
@@ -XXX,XX +XXX,XX @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp)
1142
1143
trace_xen_netdev_disconnect(netdev->dev);
1144
1145
- assert(qemu_mutex_iothread_locked());
1146
+ assert(bql_locked());
1147
1148
netdev->tx_ring.sring = NULL;
1149
netdev->rx_ring.sring = NULL;
1150
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
1151
index XXXXXXX..XXXXXXX 100644
1152
--- a/hw/ppc/pegasos2.c
1153
+++ b/hw/ppc/pegasos2.c
1154
@@ -XXX,XX +XXX,XX @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
1155
CPUPPCState *env = &cpu->env;
1156
1157
/* The TCG path should also be holding the BQL at this point */
1158
- g_assert(qemu_mutex_iothread_locked());
1159
+ g_assert(bql_locked());
1160
1161
if (FIELD_EX64(env->msr, MSR, PR)) {
1162
qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");
1163
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
1164
index XXXXXXX..XXXXXXX 100644
1165
--- a/hw/ppc/ppc.c
1166
+++ b/hw/ppc/ppc.c
1167
@@ -XXX,XX +XXX,XX @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
1168
{
1169
PowerPCCPU *cpu = env_archcpu(env);
1170
1171
- qemu_mutex_lock_iothread();
1172
+ bql_lock();
1173
1174
switch ((val >> 28) & 0x3) {
1175
case 0x0:
1176
@@ -XXX,XX +XXX,XX @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
1177
break;
1178
}
1179
1180
- qemu_mutex_unlock_iothread();
1181
+ bql_unlock();
1182
}
1183
1184
/* PowerPC 40x internal IRQ controller */
1185
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
1186
index XXXXXXX..XXXXXXX 100644
1187
--- a/hw/ppc/spapr.c
1188
+++ b/hw/ppc/spapr.c
1189
@@ -XXX,XX +XXX,XX @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1190
CPUPPCState *env = &cpu->env;
1191
1192
/* The TCG path should also be holding the BQL at this point */
1193
- g_assert(qemu_mutex_iothread_locked());
1194
+ g_assert(bql_locked());
1195
1196
g_assert(!vhyp_cpu_in_nested(cpu));
1197
1198
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
1199
index XXXXXXX..XXXXXXX 100644
1200
--- a/hw/ppc/spapr_rng.c
1201
+++ b/hw/ppc/spapr_rng.c
1202
@@ -XXX,XX +XXX,XX @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr,
1203
while (hrdata.received < 8) {
1204
rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
1205
random_recv, &hrdata);
1206
- qemu_mutex_unlock_iothread();
1207
+ bql_unlock();
1208
qemu_sem_wait(&hrdata.sem);
1209
- qemu_mutex_lock_iothread();
1210
+ bql_lock();
1211
}
1212
1213
qemu_sem_destroy(&hrdata.sem);
1214
diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c
1215
index XXXXXXX..XXXXXXX 100644
1216
--- a/hw/ppc/spapr_softmmu.c
1217
+++ b/hw/ppc/spapr_softmmu.c
1218
@@ -XXX,XX +XXX,XX @@ static void *hpt_prepare_thread(void *opaque)
1219
pending->ret = H_NO_MEM;
1220
}
1221
1222
- qemu_mutex_lock_iothread();
1223
+ bql_lock();
1224
1225
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
1226
/* Ready to go */
1227
@@ -XXX,XX +XXX,XX @@ static void *hpt_prepare_thread(void *opaque)
1228
free_pending_hpt(pending);
1229
}
1230
1231
- qemu_mutex_unlock_iothread();
1232
+ bql_unlock();
1233
return NULL;
1234
}
1235
1236
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
1237
index XXXXXXX..XXXXXXX 100644
1238
--- a/hw/remote/mpqemu-link.c
1239
+++ b/hw/remote/mpqemu-link.c
1240
@@ -XXX,XX +XXX,XX @@
111
*/
1241
*/
112
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
1242
bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
113
{
1243
{
114
- int i;
1244
- bool iolock = qemu_mutex_iothread_locked();
115
-
1245
+ bool drop_bql = bql_locked();
116
*cfg = ts->cfg;
1246
bool iothread = qemu_in_iothread();
117
-
1247
struct iovec send[2] = {};
118
- for (i = 0; i < BUCKETS_COUNT; i++) {
1248
int *fds = NULL;
119
- throttle_unfix_bucket(&cfg->buckets[i]);
1249
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
120
- }
1250
* for IOThread case.
121
}
1251
* Also skip lock handling while in a co-routine in the main context.
122
1252
*/
1253
- if (iolock && !iothread && !qemu_in_coroutine()) {
1254
- qemu_mutex_unlock_iothread();
1255
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1256
+ bql_unlock();
1257
}
1258
1259
if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
1260
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
1261
trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
1262
}
1263
1264
- if (iolock && !iothread && !qemu_in_coroutine()) {
1265
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1266
/* See above comment why skip locking here. */
1267
- qemu_mutex_lock_iothread();
1268
+ bql_lock();
1269
}
1270
1271
return ret;
1272
@@ -XXX,XX +XXX,XX @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
1273
size_t *nfds, Error **errp)
1274
{
1275
struct iovec iov = { .iov_base = buf, .iov_len = len };
1276
- bool iolock = qemu_mutex_iothread_locked();
1277
+ bool drop_bql = bql_locked();
1278
bool iothread = qemu_in_iothread();
1279
int ret = -1;
1280
1281
@@ -XXX,XX +XXX,XX @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
1282
*/
1283
assert(qemu_in_coroutine() || !iothread);
1284
1285
- if (iolock && !iothread && !qemu_in_coroutine()) {
1286
- qemu_mutex_unlock_iothread();
1287
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1288
+ bql_unlock();
1289
}
1290
1291
ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
1292
1293
- if (iolock && !iothread && !qemu_in_coroutine()) {
1294
- qemu_mutex_lock_iothread();
1295
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
1296
+ bql_lock();
1297
}
1298
1299
return (ret <= 0) ? ret : iov.iov_len;
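
    [Note] Both mpqemu helpers above juggle the BQL the same way around
    their blocking channel I/O. In isolation the shape is as follows
    (do_blocking_io() is a placeholder for the qio_channel_* call):

    ssize_t ret;
    bool drop_bql = bql_locked();

    /* Only drop the lock if this thread actually holds it, and skip the
     * dance entirely in IOThreads and coroutines (see comments above). */
    if (drop_bql && !qemu_in_iothread() && !qemu_in_coroutine()) {
        bql_unlock();
    }
    ret = do_blocking_io();
    if (drop_bql && !qemu_in_iothread() && !qemu_in_coroutine()) {
        bql_lock();
    }
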
1300
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
1301
index XXXXXXX..XXXXXXX 100644
1302
--- a/hw/remote/vfio-user-obj.c
1303
+++ b/hw/remote/vfio-user-obj.c
1304
@@ -XXX,XX +XXX,XX @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
1305
}
1306
1307
if (release_lock) {
1308
- qemu_mutex_unlock_iothread();
1309
+ bql_unlock();
1310
release_lock = false;
1311
}
1312
1313
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c
1314
index XXXXXXX..XXXXXXX 100644
1315
--- a/hw/s390x/s390-skeys.c
1316
+++ b/hw/s390x/s390-skeys.c
1317
@@ -XXX,XX +XXX,XX @@ void qmp_dump_skeys(const char *filename, Error **errp)
1318
goto out;
1319
}
1320
1321
- assert(qemu_mutex_iothread_locked());
1322
+ assert(bql_locked());
1323
guest_phys_blocks_init(&guest_phys_blocks);
1324
guest_phys_blocks_append(&guest_phys_blocks);
1325
1326
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
1327
index XXXXXXX..XXXXXXX 100644
1328
--- a/migration/block-dirty-bitmap.c
1329
+++ b/migration/block-dirty-bitmap.c
1330
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_state_pending(void *opaque,
1331
SaveBitmapState *dbms;
1332
uint64_t pending = 0;
1333
1334
- qemu_mutex_lock_iothread();
1335
+ bql_lock();
1336
1337
QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
1338
uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
1339
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_state_pending(void *opaque,
1340
pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
1341
}
1342
1343
- qemu_mutex_unlock_iothread();
1344
+ bql_unlock();
1345
1346
trace_dirty_bitmap_state_pending(pending);
1347
1348
diff --git a/migration/block.c b/migration/block.c
1349
index XXXXXXX..XXXXXXX 100644
1350
--- a/migration/block.c
1351
+++ b/migration/block.c
1352
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
1353
int64_t count;
1354
1355
if (bmds->shared_base) {
1356
- qemu_mutex_lock_iothread();
1357
+ bql_lock();
1358
/* Skip unallocated sectors; intentionally treats failure or
1359
* partial sector as an allocated sector */
1360
while (cur_sector < total_sectors &&
1361
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
1362
}
1363
cur_sector += count >> BDRV_SECTOR_BITS;
1364
}
1365
- qemu_mutex_unlock_iothread();
1366
+ bql_unlock();
1367
}
1368
1369
if (cur_sector >= total_sectors) {
1370
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
1371
* I/O runs in the main loop AioContext (see
1372
* qemu_get_current_aio_context()).
1373
*/
1374
- qemu_mutex_lock_iothread();
1375
+ bql_lock();
1376
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
1377
nr_sectors * BDRV_SECTOR_SIZE);
1378
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
1379
0, blk_mig_read_cb, blk);
1380
- qemu_mutex_unlock_iothread();
1381
+ bql_unlock();
1382
1383
bmds->cur_sector = cur_sector + nr_sectors;
1384
return (bmds->cur_sector >= total_sectors);
1385
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
1386
/* Always called with iothread lock taken for
1387
* simplicity, block_save_complete also calls it.
1388
*/
1389
- qemu_mutex_lock_iothread();
1390
+ bql_lock();
1391
ret = blk_mig_save_dirty_block(f, 1);
1392
- qemu_mutex_unlock_iothread();
1393
+ bql_unlock();
1394
}
1395
if (ret < 0) {
1396
return ret;
1397
@@ -XXX,XX +XXX,XX @@ static void block_state_pending(void *opaque, uint64_t *must_precopy,
1398
/* Estimate pending number of bytes to send */
1399
uint64_t pending;
1400
1401
- qemu_mutex_lock_iothread();
1402
+ bql_lock();
1403
pending = get_remaining_dirty();
1404
- qemu_mutex_unlock_iothread();
1405
+ bql_unlock();
1406
1407
blk_mig_lock();
1408
pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
1409
diff --git a/migration/colo.c b/migration/colo.c
1410
index XXXXXXX..XXXXXXX 100644
1411
--- a/migration/colo.c
1412
+++ b/migration/colo.c
1413
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,
1414
qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL);
1415
bioc->usage = 0;
1416
1417
- qemu_mutex_lock_iothread();
1418
+ bql_lock();
1419
if (failover_get_state() != FAILOVER_STATUS_NONE) {
1420
- qemu_mutex_unlock_iothread();
1421
+ bql_unlock();
1422
goto out;
1423
}
1424
vm_stop_force_state(RUN_STATE_COLO);
1425
- qemu_mutex_unlock_iothread();
1426
+ bql_unlock();
1427
trace_colo_vm_state_change("run", "stop");
1428
/*
1429
* Failover request bh could be called after vm_stop_force_state(),
1430
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,
1431
if (failover_get_state() != FAILOVER_STATUS_NONE) {
1432
goto out;
1433
}
1434
- qemu_mutex_lock_iothread();
1435
+ bql_lock();
1436
1437
replication_do_checkpoint_all(&local_err);
1438
if (local_err) {
1439
- qemu_mutex_unlock_iothread();
1440
+ bql_unlock();
1441
goto out;
1442
}
1443
1444
colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
1445
if (local_err) {
1446
- qemu_mutex_unlock_iothread();
1447
+ bql_unlock();
1448
goto out;
1449
}
1450
/* Note: device state is saved into buffer */
1451
ret = qemu_save_device_state(fb);
1452
1453
- qemu_mutex_unlock_iothread();
1454
+ bql_unlock();
1455
if (ret < 0) {
1456
goto out;
1457
}
1458
@@ -XXX,XX +XXX,XX @@ static int colo_do_checkpoint_transaction(MigrationState *s,
1459
1460
ret = 0;
1461
1462
- qemu_mutex_lock_iothread();
1463
+ bql_lock();
1464
vm_start();
1465
- qemu_mutex_unlock_iothread();
1466
+ bql_unlock();
1467
trace_colo_vm_state_change("stop", "run");
1468
1469
out:
1470
@@ -XXX,XX +XXX,XX @@ static void colo_process_checkpoint(MigrationState *s)
1471
fb = qemu_file_new_output(QIO_CHANNEL(bioc));
1472
object_unref(OBJECT(bioc));
1473
1474
- qemu_mutex_lock_iothread();
1475
+ bql_lock();
1476
replication_start_all(REPLICATION_MODE_PRIMARY, &local_err);
1477
if (local_err) {
1478
- qemu_mutex_unlock_iothread();
1479
+ bql_unlock();
1480
goto out;
1481
}
1482
1483
vm_start();
1484
- qemu_mutex_unlock_iothread();
1485
+ bql_unlock();
1486
trace_colo_vm_state_change("stop", "run");
1487
1488
timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) +
1489
@@ -XXX,XX +XXX,XX @@ out:
1490
1491
void migrate_start_colo_process(MigrationState *s)
1492
{
1493
- qemu_mutex_unlock_iothread();
1494
+ bql_unlock();
1495
qemu_event_init(&s->colo_checkpoint_event, false);
1496
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
1497
colo_checkpoint_notify, s);
1498
1499
qemu_sem_init(&s->colo_exit_sem, 0);
1500
colo_process_checkpoint(s);
1501
- qemu_mutex_lock_iothread();
1502
+ bql_lock();
1503
}
1504
1505
static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1506
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1507
Error *local_err = NULL;
1508
int ret;
1509
1510
- qemu_mutex_lock_iothread();
1511
+ bql_lock();
1512
vm_stop_force_state(RUN_STATE_COLO);
1513
- qemu_mutex_unlock_iothread();
1514
+ bql_unlock();
1515
trace_colo_vm_state_change("run", "stop");
1516
1517
/* FIXME: This is unnecessary for periodic checkpoint mode */
1518
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1519
return;
1520
}
1521
1522
- qemu_mutex_lock_iothread();
1523
+ bql_lock();
1524
cpu_synchronize_all_states();
1525
ret = qemu_loadvm_state_main(mis->from_src_file, mis);
1526
- qemu_mutex_unlock_iothread();
1527
+ bql_unlock();
1528
1529
if (ret < 0) {
1530
error_setg(errp, "Load VM's live state (ram) error");
1531
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1532
return;
1533
}
1534
1535
- qemu_mutex_lock_iothread();
1536
+ bql_lock();
1537
vmstate_loading = true;
1538
colo_flush_ram_cache();
1539
ret = qemu_load_device_state(fb);
1540
if (ret < 0) {
1541
error_setg(errp, "COLO: load device state failed");
1542
vmstate_loading = false;
1543
- qemu_mutex_unlock_iothread();
1544
+ bql_unlock();
1545
return;
1546
}
1547
1548
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1549
if (local_err) {
1550
error_propagate(errp, local_err);
1551
vmstate_loading = false;
1552
- qemu_mutex_unlock_iothread();
1553
+ bql_unlock();
1554
return;
1555
}
1556
1557
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1558
if (local_err) {
1559
error_propagate(errp, local_err);
1560
vmstate_loading = false;
1561
- qemu_mutex_unlock_iothread();
1562
+ bql_unlock();
1563
return;
1564
}
1565
/* Notify all filters of all NIC to do checkpoint */
1566
@@ -XXX,XX +XXX,XX @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
1567
if (local_err) {
1568
error_propagate(errp, local_err);
1569
vmstate_loading = false;
1570
- qemu_mutex_unlock_iothread();
1571
+ bql_unlock();
1572
return;
1573
}
1574
1575
vmstate_loading = false;
1576
vm_start();
1577
- qemu_mutex_unlock_iothread();
1578
+ bql_unlock();
1579
trace_colo_vm_state_change("stop", "run");
1580
1581
if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
1582
@@ -XXX,XX +XXX,XX @@ static void *colo_process_incoming_thread(void *opaque)
1583
fb = qemu_file_new_input(QIO_CHANNEL(bioc));
1584
object_unref(OBJECT(bioc));
1585
1586
- qemu_mutex_lock_iothread();
1587
+ bql_lock();
1588
replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
1589
if (local_err) {
1590
- qemu_mutex_unlock_iothread();
1591
+ bql_unlock();
1592
goto out;
1593
}
1594
vm_start();
1595
- qemu_mutex_unlock_iothread();
1596
+ bql_unlock();
1597
trace_colo_vm_state_change("stop", "run");
1598
1599
colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
1600
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
1601
Error *local_err = NULL;
1602
QemuThread th;
1603
1604
- assert(qemu_mutex_iothread_locked());
1605
+ assert(bql_locked());
1606
1607
if (!migration_incoming_colo_enabled()) {
1608
return 0;
1609
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
1610
qemu_coroutine_yield();
1611
mis->colo_incoming_co = NULL;
1612
1613
- qemu_mutex_unlock_iothread();
1614
+ bql_unlock();
1615
/* Wait checkpoint incoming thread exit before free resource */
1616
qemu_thread_join(&th);
1617
- qemu_mutex_lock_iothread();
1618
+ bql_lock();
1619
1620
/* We hold the global iothread lock, so it is safe here */
1621
colo_release_ram_cache();
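
    [Note] The COLO hunks above release the BQL on every early-exit path by
    hand, which is easy to get wrong. A scoped guard makes the unlocks
    implicit; a sketch, assuming the BQL_LOCK_GUARD() macro from
    include/qemu/main-loop.h and a hypothetical helper name:

    static int checkpoint_step(Error **errp)
    {
        Error *local_err = NULL;

        BQL_LOCK_GUARD();    /* BQL released automatically on any return */

        replication_do_checkpoint_all(&local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -1;       /* no manual bql_unlock() needed */
        }
        return 0;
    }
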
1622
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
1623
index XXXXXXX..XXXXXXX 100644
1624
--- a/migration/dirtyrate.c
1625
+++ b/migration/dirtyrate.c
1626
@@ -XXX,XX +XXX,XX @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
1627
1628
void global_dirty_log_change(unsigned int flag, bool start)
1629
{
1630
- qemu_mutex_lock_iothread();
1631
+ bql_lock();
1632
if (start) {
1633
memory_global_dirty_log_start(flag);
1634
} else {
1635
memory_global_dirty_log_stop(flag);
1636
}
1637
- qemu_mutex_unlock_iothread();
1638
+ bql_unlock();
1639
}
1640
1641
/*
1642
@@ -XXX,XX +XXX,XX @@ void global_dirty_log_change(unsigned int flag, bool start)
1643
*/
1644
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
1645
{
1646
- qemu_mutex_lock_iothread();
1647
+ bql_lock();
1648
memory_global_dirty_log_sync(false);
1649
if (one_shot) {
1650
memory_global_dirty_log_stop(flag);
1651
}
1652
- qemu_mutex_unlock_iothread();
1653
+ bql_unlock();
1654
}
1655
1656
static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
1657
@@ -XXX,XX +XXX,XX @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
1658
int64_t start_time;
1659
DirtyPageRecord dirty_pages;
1660
1661
- qemu_mutex_lock_iothread();
1662
+ bql_lock();
1663
memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
1664
1665
/*
1666
@@ -XXX,XX +XXX,XX @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
1667
* KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
1668
*/
1669
dirtyrate_manual_reset_protect();
1670
- qemu_mutex_unlock_iothread();
1671
+ bql_unlock();
1672
1673
record_dirtypages_bitmap(&dirty_pages, true);
1674
1675
diff --git a/migration/migration.c b/migration/migration.c
1676
index XXXXXXX..XXXXXXX 100644
1677
--- a/migration/migration.c
1678
+++ b/migration/migration.c
1679
@@ -XXX,XX +XXX,XX @@ static void migrate_fd_cleanup(MigrationState *s)
1680
QEMUFile *tmp;
1681
1682
trace_migrate_fd_cleanup();
1683
- qemu_mutex_unlock_iothread();
1684
+ bql_unlock();
1685
if (s->migration_thread_running) {
1686
qemu_thread_join(&s->thread);
1687
s->migration_thread_running = false;
1688
}
1689
- qemu_mutex_lock_iothread();
1690
+ bql_lock();
1691
1692
multifd_save_cleanup();
1693
qemu_mutex_lock(&s->qemu_file_lock);
1694
@@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms, Error **errp)
1695
}
1696
1697
trace_postcopy_start();
1698
- qemu_mutex_lock_iothread();
1699
+ bql_lock();
1700
trace_postcopy_start_set_run();
1701
1702
migration_downtime_start(ms);
1703
@@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms, Error **errp)
1704
1705
migration_downtime_end(ms);
1706
1707
- qemu_mutex_unlock_iothread();
1708
+ bql_unlock();
1709
1710
if (migrate_postcopy_ram()) {
1711
/*
1712
@@ -XXX,XX +XXX,XX @@ fail:
1713
error_report_err(local_err);
1714
}
1715
}
1716
- qemu_mutex_unlock_iothread();
1717
+ bql_unlock();
1718
return -1;
1719
}
1720
1721
@@ -XXX,XX +XXX,XX @@ static int migration_maybe_pause(MigrationState *s,
1722
* wait for the 'pause_sem' semaphore.
1723
*/
1724
if (s->state != MIGRATION_STATUS_CANCELLING) {
1725
- qemu_mutex_unlock_iothread();
1726
+ bql_unlock();
1727
migrate_set_state(&s->state, *current_active_state,
1728
MIGRATION_STATUS_PRE_SWITCHOVER);
1729
qemu_sem_wait(&s->pause_sem);
1730
migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
1731
new_state);
1732
*current_active_state = new_state;
1733
- qemu_mutex_lock_iothread();
1734
+ bql_lock();
1735
}
1736
1737
return s->state == new_state ? 0 : -EINVAL;
1738
@@ -XXX,XX +XXX,XX @@ static int migration_completion_precopy(MigrationState *s,
1739
{
1740
int ret;
1741
1742
- qemu_mutex_lock_iothread();
1743
+ bql_lock();
1744
migration_downtime_start(s);
1745
1746
s->vm_old_state = runstate_get();
1747
@@ -XXX,XX +XXX,XX @@ static int migration_completion_precopy(MigrationState *s,
1748
ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
1749
s->block_inactive);
1750
out_unlock:
1751
- qemu_mutex_unlock_iothread();
1752
+ bql_unlock();
1753
return ret;
1754
}
1755
1756
@@ -XXX,XX +XXX,XX @@ static void migration_completion_postcopy(MigrationState *s)
1757
{
1758
trace_migration_completion_postcopy_end();
1759
1760
- qemu_mutex_lock_iothread();
1761
+ bql_lock();
1762
qemu_savevm_state_complete_postcopy(s->to_dst_file);
1763
- qemu_mutex_unlock_iothread();
1764
+ bql_unlock();
1765
1766
/*
1767
* Shutdown the postcopy fast path thread. This is only needed when dest
1768
@@ -XXX,XX +XXX,XX @@ static void migration_completion_failed(MigrationState *s,
1769
*/
1770
Error *local_err = NULL;
1771
1772
- qemu_mutex_lock_iothread();
1773
+ bql_lock();
1774
bdrv_activate_all(&local_err);
1775
if (local_err) {
1776
error_report_err(local_err);
1777
} else {
1778
s->block_inactive = false;
1779
}
1780
- qemu_mutex_unlock_iothread();
1781
+ bql_unlock();
1782
}
1783
1784
migrate_set_state(&s->state, current_active_state,
1785
@@ -XXX,XX +XXX,XX @@ static void migration_iteration_finish(MigrationState *s)
1786
/* If we enabled cpu throttling for auto-converge, turn it off. */
1787
cpu_throttle_stop();
1788
1789
- qemu_mutex_lock_iothread();
1790
+ bql_lock();
1791
switch (s->state) {
1792
case MIGRATION_STATUS_COMPLETED:
1793
migration_calculate_complete(s);
1794
@@ -XXX,XX +XXX,XX @@ static void migration_iteration_finish(MigrationState *s)
1795
break;
1796
}
1797
migrate_fd_cleanup_schedule(s);
1798
- qemu_mutex_unlock_iothread();
1799
+ bql_unlock();
1800
}
1801
1802
static void bg_migration_iteration_finish(MigrationState *s)
1803
@@ -XXX,XX +XXX,XX @@ static void bg_migration_iteration_finish(MigrationState *s)
1804
*/
1805
ram_write_tracking_stop();
1806
1807
- qemu_mutex_lock_iothread();
1808
+ bql_lock();
1809
switch (s->state) {
1810
case MIGRATION_STATUS_COMPLETED:
1811
migration_calculate_complete(s);
1812
@@ -XXX,XX +XXX,XX @@ static void bg_migration_iteration_finish(MigrationState *s)
1813
}
1814
1815
migrate_fd_cleanup_schedule(s);
1816
- qemu_mutex_unlock_iothread();
1817
+ bql_unlock();
1818
}
1819
1820
/*
1821
@@ -XXX,XX +XXX,XX @@ static void *migration_thread(void *opaque)
1822
object_ref(OBJECT(s));
1823
update_iteration_initial_status(s);
1824
1825
- qemu_mutex_lock_iothread();
1826
+ bql_lock();
1827
qemu_savevm_state_header(s->to_dst_file);
1828
- qemu_mutex_unlock_iothread();
1829
+ bql_unlock();
1830
1831
/*
1832
* If we opened the return path, we need to make sure dst has it
1833
@@ -XXX,XX +XXX,XX @@ static void *migration_thread(void *opaque)
1834
qemu_savevm_send_colo_enable(s->to_dst_file);
1835
}
1836
1837
- qemu_mutex_lock_iothread();
1838
+ bql_lock();
1839
qemu_savevm_state_setup(s->to_dst_file);
1840
- qemu_mutex_unlock_iothread();
1841
+ bql_unlock();
1842
1843
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
1844
MIGRATION_STATUS_ACTIVE);
1845
@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
1846
ram_write_tracking_prepare();
1847
#endif
1848
1849
- qemu_mutex_lock_iothread();
1850
+ bql_lock();
1851
qemu_savevm_state_header(s->to_dst_file);
1852
qemu_savevm_state_setup(s->to_dst_file);
1853
- qemu_mutex_unlock_iothread();
1854
+ bql_unlock();
1855
1856
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
1857
MIGRATION_STATUS_ACTIVE);
1858
@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
1859
trace_migration_thread_setup_complete();
1860
migration_downtime_start(s);
1861
1862
- qemu_mutex_lock_iothread();
1863
+ bql_lock();
1864
1865
s->vm_old_state = runstate_get();
1866
1867
@@ -XXX,XX +XXX,XX @@ static void *bg_migration_thread(void *opaque)
1868
s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
1869
qemu_bh_schedule(s->vm_start_bh);
1870
1871
- qemu_mutex_unlock_iothread();
1872
+ bql_unlock();
1873
1874
while (migration_is_active(s)) {
1875
MigIterateState iter_state = bg_migration_iteration_run(s);
1876
@@ -XXX,XX +XXX,XX @@ fail:
1877
if (early_fail) {
1878
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
1879
MIGRATION_STATUS_FAILED);
1880
- qemu_mutex_unlock_iothread();
1881
+ bql_unlock();
1882
}
1883
1884
bg_migration_iteration_finish(s);
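
    [Note] A recurring shape in the migration hunks above: the BQL is
    dropped before any potentially long sleep or thread join so the main
    loop can keep running, then re-taken afterwards. In isolation:

    bql_unlock();
    qemu_sem_wait(&s->pause_sem);   /* sleeping with the BQL held would
                                     * stall the main loop, or deadlock if
                                     * the waker needs the BQL itself */
    bql_lock();
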
1885
diff --git a/migration/ram.c b/migration/ram.c
1886
index XXXXXXX..XXXXXXX 100644
1887
--- a/migration/ram.c
1888
+++ b/migration/ram.c
1889
@@ -XXX,XX +XXX,XX @@ static int ram_save_setup(QEMUFile *f, void *opaque)
1890
migration_ops = g_malloc0(sizeof(MigrationOps));
1891
migration_ops->ram_save_target_page = ram_save_target_page_legacy;
1892
1893
- qemu_mutex_unlock_iothread();
1894
+ bql_unlock();
1895
ret = multifd_send_sync_main(f);
1896
- qemu_mutex_lock_iothread();
1897
+ bql_lock();
1898
if (ret < 0) {
1899
return ret;
1900
}
1901
@@ -XXX,XX +XXX,XX @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
1902
uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
1903
1904
if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
1905
- qemu_mutex_lock_iothread();
1906
+ bql_lock();
1907
WITH_RCU_READ_LOCK_GUARD() {
1908
migration_bitmap_sync_precopy(rs, false);
1909
}
1910
- qemu_mutex_unlock_iothread();
1911
+ bql_unlock();
1912
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
1913
}
1914
1915
@@ -XXX,XX +XXX,XX @@ void colo_incoming_start_dirty_log(void)
1916
{
1917
RAMBlock *block = NULL;
1918
/* For memory_global_dirty_log_start below. */
1919
- qemu_mutex_lock_iothread();
1920
+ bql_lock();
1921
qemu_mutex_lock_ramlist();
1922
1923
memory_global_dirty_log_sync(false);
1924
@@ -XXX,XX +XXX,XX @@ void colo_incoming_start_dirty_log(void)
1925
}
1926
ram_state->migration_dirty_pages = 0;
1927
qemu_mutex_unlock_ramlist();
1928
- qemu_mutex_unlock_iothread();
1929
+ bql_unlock();
1930
}
1931
1932
/* It is necessary to hold the global lock to call this helper */
1933
diff --git a/replay/replay-internal.c b/replay/replay-internal.c
1934
index XXXXXXX..XXXXXXX 100644
1935
--- a/replay/replay-internal.c
1936
+++ b/replay/replay-internal.c
1937
@@ -XXX,XX +XXX,XX @@ void replay_mutex_lock(void)
1938
{
1939
if (replay_mode != REPLAY_MODE_NONE) {
1940
unsigned long id;
1941
- g_assert(!qemu_mutex_iothread_locked());
1942
+ g_assert(!bql_locked());
1943
g_assert(!replay_mutex_locked());
1944
qemu_mutex_lock(&lock);
1945
id = mutex_tail++;
1946
diff --git a/semihosting/console.c b/semihosting/console.c
1947
index XXXXXXX..XXXXXXX 100644
1948
--- a/semihosting/console.c
1949
+++ b/semihosting/console.c
1950
@@ -XXX,XX +XXX,XX @@ static SemihostingConsole console;
1951
static int console_can_read(void *opaque)
1952
{
1953
SemihostingConsole *c = opaque;
1954
- g_assert(qemu_mutex_iothread_locked());
1955
+ g_assert(bql_locked());
1956
return (int)fifo8_num_free(&c->fifo);
1957
}
1958
1959
@@ -XXX,XX +XXX,XX @@ static void console_wake_up(gpointer data, gpointer user_data)
1960
static void console_read(void *opaque, const uint8_t *buf, int size)
1961
{
1962
SemihostingConsole *c = opaque;
1963
- g_assert(qemu_mutex_iothread_locked());
1964
+ g_assert(bql_locked());
1965
while (size-- && !fifo8_is_full(&c->fifo)) {
1966
fifo8_push(&c->fifo, *buf++);
1967
}
1968
@@ -XXX,XX +XXX,XX @@ bool qemu_semihosting_console_ready(void)
1969
{
1970
SemihostingConsole *c = &console;
1971
1972
- g_assert(qemu_mutex_iothread_locked());
1973
+ g_assert(bql_locked());
1974
return !fifo8_is_empty(&c->fifo);
1975
}
1976
1977
@@ -XXX,XX +XXX,XX @@ void qemu_semihosting_console_block_until_ready(CPUState *cs)
1978
{
1979
SemihostingConsole *c = &console;
1980
1981
- g_assert(qemu_mutex_iothread_locked());
1982
+ g_assert(bql_locked());
1983
1984
/* Block if the fifo is completely empty. */
1985
if (fifo8_is_empty(&c->fifo)) {
1986
diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c
1987
index XXXXXXX..XXXXXXX 100644
1988
--- a/stubs/iothread-lock.c
1989
+++ b/stubs/iothread-lock.c
1990
@@ -XXX,XX +XXX,XX @@
1991
#include "qemu/osdep.h"
1992
#include "qemu/main-loop.h"
1993
1994
-bool qemu_mutex_iothread_locked(void)
1995
+bool bql_locked(void)
1996
{
1997
return false;
1998
}
1999
2000
-void qemu_mutex_lock_iothread_impl(const char *file, int line)
2001
+void bql_lock_impl(const char *file, int line)
2002
{
2003
}
2004
2005
-void qemu_mutex_unlock_iothread(void)
2006
+void bql_unlock(void)
2007
{
2008
}
2009
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
2010
index XXXXXXX..XXXXXXX 100644
2011
--- a/system/cpu-throttle.c
2012
+++ b/system/cpu-throttle.c
2013
@@ -XXX,XX +XXX,XX @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
2014
qemu_cond_timedwait_iothread(cpu->halt_cond,
2015
sleeptime_ns / SCALE_MS);
2016
} else {
2017
- qemu_mutex_unlock_iothread();
2018
+ bql_unlock();
2019
g_usleep(sleeptime_ns / SCALE_US);
2020
- qemu_mutex_lock_iothread();
2021
+ bql_lock();
2022
}
2023
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2024
}
2025
diff --git a/system/cpus.c b/system/cpus.c
2026
index XXXXXXX..XXXXXXX 100644
2027
--- a/system/cpus.c
2028
+++ b/system/cpus.c
2029
@@ -XXX,XX +XXX,XX @@
2030
2031
#endif /* CONFIG_LINUX */
2032
2033
-static QemuMutex qemu_global_mutex;
2034
+/* The Big QEMU Lock (BQL) */
2035
+static QemuMutex bql;
2036
2037
/*
2038
* The chosen accelerator is supposed to register this.
2039
@@ -XXX,XX +XXX,XX @@ void qemu_init_cpu_loop(void)
2040
qemu_init_sigbus();
2041
qemu_cond_init(&qemu_cpu_cond);
2042
qemu_cond_init(&qemu_pause_cond);
2043
- qemu_mutex_init(&qemu_global_mutex);
2044
+ qemu_mutex_init(&bql);
2045
2046
qemu_thread_get_self(&io_thread);
2047
}
2048
2049
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
2050
{
2051
- do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
2052
+ do_run_on_cpu(cpu, func, data, &bql);
2053
}
2054
2055
static void qemu_cpu_stop(CPUState *cpu, bool exit)
2056
@@ -XXX,XX +XXX,XX @@ void qemu_wait_io_event(CPUState *cpu)
2057
slept = true;
2058
qemu_plugin_vcpu_idle_cb(cpu);
2059
}
2060
- qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
2061
+ qemu_cond_wait(cpu->halt_cond, &bql);
2062
}
2063
if (slept) {
2064
qemu_plugin_vcpu_resume_cb(cpu);
2065
@@ -XXX,XX +XXX,XX @@ bool qemu_in_vcpu_thread(void)
2066
return current_cpu && qemu_cpu_is_self(current_cpu);
2067
}
2068
2069
-QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)
2070
+QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked)
2071
2072
-bool qemu_mutex_iothread_locked(void)
2073
+bool bql_locked(void)
2074
{
2075
- return get_iothread_locked();
2076
+ return get_bql_locked();
2077
}
2078
2079
bool qemu_in_main_thread(void)
2080
{
2081
- return qemu_mutex_iothread_locked();
2082
+ return bql_locked();
2083
}
2084
2085
/*
2086
* The BQL is taken from so many places that it is worth profiling the
2087
* callers directly, instead of funneling them all through a single function.
2088
*/
2089
-void qemu_mutex_lock_iothread_impl(const char *file, int line)
2090
+void bql_lock_impl(const char *file, int line)
2091
{
2092
- QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);
2093
+ QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func);
2094
2095
- g_assert(!qemu_mutex_iothread_locked());
2096
- bql_lock(&qemu_global_mutex, file, line);
2097
- set_iothread_locked(true);
2098
+ g_assert(!bql_locked());
2099
+ bql_lock_fn(&bql, file, line);
2100
+ set_bql_locked(true);
2101
}
2102
2103
-void qemu_mutex_unlock_iothread(void)
2104
+void bql_unlock(void)
2105
{
2106
- g_assert(qemu_mutex_iothread_locked());
2107
- set_iothread_locked(false);
2108
- qemu_mutex_unlock(&qemu_global_mutex);
2109
+ g_assert(bql_locked());
2110
+ set_bql_locked(false);
2111
+ qemu_mutex_unlock(&bql);
2112
}
2113
2114
void qemu_cond_wait_iothread(QemuCond *cond)
2115
{
2116
- qemu_cond_wait(cond, &qemu_global_mutex);
2117
+ qemu_cond_wait(cond, &bql);
2118
}
2119
2120
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
2121
{
2122
- qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
2123
+ qemu_cond_timedwait(cond, &bql, ms);
2124
}
2125
2126
/* signal CPU creation */
2127
@@ -XXX,XX +XXX,XX @@ void pause_all_vcpus(void)
2128
replay_mutex_unlock();
2129
2130
while (!all_vcpus_paused()) {
2131
- qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
2132
+ qemu_cond_wait(&qemu_pause_cond, &bql);
2133
CPU_FOREACH(cpu) {
2134
qemu_cpu_kick(cpu);
2135
}
2136
}
2137
2138
- qemu_mutex_unlock_iothread();
2139
+ bql_unlock();
2140
replay_mutex_lock();
2141
- qemu_mutex_lock_iothread();
2142
+ bql_lock();
2143
}
2144
2145
void cpu_resume(CPUState *cpu)
2146
@@ -XXX,XX +XXX,XX @@ void cpu_remove_sync(CPUState *cpu)
2147
cpu->stop = true;
2148
cpu->unplug = true;
2149
qemu_cpu_kick(cpu);
2150
- qemu_mutex_unlock_iothread();
2151
+ bql_unlock();
2152
qemu_thread_join(cpu->thread);
2153
- qemu_mutex_lock_iothread();
2154
+ bql_lock();
2155
}
2156
2157
void cpus_register_accel(const AccelOpsClass *ops)
2158
@@ -XXX,XX +XXX,XX @@ void qemu_init_vcpu(CPUState *cpu)
2159
cpus_accel->create_vcpu_thread(cpu);
2160
2161
while (!cpu->created) {
2162
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2163
+ qemu_cond_wait(&qemu_cpu_cond, &bql);
2164
}
2165
}
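
    [Note] Outside system/cpus.c the mutex itself is now invisible; code
    that needs to sleep on a condition variable tied to the BQL goes
    through the wrapper shown above. A caller-side sketch (my_cond and
    my_ready are illustrative names):

    while (!my_ready) {
        qemu_cond_wait_iothread(&my_cond);   /* atomically drops the BQL,
                                              * sleeps, re-takes it before
                                              * returning */
    }
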
2166
2167
diff --git a/system/dirtylimit.c b/system/dirtylimit.c
2168
index XXXXXXX..XXXXXXX 100644
2169
--- a/system/dirtylimit.c
2170
+++ b/system/dirtylimit.c
2171
@@ -XXX,XX +XXX,XX @@ void vcpu_dirty_rate_stat_stop(void)
2172
{
2173
qatomic_set(&vcpu_dirty_rate_stat->running, 0);
2174
dirtylimit_state_unlock();
2175
- qemu_mutex_unlock_iothread();
2176
+ bql_unlock();
2177
qemu_thread_join(&vcpu_dirty_rate_stat->thread);
2178
- qemu_mutex_lock_iothread();
2179
+ bql_lock();
2180
dirtylimit_state_lock();
2181
}
2182
2183
diff --git a/system/memory.c b/system/memory.c
2184
index XXXXXXX..XXXXXXX 100644
2185
--- a/system/memory.c
2186
+++ b/system/memory.c
2187
@@ -XXX,XX +XXX,XX @@ void memory_region_transaction_commit(void)
2188
AddressSpace *as;
2189
2190
assert(memory_region_transaction_depth);
2191
- assert(qemu_mutex_iothread_locked());
2192
+ assert(bql_locked());
2193
2194
--memory_region_transaction_depth;
2195
if (!memory_region_transaction_depth) {
2196
diff --git a/system/physmem.c b/system/physmem.c
2197
index XXXXXXX..XXXXXXX 100644
2198
--- a/system/physmem.c
2199
+++ b/system/physmem.c
2200
@@ -XXX,XX +XXX,XX @@ bool prepare_mmio_access(MemoryRegion *mr)
2201
{
2202
bool release_lock = false;
2203
2204
- if (!qemu_mutex_iothread_locked()) {
2205
- qemu_mutex_lock_iothread();
2206
+ if (!bql_locked()) {
2207
+ bql_lock();
2208
release_lock = true;
2209
}
2210
if (mr->flush_coalesced_mmio) {
2211
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2212
}
2213
2214
if (release_lock) {
2215
- qemu_mutex_unlock_iothread();
2216
+ bql_unlock();
2217
release_lock = false;
2218
}
2219
2220
@@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2221
}
2222
2223
if (release_lock) {
2224
- qemu_mutex_unlock_iothread();
2225
+ bql_unlock();
2226
release_lock = false;
2227
}
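
    [Note] prepare_mmio_access() and the flatview continue paths above
    implement a conditional acquire: take the BQL only when the caller does
    not already hold it, and remember whether to release it. The shape in
    isolation:

    bool release_lock = false;

    if (!bql_locked()) {
        bql_lock();
        release_lock = true;
    }
    /* ... MMIO access that must run under the BQL ... */
    if (release_lock) {
        bql_unlock();
    }
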
2228
2229
diff --git a/system/runstate.c b/system/runstate.c
2230
index XXXXXXX..XXXXXXX 100644
2231
--- a/system/runstate.c
2232
+++ b/system/runstate.c
2233
@@ -XXX,XX +XXX,XX @@ void qemu_init_subsystems(void)
2234
2235
qemu_init_cpu_list();
2236
qemu_init_cpu_loop();
2237
- qemu_mutex_lock_iothread();
2238
+ bql_lock();
2239
2240
atexit(qemu_run_exit_notifiers);
2241
2242
diff --git a/system/watchpoint.c b/system/watchpoint.c
2243
index XXXXXXX..XXXXXXX 100644
2244
--- a/system/watchpoint.c
2245
+++ b/system/watchpoint.c
2246
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
2247
* Now raise the debug interrupt so that it will
2248
* trigger after the current instruction.
2249
*/
2250
- qemu_mutex_lock_iothread();
2251
+ bql_lock();
2252
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2253
- qemu_mutex_unlock_iothread();
2254
+ bql_unlock();
2255
return;
2256
}
2257
2258
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
2259
index XXXXXXX..XXXXXXX 100644
2260
--- a/target/arm/arm-powerctl.c
2261
+++ b/target/arm/arm-powerctl.c
2262
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
2263
g_free(info);
2264
2265
/* Finally set the power status */
2266
- assert(qemu_mutex_iothread_locked());
2267
+ assert(bql_locked());
2268
target_cpu->power_state = PSCI_ON;
2269
}
2270
2271
@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
2272
ARMCPU *target_cpu;
2273
struct CpuOnInfo *info;
2274
2275
- assert(qemu_mutex_iothread_locked());
2276
+ assert(bql_locked());
2277
2278
DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
2279
"\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
2280
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
2281
target_cpu_state->halted = 0;
2282
2283
/* Finally set the power status */
2284
- assert(qemu_mutex_iothread_locked());
2285
+ assert(bql_locked());
2286
target_cpu->power_state = PSCI_ON;
2287
}
2288
2289
@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_on_and_reset(uint64_t cpuid)
2290
CPUState *target_cpu_state;
2291
ARMCPU *target_cpu;
2292
2293
- assert(qemu_mutex_iothread_locked());
2294
+ assert(bql_locked());
2295
2296
/* Retrieve the cpu we are powering up */
2297
target_cpu_state = arm_get_cpu_by_id(cpuid);
2298
@@ -XXX,XX +XXX,XX @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
2299
{
2300
ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
2301
2302
- assert(qemu_mutex_iothread_locked());
2303
+ assert(bql_locked());
2304
target_cpu->power_state = PSCI_OFF;
2305
target_cpu_state->halted = 1;
2306
target_cpu_state->exception_index = EXCP_HLT;
2307
@@ -XXX,XX +XXX,XX @@ int arm_set_cpu_off(uint64_t cpuid)
2308
CPUState *target_cpu_state;
2309
ARMCPU *target_cpu;
2310
2311
- assert(qemu_mutex_iothread_locked());
2312
+ assert(bql_locked());
2313
2314
DPRINTF("cpu %" PRId64 "\n", cpuid);
2315
2316
@@ -XXX,XX +XXX,XX @@ int arm_reset_cpu(uint64_t cpuid)
2317
CPUState *target_cpu_state;
2318
ARMCPU *target_cpu;
2319
2320
- assert(qemu_mutex_iothread_locked());
2321
+ assert(bql_locked());
2322
2323
DPRINTF("cpu %" PRId64 "\n", cpuid);
2324
2325
diff --git a/target/arm/helper.c b/target/arm/helper.c
2326
index XXXXXXX..XXXXXXX 100644
2327
--- a/target/arm/helper.c
2328
+++ b/target/arm/helper.c
2329
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
2330
* VFIQ are masked unless running at EL0 or EL1, and HCR
2331
* can only be written at EL2.
2332
*/
2333
- g_assert(qemu_mutex_iothread_locked());
2334
+ g_assert(bql_locked());
2335
arm_cpu_update_virq(cpu);
2336
arm_cpu_update_vfiq(cpu);
2337
arm_cpu_update_vserr(cpu);
2338
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
2339
* BQL needs to be held for any modification of
2340
* cs->interrupt_request.
2341
*/
2342
- g_assert(qemu_mutex_iothread_locked());
2343
+ g_assert(bql_locked());
2344
2345
arm_call_pre_el_change_hook(cpu);
2346
2347
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
2348
index XXXXXXX..XXXXXXX 100644
2349
--- a/target/arm/hvf/hvf.c
2350
+++ b/target/arm/hvf/hvf.c
2351
@@ -XXX,XX +XXX,XX @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
2352
* sleeping.
2353
*/
2354
qatomic_set_mb(&cpu->thread_kicked, false);
2355
- qemu_mutex_unlock_iothread();
2356
+ bql_unlock();
2357
pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
2358
- qemu_mutex_lock_iothread();
2359
+ bql_lock();
2360
}
2361
2362
static void hvf_wfi(CPUState *cpu)
2363
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
2364
2365
flush_cpu_state(cpu);
2366
2367
- qemu_mutex_unlock_iothread();
2368
+ bql_unlock();
2369
assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
2370
2371
/* handle VMEXIT */
2372
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
2373
uint32_t ec = syn_get_ec(syndrome);
2374
2375
ret = 0;
2376
- qemu_mutex_lock_iothread();
2377
+ bql_lock();
2378
switch (exit_reason) {
2379
case HV_EXIT_REASON_EXCEPTION:
2380
/* This is the main one, handle below. */
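
    [Note] The HVF loop above (and the KVM hooks below) bracket guest
    execution the same way: release the BQL while the vCPU runs in the
    hypervisor, re-take it to handle the exit. A condensed sketch, where
    run_guest() and handle_exit() are placeholders for hv_vcpu_run() and
    the exit_reason switch:

    static int vcpu_exec_once(CPUState *cpu)
    {
        bql_unlock();             /* don't hold the BQL while the guest runs */
        run_guest(cpu);           /* placeholder for hv_vcpu_run(...) */
        bql_lock();               /* exit handling touches shared device state */
        return handle_exit(cpu);  /* placeholder for the exit switch */
    }
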
2381
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
2382
index XXXXXXX..XXXXXXX 100644
2383
--- a/target/arm/kvm.c
2384
+++ b/target/arm/kvm.c
2385
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
2386
if (run->s.regs.device_irq_level != cpu->device_irq_level) {
2387
switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
2388
2389
- qemu_mutex_lock_iothread();
2390
+ bql_lock();
2391
2392
if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
2393
qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
2394
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
2395
2396
/* We also mark unknown levels as processed to not waste cycles */
2397
cpu->device_irq_level = run->s.regs.device_irq_level;
2398
- qemu_mutex_unlock_iothread();
2399
+ bql_unlock();
2400
}
2401
2402
return MEMTXATTRS_UNSPECIFIED;
2403
@@ -XXX,XX +XXX,XX @@ static bool kvm_arm_handle_debug(ARMCPU *cpu,
2404
env->exception.syndrome = debug_exit->hsr;
2405
env->exception.vaddress = debug_exit->far;
2406
env->exception.target_el = 1;
2407
- qemu_mutex_lock_iothread();
2408
+ bql_lock();
2409
arm_cpu_do_interrupt(cs);
2410
- qemu_mutex_unlock_iothread();
2411
+ bql_unlock();
2412
2413
return false;
2414
}
2415
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
2416
index XXXXXXX..XXXXXXX 100644
2417
--- a/target/arm/ptw.c
2418
+++ b/target/arm/ptw.c
2419
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
2420
#if !TCG_OVERSIZED_GUEST
2421
# error "Unexpected configuration"
2422
#endif
2423
- bool locked = qemu_mutex_iothread_locked();
2424
+ bool locked = bql_locked();
2425
if (!locked) {
2426
- qemu_mutex_lock_iothread();
2427
+ bql_lock();
2428
}
2429
if (ptw->out_be) {
2430
cur_val = ldq_be_p(host);
2431
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
2432
}
2433
}
2434
if (!locked) {
2435
- qemu_mutex_unlock_iothread();
2436
+ bql_unlock();
2437
}
2438
#endif
2439
2440
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
2441
index XXXXXXX..XXXXXXX 100644
2442
--- a/target/arm/tcg/helper-a64.c
2443
+++ b/target/arm/tcg/helper-a64.c
2444
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
2445
goto illegal_return;
2446
}
2447
2448
- qemu_mutex_lock_iothread();
2449
+ bql_lock();
2450
arm_call_pre_el_change_hook(env_archcpu(env));
2451
- qemu_mutex_unlock_iothread();
2452
+ bql_unlock();
2453
2454
if (!return_to_aa64) {
2455
env->aarch64 = false;
2456
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
2457
*/
2458
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
2459
2460
- qemu_mutex_lock_iothread();
2461
+ bql_lock();
2462
arm_call_el_change_hook(env_archcpu(env));
2463
- qemu_mutex_unlock_iothread();
2464
+ bql_unlock();
2465
2466
return;
2467
2468
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
2469
index XXXXXXX..XXXXXXX 100644
2470
--- a/target/arm/tcg/m_helper.c
2471
+++ b/target/arm/tcg/m_helper.c
2472
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
2473
bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
2474
bool take_exception;
2475
2476
- /* Take the iothread lock as we are going to touch the NVIC */
2477
- qemu_mutex_lock_iothread();
2478
+ /* Take the BQL as we are going to touch the NVIC */
2479
+ bql_lock();
2480
2481
/* Check the background context had access to the FPU */
2482
if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
2483
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
2484
take_exception = !stacked_ok &&
2485
armv7m_nvic_can_take_pending_exception(env->nvic);
2486
2487
- qemu_mutex_unlock_iothread();
2488
+ bql_unlock();
2489
2490
if (take_exception) {
2491
raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
2492
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
2493
index XXXXXXX..XXXXXXX 100644
2494
--- a/target/arm/tcg/op_helper.c
2495
+++ b/target/arm/tcg/op_helper.c
2496
@@ -XXX,XX +XXX,XX @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
2497
{
2498
uint32_t mask;
2499
2500
- qemu_mutex_lock_iothread();
2501
+ bql_lock();
2502
arm_call_pre_el_change_hook(env_archcpu(env));
2503
- qemu_mutex_unlock_iothread();
2504
+ bql_unlock();
2505
2506
mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
2507
cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
2508
@@ -XXX,XX +XXX,XX @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
2509
env->regs[15] &= (env->thumb ? ~1 : ~3);
2510
arm_rebuild_hflags(env);
2511
2512
- qemu_mutex_lock_iothread();
2513
+ bql_lock();
2514
arm_call_el_change_hook(env_archcpu(env));
2515
- qemu_mutex_unlock_iothread();
2516
+ bql_unlock();
2517
}
2518
2519
/* Access to user mode registers from privileged modes. */
2520
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
2521
const ARMCPRegInfo *ri = rip;
2522
2523
if (ri->type & ARM_CP_IO) {
2524
- qemu_mutex_lock_iothread();
2525
+ bql_lock();
2526
ri->writefn(env, ri, value);
2527
- qemu_mutex_unlock_iothread();
2528
+ bql_unlock();
2529
} else {
2530
ri->writefn(env, ri, value);
2531
}
2532
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
2533
uint32_t res;
2534
2535
if (ri->type & ARM_CP_IO) {
2536
- qemu_mutex_lock_iothread();
2537
+ bql_lock();
2538
res = ri->readfn(env, ri);
2539
- qemu_mutex_unlock_iothread();
2540
+ bql_unlock();
2541
} else {
2542
res = ri->readfn(env, ri);
2543
}
2544
@@ -XXX,XX +XXX,XX @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
2545
const ARMCPRegInfo *ri = rip;
2546
2547
if (ri->type & ARM_CP_IO) {
2548
- qemu_mutex_lock_iothread();
2549
+ bql_lock();
2550
ri->writefn(env, ri, value);
2551
- qemu_mutex_unlock_iothread();
2552
+ bql_unlock();
2553
} else {
2554
ri->writefn(env, ri, value);
2555
}
2556
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
2557
uint64_t res;
2558
2559
if (ri->type & ARM_CP_IO) {
2560
- qemu_mutex_lock_iothread();
2561
+ bql_lock();
2562
res = ri->readfn(env, ri);
2563
- qemu_mutex_unlock_iothread();
2564
+ bql_unlock();
2565
} else {
2566
res = ri->readfn(env, ri);
2567
}
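
    [Note] The four helpers above repeat the same conditional bracket
    around ri->readfn/ri->writefn: registers marked ARM_CP_IO touch device
    state and so must run under the BQL. An equivalent formulation with the
    scoped guard (assuming BQL_LOCK_GUARD() from include/qemu/main-loop.h)
    avoids the duplicated unlock; a sketch:

    static uint64_t cp_read(CPUARMState *env, const ARMCPRegInfo *ri)
    {
        if (ri->type & ARM_CP_IO) {
            BQL_LOCK_GUARD();            /* released at end of scope */
            return ri->readfn(env, ri);
        }
        return ri->readfn(env, ri);
    }
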
2568
diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c
2569
index XXXXXXX..XXXXXXX 100644
2570
--- a/target/arm/tcg/psci.c
2571
+++ b/target/arm/tcg/psci.c
2572
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
2573
}
2574
target_cpu = ARM_CPU(target_cpu_state);
2575
2576
- g_assert(qemu_mutex_iothread_locked());
2577
+ g_assert(bql_locked());
2578
ret = target_cpu->power_state;
2579
break;
2580
default:
2581
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
2582
index XXXXXXX..XXXXXXX 100644
2583
--- a/target/hppa/int_helper.c
2584
+++ b/target/hppa/int_helper.c
2585
@@ -XXX,XX +XXX,XX @@ void hppa_cpu_alarm_timer(void *opaque)
2586
void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val)
2587
{
2588
env->cr[CR_EIRR] &= ~val;
2589
- qemu_mutex_lock_iothread();
2590
+ bql_lock();
2591
eval_interrupt(env_archcpu(env));
2592
- qemu_mutex_unlock_iothread();
2593
+ bql_unlock();
2594
}
2595
2596
void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
2597
{
2598
env->cr[CR_EIEM] = val;
2599
- qemu_mutex_lock_iothread();
2600
+ bql_lock();
2601
eval_interrupt(env_archcpu(env));
2602
- qemu_mutex_unlock_iothread();
2603
+ bql_unlock();
2604
}
2605
2606
void hppa_cpu_do_interrupt(CPUState *cs)
2607
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
2608
index XXXXXXX..XXXXXXX 100644
2609
--- a/target/i386/hvf/hvf.c
2610
+++ b/target/i386/hvf/hvf.c
2611
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
2612
}
2613
vmx_update_tpr(cpu);
2614
2615
- qemu_mutex_unlock_iothread();
2616
+ bql_unlock();
2617
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
2618
- qemu_mutex_lock_iothread();
2619
+ bql_lock();
2620
return EXCP_HLT;
2621
}
2622
2623
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
2624
rip = rreg(cpu->accel->fd, HV_X86_RIP);
2625
env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);
2626
2627
- qemu_mutex_lock_iothread();
2628
+ bql_lock();
2629
2630
update_apic_tpr(cpu);
2631
current_cpu = cpu;
2632
diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c
2633
index XXXXXXX..XXXXXXX 100644
2634
--- a/target/i386/kvm/hyperv.c
2635
+++ b/target/i386/kvm/hyperv.c
2636
@@ -XXX,XX +XXX,XX @@ void hyperv_x86_synic_update(X86CPU *cpu)
2637
2638
static void async_synic_update(CPUState *cs, run_on_cpu_data data)
2639
{
2640
- qemu_mutex_lock_iothread();
2641
+ bql_lock();
2642
hyperv_x86_synic_update(X86_CPU(cs));
2643
- qemu_mutex_unlock_iothread();
2644
+ bql_unlock();
2645
}
2646
2647
int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
2648
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
2649
index XXXXXXX..XXXXXXX 100644
2650
--- a/target/i386/kvm/kvm.c
2651
+++ b/target/i386/kvm/kvm.c
2652
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2653
/* Inject NMI */
2654
if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
2655
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
2656
- qemu_mutex_lock_iothread();
2657
+ bql_lock();
2658
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
2659
- qemu_mutex_unlock_iothread();
2660
+ bql_unlock();
2661
DPRINTF("injected NMI\n");
2662
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
2663
if (ret < 0) {
2664
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2665
}
2666
}
2667
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
2668
- qemu_mutex_lock_iothread();
2669
+ bql_lock();
2670
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
2671
- qemu_mutex_unlock_iothread();
2672
+ bql_unlock();
2673
DPRINTF("injected SMI\n");
2674
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
2675
if (ret < 0) {
2676
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2677
}
2678
2679
if (!kvm_pic_in_kernel()) {
2680
- qemu_mutex_lock_iothread();
2681
+ bql_lock();
2682
}
2683
2684
/* Force the VCPU out of its inner loop to process any INIT requests
2685
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2686
DPRINTF("setting tpr\n");
2687
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
2688
2689
- qemu_mutex_unlock_iothread();
2690
+ bql_unlock();
2691
}
2692
}
2693
2694
@@ -XXX,XX +XXX,XX @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
2695
/* We need to protect the apic state against concurrent accesses from
2696
* different threads in case the userspace irqchip is used. */
2697
if (!kvm_irqchip_in_kernel()) {
2698
- qemu_mutex_lock_iothread();
2699
+ bql_lock();
2700
}
2701
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
2702
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
2703
if (!kvm_irqchip_in_kernel()) {
2704
- qemu_mutex_unlock_iothread();
2705
+ bql_unlock();
2706
}
2707
return cpu_get_mem_attrs(env);
2708
}
2709
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
2710
switch (run->exit_reason) {
2711
case KVM_EXIT_HLT:
2712
DPRINTF("handle_hlt\n");
2713
- qemu_mutex_lock_iothread();
2714
+ bql_lock();
2715
ret = kvm_handle_halt(cpu);
2716
- qemu_mutex_unlock_iothread();
2717
+ bql_unlock();
2718
break;
2719
case KVM_EXIT_SET_TPR:
2720
ret = 0;
2721
break;
2722
case KVM_EXIT_TPR_ACCESS:
2723
- qemu_mutex_lock_iothread();
2724
+ bql_lock();
2725
ret = kvm_handle_tpr_access(cpu);
2726
- qemu_mutex_unlock_iothread();
2727
+ bql_unlock();
2728
break;
2729
case KVM_EXIT_FAIL_ENTRY:
2730
code = run->fail_entry.hardware_entry_failure_reason;
2731
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
2732
break;
2733
case KVM_EXIT_DEBUG:
2734
DPRINTF("kvm_exit_debug\n");
2735
- qemu_mutex_lock_iothread();
2736
+ bql_lock();
2737
ret = kvm_handle_debug(cpu, &run->debug.arch);
2738
- qemu_mutex_unlock_iothread();
2739
+ bql_unlock();
2740
break;
2741
case KVM_EXIT_HYPERV:
2742
ret = kvm_hv_handle_exit(cpu, &run->hyperv);
2743
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
2744
index XXXXXXX..XXXXXXX 100644
2745
--- a/target/i386/kvm/xen-emu.c
2746
+++ b/target/i386/kvm/xen-emu.c
2747
@@ -XXX,XX +XXX,XX @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)
2748
2749
/* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */
2750
if (!vi->evtchn_upcall_pending) {
2751
- qemu_mutex_lock_iothread();
2752
+ bql_lock();
2753
/*
2754
* Check again now we have the lock, because it may have been
2755
* asserted in the interim. And we don't want to take the lock
2756
@@ -XXX,XX +XXX,XX @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)
2757
X86_CPU(cs)->env.xen_callback_asserted = false;
2758
xen_evtchn_set_callback_level(0);
2759
}
2760
- qemu_mutex_unlock_iothread();
2761
+ bql_unlock();
2762
}
2763
}
2764
2765
@@ -XXX,XX +XXX,XX @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu,
2766
2767
switch (hp.index) {
2768
case HVM_PARAM_CALLBACK_IRQ:
2769
- qemu_mutex_lock_iothread();
2770
+ bql_lock();
2771
err = xen_evtchn_set_callback_param(hp.value);
2772
- qemu_mutex_unlock_iothread();
2773
+ bql_unlock();
2774
xen_set_long_mode(exit->u.hcall.longmode);
2775
break;
2776
default:
2777
@@ -XXX,XX +XXX,XX @@ int kvm_xen_soft_reset(void)
2778
CPUState *cpu;
2779
int err;
2780
2781
- assert(qemu_mutex_iothread_locked());
2782
+ assert(bql_locked());
2783
2784
trace_kvm_xen_soft_reset();
2785
2786
@@ -XXX,XX +XXX,XX @@ static int schedop_shutdown(CPUState *cs, uint64_t arg)
2787
break;
2788
2789
case SHUTDOWN_soft_reset:
2790
- qemu_mutex_lock_iothread();
2791
+ bql_lock();
2792
ret = kvm_xen_soft_reset();
2793
- qemu_mutex_unlock_iothread();
2794
+ bql_unlock();
2795
break;
2796
2797
default:
2798
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
2799
index XXXXXXX..XXXXXXX 100644
2800
--- a/target/i386/nvmm/nvmm-accel-ops.c
2801
+++ b/target/i386/nvmm/nvmm-accel-ops.c
2802
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
2803
2804
rcu_register_thread();
2805
2806
- qemu_mutex_lock_iothread();
2807
+ bql_lock();
2808
qemu_thread_get_self(cpu->thread);
2809
cpu->thread_id = qemu_get_thread_id();
2810
current_cpu = cpu;
2811
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
2812
2813
nvmm_destroy_vcpu(cpu);
2814
cpu_thread_signal_destroyed(cpu);
2815
- qemu_mutex_unlock_iothread();
2816
+ bql_unlock();
2817
rcu_unregister_thread();
2818
return NULL;
2819
}
2820
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
2821
index XXXXXXX..XXXXXXX 100644
2822
--- a/target/i386/nvmm/nvmm-all.c
2823
+++ b/target/i386/nvmm/nvmm-all.c
2824
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_pre_run(CPUState *cpu)
2825
uint8_t tpr;
2826
int ret;
2827
2828
- qemu_mutex_lock_iothread();
2829
+ bql_lock();
2830
2831
tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
2832
if (tpr != qcpu->tpr) {
2833
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_pre_run(CPUState *cpu)
2834
}
2835
}
2836
2837
- qemu_mutex_unlock_iothread();
2838
+ bql_unlock();
2839
}
2840
2841
/*
2842
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
2843
tpr = exit->exitstate.cr8;
2844
if (qcpu->tpr != tpr) {
2845
qcpu->tpr = tpr;
2846
- qemu_mutex_lock_iothread();
2847
+ bql_lock();
2848
cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
2849
- qemu_mutex_unlock_iothread();
2850
+ bql_unlock();
2851
}
2852
}
2853
2854
@@ -XXX,XX +XXX,XX @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
2855
CPUX86State *env = cpu_env(cpu);
2856
int ret = 0;
2857
2858
- qemu_mutex_lock_iothread();
2859
+ bql_lock();
2860
2861
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
2862
(env->eflags & IF_MASK)) &&
2863
@@ -XXX,XX +XXX,XX @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
2864
ret = 1;
2865
}
2866
2867
- qemu_mutex_unlock_iothread();
2868
+ bql_unlock();
2869
2870
return ret;
2871
}
2872
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_loop(CPUState *cpu)
2873
return 0;
2874
}
2875
2876
- qemu_mutex_unlock_iothread();
2877
+ bql_unlock();
2878
cpu_exec_start(cpu);
2879
2880
/*
2881
@@ -XXX,XX +XXX,XX @@ nvmm_vcpu_loop(CPUState *cpu)
2882
error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
2883
exit->reason, exit->u.inv.hwcode);
2884
nvmm_get_registers(cpu);
2885
- qemu_mutex_lock_iothread();
2886
+ bql_lock();
2887
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2888
- qemu_mutex_unlock_iothread();
2889
+ bql_unlock();
2890
ret = -1;
2891
break;
2892
}
2893
} while (ret == 0);
2894
2895
cpu_exec_end(cpu);
2896
- qemu_mutex_lock_iothread();
2897
+ bql_lock();
2898
2899
qatomic_set(&cpu->exit_request, false);
2900
2901
diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c
2902
index XXXXXXX..XXXXXXX 100644
2903
--- a/target/i386/tcg/sysemu/fpu_helper.c
2904
+++ b/target/i386/tcg/sysemu/fpu_helper.c
2905
@@ -XXX,XX +XXX,XX @@ void x86_register_ferr_irq(qemu_irq irq)
2906
void fpu_check_raise_ferr_irq(CPUX86State *env)
2907
{
2908
if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
2909
- qemu_mutex_lock_iothread();
2910
+ bql_lock();
2911
qemu_irq_raise(ferr_irq);
2912
- qemu_mutex_unlock_iothread();
2913
+ bql_unlock();
2914
return;
2915
}
2916
}
2917
@@ -XXX,XX +XXX,XX @@ void cpu_set_ignne(void)
2918
{
2919
CPUX86State *env = &X86_CPU(first_cpu)->env;
2920
2921
- assert(qemu_mutex_iothread_locked());
2922
+ assert(bql_locked());
2923
2924
env->hflags2 |= HF2_IGNNE_MASK;
2925
/*
2926
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
2927
index XXXXXXX..XXXXXXX 100644
2928
--- a/target/i386/tcg/sysemu/misc_helper.c
2929
+++ b/target/i386/tcg/sysemu/misc_helper.c
2930
@@ -XXX,XX +XXX,XX @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
2931
break;
2932
case 8:
2933
if (!(env->hflags2 & HF2_VINTR_MASK)) {
2934
- qemu_mutex_lock_iothread();
2935
+ bql_lock();
2936
cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
2937
- qemu_mutex_unlock_iothread();
2938
+ bql_unlock();
2939
}
2940
env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
2941
2942
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
2943
index XXXXXXX..XXXXXXX 100644
2944
--- a/target/i386/whpx/whpx-accel-ops.c
2945
+++ b/target/i386/whpx/whpx-accel-ops.c
2946
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)
2947
2948
rcu_register_thread();
2949
2950
- qemu_mutex_lock_iothread();
2951
+ bql_lock();
2952
qemu_thread_get_self(cpu->thread);
2953
cpu->thread_id = qemu_get_thread_id();
2954
current_cpu = cpu;
2955
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)
2956
2957
whpx_destroy_vcpu(cpu);
2958
cpu_thread_signal_destroyed(cpu);
2959
- qemu_mutex_unlock_iothread();
2960
+ bql_unlock();
2961
rcu_unregister_thread();
2962
return NULL;
2963
}
2964
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
2965
index XXXXXXX..XXXXXXX 100644
2966
--- a/target/i386/whpx/whpx-all.c
2967
+++ b/target/i386/whpx/whpx-all.c
2968
@@ -XXX,XX +XXX,XX @@ static int whpx_first_vcpu_starting(CPUState *cpu)
2969
struct whpx_state *whpx = &whpx_global;
2970
HRESULT hr;
2971
2972
- g_assert(qemu_mutex_iothread_locked());
2973
+ g_assert(bql_locked());
2974
2975
if (!QTAILQ_EMPTY(&cpu->breakpoints) ||
2976
(whpx->breakpoints.breakpoints &&
2977
@@ -XXX,XX +XXX,XX @@ static int whpx_handle_halt(CPUState *cpu)
2978
CPUX86State *env = cpu_env(cpu);
2979
int ret = 0;
2980
2981
- qemu_mutex_lock_iothread();
2982
+ bql_lock();
2983
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
2984
(env->eflags & IF_MASK)) &&
2985
!(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
2986
@@ -XXX,XX +XXX,XX @@ static int whpx_handle_halt(CPUState *cpu)
2987
cpu->halted = true;
2988
ret = 1;
2989
}
2990
- qemu_mutex_unlock_iothread();
2991
+ bql_unlock();
2992
2993
return ret;
2994
}
2995
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_pre_run(CPUState *cpu)
2996
memset(&new_int, 0, sizeof(new_int));
2997
memset(reg_values, 0, sizeof(reg_values));
2998
2999
- qemu_mutex_lock_iothread();
3000
+ bql_lock();
3001
3002
/* Inject NMI */
3003
if (!vcpu->interruption_pending &&
3004
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_pre_run(CPUState *cpu)
3005
reg_count += 1;
3006
}
3007
3008
- qemu_mutex_unlock_iothread();
3009
+ bql_unlock();
3010
vcpu->ready_for_pic_interrupt = false;
3011
3012
if (reg_count) {
3013
@@ -XXX,XX +XXX,XX @@ static void whpx_vcpu_post_run(CPUState *cpu)
3014
uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
3015
if (vcpu->tpr != tpr) {
3016
vcpu->tpr = tpr;
3017
- qemu_mutex_lock_iothread();
3018
+ bql_lock();
3019
cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr));
3020
- qemu_mutex_unlock_iothread();
3021
+ bql_unlock();
3022
}
3023
3024
vcpu->interruption_pending =
3025
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
3026
WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE;
3027
int ret;
3028
3029
- g_assert(qemu_mutex_iothread_locked());
3030
+ g_assert(bql_locked());
3031
3032
if (whpx->running_cpus++ == 0) {
3033
/* Insert breakpoints into memory, update exception exit bitmap. */
3034
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
3035
}
3036
}
3037
3038
- qemu_mutex_unlock_iothread();
3039
+ bql_unlock();
3040
3041
if (exclusive_step_mode != WHPX_STEP_NONE) {
3042
start_exclusive();
3043
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
3044
error_report("WHPX: Unexpected VP exit code %d",
3045
vcpu->exit_ctx.ExitReason);
3046
whpx_get_registers(cpu);
3047
- qemu_mutex_lock_iothread();
3048
+ bql_lock();
3049
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3050
- qemu_mutex_unlock_iothread();
3051
+ bql_unlock();
3052
break;
3053
}
3054
3055
@@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu)
3056
cpu_exec_end(cpu);
3057
}
3058
3059
- qemu_mutex_lock_iothread();
3060
+ bql_lock();
3061
current_cpu = cpu;
3062
3063
if (--whpx->running_cpus == 0) {
3064
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
3065
index XXXXXXX..XXXXXXX 100644
3066
--- a/target/loongarch/tcg/csr_helper.c
3067
+++ b/target/loongarch/tcg/csr_helper.c
3068
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val)
3069
int64_t old_v = 0;
3070
3071
if (val & 0x1) {
3072
- qemu_mutex_lock_iothread();
3073
+ bql_lock();
3074
loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0);
3075
- qemu_mutex_unlock_iothread();
3076
+ bql_unlock();
3077
}
3078
return old_v;
3079
}
3080
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
3081
index XXXXXXX..XXXXXXX 100644
3082
--- a/target/mips/kvm.c
3083
+++ b/target/mips/kvm.c
3084
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
3085
int r;
3086
struct kvm_mips_interrupt intr;
3087
3088
- qemu_mutex_lock_iothread();
3089
+ bql_lock();
3090
3091
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3092
cpu_mips_io_interrupts_pending(cpu)) {
3093
@@ -XXX,XX +XXX,XX @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
3094
}
3095
}
3096
3097
- qemu_mutex_unlock_iothread();
3098
+ bql_unlock();
3099
}
3100
3101
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
3102
diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c
3103
index XXXXXXX..XXXXXXX 100644
3104
--- a/target/mips/tcg/sysemu/cp0_helper.c
3105
+++ b/target/mips/tcg/sysemu/cp0_helper.c
3106
@@ -XXX,XX +XXX,XX @@ static inline void mips_vpe_wake(MIPSCPU *c)
3107
* because there might be other conditions that state that c should
3108
* be sleeping.
3109
*/
3110
- qemu_mutex_lock_iothread();
3111
+ bql_lock();
3112
cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
3113
- qemu_mutex_unlock_iothread();
3114
+ bql_unlock();
3115
}
3116
3117
static inline void mips_vpe_sleep(MIPSCPU *cpu)
3118
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
3119
index XXXXXXX..XXXXXXX 100644
3120
--- a/target/openrisc/sys_helper.c
3121
+++ b/target/openrisc/sys_helper.c
3122
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
3123
break;
3124
case TO_SPR(9, 0): /* PICMR */
3125
env->picmr = rb;
3126
- qemu_mutex_lock_iothread();
3127
+ bql_lock();
3128
if (env->picsr & env->picmr) {
3129
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
3130
} else {
3131
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
3132
}
3133
- qemu_mutex_unlock_iothread();
3134
+ bql_unlock();
3135
break;
3136
case TO_SPR(9, 2): /* PICSR */
3137
env->picsr &= ~rb;
3138
break;
3139
case TO_SPR(10, 0): /* TTMR */
3140
{
3141
- qemu_mutex_lock_iothread();
3142
+ bql_lock();
3143
if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) {
3144
switch (rb & TTMR_M) {
3145
case TIMER_NONE:
3146
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
3147
cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
3148
}
3149
cpu_openrisc_timer_update(cpu);
3150
- qemu_mutex_unlock_iothread();
3151
+ bql_unlock();
3152
}
3153
break;
3154
3155
case TO_SPR(10, 1): /* TTCR */
3156
- qemu_mutex_lock_iothread();
3157
+ bql_lock();
3158
cpu_openrisc_count_set(cpu, rb);
3159
cpu_openrisc_timer_update(cpu);
3160
- qemu_mutex_unlock_iothread();
3161
+ bql_unlock();
3162
break;
3163
}
3164
#endif
3165
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
3166
return env->ttmr;
3167
3168
case TO_SPR(10, 1): /* TTCR */
3169
- qemu_mutex_lock_iothread();
3170
+ bql_lock();
3171
cpu_openrisc_count_update(cpu);
3172
- qemu_mutex_unlock_iothread();
3173
+ bql_unlock();
3174
return cpu_openrisc_count_get(cpu);
3175
}
3176
#endif
3177
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
3178
index XXXXXXX..XXXXXXX 100644
3179
--- a/target/ppc/excp_helper.c
3180
+++ b/target/ppc/excp_helper.c
3181
@@ -XXX,XX +XXX,XX @@ void helper_msgsnd(target_ulong rb)
3182
return;
3183
}
3184
3185
- qemu_mutex_lock_iothread();
3186
+ bql_lock();
3187
CPU_FOREACH(cs) {
3188
PowerPCCPU *cpu = POWERPC_CPU(cs);
3189
CPUPPCState *cenv = &cpu->env;
3190
@@ -XXX,XX +XXX,XX @@ void helper_msgsnd(target_ulong rb)
3191
ppc_set_irq(cpu, irq, 1);
3192
}
3193
}
3194
- qemu_mutex_unlock_iothread();
3195
+ bql_unlock();
3196
}
3197
3198
/* Server Processor Control */
3199
@@ -XXX,XX +XXX,XX @@ static void book3s_msgsnd_common(int pir, int irq)
3200
{
3201
CPUState *cs;
3202
3203
- qemu_mutex_lock_iothread();
3204
+ bql_lock();
3205
CPU_FOREACH(cs) {
3206
PowerPCCPU *cpu = POWERPC_CPU(cs);
3207
CPUPPCState *cenv = &cpu->env;
3208
@@ -XXX,XX +XXX,XX @@ static void book3s_msgsnd_common(int pir, int irq)
3209
ppc_set_irq(cpu, irq, 1);
3210
}
3211
}
3212
- qemu_mutex_unlock_iothread();
3213
+ bql_unlock();
3214
}
3215
3216
void helper_book3s_msgsnd(target_ulong rb)
3217
@@ -XXX,XX +XXX,XX @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
3218
}
3219
3220
/* Does iothread need to be locked for walking CPU list? */
3221
- qemu_mutex_lock_iothread();
3222
+ bql_lock();
3223
THREAD_SIBLING_FOREACH(cs, ccs) {
3224
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3225
uint32_t thread_id = ppc_cpu_tir(ccpu);
3226
3227
if (ttir == thread_id) {
3228
ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
3229
- qemu_mutex_unlock_iothread();
3230
+ bql_unlock();
3231
return;
3232
}
3233
}
3234
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
3235
index XXXXXXX..XXXXXXX 100644
3236
--- a/target/ppc/kvm.c
3237
+++ b/target/ppc/kvm.c
3238
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3239
CPUPPCState *env = &cpu->env;
3240
int ret;
3241
3242
- qemu_mutex_lock_iothread();
3243
+ bql_lock();
3244
3245
switch (run->exit_reason) {
3246
case KVM_EXIT_DCR:
3247
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3248
break;
3249
}
3250
3251
- qemu_mutex_unlock_iothread();
3252
+ bql_unlock();
3253
return ret;
3254
}
3255
3256
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
3257
index XXXXXXX..XXXXXXX 100644
3258
--- a/target/ppc/misc_helper.c
3259
+++ b/target/ppc/misc_helper.c
3260
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dpdes(CPUPPCState *env)
3261
return dpdes;
3262
}
3263
3264
- qemu_mutex_lock_iothread();
3265
+ bql_lock();
3266
THREAD_SIBLING_FOREACH(cs, ccs) {
3267
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3268
CPUPPCState *cenv = &ccpu->env;
3269
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dpdes(CPUPPCState *env)
3270
dpdes |= (0x1 << thread_id);
3271
}
3272
}
3273
- qemu_mutex_unlock_iothread();
3274
+ bql_unlock();
3275
3276
return dpdes;
3277
}
3278
@@ -XXX,XX +XXX,XX @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
3279
}
3280
3281
/* Does iothread need to be locked for walking CPU list? */
3282
- qemu_mutex_lock_iothread();
3283
+ bql_lock();
3284
THREAD_SIBLING_FOREACH(cs, ccs) {
3285
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3286
uint32_t thread_id = ppc_cpu_tir(ccpu);
3287
3288
ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
3289
}
3290
- qemu_mutex_unlock_iothread();
3291
+ bql_unlock();
3292
}
3293
#endif /* defined(TARGET_PPC64) */
3294
3295
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
3296
index XXXXXXX..XXXXXXX 100644
3297
--- a/target/ppc/timebase_helper.c
3298
+++ b/target/ppc/timebase_helper.c
3299
@@ -XXX,XX +XXX,XX @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
3300
} else {
3301
int ret;
3302
3303
- qemu_mutex_lock_iothread();
3304
+ bql_lock();
3305
ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
3306
- qemu_mutex_unlock_iothread();
3307
+ bql_unlock();
3308
if (unlikely(ret != 0)) {
3309
qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
3310
(uint32_t)dcrn, (uint32_t)dcrn);
3311
@@ -XXX,XX +XXX,XX @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
3312
POWERPC_EXCP_INVAL_INVAL, GETPC());
3313
} else {
3314
int ret;
3315
- qemu_mutex_lock_iothread();
3316
+ bql_lock();
3317
ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
3318
- qemu_mutex_unlock_iothread();
3319
+ bql_unlock();
3320
if (unlikely(ret != 0)) {
3321
qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
3322
(uint32_t)dcrn, (uint32_t)dcrn);
3323
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
3324
index XXXXXXX..XXXXXXX 100644
3325
--- a/target/s390x/kvm/kvm.c
3326
+++ b/target/s390x/kvm/kvm.c
3327
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3328
S390CPU *cpu = S390_CPU(cs);
3329
int ret = 0;
3330
3331
- qemu_mutex_lock_iothread();
3332
+ bql_lock();
3333
3334
kvm_cpu_synchronize_state(cs);
3335
3336
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3337
fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
3338
break;
3339
}
3340
- qemu_mutex_unlock_iothread();
3341
+ bql_unlock();
3342
3343
if (ret == 0) {
3344
ret = EXCP_INTERRUPT;
3345
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
3346
index XXXXXXX..XXXXXXX 100644
3347
--- a/target/s390x/tcg/misc_helper.c
3348
+++ b/target/s390x/tcg/misc_helper.c
3349
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(stck)(CPUS390XState *env)
3350
/* SCLP service call */
3351
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
3352
{
3353
- qemu_mutex_lock_iothread();
3354
+ bql_lock();
3355
int r = sclp_service_call(env_archcpu(env), r1, r2);
3356
- qemu_mutex_unlock_iothread();
3357
+ bql_unlock();
3358
if (r < 0) {
3359
tcg_s390_program_interrupt(env, -r, GETPC());
3360
}
3361
@@ -XXX,XX +XXX,XX @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
3362
switch (num) {
3363
case 0x500:
3364
/* KVM hypercall */
3365
- qemu_mutex_lock_iothread();
3366
+ bql_lock();
3367
r = s390_virtio_hypercall(env);
3368
- qemu_mutex_unlock_iothread();
3369
+ bql_unlock();
3370
break;
3371
case 0x44:
3372
/* yield */
3373
@@ -XXX,XX +XXX,XX @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
3374
break;
3375
case 0x308:
3376
/* ipl */
3377
- qemu_mutex_lock_iothread();
3378
+ bql_lock();
3379
handle_diag_308(env, r1, r3, GETPC());
3380
- qemu_mutex_unlock_iothread();
3381
+ bql_unlock();
3382
r = 0;
3383
break;
3384
case 0x288:
3385
@@ -XXX,XX +XXX,XX @@ static void update_ckc_timer(CPUS390XState *env)
3386
3387
/* stop the timer and remove pending CKC IRQs */
3388
timer_del(env->tod_timer);
3389
- g_assert(qemu_mutex_iothread_locked());
3390
+ g_assert(bql_locked());
3391
env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
3392
3393
/* the tod has to exceed the ckc, this can never happen if ckc is all 1's */
3394
@@ -XXX,XX +XXX,XX @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
3395
{
3396
env->ckc = ckc;
3397
3398
- qemu_mutex_lock_iothread();
3399
+ bql_lock();
3400
update_ckc_timer(env);
3401
- qemu_mutex_unlock_iothread();
3402
+ bql_unlock();
3403
}
3404
3405
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
3406
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
3407
.low = tod_low,
3408
};
3409
3410
- qemu_mutex_lock_iothread();
3411
+ bql_lock();
3412
tdc->set(td, &tod, &error_abort);
3413
- qemu_mutex_unlock_iothread();
3414
+ bql_unlock();
3415
return 0;
3416
}
3417
3418
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
3419
int cc;
3420
3421
/* TODO: needed to inject interrupts - push further down */
3422
- qemu_mutex_lock_iothread();
3423
+ bql_lock();
3424
cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
3425
- qemu_mutex_unlock_iothread();
3426
+ bql_unlock();
3427
3428
return cc;
3429
}
3430
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
3431
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
3432
{
3433
S390CPU *cpu = env_archcpu(env);
3434
- qemu_mutex_lock_iothread();
3435
+ bql_lock();
3436
ioinst_handle_xsch(cpu, r1, GETPC());
3437
- qemu_mutex_unlock_iothread();
3438
+ bql_unlock();
3439
}
3440
3441
void HELPER(csch)(CPUS390XState *env, uint64_t r1)
3442
{
3443
S390CPU *cpu = env_archcpu(env);
3444
- qemu_mutex_lock_iothread();
3445
+ bql_lock();
3446
ioinst_handle_csch(cpu, r1, GETPC());
3447
- qemu_mutex_unlock_iothread();
3448
+ bql_unlock();
3449
}
3450
3451
void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
3452
{
3453
S390CPU *cpu = env_archcpu(env);
3454
- qemu_mutex_lock_iothread();
3455
+ bql_lock();
3456
ioinst_handle_hsch(cpu, r1, GETPC());
3457
- qemu_mutex_unlock_iothread();
3458
+ bql_unlock();
3459
}
3460
3461
void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
3462
{
3463
S390CPU *cpu = env_archcpu(env);
3464
- qemu_mutex_lock_iothread();
3465
+ bql_lock();
3466
ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
3467
- qemu_mutex_unlock_iothread();
3468
+ bql_unlock();
3469
}
3470
3471
void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
3472
{
3473
S390CPU *cpu = env_archcpu(env);
3474
- qemu_mutex_lock_iothread();
3475
+ bql_lock();
3476
ioinst_handle_rchp(cpu, r1, GETPC());
3477
- qemu_mutex_unlock_iothread();
3478
+ bql_unlock();
3479
}
3480
3481
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
3482
{
3483
S390CPU *cpu = env_archcpu(env);
3484
- qemu_mutex_lock_iothread();
3485
+ bql_lock();
3486
ioinst_handle_rsch(cpu, r1, GETPC());
3487
- qemu_mutex_unlock_iothread();
3488
+ bql_unlock();
3489
}
3490
3491
void HELPER(sal)(CPUS390XState *env, uint64_t r1)
3492
{
3493
S390CPU *cpu = env_archcpu(env);
3494
3495
- qemu_mutex_lock_iothread();
3496
+ bql_lock();
3497
ioinst_handle_sal(cpu, r1, GETPC());
3498
- qemu_mutex_unlock_iothread();
3499
+ bql_unlock();
3500
}
3501
3502
void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
3503
{
3504
S390CPU *cpu = env_archcpu(env);
3505
3506
- qemu_mutex_lock_iothread();
3507
+ bql_lock();
3508
ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
3509
- qemu_mutex_unlock_iothread();
3510
+ bql_unlock();
3511
}
3512
3513
void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
3514
{
3515
S390CPU *cpu = env_archcpu(env);
3516
- qemu_mutex_lock_iothread();
3517
+ bql_lock();
3518
ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
3519
- qemu_mutex_unlock_iothread();
3520
+ bql_unlock();
3521
}
3522
3523
void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
3524
{
3525
S390CPU *cpu = env_archcpu(env);
3526
3527
- qemu_mutex_lock_iothread();
3528
+ bql_lock();
3529
ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
3530
- qemu_mutex_unlock_iothread();
3531
+ bql_unlock();
3532
}
3533
3534
void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
3535
{
3536
S390CPU *cpu = env_archcpu(env);
3537
- qemu_mutex_lock_iothread();
3538
+ bql_lock();
3539
ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
3540
- qemu_mutex_unlock_iothread();
3541
+ bql_unlock();
3542
}
3543
3544
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
3545
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
3546
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
3547
}
3548
3549
- qemu_mutex_lock_iothread();
3550
+ bql_lock();
3551
io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
3552
if (!io) {
3553
- qemu_mutex_unlock_iothread();
3554
+ bql_unlock();
3555
return 0;
3556
}
3557
3558
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
3559
if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
3560
/* writing failed, reinject and properly clean up */
3561
s390_io_interrupt(io->id, io->nr, io->parm, io->word);
3562
- qemu_mutex_unlock_iothread();
3563
+ bql_unlock();
3564
g_free(io);
3565
s390_cpu_virt_mem_handle_exc(cpu, ra);
3566
return 0;
3567
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
3568
}
3569
3570
g_free(io);
3571
- qemu_mutex_unlock_iothread();
3572
+ bql_unlock();
3573
return 1;
3574
}
3575
3576
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
3577
{
3578
S390CPU *cpu = env_archcpu(env);
3579
- qemu_mutex_lock_iothread();
3580
+ bql_lock();
3581
ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
3582
- qemu_mutex_unlock_iothread();
3583
+ bql_unlock();
3584
}
3585
3586
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
3587
{
3588
S390CPU *cpu = env_archcpu(env);
3589
- qemu_mutex_lock_iothread();
3590
+ bql_lock();
3591
ioinst_handle_chsc(cpu, inst >> 16, GETPC());
3592
- qemu_mutex_unlock_iothread();
3593
+ bql_unlock();
3594
}
3595
#endif
3596
3597
@@ -XXX,XX +XXX,XX @@ void HELPER(clp)(CPUS390XState *env, uint32_t r2)
3598
{
3599
S390CPU *cpu = env_archcpu(env);
3600
3601
- qemu_mutex_lock_iothread();
3602
+ bql_lock();
3603
clp_service_call(cpu, r2, GETPC());
3604
- qemu_mutex_unlock_iothread();
3605
+ bql_unlock();
3606
}
3607
3608
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
3609
{
3610
S390CPU *cpu = env_archcpu(env);
3611
3612
- qemu_mutex_lock_iothread();
3613
+ bql_lock();
3614
pcilg_service_call(cpu, r1, r2, GETPC());
3615
- qemu_mutex_unlock_iothread();
3616
+ bql_unlock();
3617
}
3618
3619
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
3620
{
3621
S390CPU *cpu = env_archcpu(env);
3622
3623
- qemu_mutex_lock_iothread();
3624
+ bql_lock();
3625
pcistg_service_call(cpu, r1, r2, GETPC());
3626
- qemu_mutex_unlock_iothread();
3627
+ bql_unlock();
3628
}
3629
3630
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
3631
@@ -XXX,XX +XXX,XX @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
3632
{
3633
S390CPU *cpu = env_archcpu(env);
3634
3635
- qemu_mutex_lock_iothread();
3636
+ bql_lock();
3637
stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
3638
- qemu_mutex_unlock_iothread();
3639
+ bql_unlock();
3640
}
3641
3642
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
3643
@@ -XXX,XX +XXX,XX @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
3644
S390CPU *cpu = env_archcpu(env);
3645
int r;
3646
3647
- qemu_mutex_lock_iothread();
3648
+ bql_lock();
3649
r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff);
3650
- qemu_mutex_unlock_iothread();
3651
+ bql_unlock();
3652
/* css_do_sic() may actually return a PGM_xxx value to inject */
3653
if (r) {
3654
tcg_s390_program_interrupt(env, -r, GETPC());
3655
@@ -XXX,XX +XXX,XX @@ void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
3656
{
3657
S390CPU *cpu = env_archcpu(env);
3658
3659
- qemu_mutex_lock_iothread();
3660
+ bql_lock();
3661
rpcit_service_call(cpu, r1, r2, GETPC());
3662
- qemu_mutex_unlock_iothread();
3663
+ bql_unlock();
3664
}
3665
3666
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
3667
@@ -XXX,XX +XXX,XX @@ void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
3668
{
3669
S390CPU *cpu = env_archcpu(env);
3670
3671
- qemu_mutex_lock_iothread();
3672
+ bql_lock();
3673
pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
3674
- qemu_mutex_unlock_iothread();
3675
+ bql_unlock();
3676
}
3677
3678
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
3679
@@ -XXX,XX +XXX,XX @@ void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
3680
{
3681
S390CPU *cpu = env_archcpu(env);
3682
3683
- qemu_mutex_lock_iothread();
3684
+ bql_lock();
3685
mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
3686
- qemu_mutex_unlock_iothread();
3687
+ bql_unlock();
3688
}
3689
#endif
3690
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
3691
index XXXXXXX..XXXXXXX 100644
3692
--- a/target/sparc/int32_helper.c
3693
+++ b/target/sparc/int32_helper.c
3694
@@ -XXX,XX +XXX,XX @@ void cpu_check_irqs(CPUSPARCState *env)
3695
CPUState *cs;
3696
3697
/* We should be holding the BQL before we mess with IRQs */
3698
- g_assert(qemu_mutex_iothread_locked());
3699
+ g_assert(bql_locked());
3700
3701
if (env->pil_in && (env->interrupt_index == 0 ||
3702
(env->interrupt_index & ~15) == TT_EXTINT)) {
3703
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
3704
index XXXXXXX..XXXXXXX 100644
3705
--- a/target/sparc/int64_helper.c
3706
+++ b/target/sparc/int64_helper.c
3707
@@ -XXX,XX +XXX,XX @@ void cpu_check_irqs(CPUSPARCState *env)
3708
(env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));
3709
3710
/* We should be holding the BQL before we mess with IRQs */
3711
- g_assert(qemu_mutex_iothread_locked());
3712
+ g_assert(bql_locked());
3713
3714
/* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
3715
if (env->ivec_status & 0x20) {
3716
@@ -XXX,XX +XXX,XX @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value)
3717
env->softint = value;
3718
#if !defined(CONFIG_USER_ONLY)
3719
if (cpu_interrupts_enabled(env)) {
3720
- qemu_mutex_lock_iothread();
3721
+ bql_lock();
3722
cpu_check_irqs(env);
3723
- qemu_mutex_unlock_iothread();
3724
+ bql_unlock();
3725
}
3726
#endif
3727
return true;
3728
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
3729
index XXXXXXX..XXXXXXX 100644
3730
--- a/target/sparc/win_helper.c
3731
+++ b/target/sparc/win_helper.c
3732
@@ -XXX,XX +XXX,XX @@ void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
3733
cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
3734
} else {
3735
/* cpu_put_psr may trigger interrupts, hence BQL */
3736
- qemu_mutex_lock_iothread();
3737
+ bql_lock();
3738
cpu_put_psr(env, new_psr);
3739
- qemu_mutex_unlock_iothread();
3740
+ bql_unlock();
3741
}
3742
}
3743
3744
@@ -XXX,XX +XXX,XX @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)
3745
3746
#if !defined(CONFIG_USER_ONLY)
3747
if (cpu_interrupts_enabled(env)) {
3748
- qemu_mutex_lock_iothread();
3749
+ bql_lock();
3750
cpu_check_irqs(env);
3751
- qemu_mutex_unlock_iothread();
3752
+ bql_unlock();
3753
}
3754
#endif
3755
}
3756
@@ -XXX,XX +XXX,XX @@ void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
3757
env->psrpil = new_pil;
3758
3759
if (cpu_interrupts_enabled(env)) {
3760
- qemu_mutex_lock_iothread();
3761
+ bql_lock();
3762
cpu_check_irqs(env);
3763
- qemu_mutex_unlock_iothread();
3764
+ bql_unlock();
3765
}
3766
#endif
3767
}
3768
@@ -XXX,XX +XXX,XX @@ void helper_done(CPUSPARCState *env)
3769
3770
#if !defined(CONFIG_USER_ONLY)
3771
if (cpu_interrupts_enabled(env)) {
3772
- qemu_mutex_lock_iothread();
3773
+ bql_lock();
3774
cpu_check_irqs(env);
3775
- qemu_mutex_unlock_iothread();
3776
+ bql_unlock();
3777
}
3778
#endif
3779
}
3780
@@ -XXX,XX +XXX,XX @@ void helper_retry(CPUSPARCState *env)
3781
3782
#if !defined(CONFIG_USER_ONLY)
3783
if (cpu_interrupts_enabled(env)) {
3784
- qemu_mutex_lock_iothread();
3785
+ bql_lock();
3786
cpu_check_irqs(env);
3787
- qemu_mutex_unlock_iothread();
3788
+ bql_unlock();
3789
}
3790
#endif
3791
}
3792
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
3793
index XXXXXXX..XXXXXXX 100644
3794
--- a/target/xtensa/exc_helper.c
3795
+++ b/target/xtensa/exc_helper.c
3796
@@ -XXX,XX +XXX,XX @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
3797
env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
3798
(intlevel << PS_INTLEVEL_SHIFT);
3799
3800
- qemu_mutex_lock_iothread();
3801
+ bql_lock();
3802
check_interrupts(env);
3803
- qemu_mutex_unlock_iothread();
3804
+ bql_unlock();
3805
3806
if (env->pending_irq_level) {
3807
cpu_loop_exit(cpu);
3808
@@ -XXX,XX +XXX,XX @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
3809
3810
void HELPER(check_interrupts)(CPUXtensaState *env)
3811
{
3812
- qemu_mutex_lock_iothread();
3813
+ bql_lock();
3814
check_interrupts(env);
3815
- qemu_mutex_unlock_iothread();
3816
+ bql_unlock();
3817
}
3818
3819
void HELPER(intset)(CPUXtensaState *env, uint32_t v)
3820
diff --git a/ui/spice-core.c b/ui/spice-core.c
3821
index XXXXXXX..XXXXXXX 100644
3822
--- a/ui/spice-core.c
3823
+++ b/ui/spice-core.c
3824
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
3825
*/
3826
bool need_lock = !qemu_thread_is_self(&me);
3827
if (need_lock) {
3828
- qemu_mutex_lock_iothread();
3829
+ bql_lock();
3830
}
3831
3832
if (info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT) {
3833
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
3834
}
3835
3836
if (need_lock) {
3837
- qemu_mutex_unlock_iothread();
3838
+ bql_unlock();
3839
}
3840
3841
qapi_free_SpiceServerInfo(server);
3842
diff --git a/util/async.c b/util/async.c
3843
index XXXXXXX..XXXXXXX 100644
3844
--- a/util/async.c
3845
+++ b/util/async.c
3846
@@ -XXX,XX +XXX,XX @@ AioContext *qemu_get_current_aio_context(void)
3847
if (ctx) {
3848
return ctx;
3849
}
3850
- if (qemu_mutex_iothread_locked()) {
3851
+ if (bql_locked()) {
3852
/* Possibly in a vCPU thread. */
3853
return qemu_get_aio_context();
3854
}
3855
diff --git a/util/main-loop.c b/util/main-loop.c
3856
index XXXXXXX..XXXXXXX 100644
3857
--- a/util/main-loop.c
3858
+++ b/util/main-loop.c
3859
@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)
3860
3861
glib_pollfds_fill(&timeout);
3862
3863
- qemu_mutex_unlock_iothread();
3864
+ bql_unlock();
3865
replay_mutex_unlock();
3866
3867
ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
3868
3869
replay_mutex_lock();
3870
- qemu_mutex_lock_iothread();
3871
+ bql_lock();
3872
3873
glib_pollfds_poll();
3874
3875
@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)
3876
3877
poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
3878
3879
- qemu_mutex_unlock_iothread();
3880
+ bql_unlock();
3881
3882
replay_mutex_unlock();
3883
3884
@@ -XXX,XX +XXX,XX @@ static int os_host_main_loop_wait(int64_t timeout)
3885
3886
replay_mutex_lock();
3887
3888
- qemu_mutex_lock_iothread();
3889
+ bql_lock();
3890
if (g_poll_ret > 0) {
3891
for (i = 0; i < w->num; i++) {
3892
w->revents[i] = poll_fds[n_poll_fds + i].revents;
3893
diff --git a/util/qsp.c b/util/qsp.c
3894
index XXXXXXX..XXXXXXX 100644
3895
--- a/util/qsp.c
3896
+++ b/util/qsp.c
3897
@@ -XXX,XX +XXX,XX @@ static const char * const qsp_typenames[] = {
3898
[QSP_CONDVAR] = "condvar",
3899
};
3900
3901
-QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl;
3902
+QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl;
3903
QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl;
3904
QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
3905
QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
3906
@@ -XXX,XX +XXX,XX @@ void qsp_enable(void)
3907
{
3908
qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
3909
qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
3910
- qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
3911
+ qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock);
3912
qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
3913
qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
3914
qatomic_set(&qemu_cond_wait_func, qsp_cond_wait);
3915
@@ -XXX,XX +XXX,XX @@ void qsp_disable(void)
3916
{
3917
qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
3918
qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
3919
- qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
3920
+ qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl);
3921
qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
3922
qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
3923
qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
3924
diff --git a/util/rcu.c b/util/rcu.c
3925
index XXXXXXX..XXXXXXX 100644
3926
--- a/util/rcu.c
3927
+++ b/util/rcu.c
3928
@@ -XXX,XX +XXX,XX @@ static void *call_rcu_thread(void *opaque)
3929
3930
qatomic_sub(&rcu_call_count, n);
3931
synchronize_rcu();
3932
- qemu_mutex_lock_iothread();
3933
+ bql_lock();
3934
while (n > 0) {
3935
node = try_dequeue();
3936
while (!node) {
3937
- qemu_mutex_unlock_iothread();
3938
+ bql_unlock();
3939
qemu_event_reset(&rcu_call_ready_event);
3940
node = try_dequeue();
3941
if (!node) {
3942
qemu_event_wait(&rcu_call_ready_event);
3943
node = try_dequeue();
3944
}
3945
- qemu_mutex_lock_iothread();
3946
+ bql_lock();
3947
}
3948
3949
n--;
3950
node->func(node);
3951
}
3952
- qemu_mutex_unlock_iothread();
3953
+ bql_unlock();
3954
}
3955
abort();
3956
}
3957
@@ -XXX,XX +XXX,XX @@ static void drain_rcu_callback(struct rcu_head *node)
3958
void drain_call_rcu(void)
3959
{
3960
struct rcu_drain rcu_drain;
3961
- bool locked = qemu_mutex_iothread_locked();
3962
+ bool locked = bql_locked();
3963
3964
memset(&rcu_drain, 0, sizeof(struct rcu_drain));
3965
qemu_event_init(&rcu_drain.drain_complete_event, false);
3966
3967
if (locked) {
3968
- qemu_mutex_unlock_iothread();
3969
+ bql_unlock();
3970
}
3971
3972
3973
@@ -XXX,XX +XXX,XX @@ void drain_call_rcu(void)
3974
qatomic_dec(&in_drain_call_rcu);
3975
3976
if (locked) {
3977
- qemu_mutex_lock_iothread();
3978
+ bql_lock();
3979
}
3980
3981
}
3982
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
3983
index XXXXXXX..XXXXXXX 100644
3984
--- a/audio/coreaudio.m
3985
+++ b/audio/coreaudio.m
3986
@@ -XXX,XX +XXX,XX @@ static OSStatus handle_voice_change(
3987
{
3988
coreaudioVoiceOut *core = in_client_data;
3989
3990
- qemu_mutex_lock_iothread();
3991
+ bql_lock();
3992
3993
if (core->outputDeviceID) {
3994
fini_out_device(core);
3995
@@ -XXX,XX +XXX,XX @@ static OSStatus handle_voice_change(
3996
update_device_playback_state(core);
3997
}
3998
3999
- qemu_mutex_unlock_iothread();
4000
+ bql_unlock();
4001
return 0;
4002
}
4003
4004
diff --git a/memory_ldst.c.inc b/memory_ldst.c.inc
4005
index XXXXXXX..XXXXXXX 100644
4006
--- a/memory_ldst.c.inc
4007
+++ b/memory_ldst.c.inc
4008
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
4009
*result = r;
4010
}
4011
if (release_lock) {
4012
- qemu_mutex_unlock_iothread();
4013
+ bql_unlock();
4014
}
4015
RCU_READ_UNLOCK();
4016
return val;
4017
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
4018
*result = r;
4019
}
4020
if (release_lock) {
4021
- qemu_mutex_unlock_iothread();
4022
+ bql_unlock();
4023
}
4024
RCU_READ_UNLOCK();
4025
return val;
4026
@@ -XXX,XX +XXX,XX @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
4027
*result = r;
4028
}
4029
if (release_lock) {
4030
- qemu_mutex_unlock_iothread();
4031
+ bql_unlock();
4032
}
4033
RCU_READ_UNLOCK();
4034
return val;
4035
@@ -XXX,XX +XXX,XX @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
4036
*result = r;
4037
}
4038
if (release_lock) {
4039
- qemu_mutex_unlock_iothread();
4040
+ bql_unlock();
4041
}
4042
RCU_READ_UNLOCK();
4043
return val;
4044
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
4045
*result = r;
4046
}
4047
if (release_lock) {
4048
- qemu_mutex_unlock_iothread();
4049
+ bql_unlock();
4050
}
4051
RCU_READ_UNLOCK();
4052
}
4053
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
4054
*result = r;
4055
}
4056
if (release_lock) {
4057
- qemu_mutex_unlock_iothread();
4058
+ bql_unlock();
4059
}
4060
RCU_READ_UNLOCK();
4061
}
4062
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
4063
*result = r;
4064
}
4065
if (release_lock) {
4066
- qemu_mutex_unlock_iothread();
4067
+ bql_unlock();
4068
}
4069
RCU_READ_UNLOCK();
4070
}
4071
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
4072
*result = r;
4073
}
4074
if (release_lock) {
4075
- qemu_mutex_unlock_iothread();
4076
+ bql_unlock();
4077
}
4078
RCU_READ_UNLOCK();
4079
}
4080
@@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
4081
*result = r;
4082
}
4083
if (release_lock) {
4084
- qemu_mutex_unlock_iothread();
4085
+ bql_unlock();
4086
}
4087
RCU_READ_UNLOCK();
4088
}
4089
diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md
4090
index XXXXXXX..XXXXXXX 100644
4091
--- a/target/i386/hvf/README.md
4092
+++ b/target/i386/hvf/README.md
4093
@@ -XXX,XX +XXX,XX @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk
4094
4095
1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.
4096
2. Removal of `apic_page` and hyperv-related functionality.
4097
-3. More relaxed use of `qemu_mutex_lock_iothread`.
4098
+3. More relaxed use of `bql_lock`.
4099
diff --git a/ui/cocoa.m b/ui/cocoa.m
4100
index XXXXXXX..XXXXXXX 100644
4101
--- a/ui/cocoa.m
4102
+++ b/ui/cocoa.m
4103
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
4104
typedef void (^CodeBlock)(void);
4105
typedef bool (^BoolCodeBlock)(void);
4106
4107
-static void with_iothread_lock(CodeBlock block)
4108
+static void with_bql(CodeBlock block)
4109
{
4110
- bool locked = qemu_mutex_iothread_locked();
4111
+ bool locked = bql_locked();
4112
if (!locked) {
4113
- qemu_mutex_lock_iothread();
4114
+ bql_lock();
4115
}
4116
block();
4117
if (!locked) {
4118
- qemu_mutex_unlock_iothread();
4119
+ bql_unlock();
4120
}
4121
}
4122
4123
-static bool bool_with_iothread_lock(BoolCodeBlock block)
4124
+static bool bool_with_bql(BoolCodeBlock block)
4125
{
4126
- bool locked = qemu_mutex_iothread_locked();
4127
+ bool locked = bql_locked();
4128
bool val;
4129
4130
if (!locked) {
4131
- qemu_mutex_lock_iothread();
4132
+ bql_lock();
4133
}
4134
val = block();
4135
if (!locked) {
4136
- qemu_mutex_unlock_iothread();
4137
+ bql_unlock();
4138
}
4139
return val;
4140
}
4141
@@ -XXX,XX +XXX,XX @@ - (void) updateUIInfo
4142
return;
4143
}
4144
4145
- with_iothread_lock(^{
4146
+ with_bql(^{
4147
[self updateUIInfoLocked];
4148
});
4149
}
4150
@@ -XXX,XX +XXX,XX @@ - (void) handleMonitorInput:(NSEvent *)event
4151
4152
- (bool) handleEvent:(NSEvent *)event
4153
{
4154
- return bool_with_iothread_lock(^{
4155
+ return bool_with_bql(^{
4156
return [self handleEventLocked:event];
4157
});
4158
}
4159
@@ -XXX,XX +XXX,XX @@ - (QEMUScreen) gscreen {return screen;}
4160
*/
4161
- (void) raiseAllKeys
4162
{
4163
- with_iothread_lock(^{
4164
+ with_bql(^{
4165
qkbd_state_lift_all_keys(kbd);
4166
});
4167
}
4168
@@ -XXX,XX +XXX,XX @@ - (void)applicationWillTerminate:(NSNotification *)aNotification
4169
{
4170
COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n");
4171
4172
- with_iothread_lock(^{
4173
+ with_bql(^{
4174
shutdown_action = SHUTDOWN_ACTION_POWEROFF;
4175
qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI);
4176
});
4177
@@ -XXX,XX +XXX,XX @@ - (void)displayConsole:(id)sender
4178
/* Pause the guest */
4179
- (void)pauseQEMU:(id)sender
4180
{
4181
- with_iothread_lock(^{
4182
+ with_bql(^{
4183
qmp_stop(NULL);
4184
});
4185
[sender setEnabled: NO];
4186
@@ -XXX,XX +XXX,XX @@ - (void)pauseQEMU:(id)sender
4187
/* Resume running the guest operating system */
4188
- (void)resumeQEMU:(id) sender
4189
{
4190
- with_iothread_lock(^{
4191
+ with_bql(^{
4192
qmp_cont(NULL);
4193
});
4194
[sender setEnabled: NO];
4195
@@ -XXX,XX +XXX,XX @@ - (void)removePause
4196
/* Restarts QEMU */
4197
- (void)restartQEMU:(id)sender
4198
{
4199
- with_iothread_lock(^{
4200
+ with_bql(^{
4201
qmp_system_reset(NULL);
4202
});
4203
}
4204
@@ -XXX,XX +XXX,XX @@ - (void)restartQEMU:(id)sender
4205
/* Powers down QEMU */
4206
- (void)powerDownQEMU:(id)sender
4207
{
4208
- with_iothread_lock(^{
4209
+ with_bql(^{
4210
qmp_system_powerdown(NULL);
4211
});
4212
}
4213
@@ -XXX,XX +XXX,XX @@ - (void)ejectDeviceMedia:(id)sender
4214
}
4215
4216
__block Error *err = NULL;
4217
- with_iothread_lock(^{
4218
+ with_bql(^{
4219
qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding],
4220
NULL, false, false, &err);
4221
});
4222
@@ -XXX,XX +XXX,XX @@ - (void)changeDeviceMedia:(id)sender
4223
}
4224
4225
__block Error *err = NULL;
4226
- with_iothread_lock(^{
4227
+ with_bql(^{
4228
qmp_blockdev_change_medium([drive cStringUsingEncoding:
4229
NSASCIIStringEncoding],
4230
NULL,
4231
@@ -XXX,XX +XXX,XX @@ - (void)adjustSpeed:(id)sender
4232
// get the throttle percentage
4233
throttle_pct = [sender tag];
4234
4235
- with_iothread_lock(^{
4236
+ with_bql(^{
4237
cpu_throttle_set(throttle_pct);
4238
});
4239
COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%');
4240
@@ -XXX,XX +XXX,XX @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t
4241
return;
4242
}
4243
4244
- with_iothread_lock(^{
4245
+ with_bql(^{
4246
QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo);
4247
qemu_event_reset(&cbevent);
4248
qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT);
4249
@@ -XXX,XX +XXX,XX @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t
4250
while (info == cbinfo &&
4251
info->types[QEMU_CLIPBOARD_TYPE_TEXT].available &&
4252
info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) {
4253
- qemu_mutex_unlock_iothread();
4254
+ bql_unlock();
4255
qemu_event_wait(&cbevent);
4256
- qemu_mutex_lock_iothread();
4257
+ bql_lock();
4258
}
4259
4260
if (info == cbinfo) {
4261
@@ -XXX,XX +XXX,XX @@ static void cocoa_clipboard_request(QemuClipboardInfo *info,
4262
int status;
4263
4264
COCOA_DEBUG("Second thread: calling qemu_default_main()\n");
4265
- qemu_mutex_lock_iothread();
4266
+ bql_lock();
4267
status = qemu_default_main();
4268
- qemu_mutex_unlock_iothread();
4269
+ bql_unlock();
4270
COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n");
4271
[cbowner release];
4272
exit(status);
4273
@@ -XXX,XX +XXX,XX @@ static int cocoa_main(void)
4274
4275
COCOA_DEBUG("Entered %s()\n", __func__);
4276
4277
- qemu_mutex_unlock_iothread();
4278
+ bql_unlock();
4279
qemu_thread_create(&thread, "qemu_main", call_qemu_main,
4280
NULL, QEMU_THREAD_DETACHED);
123
4281
124
--
4282
--
125
2.13.5
4283
2.43.0
126
4284
127
4285
diff view generated by jsdifflib
Deleted patch
1
From: Alberto Garcia <berto@igalia.com>
2
1
3
LeakyBucket.burst_length is defined as an unsigned integer but the
4
code never checks for overflows and it only makes sure that the value
5
is not 0.
6
7
In practice this means that the user can set something like
8
throttling.iops-total-max-length=4294967300 despite being larger than
9
UINT_MAX and the final value after casting to unsigned int will be 4.
10
11
This patch changes the data type to uint64_t. This does not increase
12
the storage size of LeakyBucket, and allows us to assign the value
13
directly from qemu_opt_get_number() or BlockIOThrottle and then do the
14
checks directly in throttle_is_valid().
15
16
The value of burst_length does not have a specific upper limit,
17
but since the bucket size is defined by max * burst_length we have
18
to prevent overflows. Instead of going for UINT64_MAX or something
19
similar this patch reuses THROTTLE_VALUE_MAX, which allows I/O bursts
20
of 1 GiB/s for 10 days in a row.
21
22
Signed-off-by: Alberto Garcia <berto@igalia.com>
23
Message-id: 1b2e3049803f71cafb2e1fa1be4fb47147a0d398.1503580370.git.berto@igalia.com
24
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
25
---
26
include/qemu/throttle.h | 2 +-
27
util/throttle.c | 5 +++++
28
2 files changed, 6 insertions(+), 1 deletion(-)
29
30
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
31
index XXXXXXX..XXXXXXX 100644
32
--- a/include/qemu/throttle.h
33
+++ b/include/qemu/throttle.h
34
@@ -XXX,XX +XXX,XX @@ typedef struct LeakyBucket {
35
uint64_t max; /* leaky bucket max burst in units */
36
double level; /* bucket level in units */
37
double burst_level; /* bucket level in units (for computing bursts) */
38
- unsigned burst_length; /* max length of the burst period, in seconds */
39
+ uint64_t burst_length; /* max length of the burst period, in seconds */
40
} LeakyBucket;
41
42
/* The following structure is used to configure a ThrottleState
43
diff --git a/util/throttle.c b/util/throttle.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/util/throttle.c
46
+++ b/util/throttle.c
47
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
48
return false;
49
}
50
51
+ if (bkt->max && bkt->burst_length > THROTTLE_VALUE_MAX / bkt->max) {
52
+ error_setg(errp, "burst length too high for this burst rate");
53
+ return false;
54
+ }
55
+
56
if (bkt->max && !bkt->avg) {
57
error_setg(errp, "bps_max/iops_max require corresponding"
58
" bps/iops values");
59
--
60
2.13.5
61
62
diff view generated by jsdifflib
1
From: Alberto Garcia <berto@igalia.com>
1
The name "iothread" is overloaded. Use the term Big QEMU Lock (BQL)
2
2
instead, it is already widely used and unambiguous.
3
There's a few cases which we're passing an Error pointer to a function
3
4
only to discard it immediately afterwards without checking it. In
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
5
these cases we can simply remove the variable and pass NULL instead.
5
Reviewed-by: Paul Durrant <paul@xen.org>
6
6
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
7
Signed-off-by: Alberto Garcia <berto@igalia.com>
7
Reviewed-by: Cédric Le Goater <clg@kaod.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Reviewed-by: Eric Blake <eblake@redhat.com>
9
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
10
Message-id: 20170829120836.16091-1-berto@igalia.com
10
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
11
Message-id: 20240102153529.486531-3-stefanha@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
---
13
block/qcow.c | 12 +++---------
14
include/qemu/main-loop.h | 19 +++++++++----------
14
block/qcow2.c | 8 ++------
15
hw/i386/kvm/xen_evtchn.c | 14 +++++++-------
15
dump.c | 4 +---
16
hw/i386/kvm/xen_gnttab.c | 2 +-
16
3 files changed, 6 insertions(+), 18 deletions(-)
17
hw/mips/mips_int.c | 2 +-
17
18
hw/ppc/ppc.c | 2 +-
18
diff --git a/block/qcow.c b/block/qcow.c
19
target/i386/kvm/xen-emu.c | 2 +-
19
index XXXXXXX..XXXXXXX 100644
20
target/ppc/excp_helper.c | 2 +-
20
--- a/block/qcow.c
21
target/ppc/helper_regs.c | 2 +-
21
+++ b/block/qcow.c
22
target/riscv/cpu_helper.c | 4 ++--
22
@@ -XXX,XX +XXX,XX @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
23
9 files changed, 24 insertions(+), 25 deletions(-)
23
start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
24
24
for(i = 0; i < s->cluster_sectors; i++) {
25
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
25
if (i < n_start || i >= n_end) {
26
index XXXXXXX..XXXXXXX 100644
26
- Error *err = NULL;
27
--- a/include/qemu/main-loop.h
27
memset(s->cluster_data, 0x00, 512);
28
+++ b/include/qemu/main-loop.h
28
if (qcrypto_block_encrypt(s->crypto, start_sect + i,
29
@@ -XXX,XX +XXX,XX @@ void bql_lock_impl(const char *file, int line);
29
s->cluster_data,
30
void bql_unlock(void);
30
BDRV_SECTOR_SIZE,
31
31
- &err) < 0) {
32
/**
32
- error_free(err);
33
- * QEMU_IOTHREAD_LOCK_GUARD
33
+ NULL) < 0) {
34
+ * BQL_LOCK_GUARD
34
errno = EIO;
35
*
35
return -1;
36
* Wrap a block of code in a conditional bql_{lock,unlock}.
36
}
37
*/
37
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
38
-typedef struct IOThreadLockAuto IOThreadLockAuto;
38
QEMUIOVector hd_qiov;
39
+typedef struct BQLLockAuto BQLLockAuto;
39
uint8_t *buf;
40
40
void *orig_buf;
41
-static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file,
41
- Error *err = NULL;
42
- int line)
42
43
+static inline BQLLockAuto *bql_auto_lock(const char *file, int line)
43
if (qiov->niov > 1) {
44
buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
45
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
46
if (bs->encrypted) {
47
assert(s->crypto);
48
if (qcrypto_block_decrypt(s->crypto, sector_num, buf,
49
- n * BDRV_SECTOR_SIZE, &err) < 0) {
50
+ n * BDRV_SECTOR_SIZE, NULL) < 0) {
51
goto fail;
52
}
53
}
54
@@ -XXX,XX +XXX,XX @@ done:
55
return ret;
56
57
fail:
58
- error_free(err);
59
ret = -EIO;
60
goto done;
61
}
62
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
63
break;
64
}
65
if (bs->encrypted) {
66
- Error *err = NULL;
67
assert(s->crypto);
68
if (qcrypto_block_encrypt(s->crypto, sector_num, buf,
69
- n * BDRV_SECTOR_SIZE, &err) < 0) {
70
- error_free(err);
71
+ n * BDRV_SECTOR_SIZE, NULL) < 0) {
72
ret = -EIO;
73
break;
74
}
75
diff --git a/block/qcow2.c b/block/qcow2.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/block/qcow2.c
78
+++ b/block/qcow2.c
79
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
80
assert(s->crypto);
81
assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
82
assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
83
- Error *err = NULL;
84
if (qcrypto_block_decrypt(s->crypto,
85
(s->crypt_physical_offset ?
86
cluster_offset + offset_in_cluster :
87
offset) >> BDRV_SECTOR_BITS,
88
cluster_data,
89
cur_bytes,
90
- &err) < 0) {
91
- error_free(err);
92
+ NULL) < 0) {
93
ret = -EIO;
94
goto fail;
95
}
96
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
97
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
98
99
if (bs->encrypted) {
100
- Error *err = NULL;
101
assert(s->crypto);
102
if (!cluster_data) {
103
cluster_data = qemu_try_blockalign(bs->file->bs,
104
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
105
cluster_offset + offset_in_cluster :
106
offset) >> BDRV_SECTOR_BITS,
107
cluster_data,
108
- cur_bytes, &err) < 0) {
109
- error_free(err);
110
+ cur_bytes, NULL) < 0) {
111
ret = -EIO;
112
goto fail;
113
}
114
diff --git a/dump.c b/dump.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/dump.c
117
+++ b/dump.c
118
@@ -XXX,XX +XXX,XX @@ static void dump_process(DumpState *s, Error **errp)
119
120
static void *dump_thread(void *data)
121
{
44
{
122
- Error *err = NULL;
45
if (bql_locked()) {
123
DumpState *s = (DumpState *)data;
46
return NULL;
124
- dump_process(s, &err);
47
}
125
- error_free(err);
48
bql_lock_impl(file, line);
126
+ dump_process(s, NULL);
49
/* Anything non-NULL causes the cleanup function to be called */
127
return NULL;
50
- return (IOThreadLockAuto *)(uintptr_t)1;
128
}
51
+ return (BQLLockAuto *)(uintptr_t)1;
52
}
53
54
-static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l)
55
+static inline void bql_auto_unlock(BQLLockAuto *l)
56
{
57
bql_unlock();
58
}
59
60
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock)
61
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
62
63
-#define QEMU_IOTHREAD_LOCK_GUARD() \
64
- g_autoptr(IOThreadLockAuto) _iothread_lock_auto __attribute__((unused)) \
65
- = qemu_iothread_auto_lock(__FILE__, __LINE__)
66
+#define BQL_LOCK_GUARD() \
67
+ g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \
68
+ = bql_auto_lock(__FILE__, __LINE__)
69
70
/*
71
* qemu_cond_wait_iothread: Wait on condition for the main loop mutex
72
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/hw/i386/kvm/xen_evtchn.c
75
+++ b/hw/i386/kvm/xen_evtchn.c
76
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_reset_op(struct evtchn_reset *reset)
77
return -ESRCH;
78
}
79
80
- QEMU_IOTHREAD_LOCK_GUARD();
81
+ BQL_LOCK_GUARD();
82
return xen_evtchn_soft_reset();
83
}
84
85
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_close_op(struct evtchn_close *close)
86
return -EINVAL;
87
}
88
89
- QEMU_IOTHREAD_LOCK_GUARD();
90
+ BQL_LOCK_GUARD();
91
qemu_mutex_lock(&s->port_lock);
92
93
ret = close_port(s, close->port, &flush_kvm_routes);
94
@@ -XXX,XX +XXX,XX @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
95
return -EINVAL;
96
}
97
98
- QEMU_IOTHREAD_LOCK_GUARD();
99
+ BQL_LOCK_GUARD();
100
101
if (s->pirq[pirq->pirq].port) {
102
return -EBUSY;
103
@@ -XXX,XX +XXX,XX @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map)
104
return -ENOTSUP;
105
}
106
107
- QEMU_IOTHREAD_LOCK_GUARD();
108
+ BQL_LOCK_GUARD();
109
QEMU_LOCK_GUARD(&s->port_lock);
110
111
if (map->domid != DOMID_SELF && map->domid != xen_domid) {
112
@@ -XXX,XX +XXX,XX @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
113
return -EINVAL;
114
}
115
116
- QEMU_IOTHREAD_LOCK_GUARD();
117
+ BQL_LOCK_GUARD();
118
qemu_mutex_lock(&s->port_lock);
119
120
if (!pirq_inuse(s, pirq)) {
121
@@ -XXX,XX +XXX,XX @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
122
return -ENOTSUP;
123
}
124
125
- QEMU_IOTHREAD_LOCK_GUARD();
126
+ BQL_LOCK_GUARD();
127
QEMU_LOCK_GUARD(&s->port_lock);
128
129
if (!pirq_inuse(s, pirq)) {
130
@@ -XXX,XX +XXX,XX @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
131
return -ENOTSUP;
132
}
133
134
- QEMU_IOTHREAD_LOCK_GUARD();
135
+ BQL_LOCK_GUARD();
136
QEMU_LOCK_GUARD(&s->port_lock);
137
138
if (!pirq_inuse(s, pirq)) {
139
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
140
index XXXXXXX..XXXXXXX 100644
141
--- a/hw/i386/kvm/xen_gnttab.c
142
+++ b/hw/i386/kvm/xen_gnttab.c
143
@@ -XXX,XX +XXX,XX @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
144
return -EINVAL;
145
}
146
147
- QEMU_IOTHREAD_LOCK_GUARD();
148
+ BQL_LOCK_GUARD();
149
QEMU_LOCK_GUARD(&s->gnt_lock);
150
151
xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
152
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/hw/mips/mips_int.c
155
+++ b/hw/mips/mips_int.c
156
@@ -XXX,XX +XXX,XX @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
157
return;
158
}
159
160
- QEMU_IOTHREAD_LOCK_GUARD();
161
+ BQL_LOCK_GUARD();
162
163
if (level) {
164
env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
165
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
166
index XXXXXXX..XXXXXXX 100644
167
--- a/hw/ppc/ppc.c
168
+++ b/hw/ppc/ppc.c
169
@@ -XXX,XX +XXX,XX @@ void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
170
unsigned int old_pending;
171
172
/* We may already have the BQL if coming from the reset path */
173
- QEMU_IOTHREAD_LOCK_GUARD();
174
+ BQL_LOCK_GUARD();
175
176
old_pending = env->pending_interrupts;
177
178
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/i386/kvm/xen-emu.c
181
+++ b/target/i386/kvm/xen-emu.c
182
@@ -XXX,XX +XXX,XX @@ static int xen_set_shared_info(uint64_t gfn)
183
uint64_t gpa = gfn << TARGET_PAGE_BITS;
184
int i, err;
185
186
- QEMU_IOTHREAD_LOCK_GUARD();
187
+ BQL_LOCK_GUARD();
188
189
/*
190
* The xen_overlay device tells KVM about it too, since it had to
191
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
192
index XXXXXXX..XXXXXXX 100644
193
--- a/target/ppc/excp_helper.c
194
+++ b/target/ppc/excp_helper.c
195
@@ -XXX,XX +XXX,XX @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
196
void ppc_maybe_interrupt(CPUPPCState *env)
197
{
198
CPUState *cs = env_cpu(env);
199
- QEMU_IOTHREAD_LOCK_GUARD();
200
+ BQL_LOCK_GUARD();
201
202
if (ppc_next_unmasked_interrupt(env)) {
203
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
204
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/target/ppc/helper_regs.c
207
+++ b/target/ppc/helper_regs.c
208
@@ -XXX,XX +XXX,XX @@ void cpu_interrupt_exittb(CPUState *cs)
209
* unless running with TCG.
210
*/
211
if (tcg_enabled()) {
212
- QEMU_IOTHREAD_LOCK_GUARD();
213
+ BQL_LOCK_GUARD();
214
cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
215
}
216
}
217
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
218
index XXXXXXX..XXXXXXX 100644
219
--- a/target/riscv/cpu_helper.c
220
+++ b/target/riscv/cpu_helper.c
221
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_interrupt(CPURISCVState *env)
222
uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
223
CPUState *cs = env_cpu(env);
224
225
- QEMU_IOTHREAD_LOCK_GUARD();
226
+ BQL_LOCK_GUARD();
227
228
if (env->virt_enabled) {
229
gein = get_field(env->hstatus, HSTATUS_VGEIN);
230
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
231
/* No need to update mip for VSTIP */
232
mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
233
234
- QEMU_IOTHREAD_LOCK_GUARD();
235
+ BQL_LOCK_GUARD();
236
237
env->mip = (env->mip & ~mask) | (value & mask);
129
238
130
--
2.13.5
--
2.43.0

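For readers new to the guard macro renamed above, a minimal usage sketch (the function is hypothetical, not from the series):

    /* Sketch: BQL_LOCK_GUARD() takes the BQL for the rest of the scope
     * and is a no-op when the calling thread already holds it. */
    static void example_poke_global_state(void)    /* hypothetical */
    {
        BQL_LOCK_GUARD();
        /* ... access state protected by the BQL ... */
    }   /* if the guard took the lock, it is released here */
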
1
Most qcow2 files are uncompressed, so it is wasteful to allocate (32 + 1)
1
The name "iothread" is overloaded. Use the term Big QEMU Lock (BQL)
2
* cluster_size + 512 bytes upfront. Allocate s->cluster_cache and
2
instead; it is already widely used and unambiguous.
3
s->cluster_data when the first read operation is performed on a
4
compressed cluster.
5
3
6
The buffers are freed in .bdrv_close(). .bdrv_open() no longer has any
7
code paths that can allocate these buffers, so remove the calls that freed them
8
in the error code path.
9
10
This patch can result in significant memory savings when many qcow2
11
disks are attached or backing file chains are long:
12
13
Before 12.81% (1,023,193,088B)
14
After 5.36% (393,893,888B)
15
16
Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru>
17
Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru>
18
Reviewed-by: Eric Blake <eblake@redhat.com>
19
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
20
Message-id: 20170821135530.32344-1-stefanha@redhat.com
5
Reviewed-by: Cédric Le Goater <clg@kaod.org>
21
Cc: Kevin Wolf <kwolf@redhat.com>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Paul Durrant <paul@xen.org>
8
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
9
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
10
Message-id: 20240102153529.486531-4-stefanha@redhat.com
22
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
23
---
12
---
24
block/qcow2-cluster.c | 17 +++++++++++++++++
13
include/qemu/main-loop.h | 10 +++++-----
25
block/qcow2.c | 12 ------------
14
accel/tcg/tcg-accel-ops-rr.c | 4 ++--
26
2 files changed, 17 insertions(+), 12 deletions(-)
15
hw/display/virtio-gpu.c | 2 +-
16
hw/ppc/spapr_events.c | 2 +-
17
system/cpu-throttle.c | 2 +-
18
system/cpus.c | 4 ++--
19
target/i386/nvmm/nvmm-accel-ops.c | 2 +-
20
target/i386/whpx/whpx-accel-ops.c | 2 +-
21
8 files changed, 14 insertions(+), 14 deletions(-)
27
22
28
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
23
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
29
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
30
--- a/block/qcow2-cluster.c
25
--- a/include/qemu/main-loop.h
31
+++ b/block/qcow2-cluster.c
26
+++ b/include/qemu/main-loop.h
32
@@ -XXX,XX +XXX,XX @@ int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
27
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
33
nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
28
= bql_auto_lock(__FILE__, __LINE__)
34
sector_offset = coffset & 511;
29
35
csize = nb_csectors * 512 - sector_offset;
30
/*
36
+
31
- * qemu_cond_wait_iothread: Wait on condition for the main loop mutex
37
+ /* Allocate buffers on first decompress operation, most images are
32
+ * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL)
38
+ * uncompressed and the memory overhead can be avoided. The buffers
33
*
39
+ * are freed in .bdrv_close().
34
- * This function atomically releases the main loop mutex and causes
40
+ */
35
+ * This function atomically releases the Big QEMU Lock (BQL) and causes
41
+ if (!s->cluster_data) {
36
* the calling thread to block on the condition.
42
+ /* one more sector for decompressed data alignment */
37
*/
43
+ s->cluster_data = qemu_try_blockalign(bs->file->bs,
38
-void qemu_cond_wait_iothread(QemuCond *cond);
44
+ QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
39
+void qemu_cond_wait_bql(QemuCond *cond);
45
+ if (!s->cluster_data) {
40
46
+ return -ENOMEM;
41
/*
47
+ }
42
- * qemu_cond_timedwait_iothread: like the previous, but with timeout
48
+ }
43
+ * qemu_cond_timedwait_bql: like the previous, but with timeout
49
+ if (!s->cluster_cache) {
44
*/
50
+ s->cluster_cache = g_malloc(s->cluster_size);
45
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
51
+ }
46
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
52
+
47
53
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
48
/* internal interfaces */
54
ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
49
55
nb_csectors);
50
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
56
diff --git a/block/qcow2.c b/block/qcow2.c
57
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
58
--- a/block/qcow2.c
52
--- a/accel/tcg/tcg-accel-ops-rr.c
59
+++ b/block/qcow2.c
53
+++ b/accel/tcg/tcg-accel-ops-rr.c
60
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
54
@@ -XXX,XX +XXX,XX @@ static void rr_wait_io_event(void)
61
goto fail;
55
56
while (all_cpu_threads_idle()) {
57
rr_stop_kick_timer();
58
- qemu_cond_wait_iothread(first_cpu->halt_cond);
59
+ qemu_cond_wait_bql(first_cpu->halt_cond);
62
}
60
}
63
61
64
- s->cluster_cache = g_malloc(s->cluster_size);
62
rr_start_kick_timer();
65
- /* one more sector for decompressed data alignment */
63
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
66
- s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS
64
67
- * s->cluster_size + 512);
65
/* wait for initial kick-off after machine start */
68
- if (s->cluster_data == NULL) {
66
while (first_cpu->stopped) {
69
- error_setg(errp, "Could not allocate temporary cluster buffer");
67
- qemu_cond_wait_iothread(first_cpu->halt_cond);
70
- ret = -ENOMEM;
68
+ qemu_cond_wait_bql(first_cpu->halt_cond);
71
- goto fail;
69
72
- }
70
/* process any pending work */
73
-
71
CPU_FOREACH(cpu) {
74
s->cluster_cache_offset = -1;
72
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
75
s->flags = flags;
73
index XXXXXXX..XXXXXXX 100644
76
74
--- a/hw/display/virtio-gpu.c
77
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
75
+++ b/hw/display/virtio-gpu.c
78
if (s->refcount_block_cache) {
76
@@ -XXX,XX +XXX,XX @@ void virtio_gpu_reset(VirtIODevice *vdev)
79
qcow2_cache_destroy(bs, s->refcount_block_cache);
77
g->reset_finished = false;
80
}
78
qemu_bh_schedule(g->reset_bh);
81
- g_free(s->cluster_cache);
79
while (!g->reset_finished) {
82
- qemu_vfree(s->cluster_data);
80
- qemu_cond_wait_iothread(&g->reset_cond);
83
qcrypto_block_free(s->crypto);
81
+ qemu_cond_wait_bql(&g->reset_cond);
84
qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
82
}
85
return ret;
83
} else {
84
virtio_gpu_reset_bh(g);
85
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/hw/ppc/spapr_events.c
88
+++ b/hw/ppc/spapr_events.c
89
@@ -XXX,XX +XXX,XX @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
90
}
91
return;
92
}
93
- qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond);
94
+ qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond);
95
if (spapr->fwnmi_machine_check_addr == -1) {
96
/*
97
* If the machine was reset while waiting for the interlock,
98
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/system/cpu-throttle.c
101
+++ b/system/cpu-throttle.c
102
@@ -XXX,XX +XXX,XX @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
103
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
104
while (sleeptime_ns > 0 && !cpu->stop) {
105
if (sleeptime_ns > SCALE_MS) {
106
- qemu_cond_timedwait_iothread(cpu->halt_cond,
107
+ qemu_cond_timedwait_bql(cpu->halt_cond,
108
sleeptime_ns / SCALE_MS);
109
} else {
110
bql_unlock();
111
diff --git a/system/cpus.c b/system/cpus.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/system/cpus.c
114
+++ b/system/cpus.c
115
@@ -XXX,XX +XXX,XX @@ void bql_unlock(void)
116
qemu_mutex_unlock(&bql);
117
}
118
119
-void qemu_cond_wait_iothread(QemuCond *cond)
120
+void qemu_cond_wait_bql(QemuCond *cond)
121
{
122
qemu_cond_wait(cond, &bql);
123
}
124
125
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
126
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms)
127
{
128
qemu_cond_timedwait(cond, &bql, ms);
129
}
130
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/target/i386/nvmm/nvmm-accel-ops.c
133
+++ b/target/i386/nvmm/nvmm-accel-ops.c
134
@@ -XXX,XX +XXX,XX @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
135
}
136
}
137
while (cpu_thread_is_idle(cpu)) {
138
- qemu_cond_wait_iothread(cpu->halt_cond);
139
+ qemu_cond_wait_bql(cpu->halt_cond);
140
}
141
qemu_wait_io_event_common(cpu);
142
} while (!cpu->unplug || cpu_can_run(cpu));
143
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/target/i386/whpx/whpx-accel-ops.c
146
+++ b/target/i386/whpx/whpx-accel-ops.c
147
@@ -XXX,XX +XXX,XX @@ static void *whpx_cpu_thread_fn(void *arg)
148
}
149
}
150
while (cpu_thread_is_idle(cpu)) {
151
- qemu_cond_wait_iothread(cpu->halt_cond);
152
+ qemu_cond_wait_bql(cpu->halt_cond);
153
}
154
qemu_wait_io_event_common(cpu);
155
} while (!cpu->unplug || cpu_can_run(cpu));
86
--
2.13.5
--
2.43.0

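To put the qcow2 savings above in concrete terms, a back-of-the-envelope sketch (assumes the default 64 KiB cluster size and QCOW_MAX_CRYPT_CLUSTERS == 32):

    /* Sketch: per-image cost of the old upfront allocation. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long cluster_size = 64 * 1024;               /* default */
        unsigned long bytes = (32 + 1) * cluster_size + 512;
        printf("%lu bytes per open image\n", bytes);  /* 2163200, ~2.1 MiB */
        return 0;
    }
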
1
From: Fred Rolland <rollandf@gmail.com>
1
The term "iothread lock" is obsolete. The APIs use Big QEMU Lock (BQL)
2
in their names. Update the code comments to use "BQL" instead of
3
"iothread lock".
2
4
3
Update the documentation to describe the use of a UUID in the initiator name.
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Related-To: https://bugzilla.redhat.com/1006468
7
Reviewed-by: Paul Durrant <paul@xen.org>
6
Signed-off-by: Fred Rolland <frolland@redhat.com>
8
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
7
Message-id: 20170823084830.30500-1-frolland@redhat.com
9
Reviewed-by: Cédric Le Goater <clg@kaod.org>
10
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
11
Message-id: 20240102153529.486531-5-stefanha@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
13
---
10
qemu-doc.texi | 5 +++--
14
docs/devel/reset.rst | 2 +-
11
1 file changed, 3 insertions(+), 2 deletions(-)
15
hw/display/qxl.h | 2 +-
16
include/exec/cpu-common.h | 2 +-
17
include/exec/memory.h | 4 ++--
18
include/exec/ramblock.h | 2 +-
19
include/migration/register.h | 8 ++++----
20
target/arm/internals.h | 4 ++--
21
accel/tcg/cputlb.c | 4 ++--
22
accel/tcg/tcg-accel-ops-icount.c | 2 +-
23
hw/remote/mpqemu-link.c | 2 +-
24
migration/block-dirty-bitmap.c | 10 +++++-----
25
migration/block.c | 22 +++++++++++-----------
26
migration/colo.c | 2 +-
27
migration/migration.c | 2 +-
28
migration/ram.c | 4 ++--
29
system/physmem.c | 6 +++---
30
target/arm/helper.c | 2 +-
31
ui/spice-core.c | 2 +-
32
util/rcu.c | 2 +-
33
audio/coreaudio.m | 4 ++--
34
ui/cocoa.m | 6 +++---
35
21 files changed, 47 insertions(+), 47 deletions(-)
12
36
13
diff --git a/qemu-doc.texi b/qemu-doc.texi
37
diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst
14
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
15
--- a/qemu-doc.texi
39
--- a/docs/devel/reset.rst
16
+++ b/qemu-doc.texi
40
+++ b/docs/devel/reset.rst
17
@@ -XXX,XX +XXX,XX @@ in a configuration file provided via '-readconfig' or directly on the
41
@@ -XXX,XX +XXX,XX @@ Triggering reset
18
command line.
42
19
43
This section documents the APIs which "users" of a resettable object should use
20
If the initiator-name is not specified qemu will use a default name
44
to control it. All resettable control functions must be called while holding
21
-of 'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the
45
-the iothread lock.
22
+of 'iqn.2008-11.org.linux-kvm[:<uuid>'] where <uuid> is the UUID of the
46
+the BQL.
23
+virtual machine. If the UUID is not specified qemu will use
47
24
+'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the
48
You can apply a reset to an object using ``resettable_assert_reset()``. You need
25
virtual machine.
49
to call ``resettable_release_reset()`` to release the object from reset. To
26
50
diff --git a/hw/display/qxl.h b/hw/display/qxl.h
27
-
51
index XXXXXXX..XXXXXXX 100644
28
@example
52
--- a/hw/display/qxl.h
29
Setting a specific initiator name to use when logging in to the target
53
+++ b/hw/display/qxl.h
30
-iscsi initiator-name=iqn.qemu.test:my-initiator
54
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
55
*
56
* Use with care; by the time this function returns, the returned pointer is
57
* not protected by RCU anymore. If the caller is not within an RCU critical
58
- * section and does not hold the iothread lock, it must have other means of
59
+ * section and does not hold the BQL, it must have other means of
60
* protecting the pointer, such as a reference to the region that includes
61
* the incoming ram_addr_t.
62
*
63
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/include/exec/cpu-common.h
66
+++ b/include/exec/cpu-common.h
67
@@ -XXX,XX +XXX,XX @@ RAMBlock *qemu_ram_block_by_name(const char *name);
68
*
69
* By the time this function returns, the returned pointer is not protected
70
* by RCU anymore. If the caller is not within an RCU critical section and
71
- * does not hold the iothread lock, it must have other means of protecting the
72
+ * does not hold the BQL, it must have other means of protecting the
73
* pointer, such as a reference to the memory region that owns the RAMBlock.
74
*/
75
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
76
diff --git a/include/exec/memory.h b/include/exec/memory.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/include/exec/memory.h
79
+++ b/include/exec/memory.h
80
@@ -XXX,XX +XXX,XX @@ int memory_region_get_fd(MemoryRegion *mr);
81
*
82
* Use with care; by the time this function returns, the returned pointer is
83
* not protected by RCU anymore. If the caller is not within an RCU critical
84
- * section and does not hold the iothread lock, it must have other means of
85
+ * section and does not hold the BQL, it must have other means of
86
* protecting the pointer, such as a reference to the region that includes
87
* the incoming ram_addr_t.
88
*
89
@@ -XXX,XX +XXX,XX @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
90
*
91
* Use with care; by the time this function returns, the returned pointer is
92
* not protected by RCU anymore. If the caller is not within an RCU critical
93
- * section and does not hold the iothread lock, it must have other means of
94
+ * section and does not hold the BQL, it must have other means of
95
* protecting the pointer, such as a reference to the region that includes
96
* the incoming ram_addr_t.
97
*
98
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
99
index XXXXXXX..XXXXXXX 100644
100
--- a/include/exec/ramblock.h
101
+++ b/include/exec/ramblock.h
102
@@ -XXX,XX +XXX,XX @@ struct RAMBlock {
103
ram_addr_t max_length;
104
void (*resized)(const char*, uint64_t length, void *host);
105
uint32_t flags;
106
- /* Protected by iothread lock. */
107
+ /* Protected by the BQL. */
108
char idstr[256];
109
/* RCU-enabled, writes protected by the ramlist lock */
110
QLIST_ENTRY(RAMBlock) next;
111
diff --git a/include/migration/register.h b/include/migration/register.h
112
index XXXXXXX..XXXXXXX 100644
113
--- a/include/migration/register.h
114
+++ b/include/migration/register.h
115
@@ -XXX,XX +XXX,XX @@
116
#include "hw/vmstate-if.h"
117
118
typedef struct SaveVMHandlers {
119
- /* This runs inside the iothread lock. */
120
+ /* This runs inside the BQL. */
121
SaveStateHandler *save_state;
122
123
/*
124
@@ -XXX,XX +XXX,XX @@ typedef struct SaveVMHandlers {
125
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
126
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
127
128
- /* This runs both outside and inside the iothread lock. */
129
+ /* This runs both outside and inside the BQL. */
130
bool (*is_active)(void *opaque);
131
bool (*has_postcopy)(void *opaque);
132
133
@@ -XXX,XX +XXX,XX @@ typedef struct SaveVMHandlers {
134
*/
135
bool (*is_active_iterate)(void *opaque);
136
137
- /* This runs outside the iothread lock in the migration case, and
138
+ /* This runs outside the BQL in the migration case, and
139
* within the lock in the savevm case. The callback had better only
140
* use data that is local to the migration thread or protected
141
* by other locks.
142
*/
143
int (*save_live_iterate)(QEMUFile *f, void *opaque);
144
145
- /* This runs outside the iothread lock! */
146
+ /* This runs outside the BQL! */
147
/* Note for save_live_pending:
148
* must_precopy:
149
* - must be migrated in precopy or in stopped state
150
diff --git a/target/arm/internals.h b/target/arm/internals.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/target/arm/internals.h
153
+++ b/target/arm/internals.h
154
@@ -XXX,XX +XXX,XX @@ static inline const char *aarch32_mode_name(uint32_t psr)
155
*
156
* Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
157
* a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
158
- * Must be called with the iothread lock held.
159
+ * Must be called with the BQL held.
160
*/
161
void arm_cpu_update_virq(ARMCPU *cpu);
162
163
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_virq(ARMCPU *cpu);
164
*
165
* Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
166
* a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
167
- * Must be called with the iothread lock held.
168
+ * Must be called with the BQL held.
169
*/
170
void arm_cpu_update_vfiq(ARMCPU *cpu);
171
172
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/accel/tcg/cputlb.c
175
+++ b/accel/tcg/cputlb.c
176
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
177
* @size: number of bytes
178
* @mmu_idx: virtual address context
179
* @ra: return address into tcg generated code, or 0
180
- * Context: iothread lock held
181
+ * Context: BQL held
182
*
183
* Load @size bytes from @addr, which is memory-mapped i/o.
184
* The bytes are concatenated in big-endian order with @ret_be.
185
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
186
* @size: number of bytes
187
* @mmu_idx: virtual address context
188
* @ra: return address into tcg generated code, or 0
189
- * Context: iothread lock held
190
+ * Context: BQL held
191
*
192
* Store @size bytes at @addr, which is memory-mapped i/o.
193
* The bytes to store are extracted in little-endian order from @val_le;
194
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
195
index XXXXXXX..XXXXXXX 100644
196
--- a/accel/tcg/tcg-accel-ops-icount.c
197
+++ b/accel/tcg/tcg-accel-ops-icount.c
198
@@ -XXX,XX +XXX,XX @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
199
200
if (cpu->icount_budget == 0) {
201
/*
202
- * We're called without the iothread lock, so must take it while
203
+ * We're called without the BQL, so must take it while
204
* we're calling timer handlers.
205
*/
206
bql_lock();
207
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
208
index XXXXXXX..XXXXXXX 100644
209
--- a/hw/remote/mpqemu-link.c
210
+++ b/hw/remote/mpqemu-link.c
211
@@ -XXX,XX +XXX,XX @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
212
assert(qemu_in_coroutine() || !iothread);
213
214
/*
215
- * Skip unlocking/locking iothread lock when the IOThread is running
216
+ * Skip unlocking/locking BQL when the IOThread is running
217
* in co-routine context. Co-routine context is asserted above
218
* for IOThread case.
219
* Also skip lock handling while in a co-routine in the main context.
220
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
221
index XXXXXXX..XXXXXXX 100644
222
--- a/migration/block-dirty-bitmap.c
223
+++ b/migration/block-dirty-bitmap.c
224
@@ -XXX,XX +XXX,XX @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
225
g_free(buf);
226
}
227
228
-/* Called with iothread lock taken. */
229
+/* Called with the BQL taken. */
230
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
231
{
232
SaveBitmapState *dbms;
233
@@ -XXX,XX +XXX,XX @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
234
}
235
}
236
237
-/* Called with iothread lock taken. */
238
+/* Called with the BQL taken. */
239
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
240
const char *bs_name, GHashTable *alias_map)
241
{
242
@@ -XXX,XX +XXX,XX @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
243
return 0;
244
}
245
246
-/* Called with iothread lock taken. */
247
+/* Called with the BQL taken. */
248
static int init_dirty_bitmap_migration(DBMSaveState *s)
249
{
250
BlockDriverState *bs;
251
@@ -XXX,XX +XXX,XX @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
252
BlockBackend *blk;
253
GHashTable *alias_map = NULL;
254
255
- /* Runs in the migration thread, but holds the iothread lock */
256
+ /* Runs in the migration thread, but holds the BQL */
257
GLOBAL_STATE_CODE();
258
GRAPH_RDLOCK_GUARD_MAINLOOP();
259
260
@@ -XXX,XX +XXX,XX @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
261
return s->bulk_completed;
262
}
263
264
-/* Called with iothread lock taken. */
265
+/* Called with the BQL taken. */
266
267
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
268
{
269
diff --git a/migration/block.c b/migration/block.c
270
index XXXXXXX..XXXXXXX 100644
271
--- a/migration/block.c
272
+++ b/migration/block.c
273
@@ -XXX,XX +XXX,XX @@ typedef struct BlkMigState {
274
int prev_progress;
275
int bulk_completed;
276
277
- /* Lock must be taken _inside_ the iothread lock. */
278
+ /* Lock must be taken _inside_ the BQL. */
279
QemuMutex lock;
280
} BlkMigState;
281
282
@@ -XXX,XX +XXX,XX @@ static void blk_mig_unlock(void)
283
qemu_mutex_unlock(&block_mig_state.lock);
284
}
285
286
-/* Must run outside of the iothread lock during the bulk phase,
287
+/* Must run outside of the BQL during the bulk phase,
288
* or the VM will stall.
289
*/
290
291
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
292
return (bmds->cur_sector >= total_sectors);
293
}
294
295
-/* Called with iothread lock taken. */
296
+/* Called with the BQL taken. */
297
298
static int set_dirty_tracking(void)
299
{
300
@@ -XXX,XX +XXX,XX @@ fail:
301
return ret;
302
}
303
304
-/* Called with iothread lock taken. */
305
+/* Called with the BQL taken. */
306
307
static void unset_dirty_tracking(void)
308
{
309
@@ -XXX,XX +XXX,XX @@ static void blk_mig_reset_dirty_cursor(void)
310
}
311
}
312
313
-/* Called with iothread lock taken. */
314
+/* Called with the BQL taken. */
315
316
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
317
int is_async)
318
@@ -XXX,XX +XXX,XX @@ error:
319
return ret;
320
}
321
322
-/* Called with iothread lock taken.
323
+/* Called with the BQL taken.
324
*
325
* return value:
326
* 0: too much data for max_downtime
327
@@ -XXX,XX +XXX,XX @@ static int flush_blks(QEMUFile *f)
328
return ret;
329
}
330
331
-/* Called with iothread lock taken. */
332
+/* Called with the BQL taken. */
333
334
static int64_t get_remaining_dirty(void)
335
{
336
@@ -XXX,XX +XXX,XX @@ static int64_t get_remaining_dirty(void)
337
338
339
340
-/* Called with iothread lock taken. */
341
+/* Called with the BQL taken. */
342
static void block_migration_cleanup_bmds(void)
343
{
344
BlkMigDevState *bmds;
345
@@ -XXX,XX +XXX,XX @@ static void block_migration_cleanup_bmds(void)
346
}
347
}
348
349
-/* Called with iothread lock taken. */
350
+/* Called with the BQL taken. */
351
static void block_migration_cleanup(void *opaque)
352
{
353
BlkMigBlock *blk;
354
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
355
}
356
ret = 0;
357
} else {
358
- /* Always called with iothread lock taken for
359
+ /* Always called with the BQL taken for
360
* simplicity, block_save_complete also calls it.
361
*/
362
bql_lock();
363
@@ -XXX,XX +XXX,XX @@ static int block_save_iterate(QEMUFile *f, void *opaque)
364
return (delta_bytes > 0);
365
}
366
367
-/* Called with iothread lock taken. */
368
+/* Called with the BQL taken. */
369
370
static int block_save_complete(QEMUFile *f, void *opaque)
371
{
372
diff --git a/migration/colo.c b/migration/colo.c
373
index XXXXXXX..XXXXXXX 100644
374
--- a/migration/colo.c
375
+++ b/migration/colo.c
376
@@ -XXX,XX +XXX,XX @@ int coroutine_fn colo_incoming_co(void)
377
qemu_thread_join(&th);
378
bql_lock();
379
380
- /* We hold the global iothread lock, so it is safe here */
381
+ /* We hold the global BQL, so it is safe here */
382
colo_release_ram_cache();
383
384
return 0;
385
diff --git a/migration/migration.c b/migration/migration.c
386
index XXXXXXX..XXXXXXX 100644
387
--- a/migration/migration.c
388
+++ b/migration/migration.c
389
@@ -XXX,XX +XXX,XX @@ fail:
390
391
/**
392
* migration_maybe_pause: Pause if required to by
393
- * migrate_pause_before_switchover called with the iothread locked
394
+ * migrate_pause_before_switchover called with the BQL locked
395
* Returns: 0 on success
396
*/
397
static int migration_maybe_pause(MigrationState *s,
398
diff --git a/migration/ram.c b/migration/ram.c
399
index XXXXXXX..XXXXXXX 100644
400
--- a/migration/ram.c
401
+++ b/migration/ram.c
402
@@ -XXX,XX +XXX,XX @@ static void ram_save_cleanup(void *opaque)
403
404
/* We don't use dirty log with background snapshots */
405
if (!migrate_background_snapshot()) {
406
- /* caller have hold iothread lock or is in a bh, so there is
407
+ /* caller have hold BQL or is in a bh, so there is
408
* no writing race against the migration bitmap
409
*/
410
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
411
@@ -XXX,XX +XXX,XX @@ out:
412
*
413
* Returns zero to indicate success or negative on error
414
*
415
- * Called with iothread lock
416
+ * Called with the BQL
417
*
418
* @f: QEMUFile where to send the data
419
* @opaque: RAMState pointer
420
diff --git a/system/physmem.c b/system/physmem.c
421
index XXXXXXX..XXXXXXX 100644
422
--- a/system/physmem.c
423
+++ b/system/physmem.c
424
@@ -XXX,XX +XXX,XX @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
425
abort();
426
427
found:
428
- /* It is safe to write mru_block outside the iothread lock. This
429
+ /* It is safe to write mru_block outside the BQL. This
430
* is what happens:
431
*
432
* mru_block = xxx
433
@@ -XXX,XX +XXX,XX @@ int qemu_ram_get_fd(RAMBlock *rb)
434
return rb->fd;
435
}
436
437
-/* Called with iothread lock held. */
438
+/* Called with the BQL held. */
439
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
440
{
441
RAMBlock *block;
442
@@ -XXX,XX +XXX,XX @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
443
}
444
}
445
446
-/* Called with iothread lock held. */
447
+/* Called with the BQL held. */
448
void qemu_ram_unset_idstr(RAMBlock *block)
449
{
450
/* FIXME: arch_init.c assumes that this is not called throughout
451
diff --git a/target/arm/helper.c b/target/arm/helper.c
452
index XXXXXXX..XXXXXXX 100644
453
--- a/target/arm/helper.c
454
+++ b/target/arm/helper.c
455
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
456
* Updates to VI and VF require us to update the status of
457
* virtual interrupts, which are the logical OR of these bits
458
* and the state of the input lines from the GIC. (This requires
459
- * that we have the iothread lock, which is done by marking the
460
+ * that we have the BQL, which is done by marking the
461
* reginfo structs as ARM_CP_IO.)
462
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
463
* possible for it to be taken immediately, because VIRQ and
464
diff --git a/ui/spice-core.c b/ui/spice-core.c
465
index XXXXXXX..XXXXXXX 100644
466
--- a/ui/spice-core.c
467
+++ b/ui/spice-core.c
468
@@ -XXX,XX +XXX,XX @@ static void channel_event(int event, SpiceChannelEventInfo *info)
469
* not do that. It isn't that easy to fix it in spice and even
470
* when it is fixed we still should cover the already released
471
* spice versions. So detect that we've been called from another
472
- * thread and grab the iothread lock if so before calling qemu
473
+ * thread and grab the BQL if so before calling qemu
474
* functions.
475
*/
476
bool need_lock = !qemu_thread_is_self(&me);
477
diff --git a/util/rcu.c b/util/rcu.c
478
index XXXXXXX..XXXXXXX 100644
479
--- a/util/rcu.c
480
+++ b/util/rcu.c
481
@@ -XXX,XX +XXX,XX @@ static void rcu_init_complete(void)
482
483
qemu_event_init(&rcu_call_ready_event, false);
484
485
- /* The caller is assumed to have iothread lock, so the call_rcu thread
486
+ /* The caller is assumed to have BQL, so the call_rcu thread
487
* must have been quiescent even after forking, just recreate it.
488
*/
489
qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
490
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
491
index XXXXXXX..XXXXXXX 100644
492
--- a/audio/coreaudio.m
493
+++ b/audio/coreaudio.m
494
@@ -XXX,XX +XXX,XX @@ static ret_type glue(coreaudio_, name)args_decl \
495
#undef COREAUDIO_WRAPPER_FUNC
496
497
/*
498
- * callback to feed audiooutput buffer. called without iothread lock.
499
+ * callback to feed audiooutput buffer. called without BQL.
500
* allowed to lock "buf_mutex", but disallowed to have any other locks.
501
*/
502
static OSStatus audioDeviceIOProc(
503
@@ -XXX,XX +XXX,XX @@ static void update_device_playback_state(coreaudioVoiceOut *core)
504
}
505
}
506
507
-/* called without iothread lock. */
508
+/* called without BQL. */
509
static OSStatus handle_voice_change(
510
AudioObjectID in_object_id,
511
UInt32 in_number_addresses,
512
diff --git a/ui/cocoa.m b/ui/cocoa.m
513
index XXXXXXX..XXXXXXX 100644
514
--- a/ui/cocoa.m
515
+++ b/ui/cocoa.m
516
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
517
static QemuClipboardInfo *cbinfo;
518
static QemuEvent cbevent;
519
520
-// Utility functions to run specified code block with iothread lock held
521
+// Utility functions to run specified code block with the BQL held
522
typedef void (^CodeBlock)(void);
523
typedef bool (^BoolCodeBlock)(void);
524
525
@@ -XXX,XX +XXX,XX @@ - (void) setContentDimensions
526
527
- (void) updateUIInfoLocked
528
{
529
- /* Must be called with the iothread lock, i.e. via updateUIInfo */
530
+ /* Must be called with the BQL, i.e. via updateUIInfo */
531
NSSize frameSize;
532
QemuUIInfo info;
533
534
@@ -XXX,XX +XXX,XX @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
535
* Create the menu entries which depend on QEMU state (for consoles
536
* and removable devices). These make calls back into QEMU functions,
537
* which is OK because at this point we know that the second thread
538
- * holds the iothread lock and is synchronously waiting for us to
539
+ * holds the BQL and is synchronously waiting for us to
540
* finish.
541
*/
542
add_console_menu_entries();
31
--
2.13.5
--
2.43.0

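A small sketch of the convention behind these comment updates (bql_locked() comes from the renamed API earlier in the series; the helper itself is hypothetical):

    /* Sketch: a function documented "Called with the BQL held" can assert
     * the precondition instead of relying on the comment alone. */
    #include <assert.h>

    static void bql_only_helper(void)    /* hypothetical */
    {
        assert(bql_locked());
        /* ... */
    }
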
1
From: Alberto Garcia <berto@igalia.com>
1
The term "QEMU global mutex" is identical to the more widely used Big
2
2
QEMU Lock ("BQL"). Update the code comments and documentation to use
3
Signed-off-by: Alberto Garcia <berto@igalia.com>
3
"BQL" instead of "QEMU global mutex".
4
Message-id: a57dd6274e1b6dc9c28769fec4c7ea543be5c5e3.1503580370.git.berto@igalia.com
4
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
Acked-by: Markus Armbruster <armbru@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Paul Durrant <paul@xen.org>
9
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
10
Reviewed-by: Cédric Le Goater <clg@kaod.org>
11
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
12
Message-id: 20240102153529.486531-6-stefanha@redhat.com
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
---
14
---
7
tests/test-throttle.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++
15
docs/devel/multi-thread-tcg.rst | 7 +++----
8
1 file changed, 77 insertions(+)
16
docs/devel/qapi-code-gen.rst | 2 +-
9
17
docs/devel/replay.rst | 2 +-
10
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
18
docs/devel/multiple-iothreads.txt | 14 +++++++-------
11
index XXXXXXX..XXXXXXX 100644
19
include/block/blockjob.h | 6 +++---
12
--- a/tests/test-throttle.c
20
include/io/task.h | 2 +-
13
+++ b/tests/test-throttle.c
21
include/qemu/coroutine-core.h | 2 +-
14
@@ -XXX,XX +XXX,XX @@ static void test_is_valid(void)
22
include/qemu/coroutine.h | 2 +-
15
test_is_valid_for_value(1, true);
23
hw/block/dataplane/virtio-blk.c | 8 ++++----
16
}
24
hw/block/virtio-blk.c | 2 +-
17
25
hw/scsi/virtio-scsi-dataplane.c | 6 +++---
18
+static void test_ranges(void)
26
net/tap.c | 2 +-
19
+{
27
12 files changed, 27 insertions(+), 28 deletions(-)
20
+ int i;
28
21
+
29
diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst
22
+ for (i = 0; i < BUCKETS_COUNT; i++) {
30
index XXXXXXX..XXXXXXX 100644
23
+ LeakyBucket *b = &cfg.buckets[i];
31
--- a/docs/devel/multi-thread-tcg.rst
24
+ throttle_config_init(&cfg);
32
+++ b/docs/devel/multi-thread-tcg.rst
25
+
33
@@ -XXX,XX +XXX,XX @@ instruction. This could be a future optimisation.
26
+ /* avg = 0 means throttling is disabled, but the config is valid */
34
Emulated hardware state
27
+ b->avg = 0;
35
-----------------------
28
+ g_assert(throttle_is_valid(&cfg, NULL));
36
29
+ g_assert(!throttle_enabled(&cfg));
37
-Currently thanks to KVM work any access to IO memory is automatically
30
+
38
-protected by the global iothread mutex, also known as the BQL (Big
31
+ /* These are valid configurations (values <= THROTTLE_VALUE_MAX) */
39
-QEMU Lock). Any IO region that doesn't use global mutex is expected to
32
+ b->avg = 1;
40
-do its own locking.
33
+ g_assert(throttle_is_valid(&cfg, NULL));
41
+Currently thanks to KVM work any access to IO memory is automatically protected
34
+
42
+by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected
35
+ b->avg = THROTTLE_VALUE_MAX;
43
+to do its own locking.
36
+ g_assert(throttle_is_valid(&cfg, NULL));
44
37
+
45
However IO memory isn't the only way emulated hardware state can be
38
+ b->avg = THROTTLE_VALUE_MAX;
46
modified. Some architectures have model specific registers that
39
+ b->max = THROTTLE_VALUE_MAX;
47
diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst
40
+ g_assert(throttle_is_valid(&cfg, NULL));
48
index XXXXXXX..XXXXXXX 100644
41
+
49
--- a/docs/devel/qapi-code-gen.rst
42
+ /* Values over THROTTLE_VALUE_MAX are not allowed */
50
+++ b/docs/devel/qapi-code-gen.rst
43
+ b->avg = THROTTLE_VALUE_MAX + 1;
51
@@ -XXX,XX +XXX,XX @@ blocking the guest and other background operations.
44
+ g_assert(!throttle_is_valid(&cfg, NULL));
52
Coroutine safety can be hard to prove, similar to thread safety. Common
45
+
53
pitfalls are:
46
+ b->avg = THROTTLE_VALUE_MAX;
54
47
+ b->max = THROTTLE_VALUE_MAX + 1;
55
-- The global mutex isn't held across ``qemu_coroutine_yield()``, so
48
+ g_assert(!throttle_is_valid(&cfg, NULL));
56
+- The BQL isn't held across ``qemu_coroutine_yield()``, so
49
+
57
operations that used to assume that they execute atomically may have
50
+ /* burst_length must be between 1 and THROTTLE_VALUE_MAX */
58
to be more careful to protect against changes in the global state.
51
+ b->avg = 1;
59
52
+ b->max = 1;
60
diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst
53
+ b->burst_length = 0;
61
index XXXXXXX..XXXXXXX 100644
54
+ g_assert(!throttle_is_valid(&cfg, NULL));
62
--- a/docs/devel/replay.rst
55
+
63
+++ b/docs/devel/replay.rst
56
+ b->avg = 1;
64
@@ -XXX,XX +XXX,XX @@ modes.
57
+ b->max = 1;
65
Reading and writing requests are created by CPU thread of QEMU. Later these
58
+ b->burst_length = 1;
66
requests proceed to block layer which creates "bottom halves". Bottom
59
+ g_assert(throttle_is_valid(&cfg, NULL));
67
halves consist of callback and its parameters. They are processed when
60
+
68
-main loop locks the global mutex. These locks are not synchronized with
61
+ b->avg = 1;
69
+main loop locks the BQL. These locks are not synchronized with
62
+ b->max = 1;
70
replaying process because main loop also processes the events that do not
63
+ b->burst_length = THROTTLE_VALUE_MAX;
71
affect the virtual machine state (like user interaction with monitor).
64
+ g_assert(throttle_is_valid(&cfg, NULL));
72
65
+
73
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
66
+ b->avg = 1;
74
index XXXXXXX..XXXXXXX 100644
67
+ b->max = 1;
75
--- a/docs/devel/multiple-iothreads.txt
68
+ b->burst_length = THROTTLE_VALUE_MAX + 1;
76
+++ b/docs/devel/multiple-iothreads.txt
69
+ g_assert(!throttle_is_valid(&cfg, NULL));
77
@@ -XXX,XX +XXX,XX @@ the COPYING file in the top-level directory.
70
+
78
71
+ /* burst_length * max cannot exceed THROTTLE_VALUE_MAX */
79
72
+ b->avg = 1;
80
This document explains the IOThread feature and how to write code that runs
73
+ b->max = 2;
81
-outside the QEMU global mutex.
74
+ b->burst_length = THROTTLE_VALUE_MAX / 2;
82
+outside the BQL.
75
+ g_assert(throttle_is_valid(&cfg, NULL));
83
76
+
84
The main loop and IOThreads
77
+ b->avg = 1;
85
---------------------------
78
+ b->max = 3;
86
@@ -XXX,XX +XXX,XX @@ scalability bottleneck on hosts with many CPUs. Work can be spread across
79
+ b->burst_length = THROTTLE_VALUE_MAX / 2;
87
several IOThreads instead of just one main loop. When set up correctly this
80
+ g_assert(!throttle_is_valid(&cfg, NULL));
88
can improve I/O latency and reduce jitter seen by the guest.
81
+
89
82
+ b->avg = 1;
90
-The main loop is also deeply associated with the QEMU global mutex, which is a
83
+ b->max = THROTTLE_VALUE_MAX;
91
-scalability bottleneck in itself. vCPU threads and the main loop use the QEMU
84
+ b->burst_length = 1;
92
-global mutex to serialize execution of QEMU code. This mutex is necessary
85
+ g_assert(throttle_is_valid(&cfg, NULL));
93
-because a lot of QEMU's code historically was not thread-safe.
86
+
94
+The main loop is also deeply associated with the BQL, which is a
87
+ b->avg = 1;
95
+scalability bottleneck in itself. vCPU threads and the main loop use the BQL
88
+ b->max = THROTTLE_VALUE_MAX;
96
+to serialize execution of QEMU code. This mutex is necessary because a lot of
89
+ b->burst_length = 2;
97
+QEMU's code historically was not thread-safe.
90
+ g_assert(!throttle_is_valid(&cfg, NULL));
98
91
+ }
99
The fact that all I/O processing is done in a single main loop and that the
92
+}
100
-QEMU global mutex is contended by all vCPU threads and the main loop explain
93
+
101
+BQL is contended by all vCPU threads and the main loop explain
94
static void test_max_is_missing_limit(void)
102
why it is desirable to place work into IOThreads.
103
104
The experimental virtio-blk data-plane implementation has been benchmarked and
105
@@ -XXX,XX +XXX,XX @@ There are several old APIs that use the main loop AioContext:
106
107
Since they implicitly work on the main loop they cannot be used in code that
108
runs in an IOThread. They might cause a crash or deadlock if called from an
109
-IOThread since the QEMU global mutex is not held.
110
+IOThread since the BQL is not held.
111
112
Instead, use the AioContext functions directly (see include/block/aio.h):
113
* aio_set_fd_handler() - monitor a file descriptor
114
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
115
index XXXXXXX..XXXXXXX 100644
116
--- a/include/block/blockjob.h
117
+++ b/include/block/blockjob.h
118
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
119
120
/**
121
* Speed that was set with @block_job_set_speed.
122
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
123
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
124
*/
125
int64_t speed;
126
127
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
128
129
/**
130
* Block other operations when block job is running.
131
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
132
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
133
*/
134
Error *blocker;
135
136
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
137
138
/**
139
* BlockDriverStates that are involved in this block job.
140
- * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
141
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
142
*/
143
GSList *nodes;
144
} BlockJob;
145
diff --git a/include/io/task.h b/include/io/task.h
146
index XXXXXXX..XXXXXXX 100644
147
--- a/include/io/task.h
148
+++ b/include/io/task.h
149
@@ -XXX,XX +XXX,XX @@ typedef void (*QIOTaskWorker)(QIOTask *task,
150
* lookups) to be easily run non-blocking. Reporting the
151
* results in the main thread context means that the caller
152
* typically does not need to be concerned about thread
153
- * safety wrt the QEMU global mutex.
154
+ * safety wrt the BQL.
155
*
156
* For example, the socket_listen() method will block the caller
157
* while DNS lookups take place if given a name, instead of IP
158
diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h
159
index XXXXXXX..XXXXXXX 100644
160
--- a/include/qemu/coroutine-core.h
161
+++ b/include/qemu/coroutine-core.h
162
@@ -XXX,XX +XXX,XX @@
163
* rather than callbacks, for operations that need to give up control while
164
* waiting for events to complete.
165
*
166
- * These functions are re-entrant and may be used outside the global mutex.
167
+ * These functions are re-entrant and may be used outside the BQL.
168
*
169
* Functions that execute in coroutine context cannot be called
170
* directly from normal functions. Use @coroutine_fn to mark such
171
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
172
index XXXXXXX..XXXXXXX 100644
173
--- a/include/qemu/coroutine.h
174
+++ b/include/qemu/coroutine.h
175
@@ -XXX,XX +XXX,XX @@
176
* rather than callbacks, for operations that need to give up control while
177
* waiting for events to complete.
178
*
179
- * These functions are re-entrant and may be used outside the global mutex.
180
+ * These functions are re-entrant and may be used outside the BQL.
181
*
182
* Functions that execute in coroutine context cannot be called
183
* directly from normal functions. Use @coroutine_fn to mark such
184
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
185
index XXXXXXX..XXXXXXX 100644
186
--- a/hw/block/dataplane/virtio-blk.c
187
+++ b/hw/block/dataplane/virtio-blk.c
188
@@ -XXX,XX +XXX,XX @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
189
}
190
}
191
192
-/* Context: QEMU global mutex held */
193
+/* Context: BQL held */
194
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
195
VirtIOBlockDataPlane **dataplane,
196
Error **errp)
197
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
198
return true;
199
}
200
201
-/* Context: QEMU global mutex held */
202
+/* Context: BQL held */
203
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
204
{
205
VirtIOBlock *vblk;
206
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
207
g_free(s);
208
}
209
210
-/* Context: QEMU global mutex held */
211
+/* Context: BQL held */
212
int virtio_blk_data_plane_start(VirtIODevice *vdev)
213
{
214
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
215
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
216
virtio_queue_host_notifier_read(host_notifier);
217
}
218
219
-/* Context: QEMU global mutex held */
220
+/* Context: BQL held */
221
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
222
{
223
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
224
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/hw/block/virtio-blk.c
227
+++ b/hw/block/virtio-blk.c
228
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_resize(void *opaque)
229
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
230
231
/*
232
- * virtio_notify_config() needs to acquire the global mutex,
233
+ * virtio_notify_config() needs to acquire the BQL,
234
* so it can't be called from an iothread. Instead, schedule
235
* it to be run in the main context BH.
236
*/
237
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/hw/scsi/virtio-scsi-dataplane.c
240
+++ b/hw/scsi/virtio-scsi-dataplane.c
241
@@ -XXX,XX +XXX,XX @@
242
#include "scsi/constants.h"
243
#include "hw/virtio/virtio-bus.h"
244
245
-/* Context: QEMU global mutex held */
246
+/* Context: BQL held */
247
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
248
{
249
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
250
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
251
}
252
}
253
254
-/* Context: QEMU global mutex held */
255
+/* Context: BQL held */
256
int virtio_scsi_dataplane_start(VirtIODevice *vdev)
95
{
257
{
96
int i;
258
int i;
97
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
259
@@ -XXX,XX +XXX,XX @@ fail_guest_notifiers:
98
g_test_add_func("/throttle/config/enabled", test_enabled);
260
return -ENOSYS;
99
g_test_add_func("/throttle/config/conflicting", test_conflicting_config);
261
}
100
g_test_add_func("/throttle/config/is_valid", test_is_valid);
262
101
+ g_test_add_func("/throttle/config/ranges", test_ranges);
263
-/* Context: QEMU global mutex held */
102
g_test_add_func("/throttle/config/max", test_max_is_missing_limit);
264
+/* Context: BQL held */
103
g_test_add_func("/throttle/config/iops_size",
265
void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
104
test_iops_size_is_missing_limit);
266
{
267
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
268
diff --git a/net/tap.c b/net/tap.c
269
index XXXXXXX..XXXXXXX 100644
270
--- a/net/tap.c
271
+++ b/net/tap.c
272
@@ -XXX,XX +XXX,XX @@ static void tap_send(void *opaque)
273
274
/*
275
* When the host keeps receiving more packets while tap_send() is
276
- * running we can hog the QEMU global mutex. Limit the number of
277
+ * running we can hog the BQL. Limit the number of
278
* packets that are processed per tap_send() callback to prevent
279
* stalling the guest.
280
*/
105
--
2.13.5
--
2.43.0

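To spell out the boundary that the new test_ranges() cases probe, a sketch reusing the patch's names (not additional test code):

    /* Sketch: with avg = 1 and max = 2, burst_length = THROTTLE_VALUE_MAX / 2
     * gives burst_length * max == THROTTLE_VALUE_MAX, still valid; bumping
     * max to 3 pushes the product past the limit. */
    static void sketch_burst_boundary(void)
    {
        ThrottleConfig cfg;
        LeakyBucket *b = &cfg.buckets[0];

        throttle_config_init(&cfg);
        b->avg = 1;
        b->max = 2;
        b->burst_length = THROTTLE_VALUE_MAX / 2;
        g_assert(throttle_is_valid(&cfg, NULL));    /* 2 * (MAX / 2) <= MAX */

        b->max = 3;
        g_assert(!throttle_is_valid(&cfg, NULL));   /* 3 * (MAX / 2) >  MAX */
    }
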
Deleted patch

From: Eduardo Habkost <ehabkost@redhat.com>

If QEMU is running on a system that's out of memory and mmap()
fails, QEMU aborts with no error message at all, making it hard
to debug the reason for the failure.

Add perror() calls that will print error information before
aborting.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20170829212053.6003-1-ehabkost@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
util/oslib-posix.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
+ perror("failed to allocate memory for stack");
abort();
}

@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
guardpage = ptr;
#endif
if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
+ perror("failed to set up stack guard page");
abort();
}

--
2.13.5

Deleted patch
1
The minimum Python version supported by QEMU is 2.6. The argparse
2
standard library module was only added in Python 2.7. Many scripts
3
would like to use argparse because it supports command-line
4
sub-commands.
5
1
6
This patch adds argparse. See the top of argparse.py for details.
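
For illustration only (an editorial sketch, not part of the original patch),
the sub-command support mentioned above looks like this in use; the 'run'
and 'report' command names and their options are invented for the example:

    import argparse

    parser = argparse.ArgumentParser(description='demo tool')
    subparsers = parser.add_subparsers(dest='command')

    run = subparsers.add_parser('run', help='run a job')
    run.add_argument('--jobs', type=int, default=1)

    report = subparsers.add_parser('report', help='print a report')
    report.add_argument('--format', choices=['text', 'json'], default='text')

    args = parser.parse_args(['run', '--jobs', '4'])
    print('%s %d' % (args.command, args.jobs))    # prints: run 4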
7
8
Suggested-by: Daniel P. Berrange <berrange@redhat.com>
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Acked-by: John Snow <jsnow@redhat.com>
11
Message-id: 20170825155732.15665-2-stefanha@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
COPYING.PYTHON | 270 ++++++
15
scripts/argparse.py | 2406 +++++++++++++++++++++++++++++++++++++++++++++++++++
16
2 files changed, 2676 insertions(+)
17
create mode 100644 COPYING.PYTHON
18
create mode 100644 scripts/argparse.py
19
20
diff --git a/COPYING.PYTHON b/COPYING.PYTHON
21
new file mode 100644
22
index XXXXXXX..XXXXXXX
23
--- /dev/null
24
+++ b/COPYING.PYTHON
25
@@ -XXX,XX +XXX,XX @@
26
+A. HISTORY OF THE SOFTWARE
27
+==========================
28
+
29
+Python was created in the early 1990s by Guido van Rossum at Stichting
30
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
31
+as a successor of a language called ABC. Guido remains Python's
32
+principal author, although it includes many contributions from others.
33
+
34
+In 1995, Guido continued his work on Python at the Corporation for
35
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
36
+in Reston, Virginia where he released several versions of the
37
+software.
38
+
39
+In May 2000, Guido and the Python core development team moved to
40
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
41
+year, the PythonLabs team moved to Digital Creations (now Zope
42
+Corporation, see http://www.zope.com). In 2001, the Python Software
43
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
44
+non-profit organization created specifically to own Python-related
45
+Intellectual Property. Zope Corporation is a sponsoring member of
46
+the PSF.
47
+
48
+All Python releases are Open Source (see http://www.opensource.org for
49
+the Open Source Definition). Historically, most, but not all, Python
50
+releases have also been GPL-compatible; the table below summarizes
51
+the various releases.
52
+
53
+ Release Derived Year Owner GPL-
54
+ from compatible? (1)
55
+
56
+ 0.9.0 thru 1.2 1991-1995 CWI yes
57
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
58
+ 1.6 1.5.2 2000 CNRI no
59
+ 2.0 1.6 2000 BeOpen.com no
60
+ 1.6.1 1.6 2001 CNRI yes (2)
61
+ 2.1 2.0+1.6.1 2001 PSF no
62
+ 2.0.1 2.0+1.6.1 2001 PSF yes
63
+ 2.1.1 2.1+2.0.1 2001 PSF yes
64
+ 2.2 2.1.1 2001 PSF yes
65
+ 2.1.2 2.1.1 2002 PSF yes
66
+ 2.1.3 2.1.2 2002 PSF yes
67
+ 2.2.1 2.2 2002 PSF yes
68
+ 2.2.2 2.2.1 2002 PSF yes
69
+ 2.2.3 2.2.2 2003 PSF yes
70
+ 2.3 2.2.2 2002-2003 PSF yes
71
+ 2.3.1 2.3 2002-2003 PSF yes
72
+ 2.3.2 2.3.1 2002-2003 PSF yes
73
+ 2.3.3 2.3.2 2002-2003 PSF yes
74
+ 2.3.4 2.3.3 2004 PSF yes
75
+ 2.3.5 2.3.4 2005 PSF yes
76
+ 2.4 2.3 2004 PSF yes
77
+ 2.4.1 2.4 2005 PSF yes
78
+ 2.4.2 2.4.1 2005 PSF yes
79
+ 2.4.3 2.4.2 2006 PSF yes
80
+ 2.5 2.4 2006 PSF yes
81
+ 2.7 2.6 2010 PSF yes
82
+
83
+Footnotes:
84
+
85
+(1) GPL-compatible doesn't mean that we're distributing Python under
86
+ the GPL. All Python licenses, unlike the GPL, let you distribute
87
+ a modified version without making your changes open source. The
88
+ GPL-compatible licenses make it possible to combine Python with
89
+ other software that is released under the GPL; the others don't.
90
+
91
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
92
+ because its license has a choice of law clause. According to
93
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
94
+ is "not incompatible" with the GPL.
95
+
96
+Thanks to the many outside volunteers who have worked under Guido's
97
+direction to make these releases possible.
98
+
99
+
100
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
101
+===============================================================
102
+
103
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
104
+--------------------------------------------
105
+
106
+1. This LICENSE AGREEMENT is between the Python Software Foundation
107
+("PSF"), and the Individual or Organization ("Licensee") accessing and
108
+otherwise using this software ("Python") in source or binary form and
109
+its associated documentation.
110
+
111
+2. Subject to the terms and conditions of this License Agreement, PSF
112
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
113
+license to reproduce, analyze, test, perform and/or display publicly,
114
+prepare derivative works, distribute, and otherwise use Python
115
+alone or in any derivative version, provided, however, that PSF's
116
+License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
117
+2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
118
+Reserved" are retained in Python alone or in any derivative version
119
+prepared by Licensee.
120
+
121
+3. In the event Licensee prepares a derivative work that is based on
122
+or incorporates Python or any part thereof, and wants to make
123
+the derivative work available to others as provided herein, then
124
+Licensee hereby agrees to include in any such work a brief summary of
125
+the changes made to Python.
126
+
127
+4. PSF is making Python available to Licensee on an "AS IS"
128
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
129
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
130
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
131
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
132
+INFRINGE ANY THIRD PARTY RIGHTS.
133
+
134
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
135
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
136
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
137
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
138
+
139
+6. This License Agreement will automatically terminate upon a material
140
+breach of its terms and conditions.
141
+
142
+7. Nothing in this License Agreement shall be deemed to create any
143
+relationship of agency, partnership, or joint venture between PSF and
144
+Licensee. This License Agreement does not grant permission to use PSF
145
+trademarks or trade name in a trademark sense to endorse or promote
146
+products or services of Licensee, or any third party.
147
+
148
+8. By copying, installing or otherwise using Python, Licensee
149
+agrees to be bound by the terms and conditions of this License
150
+Agreement.
151
+
152
+
153
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
154
+-------------------------------------------
155
+
156
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
157
+
158
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
159
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
160
+Individual or Organization ("Licensee") accessing and otherwise using
161
+this software in source or binary form and its associated
162
+documentation ("the Software").
163
+
164
+2. Subject to the terms and conditions of this BeOpen Python License
165
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
166
+royalty-free, world-wide license to reproduce, analyze, test, perform
167
+and/or display publicly, prepare derivative works, distribute, and
168
+otherwise use the Software alone or in any derivative version,
169
+provided, however, that the BeOpen Python License is retained in the
170
+Software, alone or in any derivative version prepared by Licensee.
171
+
172
+3. BeOpen is making the Software available to Licensee on an "AS IS"
173
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
174
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
175
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
176
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
177
+INFRINGE ANY THIRD PARTY RIGHTS.
178
+
179
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
180
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
181
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
182
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
183
+
184
+5. This License Agreement will automatically terminate upon a material
185
+breach of its terms and conditions.
186
+
187
+6. This License Agreement shall be governed by and interpreted in all
188
+respects by the law of the State of California, excluding conflict of
189
+law provisions. Nothing in this License Agreement shall be deemed to
190
+create any relationship of agency, partnership, or joint venture
191
+between BeOpen and Licensee. This License Agreement does not grant
192
+permission to use BeOpen trademarks or trade names in a trademark
193
+sense to endorse or promote products or services of Licensee, or any
194
+third party. As an exception, the "BeOpen Python" logos available at
195
+http://www.pythonlabs.com/logos.html may be used according to the
196
+permissions granted on that web page.
197
+
198
+7. By copying, installing or otherwise using the software, Licensee
199
+agrees to be bound by the terms and conditions of this License
200
+Agreement.
201
+
202
+
203
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
204
+---------------------------------------
205
+
206
+1. This LICENSE AGREEMENT is between the Corporation for National
207
+Research Initiatives, having an office at 1895 Preston White Drive,
208
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
209
+("Licensee") accessing and otherwise using Python 1.6.1 software in
210
+source or binary form and its associated documentation.
211
+
212
+2. Subject to the terms and conditions of this License Agreement, CNRI
213
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
214
+license to reproduce, analyze, test, perform and/or display publicly,
215
+prepare derivative works, distribute, and otherwise use Python 1.6.1
216
+alone or in any derivative version, provided, however, that CNRI's
217
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
218
+1995-2001 Corporation for National Research Initiatives; All Rights
219
+Reserved" are retained in Python 1.6.1 alone or in any derivative
220
+version prepared by Licensee. Alternately, in lieu of CNRI's License
221
+Agreement, Licensee may substitute the following text (omitting the
222
+quotes): "Python 1.6.1 is made available subject to the terms and
223
+conditions in CNRI's License Agreement. This Agreement together with
224
+Python 1.6.1 may be located on the Internet using the following
225
+unique, persistent identifier (known as a handle): 1895.22/1013. This
226
+Agreement may also be obtained from a proxy server on the Internet
227
+using the following URL: http://hdl.handle.net/1895.22/1013".
228
+
229
+3. In the event Licensee prepares a derivative work that is based on
230
+or incorporates Python 1.6.1 or any part thereof, and wants to make
231
+the derivative work available to others as provided herein, then
232
+Licensee hereby agrees to include in any such work a brief summary of
233
+the changes made to Python 1.6.1.
234
+
235
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
236
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
237
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
238
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
239
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
240
+INFRINGE ANY THIRD PARTY RIGHTS.
241
+
242
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
243
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
244
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
245
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
246
+
247
+6. This License Agreement will automatically terminate upon a material
248
+breach of its terms and conditions.
249
+
250
+7. This License Agreement shall be governed by the federal
251
+intellectual property law of the United States, including without
252
+limitation the federal copyright law, and, to the extent such
253
+U.S. federal law does not apply, by the law of the Commonwealth of
254
+Virginia, excluding Virginia's conflict of law provisions.
255
+Notwithstanding the foregoing, with regard to derivative works based
256
+on Python 1.6.1 that incorporate non-separable material that was
257
+previously distributed under the GNU General Public License (GPL), the
258
+law of the Commonwealth of Virginia shall govern this License
259
+Agreement only as to issues arising under or with respect to
260
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
261
+License Agreement shall be deemed to create any relationship of
262
+agency, partnership, or joint venture between CNRI and Licensee. This
263
+License Agreement does not grant permission to use CNRI trademarks or
264
+trade name in a trademark sense to endorse or promote products or
265
+services of Licensee, or any third party.
266
+
267
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
268
+installing or otherwise using Python 1.6.1, Licensee agrees to be
269
+bound by the terms and conditions of this License Agreement.
270
+
271
+ ACCEPT
272
+
273
+
274
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
275
+--------------------------------------------------
276
+
277
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
278
+The Netherlands. All rights reserved.
279
+
280
+Permission to use, copy, modify, and distribute this software and its
281
+documentation for any purpose and without fee is hereby granted,
282
+provided that the above copyright notice appear in all copies and that
283
+both that copyright notice and this permission notice appear in
284
+supporting documentation, and that the name of Stichting Mathematisch
285
+Centrum or CWI not be used in advertising or publicity pertaining to
286
+distribution of the software without specific, written prior
287
+permission.
288
+
289
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
290
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
291
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
292
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
293
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
294
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
295
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
296
diff --git a/scripts/argparse.py b/scripts/argparse.py
297
new file mode 100644
298
index XXXXXXX..XXXXXXX
299
--- /dev/null
300
+++ b/scripts/argparse.py
301
@@ -XXX,XX +XXX,XX @@
302
+# This is a local copy of the standard library argparse module taken from PyPI.
303
+# It is licensed under the Python Software Foundation License. This is a
304
+# fallback for Python 2.6 which does not include this module. Python 2.7+ and
305
+# 3+ will never load this module because built-in modules are loaded before
306
+# anything in sys.path.
307
+#
308
+# If your script is not located in the same directory as this file, import it
309
+# like this:
310
+#
311
+# import os
312
+# import sys
313
+# sys.path.append(os.path.join(os.path.dirname(__file__), ..., 'scripts'))
314
+# import argparse
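+#
+# As a concrete illustration (hypothetical layout, not taken from this patch):
+# a script two directories below the top of the tree would replace the elided
+# components above with '..', '..', like so:
+#
+# sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts'))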
315
+
316
+# Author: Steven J. Bethard <steven.bethard@gmail.com>.
317
+# Maintainer: Thomas Waldmann <tw@waldmann-edv.de>
318
+
319
+"""Command-line parsing library
320
+
321
+This module is an optparse-inspired command-line parsing library that:
322
+
323
+ - handles both optional and positional arguments
324
+ - produces highly informative usage messages
325
+ - supports parsers that dispatch to sub-parsers
326
+
327
+The following is a simple usage example that sums integers from the
328
+command-line and writes the result to a file::
329
+
330
+ parser = argparse.ArgumentParser(
331
+ description='sum the integers at the command line')
332
+ parser.add_argument(
333
+ 'integers', metavar='int', nargs='+', type=int,
334
+ help='an integer to be summed')
335
+ parser.add_argument(
336
+ '--log', default=sys.stdout, type=argparse.FileType('w'),
337
+ help='the file where the sum should be written')
338
+ args = parser.parse_args()
339
+ args.log.write('%s' % sum(args.integers))
340
+ args.log.close()
341
+
342
+The module contains the following public classes:
343
+
344
+ - ArgumentParser -- The main entry point for command-line parsing. As the
345
+ example above shows, the add_argument() method is used to populate
346
+ the parser with actions for optional and positional arguments. Then
347
+ the parse_args() method is invoked to convert the args at the
348
+ command-line into an object with attributes.
349
+
350
+ - ArgumentError -- The exception raised by ArgumentParser objects when
351
+ there are errors with the parser's actions. Errors raised while
352
+ parsing the command-line are caught by ArgumentParser and emitted
353
+ as command-line messages.
354
+
355
+ - FileType -- A factory for defining types of files to be created. As the
356
+ example above shows, instances of FileType are typically passed as
357
+ the type= argument of add_argument() calls.
358
+
359
+ - Action -- The base class for parser actions. Typically actions are
360
+ selected by passing strings like 'store_true' or 'append_const' to
361
+ the action= argument of add_argument(). However, for greater
362
+ customization of ArgumentParser actions, subclasses of Action may
363
+ be defined and passed as the action= argument.
364
+
365
+ - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
366
+ ArgumentDefaultsHelpFormatter -- Formatter classes which
367
+ may be passed as the formatter_class= argument to the
368
+ ArgumentParser constructor. HelpFormatter is the default,
369
+ RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
370
+ not to change the formatting for help text, and
371
+ ArgumentDefaultsHelpFormatter adds information about argument defaults
372
+ to the help.
373
+
374
+All other classes in this module are considered implementation details.
375
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
376
+considered public as object names -- the API of the formatter objects is
377
+still considered an implementation detail.)
378
+"""
379
+
380
+__version__ = '1.4.0' # we use our own version number independent of the
381
+ # one in stdlib and we release this on pypi.
382
+
383
+__external_lib__ = True # to make sure the tests really test THIS lib,
384
+ # not the builtin one in Python stdlib
385
+
386
+__all__ = [
387
+ 'ArgumentParser',
388
+ 'ArgumentError',
389
+ 'ArgumentTypeError',
390
+ 'FileType',
391
+ 'HelpFormatter',
392
+ 'ArgumentDefaultsHelpFormatter',
393
+ 'RawDescriptionHelpFormatter',
394
+ 'RawTextHelpFormatter',
395
+ 'Namespace',
396
+ 'Action',
397
+ 'ONE_OR_MORE',
398
+ 'OPTIONAL',
399
+ 'PARSER',
400
+ 'REMAINDER',
401
+ 'SUPPRESS',
402
+ 'ZERO_OR_MORE',
403
+]
404
+
405
+
406
+import copy as _copy
407
+import os as _os
408
+import re as _re
409
+import sys as _sys
410
+import textwrap as _textwrap
411
+
412
+from gettext import gettext as _
413
+
414
+try:
415
+ set
416
+except NameError:
417
+ # for python < 2.4 compatibility (sets module is there since 2.3):
418
+ from sets import Set as set
419
+
420
+try:
421
+ basestring
422
+except NameError:
423
+ basestring = str
424
+
425
+try:
426
+ sorted
427
+except NameError:
428
+ # for python < 2.4 compatibility:
429
+ def sorted(iterable, reverse=False):
430
+ result = list(iterable)
431
+ result.sort()
432
+ if reverse:
433
+ result.reverse()
434
+ return result
435
+
436
+
437
+def _callable(obj):
438
+ return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
439
+
440
+
441
+SUPPRESS = '==SUPPRESS=='
442
+
443
+OPTIONAL = '?'
444
+ZERO_OR_MORE = '*'
445
+ONE_OR_MORE = '+'
446
+PARSER = 'A...'
447
+REMAINDER = '...'
448
+_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
449
+
450
+# =============================
451
+# Utility functions and classes
452
+# =============================
453
+
454
+class _AttributeHolder(object):
455
+ """Abstract base class that provides __repr__.
456
+
457
+ The __repr__ method returns a string in the format::
458
+ ClassName(attr=name, attr=name, ...)
459
+ The attributes are determined either by a class-level attribute,
460
+ '_kwarg_names', or by inspecting the instance __dict__.
461
+ """
462
+
463
+ def __repr__(self):
464
+ type_name = type(self).__name__
465
+ arg_strings = []
466
+ for arg in self._get_args():
467
+ arg_strings.append(repr(arg))
468
+ for name, value in self._get_kwargs():
469
+ arg_strings.append('%s=%r' % (name, value))
470
+ return '%s(%s)' % (type_name, ', '.join(arg_strings))
471
+
472
+ def _get_kwargs(self):
473
+ return sorted(self.__dict__.items())
474
+
475
+ def _get_args(self):
476
+ return []
477
+
478
+
479
+def _ensure_value(namespace, name, value):
480
+ if getattr(namespace, name, None) is None:
481
+ setattr(namespace, name, value)
482
+ return getattr(namespace, name)
483
+
484
+
485
+# ===============
486
+# Formatting Help
487
+# ===============
488
+
489
+class HelpFormatter(object):
490
+ """Formatter for generating usage messages and argument help strings.
491
+
492
+ Only the name of this class is considered a public API. All the methods
493
+ provided by the class are considered an implementation detail.
494
+ """
495
+
496
+ def __init__(self,
497
+ prog,
498
+ indent_increment=2,
499
+ max_help_position=24,
500
+ width=None):
501
+
502
+ # default setting for width
503
+ if width is None:
504
+ try:
505
+ width = int(_os.environ['COLUMNS'])
506
+ except (KeyError, ValueError):
507
+ width = 80
508
+ width -= 2
509
+
510
+ self._prog = prog
511
+ self._indent_increment = indent_increment
512
+ self._max_help_position = max_help_position
513
+ self._width = width
514
+
515
+ self._current_indent = 0
516
+ self._level = 0
517
+ self._action_max_length = 0
518
+
519
+ self._root_section = self._Section(self, None)
520
+ self._current_section = self._root_section
521
+
522
+ self._whitespace_matcher = _re.compile(r'\s+')
523
+ self._long_break_matcher = _re.compile(r'\n\n\n+')
524
+
525
+ # ===============================
526
+ # Section and indentation methods
527
+ # ===============================
528
+ def _indent(self):
529
+ self._current_indent += self._indent_increment
530
+ self._level += 1
531
+
532
+ def _dedent(self):
533
+ self._current_indent -= self._indent_increment
534
+ assert self._current_indent >= 0, 'Indent decreased below 0.'
535
+ self._level -= 1
536
+
537
+ class _Section(object):
538
+
539
+ def __init__(self, formatter, parent, heading=None):
540
+ self.formatter = formatter
541
+ self.parent = parent
542
+ self.heading = heading
543
+ self.items = []
544
+
545
+ def format_help(self):
546
+ # format the indented section
547
+ if self.parent is not None:
548
+ self.formatter._indent()
549
+ join = self.formatter._join_parts
550
+ for func, args in self.items:
551
+ func(*args)
552
+ item_help = join([func(*args) for func, args in self.items])
553
+ if self.parent is not None:
554
+ self.formatter._dedent()
555
+
556
+ # return nothing if the section was empty
557
+ if not item_help:
558
+ return ''
559
+
560
+ # add the heading if the section was non-empty
561
+ if self.heading is not SUPPRESS and self.heading is not None:
562
+ current_indent = self.formatter._current_indent
563
+ heading = '%*s%s:\n' % (current_indent, '', self.heading)
564
+ else:
565
+ heading = ''
566
+
567
+ # join the section-initial newline, the heading and the help
568
+ return join(['\n', heading, item_help, '\n'])
569
+
570
+ def _add_item(self, func, args):
571
+ self._current_section.items.append((func, args))
572
+
573
+ # ========================
574
+ # Message building methods
575
+ # ========================
576
+ def start_section(self, heading):
577
+ self._indent()
578
+ section = self._Section(self, self._current_section, heading)
579
+ self._add_item(section.format_help, [])
580
+ self._current_section = section
581
+
582
+ def end_section(self):
583
+ self._current_section = self._current_section.parent
584
+ self._dedent()
585
+
586
+ def add_text(self, text):
587
+ if text is not SUPPRESS and text is not None:
588
+ self._add_item(self._format_text, [text])
589
+
590
+ def add_usage(self, usage, actions, groups, prefix=None):
591
+ if usage is not SUPPRESS:
592
+ args = usage, actions, groups, prefix
593
+ self._add_item(self._format_usage, args)
594
+
595
+ def add_argument(self, action):
596
+ if action.help is not SUPPRESS:
597
+
598
+ # find all invocations
599
+ get_invocation = self._format_action_invocation
600
+ invocations = [get_invocation(action)]
601
+ for subaction in self._iter_indented_subactions(action):
602
+ invocations.append(get_invocation(subaction))
603
+
604
+ # update the maximum item length
605
+ invocation_length = max([len(s) for s in invocations])
606
+ action_length = invocation_length + self._current_indent
607
+ self._action_max_length = max(self._action_max_length,
608
+ action_length)
609
+
610
+ # add the item to the list
611
+ self._add_item(self._format_action, [action])
612
+
613
+ def add_arguments(self, actions):
614
+ for action in actions:
615
+ self.add_argument(action)
616
+
617
+ # =======================
618
+ # Help-formatting methods
619
+ # =======================
620
+ def format_help(self):
621
+ help = self._root_section.format_help()
622
+ if help:
623
+ help = self._long_break_matcher.sub('\n\n', help)
624
+ help = help.strip('\n') + '\n'
625
+ return help
626
+
627
+ def _join_parts(self, part_strings):
628
+ return ''.join([part
629
+ for part in part_strings
630
+ if part and part is not SUPPRESS])
631
+
632
+ def _format_usage(self, usage, actions, groups, prefix):
633
+ if prefix is None:
634
+ prefix = _('usage: ')
635
+
636
+ # if usage is specified, use that
637
+ if usage is not None:
638
+ usage = usage % dict(prog=self._prog)
639
+
640
+ # if no optionals or positionals are available, usage is just prog
641
+ elif usage is None and not actions:
642
+ usage = '%(prog)s' % dict(prog=self._prog)
643
+
644
+ # if optionals and positionals are available, calculate usage
645
+ elif usage is None:
646
+ prog = '%(prog)s' % dict(prog=self._prog)
647
+
648
+ # split optionals from positionals
649
+ optionals = []
650
+ positionals = []
651
+ for action in actions:
652
+ if action.option_strings:
653
+ optionals.append(action)
654
+ else:
655
+ positionals.append(action)
656
+
657
+ # build full usage string
658
+ format = self._format_actions_usage
659
+ action_usage = format(optionals + positionals, groups)
660
+ usage = ' '.join([s for s in [prog, action_usage] if s])
661
+
662
+ # wrap the usage parts if it's too long
663
+ text_width = self._width - self._current_indent
664
+ if len(prefix) + len(usage) > text_width:
665
+
666
+ # break usage into wrappable parts
667
+ part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
668
+ opt_usage = format(optionals, groups)
669
+ pos_usage = format(positionals, groups)
670
+ opt_parts = _re.findall(part_regexp, opt_usage)
671
+ pos_parts = _re.findall(part_regexp, pos_usage)
672
+ assert ' '.join(opt_parts) == opt_usage
673
+ assert ' '.join(pos_parts) == pos_usage
674
+
675
+ # helper for wrapping lines
676
+ def get_lines(parts, indent, prefix=None):
677
+ lines = []
678
+ line = []
679
+ if prefix is not None:
680
+ line_len = len(prefix) - 1
681
+ else:
682
+ line_len = len(indent) - 1
683
+ for part in parts:
684
+ if line_len + 1 + len(part) > text_width:
685
+ lines.append(indent + ' '.join(line))
686
+ line = []
687
+ line_len = len(indent) - 1
688
+ line.append(part)
689
+ line_len += len(part) + 1
690
+ if line:
691
+ lines.append(indent + ' '.join(line))
692
+ if prefix is not None:
693
+ lines[0] = lines[0][len(indent):]
694
+ return lines
695
+
696
+ # if prog is short, follow it with optionals or positionals
697
+ if len(prefix) + len(prog) <= 0.75 * text_width:
698
+ indent = ' ' * (len(prefix) + len(prog) + 1)
699
+ if opt_parts:
700
+ lines = get_lines([prog] + opt_parts, indent, prefix)
701
+ lines.extend(get_lines(pos_parts, indent))
702
+ elif pos_parts:
703
+ lines = get_lines([prog] + pos_parts, indent, prefix)
704
+ else:
705
+ lines = [prog]
706
+
707
+ # if prog is long, put it on its own line
708
+ else:
709
+ indent = ' ' * len(prefix)
710
+ parts = opt_parts + pos_parts
711
+ lines = get_lines(parts, indent)
712
+ if len(lines) > 1:
713
+ lines = []
714
+ lines.extend(get_lines(opt_parts, indent))
715
+ lines.extend(get_lines(pos_parts, indent))
716
+ lines = [prog] + lines
717
+
718
+ # join lines into usage
719
+ usage = '\n'.join(lines)
720
+
721
+ # prefix with 'usage:'
722
+ return '%s%s\n\n' % (prefix, usage)
723
+
724
+ def _format_actions_usage(self, actions, groups):
725
+ # find group indices and identify actions in groups
726
+ group_actions = set()
727
+ inserts = {}
728
+ for group in groups:
729
+ try:
730
+ start = actions.index(group._group_actions[0])
731
+ except ValueError:
732
+ continue
733
+ else:
734
+ end = start + len(group._group_actions)
735
+ if actions[start:end] == group._group_actions:
736
+ for action in group._group_actions:
737
+ group_actions.add(action)
738
+ if not group.required:
739
+ if start in inserts:
740
+ inserts[start] += ' ['
741
+ else:
742
+ inserts[start] = '['
743
+ inserts[end] = ']'
744
+ else:
745
+ if start in inserts:
746
+ inserts[start] += ' ('
747
+ else:
748
+ inserts[start] = '('
749
+ inserts[end] = ')'
750
+ for i in range(start + 1, end):
751
+ inserts[i] = '|'
752
+
753
+ # collect all actions format strings
754
+ parts = []
755
+ for i, action in enumerate(actions):
756
+
757
+ # suppressed arguments are marked with None
758
+ # remove | separators for suppressed arguments
759
+ if action.help is SUPPRESS:
760
+ parts.append(None)
761
+ if inserts.get(i) == '|':
762
+ inserts.pop(i)
763
+ elif inserts.get(i + 1) == '|':
764
+ inserts.pop(i + 1)
765
+
766
+ # produce all arg strings
767
+ elif not action.option_strings:
768
+ part = self._format_args(action, action.dest)
769
+
770
+ # if it's in a group, strip the outer []
771
+ if action in group_actions:
772
+ if part[0] == '[' and part[-1] == ']':
773
+ part = part[1:-1]
774
+
775
+ # add the action string to the list
776
+ parts.append(part)
777
+
778
+ # produce the first way to invoke the option in brackets
779
+ else:
780
+ option_string = action.option_strings[0]
781
+
782
+ # if the Optional doesn't take a value, format is:
783
+ # -s or --long
784
+ if action.nargs == 0:
785
+ part = '%s' % option_string
786
+
787
+ # if the Optional takes a value, format is:
788
+ # -s ARGS or --long ARGS
789
+ else:
790
+ default = action.dest.upper()
791
+ args_string = self._format_args(action, default)
792
+ part = '%s %s' % (option_string, args_string)
793
+
794
+ # make it look optional if it's not required or in a group
795
+ if not action.required and action not in group_actions:
796
+ part = '[%s]' % part
797
+
798
+ # add the action string to the list
799
+ parts.append(part)
800
+
801
+ # insert things at the necessary indices
802
+ for i in sorted(inserts, reverse=True):
803
+ parts[i:i] = [inserts[i]]
804
+
805
+ # join all the action items with spaces
806
+ text = ' '.join([item for item in parts if item is not None])
807
+
808
+ # clean up separators for mutually exclusive groups
809
+ open = r'[\[(]'
810
+ close = r'[\])]'
811
+ text = _re.sub(r'(%s) ' % open, r'\1', text)
812
+ text = _re.sub(r' (%s)' % close, r'\1', text)
813
+ text = _re.sub(r'%s *%s' % (open, close), r'', text)
814
+ text = _re.sub(r'\(([^|]*)\)', r'\1', text)
815
+ text = text.strip()
816
+
817
+ # return the text
818
+ return text
819
+
820
+ def _format_text(self, text):
821
+ if '%(prog)' in text:
822
+ text = text % dict(prog=self._prog)
823
+ text_width = self._width - self._current_indent
824
+ indent = ' ' * self._current_indent
825
+ return self._fill_text(text, text_width, indent) + '\n\n'
826
+
827
+ def _format_action(self, action):
828
+ # determine the required width and the entry label
829
+ help_position = min(self._action_max_length + 2,
830
+ self._max_help_position)
831
+ help_width = self._width - help_position
832
+ action_width = help_position - self._current_indent - 2
833
+ action_header = self._format_action_invocation(action)
834
+
835
+ # no help; start on same line and add a final newline
836
+ if not action.help:
837
+ tup = self._current_indent, '', action_header
838
+ action_header = '%*s%s\n' % tup
839
+
840
+ # short action name; start on the same line and pad two spaces
841
+ elif len(action_header) <= action_width:
842
+ tup = self._current_indent, '', action_width, action_header
843
+ action_header = '%*s%-*s ' % tup
844
+ indent_first = 0
845
+
846
+ # long action name; start on the next line
847
+ else:
848
+ tup = self._current_indent, '', action_header
849
+ action_header = '%*s%s\n' % tup
850
+ indent_first = help_position
851
+
852
+ # collect the pieces of the action help
853
+ parts = [action_header]
854
+
855
+ # if there was help for the action, add lines of help text
856
+ if action.help:
857
+ help_text = self._expand_help(action)
858
+ help_lines = self._split_lines(help_text, help_width)
859
+ parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
860
+ for line in help_lines[1:]:
861
+ parts.append('%*s%s\n' % (help_position, '', line))
862
+
863
+ # or add a newline if the description doesn't end with one
864
+ elif not action_header.endswith('\n'):
865
+ parts.append('\n')
866
+
867
+ # if there are any sub-actions, add their help as well
868
+ for subaction in self._iter_indented_subactions(action):
869
+ parts.append(self._format_action(subaction))
870
+
871
+ # return a single string
872
+ return self._join_parts(parts)
873
+
874
+ def _format_action_invocation(self, action):
875
+ if not action.option_strings:
876
+ metavar, = self._metavar_formatter(action, action.dest)(1)
877
+ return metavar
878
+
879
+ else:
880
+ parts = []
881
+
882
+ # if the Optional doesn't take a value, format is:
883
+ # -s, --long
884
+ if action.nargs == 0:
885
+ parts.extend(action.option_strings)
886
+
887
+ # if the Optional takes a value, format is:
888
+ # -s ARGS, --long ARGS
889
+ else:
890
+ default = action.dest.upper()
891
+ args_string = self._format_args(action, default)
892
+ for option_string in action.option_strings:
893
+ parts.append('%s %s' % (option_string, args_string))
894
+
895
+ return ', '.join(parts)
896
+
897
+ def _metavar_formatter(self, action, default_metavar):
898
+ if action.metavar is not None:
899
+ result = action.metavar
900
+ elif action.choices is not None:
901
+ choice_strs = [str(choice) for choice in action.choices]
902
+ result = '{%s}' % ','.join(choice_strs)
903
+ else:
904
+ result = default_metavar
905
+
906
+ def format(tuple_size):
907
+ if isinstance(result, tuple):
908
+ return result
909
+ else:
910
+ return (result, ) * tuple_size
911
+ return format
912
+
913
+ def _format_args(self, action, default_metavar):
914
+ get_metavar = self._metavar_formatter(action, default_metavar)
915
+ if action.nargs is None:
916
+ result = '%s' % get_metavar(1)
917
+ elif action.nargs == OPTIONAL:
918
+ result = '[%s]' % get_metavar(1)
919
+ elif action.nargs == ZERO_OR_MORE:
920
+ result = '[%s [%s ...]]' % get_metavar(2)
921
+ elif action.nargs == ONE_OR_MORE:
922
+ result = '%s [%s ...]' % get_metavar(2)
923
+ elif action.nargs == REMAINDER:
924
+ result = '...'
925
+ elif action.nargs == PARSER:
926
+ result = '%s ...' % get_metavar(1)
927
+ else:
928
+ formats = ['%s' for _ in range(action.nargs)]
929
+ result = ' '.join(formats) % get_metavar(action.nargs)
930
+ return result
931
+
932
+ def _expand_help(self, action):
933
+ params = dict(vars(action), prog=self._prog)
934
+ for name in list(params):
935
+ if params[name] is SUPPRESS:
936
+ del params[name]
937
+ for name in list(params):
938
+ if hasattr(params[name], '__name__'):
939
+ params[name] = params[name].__name__
940
+ if params.get('choices') is not None:
941
+ choices_str = ', '.join([str(c) for c in params['choices']])
942
+ params['choices'] = choices_str
943
+ return self._get_help_string(action) % params
944
+
945
+ def _iter_indented_subactions(self, action):
946
+ try:
947
+ get_subactions = action._get_subactions
948
+ except AttributeError:
949
+ pass
950
+ else:
951
+ self._indent()
952
+ for subaction in get_subactions():
953
+ yield subaction
954
+ self._dedent()
955
+
956
+ def _split_lines(self, text, width):
957
+ text = self._whitespace_matcher.sub(' ', text).strip()
958
+ return _textwrap.wrap(text, width)
959
+
960
+ def _fill_text(self, text, width, indent):
961
+ text = self._whitespace_matcher.sub(' ', text).strip()
962
+ return _textwrap.fill(text, width, initial_indent=indent,
963
+ subsequent_indent=indent)
964
+
965
+ def _get_help_string(self, action):
966
+ return action.help
967
+
968
+
969
+class RawDescriptionHelpFormatter(HelpFormatter):
970
+ """Help message formatter which retains any formatting in descriptions.
971
+
972
+ Only the name of this class is considered a public API. All the methods
973
+ provided by the class are considered an implementation detail.
974
+ """
975
+
976
+ def _fill_text(self, text, width, indent):
977
+ return ''.join([indent + line for line in text.splitlines(True)])
978
+
979
+
980
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
981
+ """Help message formatter which retains formatting of all help text.
982
+
983
+ Only the name of this class is considered a public API. All the methods
984
+ provided by the class are considered an implementation detail.
985
+ """
986
+
987
+ def _split_lines(self, text, width):
988
+ return text.splitlines()
989
+
990
+
991
+class ArgumentDefaultsHelpFormatter(HelpFormatter):
992
+ """Help message formatter which adds default values to argument help.
993
+
994
+ Only the name of this class is considered a public API. All the methods
995
+ provided by the class are considered an implementation detail.
996
+ """
997
+
998
+ def _get_help_string(self, action):
999
+ help = action.help
1000
+ if '%(default)' not in action.help:
1001
+ if action.default is not SUPPRESS:
1002
+ defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
1003
+ if action.option_strings or action.nargs in defaulting_nargs:
1004
+ help += ' (default: %(default)s)'
1005
+ return help
1006
+
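+# Editorial illustration (not part of upstream argparse): this formatter is
+# opted into via the formatter_class= argument of ArgumentParser, e.g. for a
+# hypothetical parser:
+#
+# parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
+# parser.add_argument('--retries', type=int, default=3, help='retry count')
+# parser.parse_args(['--help']) # '--retries' help ends with '(default: 3)'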
1007
+
1008
+# =====================
1009
+# Options and Arguments
1010
+# =====================
1011
+
1012
+def _get_action_name(argument):
1013
+ if argument is None:
1014
+ return None
1015
+ elif argument.option_strings:
1016
+ return '/'.join(argument.option_strings)
1017
+ elif argument.metavar not in (None, SUPPRESS):
1018
+ return argument.metavar
1019
+ elif argument.dest not in (None, SUPPRESS):
1020
+ return argument.dest
1021
+ else:
1022
+ return None
1023
+
1024
+
1025
+class ArgumentError(Exception):
1026
+ """An error from creating or using an argument (optional or positional).
1027
+
1028
+ The string value of this exception is the message, augmented with
1029
+ information about the argument that caused it.
1030
+ """
1031
+
1032
+ def __init__(self, argument, message):
1033
+ self.argument_name = _get_action_name(argument)
1034
+ self.message = message
1035
+
1036
+ def __str__(self):
1037
+ if self.argument_name is None:
1038
+ format = '%(message)s'
1039
+ else:
1040
+ format = 'argument %(argument_name)s: %(message)s'
1041
+ return format % dict(message=self.message,
1042
+ argument_name=self.argument_name)
1043
+
1044
+
1045
+class ArgumentTypeError(Exception):
1046
+ """An error from trying to convert a command line string to a type."""
1047
+ pass
1048
+
1049
+
1050
+# ==============
1051
+# Action classes
1052
+# ==============
1053
+
1054
+class Action(_AttributeHolder):
1055
+ """Information about how to convert command line strings to Python objects.
1056
+
1057
+ Action objects are used by an ArgumentParser to represent the information
1058
+ needed to parse a single argument from one or more strings from the
1059
+ command line. The keyword arguments to the Action constructor are also
1060
+ all attributes of Action instances.
1061
+
1062
+ Keyword Arguments:
1063
+
1064
+ - option_strings -- A list of command-line option strings which
1065
+ should be associated with this action.
1066
+
1067
+ - dest -- The name of the attribute to hold the created object(s)
1068
+
1069
+ - nargs -- The number of command-line arguments that should be
1070
+ consumed. By default, one argument will be consumed and a single
1071
+ value will be produced. Other values include:
1072
+ - N (an integer) consumes N arguments (and produces a list)
1073
+ - '?' consumes zero or one arguments
1074
+ - '*' consumes zero or more arguments (and produces a list)
1075
+ - '+' consumes one or more arguments (and produces a list)
1076
+ Note that the difference between the default and nargs=1 is that
1077
+ with the default, a single value will be produced, while with
1078
+ nargs=1, a list containing a single value will be produced.
1079
+
1080
+ - const -- The value to be produced if the option is specified and the
1081
+ option uses an action that takes no values.
1082
+
1083
+ - default -- The value to be produced if the option is not specified.
1084
+
1085
+ - type -- The type which the command-line arguments should be converted
1086
+ to, should be one of 'string', 'int', 'float', 'complex' or a
1087
+ callable object that accepts a single string argument. If None,
1088
+ 'string' is assumed.
1089
+
1090
+ - choices -- A container of values that should be allowed. If not None,
1091
+ after a command-line argument has been converted to the appropriate
1092
+ type, an exception will be raised if it is not a member of this
1093
+ collection.
1094
+
1095
+ - required -- True if the action must always be specified at the
1096
+ command line. This is only meaningful for optional command-line
1097
+ arguments.
1098
+
1099
+ - help -- The help string describing the argument.
1100
+
1101
+ - metavar -- The name to be used for the option's argument with the
1102
+ help string. If None, the 'dest' value will be used as the name.
1103
+ """
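+ # Editorial illustration (not part of upstream argparse) of the nargs
+ # note above, for a hypothetical parser p:
+ # p.add_argument('--x') # p.parse_args(['--x', 'a']).x == 'a'
+ # p.add_argument('--y', nargs=1) # p.parse_args(['--y', 'a']).y == ['a']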
1104
+
1105
+ def __init__(self,
1106
+ option_strings,
1107
+ dest,
1108
+ nargs=None,
1109
+ const=None,
1110
+ default=None,
1111
+ type=None,
1112
+ choices=None,
1113
+ required=False,
1114
+ help=None,
1115
+ metavar=None):
1116
+ self.option_strings = option_strings
1117
+ self.dest = dest
1118
+ self.nargs = nargs
1119
+ self.const = const
1120
+ self.default = default
1121
+ self.type = type
1122
+ self.choices = choices
1123
+ self.required = required
1124
+ self.help = help
1125
+ self.metavar = metavar
1126
+
1127
+ def _get_kwargs(self):
1128
+ names = [
1129
+ 'option_strings',
1130
+ 'dest',
1131
+ 'nargs',
1132
+ 'const',
1133
+ 'default',
1134
+ 'type',
1135
+ 'choices',
1136
+ 'help',
1137
+ 'metavar',
1138
+ ]
1139
+ return [(name, getattr(self, name)) for name in names]
1140
+
1141
+ def __call__(self, parser, namespace, values, option_string=None):
1142
+ raise NotImplementedError(_('.__call__() not defined'))
1143
+
1144
+
1145
+class _StoreAction(Action):
1146
+
1147
+ def __init__(self,
1148
+ option_strings,
1149
+ dest,
1150
+ nargs=None,
1151
+ const=None,
1152
+ default=None,
1153
+ type=None,
1154
+ choices=None,
1155
+ required=False,
1156
+ help=None,
1157
+ metavar=None):
1158
+ if nargs == 0:
1159
+ raise ValueError('nargs for store actions must be > 0; if you '
1160
+ 'have nothing to store, actions such as store '
1161
+ 'true or store const may be more appropriate')
1162
+ if const is not None and nargs != OPTIONAL:
1163
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
1164
+ super(_StoreAction, self).__init__(
1165
+ option_strings=option_strings,
1166
+ dest=dest,
1167
+ nargs=nargs,
1168
+ const=const,
1169
+ default=default,
1170
+ type=type,
1171
+ choices=choices,
1172
+ required=required,
1173
+ help=help,
1174
+ metavar=metavar)
1175
+
1176
+ def __call__(self, parser, namespace, values, option_string=None):
1177
+ setattr(namespace, self.dest, values)
1178
+
1179
+
1180
+class _StoreConstAction(Action):
1181
+
1182
+ def __init__(self,
1183
+ option_strings,
1184
+ dest,
1185
+ const,
1186
+ default=None,
1187
+ required=False,
1188
+ help=None,
1189
+ metavar=None):
1190
+ super(_StoreConstAction, self).__init__(
1191
+ option_strings=option_strings,
1192
+ dest=dest,
1193
+ nargs=0,
1194
+ const=const,
1195
+ default=default,
1196
+ required=required,
1197
+ help=help)
1198
+
1199
+ def __call__(self, parser, namespace, values, option_string=None):
1200
+ setattr(namespace, self.dest, self.const)
1201
+
1202
+
1203
+class _StoreTrueAction(_StoreConstAction):
1204
+
1205
+ def __init__(self,
1206
+ option_strings,
1207
+ dest,
1208
+ default=False,
1209
+ required=False,
1210
+ help=None):
1211
+ super(_StoreTrueAction, self).__init__(
1212
+ option_strings=option_strings,
1213
+ dest=dest,
1214
+ const=True,
1215
+ default=default,
1216
+ required=required,
1217
+ help=help)
1218
+
1219
+
1220
+class _StoreFalseAction(_StoreConstAction):
1221
+
1222
+ def __init__(self,
1223
+ option_strings,
1224
+ dest,
1225
+ default=True,
1226
+ required=False,
1227
+ help=None):
1228
+ super(_StoreFalseAction, self).__init__(
1229
+ option_strings=option_strings,
1230
+ dest=dest,
1231
+ const=False,
1232
+ default=default,
1233
+ required=required,
1234
+ help=help)
1235
+
1236
+
1237
+class _AppendAction(Action):
1238
+
1239
+ def __init__(self,
1240
+ option_strings,
1241
+ dest,
1242
+ nargs=None,
1243
+ const=None,
1244
+ default=None,
1245
+ type=None,
1246
+ choices=None,
1247
+ required=False,
1248
+ help=None,
1249
+ metavar=None):
1250
+ if nargs == 0:
1251
+ raise ValueError('nargs for append actions must be > 0; if arg '
1252
+ 'strings are not supplying the value to append, '
1253
+ 'the append const action may be more appropriate')
1254
+ if const is not None and nargs != OPTIONAL:
1255
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
1256
+ super(_AppendAction, self).__init__(
1257
+ option_strings=option_strings,
1258
+ dest=dest,
1259
+ nargs=nargs,
1260
+ const=const,
1261
+ default=default,
1262
+ type=type,
1263
+ choices=choices,
1264
+ required=required,
1265
+ help=help,
1266
+ metavar=metavar)
1267
+
1268
+ def __call__(self, parser, namespace, values, option_string=None):
1269
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
1270
+ items.append(values)
1271
+ setattr(namespace, self.dest, items)
1272
+
1273
+
1274
+class _AppendConstAction(Action):
1275
+
1276
+ def __init__(self,
1277
+ option_strings,
1278
+ dest,
1279
+ const,
1280
+ default=None,
1281
+ required=False,
1282
+ help=None,
1283
+ metavar=None):
1284
+ super(_AppendConstAction, self).__init__(
1285
+ option_strings=option_strings,
1286
+ dest=dest,
1287
+ nargs=0,
1288
+ const=const,
1289
+ default=default,
1290
+ required=required,
1291
+ help=help,
1292
+ metavar=metavar)
1293
+
1294
+ def __call__(self, parser, namespace, values, option_string=None):
1295
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
1296
+ items.append(self.const)
1297
+ setattr(namespace, self.dest, items)
1298
+
1299
+
1300
+class _CountAction(Action):
1301
+
1302
+ def __init__(self,
1303
+ option_strings,
1304
+ dest,
1305
+ default=None,
1306
+ required=False,
1307
+ help=None):
1308
+ super(_CountAction, self).__init__(
1309
+ option_strings=option_strings,
1310
+ dest=dest,
1311
+ nargs=0,
1312
+ default=default,
1313
+ required=required,
1314
+ help=help)
1315
+
1316
+ def __call__(self, parser, namespace, values, option_string=None):
1317
+ new_count = _ensure_value(namespace, self.dest, 0) + 1
1318
+ setattr(namespace, self.dest, new_count)
1319
+
1320
+
1321
+class _HelpAction(Action):
1322
+
1323
+ def __init__(self,
1324
+ option_strings,
1325
+ dest=SUPPRESS,
1326
+ default=SUPPRESS,
1327
+ help=None):
1328
+ super(_HelpAction, self).__init__(
1329
+ option_strings=option_strings,
1330
+ dest=dest,
1331
+ default=default,
1332
+ nargs=0,
1333
+ help=help)
1334
+
1335
+ def __call__(self, parser, namespace, values, option_string=None):
1336
+ parser.print_help()
1337
+ parser.exit()
1338
+
1339
+
1340
+class _VersionAction(Action):
1341
+
1342
+ def __init__(self,
1343
+ option_strings,
1344
+ version=None,
1345
+ dest=SUPPRESS,
1346
+ default=SUPPRESS,
1347
+ help="show program's version number and exit"):
1348
+ super(_VersionAction, self).__init__(
1349
+ option_strings=option_strings,
1350
+ dest=dest,
1351
+ default=default,
1352
+ nargs=0,
1353
+ help=help)
1354
+ self.version = version
1355
+
1356
+ def __call__(self, parser, namespace, values, option_string=None):
1357
+ version = self.version
1358
+ if version is None:
1359
+ version = parser.version
1360
+ formatter = parser._get_formatter()
1361
+ formatter.add_text(version)
1362
+ parser.exit(message=formatter.format_help())
1363
+
1364
+
1365
+class _SubParsersAction(Action):
1366
+
1367
+ class _ChoicesPseudoAction(Action):
1368
+
1369
+ def __init__(self, name, aliases, help):
1370
+ metavar = dest = name
1371
+ if aliases:
1372
+ metavar += ' (%s)' % ', '.join(aliases)
1373
+ sup = super(_SubParsersAction._ChoicesPseudoAction, self)
1374
+ sup.__init__(option_strings=[], dest=dest, help=help,
1375
+ metavar=metavar)
1376
+
1377
+ def __init__(self,
1378
+ option_strings,
1379
+ prog,
1380
+ parser_class,
1381
+ dest=SUPPRESS,
1382
+ help=None,
1383
+ metavar=None):
1384
+
1385
+ self._prog_prefix = prog
1386
+ self._parser_class = parser_class
1387
+ self._name_parser_map = {}
1388
+ self._choices_actions = []
1389
+
1390
+ super(_SubParsersAction, self).__init__(
1391
+ option_strings=option_strings,
1392
+ dest=dest,
1393
+ nargs=PARSER,
1394
+ choices=self._name_parser_map,
1395
+ help=help,
1396
+ metavar=metavar)
1397
+
1398
+ def add_parser(self, name, **kwargs):
1399
+ # set prog from the existing prefix
1400
+ if kwargs.get('prog') is None:
1401
+ kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
1402
+
1403
+ aliases = kwargs.pop('aliases', ())
1404
+
1405
+ # create a pseudo-action to hold the choice help
1406
+ if 'help' in kwargs:
1407
+ help = kwargs.pop('help')
1408
+ choice_action = self._ChoicesPseudoAction(name, aliases, help)
1409
+ self._choices_actions.append(choice_action)
1410
+
1411
+ # create the parser and add it to the map
1412
+ parser = self._parser_class(**kwargs)
1413
+ self._name_parser_map[name] = parser
1414
+
1415
+ # make parser available under aliases also
1416
+ for alias in aliases:
1417
+ self._name_parser_map[alias] = parser
1418
+
1419
+ return parser
1420
+
1421
+ def _get_subactions(self):
1422
+ return self._choices_actions
1423
+
1424
+ def __call__(self, parser, namespace, values, option_string=None):
1425
+ parser_name = values[0]
1426
+ arg_strings = values[1:]
1427
+
1428
+ # set the parser name if requested
1429
+ if self.dest is not SUPPRESS:
1430
+ setattr(namespace, self.dest, parser_name)
1431
+
1432
+ # select the parser
1433
+ try:
1434
+ parser = self._name_parser_map[parser_name]
1435
+ except KeyError:
1436
+ tup = parser_name, ', '.join(self._name_parser_map)
1437
+ msg = _('unknown parser %r (choices: %s)' % tup)
1438
+ raise ArgumentError(self, msg)
1439
+
1440
+ # parse all the remaining options into the namespace
1441
+ # store any unrecognized options on the object, so that the top
1442
+ # level parser can decide what to do with them
1443
+ namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
1444
+ if arg_strings:
1445
+ vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
1446
+ getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
1447
+
1448
+
1449
+# ==============
1450
+# Type classes
1451
+# ==============
1452
+
1453
+class FileType(object):
1454
+ """Factory for creating file object types
1455
+
1456
+ Instances of FileType are typically passed as type= arguments to the
1457
+ ArgumentParser add_argument() method.
1458
+
1459
+ Keyword Arguments:
1460
+ - mode -- A string indicating how the file is to be opened. Accepts the
1461
+ same values as the builtin open() function.
1462
+ - bufsize -- The file's desired buffer size. Accepts the same values as
1463
+ the builtin open() function.
1464
+ """
1465
+
1466
+ def __init__(self, mode='r', bufsize=None):
1467
+ self._mode = mode
1468
+ self._bufsize = bufsize
1469
+
1470
+ def __call__(self, string):
1471
+ # the special argument "-" means sys.std{in,out}
1472
+ if string == '-':
1473
+ if 'r' in self._mode:
1474
+ return _sys.stdin
1475
+ elif 'w' in self._mode:
1476
+ return _sys.stdout
1477
+ else:
1478
+ msg = _('argument "-" with mode %r' % self._mode)
1479
+ raise ValueError(msg)
1480
+
1481
+ try:
1482
+ # all other arguments are used as file names
1483
+ if self._bufsize:
1484
+ return open(string, self._mode, self._bufsize)
1485
+ else:
1486
+ return open(string, self._mode)
1487
+ except IOError:
1488
+ err = _sys.exc_info()[1]
1489
+ message = _("can't open '%s': %s")
1490
+ raise ArgumentTypeError(message % (string, err))
1491
+
1492
+ def __repr__(self):
1493
+ args = [self._mode, self._bufsize]
1494
+ args_str = ', '.join([repr(arg) for arg in args if arg is not None])
1495
+ return '%s(%s)' % (type(self).__name__, args_str)
1496
+
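+# Editorial illustration (not part of upstream argparse): FileType instances
+# are passed as the type= argument, and '-' maps to stdin/stdout as handled
+# in __call__ above. For a hypothetical parser:
+#
+# parser = ArgumentParser()
+# parser.add_argument('infile', type=FileType('r'))
+# args = parser.parse_args(['-']) # args.infile is sys.stdin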
1497
+# ===========================
1498
+# Optional and Positional Parsing
1499
+# ===========================
1500
+
1501
+class Namespace(_AttributeHolder):
1502
+ """Simple object for storing attributes.
1503
+
1504
+ Implements equality by attribute names and values, and provides a simple
1505
+ string representation.
1506
+ """
1507
+
1508
+ def __init__(self, **kwargs):
1509
+ for name in kwargs:
1510
+ setattr(self, name, kwargs[name])
1511
+
1512
+ __hash__ = None
1513
+
1514
+ def __eq__(self, other):
1515
+ return vars(self) == vars(other)
1516
+
1517
+ def __ne__(self, other):
1518
+ return not (self == other)
1519
+
1520
+ def __contains__(self, key):
1521
+ return key in self.__dict__
1522
+
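+# Editorial illustration (not part of upstream argparse): Namespace equality
+# compares the attribute dicts, so:
+# Namespace(a=1, b='x') == Namespace(a=1, b='x') # True
+# 'a' in Namespace(a=1) # True, via __contains__ above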
1523
+
1524
+class _ActionsContainer(object):
1525
+
1526
+ def __init__(self,
1527
+ description,
1528
+ prefix_chars,
1529
+ argument_default,
1530
+ conflict_handler):
1531
+ super(_ActionsContainer, self).__init__()
1532
+
1533
+ self.description = description
1534
+ self.argument_default = argument_default
1535
+ self.prefix_chars = prefix_chars
1536
+ self.conflict_handler = conflict_handler
1537
+
1538
+ # set up registries
1539
+ self._registries = {}
1540
+
1541
+ # register actions
1542
+ self.register('action', None, _StoreAction)
1543
+ self.register('action', 'store', _StoreAction)
1544
+ self.register('action', 'store_const', _StoreConstAction)
1545
+ self.register('action', 'store_true', _StoreTrueAction)
1546
+ self.register('action', 'store_false', _StoreFalseAction)
1547
+ self.register('action', 'append', _AppendAction)
1548
+ self.register('action', 'append_const', _AppendConstAction)
1549
+ self.register('action', 'count', _CountAction)
1550
+ self.register('action', 'help', _HelpAction)
1551
+ self.register('action', 'version', _VersionAction)
1552
+ self.register('action', 'parsers', _SubParsersAction)
1553
+
1554
+ # raise an exception if the conflict handler is invalid
1555
+ self._get_handler()
1556
+
1557
+ # action storage
1558
+ self._actions = []
1559
+ self._option_string_actions = {}
1560
+
1561
+ # groups
1562
+ self._action_groups = []
1563
+ self._mutually_exclusive_groups = []
1564
+
1565
+ # defaults storage
1566
+ self._defaults = {}
1567
+
1568
+ # determines whether an "option" looks like a negative number
1569
+ self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
1570
+
1571
+ # whether or not there are any optionals that look like negative
1572
+ # numbers -- uses a list so it can be shared and edited
1573
+ self._has_negative_number_optionals = []
1574
+
1575
+ # ====================
1576
+ # Registration methods
1577
+ # ====================
1578
+ def register(self, registry_name, value, object):
1579
+ registry = self._registries.setdefault(registry_name, {})
1580
+ registry[value] = object
1581
+
1582
+ def _registry_get(self, registry_name, value, default=None):
1583
+ return self._registries[registry_name].get(value, default)
1584
+
1585
+ # ==================================
1586
+ # Namespace default accessor methods
1587
+ # ==================================
1588
+ def set_defaults(self, **kwargs):
1589
+ self._defaults.update(kwargs)
1590
+
1591
+ # if these defaults match any existing arguments, replace
1592
+ # the previous default on the object with the new one
1593
+ for action in self._actions:
1594
+ if action.dest in kwargs:
1595
+ action.default = kwargs[action.dest]
1596
+
1597
+ def get_default(self, dest):
1598
+ for action in self._actions:
1599
+ if action.dest == dest and action.default is not None:
1600
+ return action.default
1601
+ return self._defaults.get(dest, None)
1602
+
1603
+
1604
+ # =======================
1605
+ # Adding argument actions
1606
+ # =======================
1607
+ def add_argument(self, *args, **kwargs):
1608
+ """
1609
+ add_argument(dest, ..., name=value, ...)
1610
+ add_argument(option_string, option_string, ..., name=value, ...)
1611
+ """
1612
+
1613
+ # if no positional args are supplied or only one is supplied and
1614
+ # it doesn't look like an option string, parse a positional
1615
+ # argument
1616
+ chars = self.prefix_chars
1617
+ if not args or len(args) == 1 and args[0][0] not in chars:
1618
+ if args and 'dest' in kwargs:
1619
+ raise ValueError('dest supplied twice for positional argument')
1620
+ kwargs = self._get_positional_kwargs(*args, **kwargs)
1621
+
1622
+ # otherwise, we're adding an optional argument
1623
+ else:
1624
+ kwargs = self._get_optional_kwargs(*args, **kwargs)
1625
+
1626
+ # if no default was supplied, use the parser-level default
1627
+ if 'default' not in kwargs:
1628
+ dest = kwargs['dest']
1629
+ if dest in self._defaults:
1630
+ kwargs['default'] = self._defaults[dest]
1631
+ elif self.argument_default is not None:
1632
+ kwargs['default'] = self.argument_default
1633
+
1634
+ # create the action object, and add it to the parser
1635
+ action_class = self._pop_action_class(kwargs)
1636
+ if not _callable(action_class):
1637
+ raise ValueError('unknown action "%s"' % action_class)
1638
+ action = action_class(**kwargs)
1639
+
1640
+ # raise an error if the action type is not callable
1641
+ type_func = self._registry_get('type', action.type, action.type)
1642
+ if not _callable(type_func):
1643
+ raise ValueError('%r is not callable' % type_func)
1644
+
1645
+ return self._add_action(action)
1646
+
1647
+ def add_argument_group(self, *args, **kwargs):
1648
+ group = _ArgumentGroup(self, *args, **kwargs)
1649
+ self._action_groups.append(group)
1650
+ return group
1651
+
1652
+ def add_mutually_exclusive_group(self, **kwargs):
1653
+ group = _MutuallyExclusiveGroup(self, **kwargs)
1654
+ self._mutually_exclusive_groups.append(group)
1655
+ return group
1656
+
1657
+ def _add_action(self, action):
1658
+ # resolve any conflicts
1659
+ self._check_conflict(action)
1660
+
1661
+ # add to actions list
1662
+ self._actions.append(action)
1663
+ action.container = self
1664
+
1665
+ # index the action by any option strings it has
1666
+ for option_string in action.option_strings:
1667
+ self._option_string_actions[option_string] = action
1668
+
1669
+ # set the flag if any option strings look like negative numbers
1670
+ for option_string in action.option_strings:
1671
+ if self._negative_number_matcher.match(option_string):
1672
+ if not self._has_negative_number_optionals:
1673
+ self._has_negative_number_optionals.append(True)
1674
+
1675
+ # return the created action
1676
+ return action
1677
+
1678
+ def _remove_action(self, action):
1679
+ self._actions.remove(action)
1680
+
1681
+ def _add_container_actions(self, container):
1682
+ # collect groups by titles
1683
+ title_group_map = {}
1684
+ for group in self._action_groups:
1685
+ if group.title in title_group_map:
1686
+ msg = _('cannot merge actions - two groups are named %r')
1687
+ raise ValueError(msg % (group.title))
1688
+ title_group_map[group.title] = group
1689
+
1690
+ # map each action to its group
1691
+ group_map = {}
1692
+ for group in container._action_groups:
1693
+
1694
+ # if a group with the title exists, use that, otherwise
1695
+ # create a new group matching the container's group
1696
+ if group.title not in title_group_map:
1697
+ title_group_map[group.title] = self.add_argument_group(
1698
+ title=group.title,
1699
+ description=group.description,
1700
+ conflict_handler=group.conflict_handler)
1701
+
1702
+ # map the actions to their new group
1703
+ for action in group._group_actions:
1704
+ group_map[action] = title_group_map[group.title]
1705
+
1706
+ # add container's mutually exclusive groups
1707
+ # NOTE: if add_mutually_exclusive_group ever gains title= and
1708
+ # description= then this code will need to be expanded as above
1709
+ for group in container._mutually_exclusive_groups:
1710
+ mutex_group = self.add_mutually_exclusive_group(
1711
+ required=group.required)
1712
+
1713
+ # map the actions to their new mutex group
1714
+ for action in group._group_actions:
1715
+ group_map[action] = mutex_group
1716
+
1717
+ # add all actions to this container or their group
1718
+ for action in container._actions:
1719
+ group_map.get(action, self)._add_action(action)
1720
+
1721
+ def _get_positional_kwargs(self, dest, **kwargs):
1722
+ # make sure required is not specified
1723
+ if 'required' in kwargs:
1724
+ msg = _("'required' is an invalid argument for positionals")
1725
+ raise TypeError(msg)
1726
+
1727
+ # mark positional arguments as required if at least one is
1728
+ # always required
1729
+ if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
1730
+ kwargs['required'] = True
1731
+ if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
1732
+ kwargs['required'] = True
1733
+
1734
+ # return the keyword arguments with no option strings
1735
+ return dict(kwargs, dest=dest, option_strings=[])
1736
+
1737
+ def _get_optional_kwargs(self, *args, **kwargs):
1738
+ # determine short and long option strings
1739
+ option_strings = []
1740
+ long_option_strings = []
1741
+ for option_string in args:
1742
+ # error on strings that don't start with an appropriate prefix
1743
+ if not option_string[0] in self.prefix_chars:
1744
+ msg = _('invalid option string %r: '
1745
+ 'must start with a character %r')
1746
+ tup = option_string, self.prefix_chars
1747
+ raise ValueError(msg % tup)
1748
+
1749
+ # strings starting with two prefix characters are long options
1750
+ option_strings.append(option_string)
1751
+ if option_string[0] in self.prefix_chars:
1752
+ if len(option_string) > 1:
1753
+ if option_string[1] in self.prefix_chars:
1754
+ long_option_strings.append(option_string)
1755
+
1756
+ # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
1757
+ dest = kwargs.pop('dest', None)
1758
+ if dest is None:
1759
+ if long_option_strings:
1760
+ dest_option_string = long_option_strings[0]
1761
+ else:
1762
+ dest_option_string = option_strings[0]
1763
+ dest = dest_option_string.lstrip(self.prefix_chars)
1764
+ if not dest:
1765
+ msg = _('dest= is required for options like %r')
1766
+ raise ValueError(msg % option_string)
1767
+ dest = dest.replace('-', '_')
1768
+
1769
+ # return the updated keyword arguments
1770
+ return dict(kwargs, dest=dest, option_strings=option_strings)
1771
+
1772
+ def _pop_action_class(self, kwargs, default=None):
1773
+ action = kwargs.pop('action', default)
1774
+ return self._registry_get('action', action, action)
1775
+
1776
+ def _get_handler(self):
1777
+ # determine function from conflict handler string
1778
+ handler_func_name = '_handle_conflict_%s' % self.conflict_handler
1779
+ try:
1780
+ return getattr(self, handler_func_name)
1781
+ except AttributeError:
1782
+ msg = _('invalid conflict_resolution value: %r')
1783
+ raise ValueError(msg % self.conflict_handler)
1784
+
1785
+ def _check_conflict(self, action):
1786
+
1787
+ # find all options that conflict with this option
1788
+ confl_optionals = []
1789
+ for option_string in action.option_strings:
1790
+ if option_string in self._option_string_actions:
1791
+ confl_optional = self._option_string_actions[option_string]
1792
+ confl_optionals.append((option_string, confl_optional))
1793
+
1794
+ # resolve any conflicts
1795
+ if confl_optionals:
1796
+ conflict_handler = self._get_handler()
1797
+ conflict_handler(action, confl_optionals)
1798
+
1799
+ def _handle_conflict_error(self, action, conflicting_actions):
1800
+ message = _('conflicting option string(s): %s')
1801
+ conflict_string = ', '.join([option_string
1802
+ for option_string, action
1803
+ in conflicting_actions])
1804
+ raise ArgumentError(action, message % conflict_string)
1805
+
1806
+ def _handle_conflict_resolve(self, action, conflicting_actions):
1807
+
1808
+ # remove all conflicting options
1809
+ for option_string, action in conflicting_actions:
1810
+
1811
+ # remove the conflicting option
1812
+ action.option_strings.remove(option_string)
1813
+ self._option_string_actions.pop(option_string, None)
1814
+
1815
+ # if the option now has no option string, remove it from the
1816
+ # container holding it
1817
+ if not action.option_strings:
1818
+ action.container._remove_action(action)
1819
+
1820
+
1821
+class _ArgumentGroup(_ActionsContainer):
1822
+
1823
+ def __init__(self, container, title=None, description=None, **kwargs):
1824
+ # add any missing keyword arguments by checking the container
1825
+ update = kwargs.setdefault
1826
+ update('conflict_handler', container.conflict_handler)
1827
+ update('prefix_chars', container.prefix_chars)
1828
+ update('argument_default', container.argument_default)
1829
+ super_init = super(_ArgumentGroup, self).__init__
1830
+ super_init(description=description, **kwargs)
1831
+
1832
+ # group attributes
1833
+ self.title = title
1834
+ self._group_actions = []
1835
+
1836
+ # share most attributes with the container
1837
+ self._registries = container._registries
1838
+ self._actions = container._actions
1839
+ self._option_string_actions = container._option_string_actions
1840
+ self._defaults = container._defaults
1841
+ self._has_negative_number_optionals = \
1842
+ container._has_negative_number_optionals
1843
+
1844
+ def _add_action(self, action):
1845
+ action = super(_ArgumentGroup, self)._add_action(action)
1846
+ self._group_actions.append(action)
1847
+ return action
1848
+
1849
+ def _remove_action(self, action):
1850
+ super(_ArgumentGroup, self)._remove_action(action)
1851
+ self._group_actions.remove(action)
1852
+
1853
+
1854
+class _MutuallyExclusiveGroup(_ArgumentGroup):
1855
+
1856
+ def __init__(self, container, required=False):
1857
+ super(_MutuallyExclusiveGroup, self).__init__(container)
1858
+ self.required = required
1859
+ self._container = container
1860
+
1861
+ def _add_action(self, action):
1862
+ if action.required:
1863
+ msg = _('mutually exclusive arguments must be optional')
1864
+ raise ValueError(msg)
1865
+ action = self._container._add_action(action)
1866
+ self._group_actions.append(action)
1867
+ return action
1868
+
1869
+ def _remove_action(self, action):
1870
+ self._container._remove_action(action)
1871
+ self._group_actions.remove(action)
1872
+
1873
+
1874
+class ArgumentParser(_AttributeHolder, _ActionsContainer):
1875
+ """Object for parsing command line strings into Python objects.
1876
+
1877
+ Keyword Arguments:
1878
+ - prog -- The name of the program (default: sys.argv[0])
1879
+ - usage -- A usage message (default: auto-generated from arguments)
1880
+ - description -- A description of what the program does
1881
+ - epilog -- Text following the argument descriptions
1882
+ - parents -- Parsers whose arguments should be copied into this one
1883
+ - formatter_class -- HelpFormatter class for printing help messages
1884
+ - prefix_chars -- Characters that prefix optional arguments
1885
+ - fromfile_prefix_chars -- Characters that prefix files containing
1886
+ additional arguments
1887
+ - argument_default -- The default value for all arguments
1888
+ - conflict_handler -- String indicating how to handle conflicts
1889
+        - add_help -- Add a -h/--help option
+ """
1891
+
1892
+ def __init__(self,
1893
+ prog=None,
1894
+ usage=None,
1895
+ description=None,
1896
+ epilog=None,
1897
+ version=None,
1898
+ parents=[],
1899
+ formatter_class=HelpFormatter,
1900
+ prefix_chars='-',
1901
+ fromfile_prefix_chars=None,
1902
+ argument_default=None,
1903
+ conflict_handler='error',
1904
+ add_help=True):
1905
+
1906
+ if version is not None:
1907
+ import warnings
1908
+ warnings.warn(
1909
+ """The "version" argument to ArgumentParser is deprecated. """
1910
+ """Please use """
1911
+ """"add_argument(..., action='version', version="N", ...)" """
1912
+ """instead""", DeprecationWarning)
1913
+
1914
+ superinit = super(ArgumentParser, self).__init__
1915
+ superinit(description=description,
1916
+ prefix_chars=prefix_chars,
1917
+ argument_default=argument_default,
1918
+ conflict_handler=conflict_handler)
1919
+
1920
+ # default setting for prog
1921
+ if prog is None:
1922
+ prog = _os.path.basename(_sys.argv[0])
1923
+
1924
+ self.prog = prog
1925
+ self.usage = usage
1926
+ self.epilog = epilog
1927
+ self.version = version
1928
+ self.formatter_class = formatter_class
1929
+ self.fromfile_prefix_chars = fromfile_prefix_chars
1930
+ self.add_help = add_help
1931
+
1932
+ add_group = self.add_argument_group
1933
+ self._positionals = add_group(_('positional arguments'))
1934
+ self._optionals = add_group(_('optional arguments'))
1935
+ self._subparsers = None
1936
+
1937
+ # register types
1938
+ def identity(string):
1939
+ return string
1940
+ self.register('type', None, identity)
1941
+
1942
+ # add help and version arguments if necessary
1943
+ # (using explicit default to override global argument_default)
1944
+ if '-' in prefix_chars:
1945
+ default_prefix = '-'
1946
+ else:
1947
+ default_prefix = prefix_chars[0]
1948
+ if self.add_help:
1949
+ self.add_argument(
1950
+ default_prefix+'h', default_prefix*2+'help',
1951
+ action='help', default=SUPPRESS,
1952
+ help=_('show this help message and exit'))
1953
+ if self.version:
1954
+ self.add_argument(
1955
+ default_prefix+'v', default_prefix*2+'version',
1956
+ action='version', default=SUPPRESS,
1957
+ version=self.version,
1958
+ help=_("show program's version number and exit"))
1959
+
1960
+ # add parent arguments and defaults
1961
+ for parent in parents:
1962
+ self._add_container_actions(parent)
1963
+ try:
1964
+ defaults = parent._defaults
1965
+ except AttributeError:
1966
+ pass
1967
+ else:
1968
+ self._defaults.update(defaults)
1969
+
1970
+ # =======================
1971
+ # Pretty __repr__ methods
1972
+ # =======================
1973
+ def _get_kwargs(self):
1974
+ names = [
1975
+ 'prog',
1976
+ 'usage',
1977
+ 'description',
1978
+ 'version',
1979
+ 'formatter_class',
1980
+ 'conflict_handler',
1981
+ 'add_help',
1982
+ ]
1983
+ return [(name, getattr(self, name)) for name in names]
1984
+
1985
+ # ==================================
1986
+ # Optional/Positional adding methods
1987
+ # ==================================
1988
+ def add_subparsers(self, **kwargs):
1989
+ if self._subparsers is not None:
1990
+ self.error(_('cannot have multiple subparser arguments'))
1991
+
1992
+ # add the parser class to the arguments if it's not present
1993
+ kwargs.setdefault('parser_class', type(self))
1994
+
1995
+ if 'title' in kwargs or 'description' in kwargs:
1996
+ title = _(kwargs.pop('title', 'subcommands'))
1997
+ description = _(kwargs.pop('description', None))
1998
+ self._subparsers = self.add_argument_group(title, description)
1999
+ else:
2000
+ self._subparsers = self._positionals
2001
+
2002
+ # prog defaults to the usage message of this parser, skipping
2003
+ # optional arguments and with no "usage:" prefix
2004
+ if kwargs.get('prog') is None:
2005
+ formatter = self._get_formatter()
2006
+ positionals = self._get_positional_actions()
2007
+ groups = self._mutually_exclusive_groups
2008
+ formatter.add_usage(self.usage, positionals, groups, '')
2009
+ kwargs['prog'] = formatter.format_help().strip()
2010
+
2011
+ # create the parsers action and add it to the positionals list
2012
+ parsers_class = self._pop_action_class(kwargs, 'parsers')
2013
+ action = parsers_class(option_strings=[], **kwargs)
2014
+ self._subparsers._add_action(action)
2015
+
2016
+ # return the created parsers action
2017
+ return action
2018
+
2019
+ def _add_action(self, action):
2020
+ if action.option_strings:
2021
+ self._optionals._add_action(action)
2022
+ else:
2023
+ self._positionals._add_action(action)
2024
+ return action
2025
+
2026
+ def _get_optional_actions(self):
2027
+ return [action
2028
+ for action in self._actions
2029
+ if action.option_strings]
2030
+
2031
+ def _get_positional_actions(self):
2032
+ return [action
2033
+ for action in self._actions
2034
+ if not action.option_strings]
2035
+
2036
+ # =====================================
2037
+ # Command line argument parsing methods
2038
+ # =====================================
2039
+ def parse_args(self, args=None, namespace=None):
2040
+ args, argv = self.parse_known_args(args, namespace)
2041
+ if argv:
2042
+ msg = _('unrecognized arguments: %s')
2043
+ self.error(msg % ' '.join(argv))
2044
+ return args
2045
+
2046
+ def parse_known_args(self, args=None, namespace=None):
2047
+ # args default to the system args
2048
+ if args is None:
2049
+ args = _sys.argv[1:]
2050
+
2051
+ # default Namespace built from parser defaults
2052
+ if namespace is None:
2053
+ namespace = Namespace()
2054
+
2055
+ # add any action defaults that aren't present
2056
+ for action in self._actions:
2057
+ if action.dest is not SUPPRESS:
2058
+ if not hasattr(namespace, action.dest):
2059
+ if action.default is not SUPPRESS:
2060
+ setattr(namespace, action.dest, action.default)
2061
+
2062
+ # add any parser defaults that aren't present
2063
+ for dest in self._defaults:
2064
+ if not hasattr(namespace, dest):
2065
+ setattr(namespace, dest, self._defaults[dest])
2066
+
2067
+ # parse the arguments and exit if there are any errors
2068
+ try:
2069
+ namespace, args = self._parse_known_args(args, namespace)
2070
+ if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
2071
+ args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
2072
+ delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
2073
+ return namespace, args
2074
+ except ArgumentError:
2075
+ err = _sys.exc_info()[1]
2076
+ self.error(str(err))
2077
+
2078
+ def _parse_known_args(self, arg_strings, namespace):
2079
+ # replace arg strings that are file references
2080
+ if self.fromfile_prefix_chars is not None:
2081
+ arg_strings = self._read_args_from_files(arg_strings)
2082
+
2083
+ # map all mutually exclusive arguments to the other arguments
2084
+ # they can't occur with
2085
+ action_conflicts = {}
2086
+ for mutex_group in self._mutually_exclusive_groups:
2087
+ group_actions = mutex_group._group_actions
2088
+ for i, mutex_action in enumerate(mutex_group._group_actions):
2089
+ conflicts = action_conflicts.setdefault(mutex_action, [])
2090
+ conflicts.extend(group_actions[:i])
2091
+ conflicts.extend(group_actions[i + 1:])
2092
+
2093
+ # find all option indices, and determine the arg_string_pattern
2094
+ # which has an 'O' if there is an option at an index,
2095
+ # an 'A' if there is an argument, or a '-' if there is a '--'
2096
+ option_string_indices = {}
2097
+ arg_string_pattern_parts = []
2098
+ arg_strings_iter = iter(arg_strings)
2099
+ for i, arg_string in enumerate(arg_strings_iter):
2100
+
2101
+ # all args after -- are non-options
2102
+ if arg_string == '--':
2103
+ arg_string_pattern_parts.append('-')
2104
+ for arg_string in arg_strings_iter:
2105
+ arg_string_pattern_parts.append('A')
2106
+
2107
+ # otherwise, add the arg to the arg strings
2108
+ # and note the index if it was an option
2109
+ else:
2110
+ option_tuple = self._parse_optional(arg_string)
2111
+ if option_tuple is None:
2112
+ pattern = 'A'
2113
+ else:
2114
+ option_string_indices[i] = option_tuple
2115
+ pattern = 'O'
2116
+ arg_string_pattern_parts.append(pattern)
2117
+
2118
+ # join the pieces together to form the pattern
2119
+ arg_strings_pattern = ''.join(arg_string_pattern_parts)
2120
+
2121
+        # converts arg strings to the appropriate type and then takes the action
+        seen_actions = set()
+        seen_non_default_actions = set()
+
+        def take_action(action, argument_strings, option_string=None):
+            seen_actions.add(action)
+            argument_values = self._get_values(action, argument_strings)
+
+            # error if this argument is not allowed with other previously
+            # seen arguments, assuming that actions that use the default
+            # value don't really count as "present"
+            if argument_values is not action.default:
+                seen_non_default_actions.add(action)
+                for conflict_action in action_conflicts.get(action, []):
+                    if conflict_action in seen_non_default_actions:
+                        msg = _('not allowed with argument %s')
+                        action_name = _get_action_name(conflict_action)
+                        raise ArgumentError(action, msg % action_name)
+
+            # take the action if we didn't receive a SUPPRESS value
+            # (e.g. from a default)
+            if argument_values is not SUPPRESS:
+                action(self, namespace, argument_values, option_string)
+
+        # function to convert arg_strings into an optional action
+        def consume_optional(start_index):
+
+            # get the optional identified at this index
+            option_tuple = option_string_indices[start_index]
+            action, option_string, explicit_arg = option_tuple
+
+            # identify additional optionals in the same arg string
+            # (e.g. -xyz is the same as -x -y -z if no args are required)
+            match_argument = self._match_argument
+            action_tuples = []
+            while True:
+
+                # if we found no optional action, skip it
+                if action is None:
+                    extras.append(arg_strings[start_index])
+                    return start_index + 1
+
+                # if there is an explicit argument, try to match the
+                # optional's string arguments to only this
+                if explicit_arg is not None:
+                    arg_count = match_argument(action, 'A')
+
+                    # if the action is a single-dash option and takes no
+                    # arguments, try to parse more single-dash options out
+                    # of the tail of the option string
+                    chars = self.prefix_chars
+                    if arg_count == 0 and option_string[1] not in chars:
+                        action_tuples.append((action, [], option_string))
+                        char = option_string[0]
+                        option_string = char + explicit_arg[0]
+                        new_explicit_arg = explicit_arg[1:] or None
+                        optionals_map = self._option_string_actions
+                        if option_string in optionals_map:
+                            action = optionals_map[option_string]
+                            explicit_arg = new_explicit_arg
+                        else:
+                            msg = _('ignored explicit argument %r')
+                            raise ArgumentError(action, msg % explicit_arg)
+
+                    # if the action expects exactly one argument, we've
+                    # successfully matched the option; exit the loop
+                    elif arg_count == 1:
+                        stop = start_index + 1
+                        args = [explicit_arg]
+                        action_tuples.append((action, args, option_string))
+                        break
+
+                    # error if a double-dash option did not use the
+                    # explicit argument
+                    else:
+                        msg = _('ignored explicit argument %r')
+                        raise ArgumentError(action, msg % explicit_arg)
+
+                # if there is no explicit argument, try to match the
+                # optional's string arguments with the following strings
+                # if successful, exit the loop
+                else:
+                    start = start_index + 1
+                    selected_patterns = arg_strings_pattern[start:]
+                    arg_count = match_argument(action, selected_patterns)
+                    stop = start + arg_count
+                    args = arg_strings[start:stop]
+                    action_tuples.append((action, args, option_string))
+                    break
+
+            # add the Optional to the list and return the index at which
+            # the Optional's string args stopped
+            assert action_tuples
+            for action, args, option_string in action_tuples:
+                take_action(action, args, option_string)
+            return stop
+
+        # the list of Positionals left to be parsed; this is modified
+        # by consume_positionals()
+        positionals = self._get_positional_actions()
+
+        # function to convert arg_strings into positional actions
+        def consume_positionals(start_index):
+            # match as many Positionals as possible
+            match_partial = self._match_arguments_partial
+            selected_pattern = arg_strings_pattern[start_index:]
+            arg_counts = match_partial(positionals, selected_pattern)
+
+            # slice off the appropriate arg strings for each Positional
+            # and add the Positional and its args to the list
+            for action, arg_count in zip(positionals, arg_counts):
+                args = arg_strings[start_index: start_index + arg_count]
+                start_index += arg_count
+                take_action(action, args)
+
+            # slice off the Positionals that we just parsed and return the
+            # index at which the Positionals' string args stopped
+            positionals[:] = positionals[len(arg_counts):]
+            return start_index
+
+        # consume Positionals and Optionals alternately, until we have
+        # passed the last option string
+        extras = []
+        start_index = 0
+        if option_string_indices:
+            max_option_string_index = max(option_string_indices)
+        else:
+            max_option_string_index = -1
+        while start_index <= max_option_string_index:
+
+            # consume any Positionals preceding the next option
+            next_option_string_index = min([
+                index
+                for index in option_string_indices
+                if index >= start_index])
+            if start_index != next_option_string_index:
+                positionals_end_index = consume_positionals(start_index)
+
+                # only try to parse the next optional if we didn't consume
+                # the option string during the positionals parsing
+                if positionals_end_index > start_index:
+                    start_index = positionals_end_index
+                    continue
+                else:
+                    start_index = positionals_end_index
+
+            # if we consumed all the positionals we could and we're not
+            # at the index of an option string, there were extra arguments
+            if start_index not in option_string_indices:
+                strings = arg_strings[start_index:next_option_string_index]
+                extras.extend(strings)
+                start_index = next_option_string_index
+
+            # consume the next optional and any arguments for it
+            start_index = consume_optional(start_index)
+
+        # consume any positionals following the last Optional
+        stop_index = consume_positionals(start_index)
+
+        # if we didn't consume all the argument strings, there were extras
+        extras.extend(arg_strings[stop_index:])
+
+        # if we didn't use all the Positional objects, there were too few
+        # arg strings supplied.
+        if positionals:
+            self.error(_('too few arguments'))
+
+        # make sure all required actions were present, and convert defaults.
+        for action in self._actions:
+            if action not in seen_actions:
+                if action.required:
+                    name = _get_action_name(action)
+                    self.error(_('argument %s is required') % name)
+                else:
+                    # Convert action default now instead of doing it before
+                    # parsing arguments to avoid calling convert functions
+                    # twice (which may fail) if the argument was given, but
+                    # only if it was defined already in the namespace
+                    if (action.default is not None and
+                        isinstance(action.default, basestring) and
+                        hasattr(namespace, action.dest) and
+                        action.default is getattr(namespace, action.dest)):
+                        setattr(namespace, action.dest,
+                                self._get_value(action, action.default))
+
+        # make sure all required groups had one option present
+        for group in self._mutually_exclusive_groups:
+            if group.required:
+                for action in group._group_actions:
+                    if action in seen_non_default_actions:
+                        break
+
+                # if no actions were used, report the error
+                else:
+                    names = [_get_action_name(action)
+                             for action in group._group_actions
+                             if action.help is not SUPPRESS]
+                    msg = _('one of the arguments %s is required')
+                    self.error(msg % ' '.join(names))
+
+        # return the updated namespace and the extra arguments
+        return namespace, extras
+
+    def _read_args_from_files(self, arg_strings):
+        # expand arguments referencing files
+        new_arg_strings = []
+        for arg_string in arg_strings:
+
+            # for regular arguments, just add them back into the list
+            if arg_string[0] not in self.fromfile_prefix_chars:
+                new_arg_strings.append(arg_string)
+
+            # replace arguments referencing files with the file content
+            else:
+                try:
+                    args_file = open(arg_string[1:])
+                    try:
+                        arg_strings = []
+                        for arg_line in args_file.read().splitlines():
+                            for arg in self.convert_arg_line_to_args(arg_line):
+                                arg_strings.append(arg)
+                        arg_strings = self._read_args_from_files(arg_strings)
+                        new_arg_strings.extend(arg_strings)
+                    finally:
+                        args_file.close()
+                except IOError:
+                    err = _sys.exc_info()[1]
+                    self.error(str(err))
+
+        # return the modified argument list
+        return new_arg_strings
+
+    def convert_arg_line_to_args(self, arg_line):
+        return [arg_line]
+
+    def _match_argument(self, action, arg_strings_pattern):
+        # match the pattern for this action to the arg strings
+        nargs_pattern = self._get_nargs_pattern(action)
+        match = _re.match(nargs_pattern, arg_strings_pattern)
+
+        # raise an exception if we weren't able to find a match
+        if match is None:
+            nargs_errors = {
+                None: _('expected one argument'),
+                OPTIONAL: _('expected at most one argument'),
+                ONE_OR_MORE: _('expected at least one argument'),
+            }
+            default = _('expected %s argument(s)') % action.nargs
+            msg = nargs_errors.get(action.nargs, default)
+            raise ArgumentError(action, msg)
+
+        # return the number of arguments matched
+        return len(match.group(1))
+
+    def _match_arguments_partial(self, actions, arg_strings_pattern):
+        # progressively shorten the actions list by slicing off the
+        # final actions until we find a match
+        result = []
+        for i in range(len(actions), 0, -1):
+            actions_slice = actions[:i]
+            pattern = ''.join([self._get_nargs_pattern(action)
+                               for action in actions_slice])
+            match = _re.match(pattern, arg_strings_pattern)
+            if match is not None:
+                result.extend([len(string) for string in match.groups()])
+                break
+
+        # return the list of arg string counts
+        return result
+
+    def _parse_optional(self, arg_string):
+        # if it's an empty string, it was meant to be a positional
+        if not arg_string:
+            return None
+
+        # if it doesn't start with a prefix, it was meant to be positional
+        if not arg_string[0] in self.prefix_chars:
+            return None
+
+        # if the option string is present in the parser, return the action
+        if arg_string in self._option_string_actions:
+            action = self._option_string_actions[arg_string]
+            return action, arg_string, None
+
+        # if it's just a single character, it was meant to be positional
+        if len(arg_string) == 1:
+            return None
+
+        # if the option string before the "=" is present, return the action
+        if '=' in arg_string:
+            option_string, explicit_arg = arg_string.split('=', 1)
+            if option_string in self._option_string_actions:
+                action = self._option_string_actions[option_string]
+                return action, option_string, explicit_arg
+
+        # search through all possible prefixes of the option string
+        # and all actions in the parser for possible interpretations
+        option_tuples = self._get_option_tuples(arg_string)
+
+        # if multiple actions match, the option string was ambiguous
+        if len(option_tuples) > 1:
+            options = ', '.join([option_string
+                for action, option_string, explicit_arg in option_tuples])
+            tup = arg_string, options
+            self.error(_('ambiguous option: %s could match %s') % tup)
+
+        # if exactly one action matched, this segmentation is good,
+        # so return the parsed action
+        elif len(option_tuples) == 1:
+            option_tuple, = option_tuples
+            return option_tuple
+
+        # if it was not found as an option, but it looks like a negative
+        # number, it was meant to be positional
+        # unless there are negative-number-like options
+        if self._negative_number_matcher.match(arg_string):
+            if not self._has_negative_number_optionals:
+                return None
+
+        # if it contains a space, it was meant to be a positional
+        if ' ' in arg_string:
+            return None
+
+        # it was meant to be an optional but there is no such option
+        # in this parser (though it might be a valid option in a subparser)
+        return None, arg_string, None
+
+    def _get_option_tuples(self, option_string):
+        result = []
+
+        # option strings starting with two prefix characters are only
+        # split at the '='
+        chars = self.prefix_chars
+        if option_string[0] in chars and option_string[1] in chars:
+            if '=' in option_string:
+                option_prefix, explicit_arg = option_string.split('=', 1)
+            else:
+                option_prefix = option_string
+                explicit_arg = None
+            for option_string in self._option_string_actions:
+                if option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # single character options can be concatenated with their arguments
+        # but multiple character options always have to have their argument
+        # separate
+        elif option_string[0] in chars and option_string[1] not in chars:
+            option_prefix = option_string
+            explicit_arg = None
+            short_option_prefix = option_string[:2]
+            short_explicit_arg = option_string[2:]
+
+            for option_string in self._option_string_actions:
+                if option_string == short_option_prefix:
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, short_explicit_arg
+                    result.append(tup)
+                elif option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # shouldn't ever get here
+        else:
+            self.error(_('unexpected option string: %s') % option_string)
+
+        # return the collected option tuples
+        return result
+
+    def _get_nargs_pattern(self, action):
+        # in all examples below, we have to allow for '--' args
+        # which are represented as '-' in the pattern
+        nargs = action.nargs
+
+        # the default (None) is assumed to be a single argument
+        if nargs is None:
+            nargs_pattern = '(-*A-*)'
+
+        # allow zero or one arguments
+        elif nargs == OPTIONAL:
+            nargs_pattern = '(-*A?-*)'
+
+        # allow zero or more arguments
+        elif nargs == ZERO_OR_MORE:
+            nargs_pattern = '(-*[A-]*)'
+
+        # allow one or more arguments
+        elif nargs == ONE_OR_MORE:
+            nargs_pattern = '(-*A[A-]*)'
+
+        # allow any number of options or arguments
+        elif nargs == REMAINDER:
+            nargs_pattern = '([-AO]*)'
+
+        # allow one argument followed by any number of options or arguments
+        elif nargs == PARSER:
+            nargs_pattern = '(-*A[-AO]*)'
+
+        # all others should be integers
+        else:
+            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+
+        # if this is an optional action, -- is not allowed
+        if action.option_strings:
+            nargs_pattern = nargs_pattern.replace('-*', '')
+            nargs_pattern = nargs_pattern.replace('-', '')
+
+        # return the pattern
+        return nargs_pattern
+
+    # ========================
+    # Value conversion methods
+    # ========================
+    def _get_values(self, action, arg_strings):
+        # for everything but PARSER args, strip out '--'
+        if action.nargs not in [PARSER, REMAINDER]:
+            arg_strings = [s for s in arg_strings if s != '--']
+
+        # optional argument produces a default when not present
+        if not arg_strings and action.nargs == OPTIONAL:
+            if action.option_strings:
+                value = action.const
+            else:
+                value = action.default
+            if isinstance(value, basestring):
+                value = self._get_value(action, value)
+                self._check_value(action, value)
+
+        # when nargs='*' on a positional, if there were no command-line
+        # args, use the default if it is anything other than None
+        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
+              not action.option_strings):
+            if action.default is not None:
+                value = action.default
+            else:
+                value = arg_strings
+            self._check_value(action, value)
+
+        # single argument or optional argument produces a single value
+        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
+            arg_string, = arg_strings
+            value = self._get_value(action, arg_string)
+            self._check_value(action, value)
+
+        # REMAINDER arguments convert all values, checking none
+        elif action.nargs == REMAINDER:
+            value = [self._get_value(action, v) for v in arg_strings]
+
+        # PARSER arguments convert all values, but check only the first
+        elif action.nargs == PARSER:
+            value = [self._get_value(action, v) for v in arg_strings]
+            self._check_value(action, value[0])
+
+        # all other types of nargs produce a list
+        else:
+            value = [self._get_value(action, v) for v in arg_strings]
+            for v in value:
+                self._check_value(action, v)
+
+        # return the converted value
+        return value
+
+    def _get_value(self, action, arg_string):
+        type_func = self._registry_get('type', action.type, action.type)
+        if not _callable(type_func):
+            msg = _('%r is not callable')
+            raise ArgumentError(action, msg % type_func)
+
+        # convert the value to the appropriate type
+        try:
+            result = type_func(arg_string)
+
+        # ArgumentTypeErrors indicate errors
+        except ArgumentTypeError:
+            name = getattr(action.type, '__name__', repr(action.type))
+            msg = str(_sys.exc_info()[1])
+            raise ArgumentError(action, msg)
+
+        # TypeErrors or ValueErrors also indicate errors
+        except (TypeError, ValueError):
+            name = getattr(action.type, '__name__', repr(action.type))
+            msg = _('invalid %s value: %r')
+            raise ArgumentError(action, msg % (name, arg_string))
+
+        # return the converted value
+        return result
+
+    def _check_value(self, action, value):
+        # converted value must be one of the choices (if specified)
+        if action.choices is not None and value not in action.choices:
+            tup = value, ', '.join(map(repr, action.choices))
+            msg = _('invalid choice: %r (choose from %s)') % tup
+            raise ArgumentError(action, msg)
+
+    # =======================
+    # Help-formatting methods
+    # =======================
+    def format_usage(self):
+        formatter = self._get_formatter()
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+        return formatter.format_help()
+
+    def format_help(self):
+        formatter = self._get_formatter()
+
+        # usage
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+
+        # description
+        formatter.add_text(self.description)
+
+        # positionals, optionals and user-defined groups
+        for action_group in self._action_groups:
+            formatter.start_section(action_group.title)
+            formatter.add_text(action_group.description)
+            formatter.add_arguments(action_group._group_actions)
+            formatter.end_section()
+
+        # epilog
+        formatter.add_text(self.epilog)
+
+        # determine help from format above
+        return formatter.format_help()
+
+    def format_version(self):
+        import warnings
+        warnings.warn(
+            'The format_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        formatter = self._get_formatter()
+        formatter.add_text(self.version)
+        return formatter.format_help()
+
+    def _get_formatter(self):
+        return self.formatter_class(prog=self.prog)
+
+    # =====================
+    # Help-printing methods
+    # =====================
+    def print_usage(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_usage(), file)
+
+    def print_help(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_help(), file)
+
+    def print_version(self, file=None):
+        import warnings
+        warnings.warn(
+            'The print_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        self._print_message(self.format_version(), file)
+
+    def _print_message(self, message, file=None):
+        if message:
+            if file is None:
+                file = _sys.stderr
+            file.write(message)
+
+    # ===============
+    # Exiting methods
+    # ===============
+    def exit(self, status=0, message=None):
+        if message:
+            self._print_message(message, _sys.stderr)
+        _sys.exit(status)
+
+    def error(self, message):
+        """error(message: string)
+
+        Prints a usage message incorporating the message to stderr and
+        exits.
+
+        If you override this in a subclass, it should not return -- it
+        should either exit or raise an exception.
+        """
+        self.print_usage(_sys.stderr)
+        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
--
2.13.5
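For reference, a minimal sketch of how a script would exercise this bundled
copy once scripts/ is on sys.path (the parser below is hypothetical, not
taken from any QEMU script; it relies only on behavior visible in the code
above):

    import argparse

    parser = argparse.ArgumentParser(description='argparse smoke test')
    parser.add_argument('--count', type=int, default=1)
    parser.add_argument('output', type=argparse.FileType('w'))

    args = parser.parse_args(['--count', '3', '-'])
    # FileType.__call__ maps '-' to sys.stdout for 'w' modes (see above)
    args.output.write('%d\n' % args.count)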
Deleted patch
Add the scripts/ directory to sys.path so Python 2.6 will be able to
import argparse.

Cc: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: John Snow <jsnow@redhat.com>
Acked-by: Fam Zheng <famz@redhat.com>
Message-id: 20170825155732.15665-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/docker/docker.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/docker/docker.py b/tests/docker/docker.py
index XXXXXXX..XXXXXXX 100755
--- a/tests/docker/docker.py
+++ b/tests/docker/docker.py
@@ -XXX,XX +XXX,XX @@

 import os
 import sys
+sys.path.append(os.path.join(os.path.dirname(__file__),
+                             '..', '..', 'scripts'))
+import argparse
 import subprocess
 import json
 import hashlib
 import atexit
 import uuid
-import argparse
 import tempfile
 import re
 import signal
--
2.13.5
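Because the directory is appended rather than prepended, a stdlib argparse
still takes precedence when one exists (Python 2.7 and later); only Python
2.6, which lacks the module, falls through to scripts/argparse.py. A quick
hypothetical check, not part of the series:

    import os
    import sys
    sys.path.append(os.path.join(os.path.dirname(__file__),
                                 '..', '..', 'scripts'))
    import argparse
    # expected: the stdlib path on >= 2.7, .../scripts/argparse.py on 2.6
    print(argparse.__file__)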
Deleted patch
Add the scripts/ directory to sys.path so Python 2.6 will be able to
import argparse.

Cc: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: John Snow <jsnow@redhat.com>
Acked-by: Fam Zheng <famz@redhat.com>
Message-id: 20170825155732.15665-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/migration/guestperf/shell.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/migration/guestperf/shell.py
+++ b/tests/migration/guestperf/shell.py
@@ -XXX,XX +XXX,XX @@
 #

-import argparse
-import fnmatch
 import os
 import os.path
-import platform
 import sys
+sys.path.append(os.path.join(os.path.dirname(__file__),
+                             '..', '..', '..', 'scripts'))
+import argparse
+import fnmatch
+import platform

 from guestperf.hardware import Hardware
 from guestperf.engine import Engine
--
2.13.5
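The extra '..' component relative to the docker.py hunk reflects that
shell.py sits one directory deeper (tests/migration/guestperf/ versus
tests/docker/); both resolve to the top-level scripts/ directory. A sketch
of the path arithmetic:

    import os
    # tests/docker/docker.py: two levels up, then scripts/
    print(os.path.normpath('tests/docker/../../scripts'))                   # scripts
    # tests/migration/guestperf/shell.py: three levels up, one per directory
    print(os.path.normpath('tests/migration/guestperf/../../../scripts'))   # scripts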