The following changes since commit 2b483739791b33c46e6084b51edcf62107058ae1:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170904-2' into staging (2017-09-04 17:21:24 +0100)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to b461151ff31c7925f271c297e8abed20231ac7d3:

  block: document semantics of bdrv_co_preadv|pwritev (2017-09-05 11:07:02 +0100)

----------------------------------------------------------------

----------------------------------------------------------------

Daniel P. Berrange (1):
  block: document semantics of bdrv_co_preadv|pwritev

Stefan Hajnoczi (3):
  qemu.py: make VM() a context manager
  iotests.py: add FilePath context manager
  qemu-iotests: use context managers for resource cleanup in 194

 include/block/block_int.h     |  31 +++++++++++
 scripts/qemu.py               |  16 +++++-
 tests/qemu-iotests/194        | 117 +++++++++++++++++++++---------------------
 tests/qemu-iotests/iotests.py |  26 ++++++++++
 4 files changed, 130 insertions(+), 60 deletions(-)

--
2.13.5

There are a number of ways to ensure that the QEMU process is shut down
when the test ends, including atexit.register(), try: finally:, or
unittest tearDown() methods.  All of these require extra code and the
programmer must remember to add vm.shutdown().

A nice solution is context managers:

  with VM(binary) as vm:
      ...
  # vm is guaranteed to be shut down here
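
For illustration, a minimal standalone sketch of the idiom (FakeVM is a
made-up stand-in for QEMUMachine, not part of this patch), showing that
shutdown() runs even when the test body raises:

    class FakeVM(object):
        # Illustrative stand-in for QEMUMachine; names are hypothetical.
        def shutdown(self):
            print('shutdown called')

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.shutdown()   # runs on normal exit and on exception
            return False      # False: do not swallow the exception

    try:
        with FakeVM() as vm:
            raise RuntimeError('simulated test failure')
    except RuntimeError:
        pass  # shutdown() has already run by this point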

Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Message-id: 20170824072202.26818-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 scripts/qemu.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/scripts/qemu.py b/scripts/qemu.py
index XXXXXXX..XXXXXXX 100644
--- a/scripts/qemu.py
+++ b/scripts/qemu.py
@@ -XXX,XX +XXX,XX @@ import qmp.qmp
 
 
 class QEMUMachine(object):
-    '''A QEMU VM'''
+    '''A QEMU VM
+
+    Use this object as a context manager to ensure the QEMU process terminates::
+
+        with VM(binary) as vm:
+            ...
+        # vm is guaranteed to be shut down here
+    '''
 
     def __init__(self, binary, args=[], wrapper=[], name=None, test_dir="/var/tmp",
                  monitor_address=None, socket_scm_helper=None, debug=False):
@@ -XXX,XX +XXX,XX @@ class QEMUMachine(object):
         self._socket_scm_helper = socket_scm_helper
         self._debug = debug
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.shutdown()
+        return False
+
     # This can be used to add an unused monitor instance.
     def add_monitor_telnet(self, ip, port):
         args = 'tcp:%s:%d,server,nowait,telnet' % (ip, port)
--
2.13.5

The scratch/ (TEST_DIR) directory is not automatically cleaned up after
test execution.  It is the responsibility of tests to remove any files
they create.

A nice way of doing this is to declare files at the beginning of the
test and automatically remove them with a context manager:

  with iotests.FilePath('test.img') as img_path:
      qemu_img(...)
      qemu_io(...)
  # img_path is guaranteed to be deleted here
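
For illustration, a self-contained sketch of the same cleanup pattern
(ScratchPath is a made-up stand-in for iotests.FilePath, not part of
this patch); embedding the pid in the filename avoids collisions between
concurrent test runs:

    import os
    import tempfile

    class ScratchPath(object):
        # Illustrative stand-in for iotests.FilePath.
        def __init__(self, name, base_dir=None):
            base_dir = base_dir or tempfile.gettempdir()
            # Embed the pid so concurrent test runs do not collide.
            self.path = os.path.join(base_dir,
                                     '{0}-{1}'.format(os.getpid(), name))

        def __enter__(self):
            return self.path

        def __exit__(self, exc_type, exc_val, exc_tb):
            try:
                os.remove(self.path)
            except OSError:
                pass        # never created, or already gone
            return False    # propagate exceptions from the with-body

    with ScratchPath('test.img') as img_path:
        open(img_path, 'w').close()     # stand-in for qemu-img create
    assert not os.path.exists(img_path)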

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170824072202.26818-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/iotests.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -XXX,XX +XXX,XX @@ class Timeout:
     def timeout(self, signum, frame):
         raise Exception(self.errmsg)
 
+
+class FilePath(object):
+    '''An auto-generated filename that cleans itself up.
+
+    Use this context manager to generate filenames and ensure that the file
+    gets deleted::
+
+        with FilePath('test.img') as img_path:
+            qemu_img('create', img_path, '1G')
+        # img_path is automatically deleted
+    '''
+    def __init__(self, name):
+        filename = '{0}-{1}'.format(os.getpid(), name)
+        self.path = os.path.join(test_dir, filename)
+
+    def __enter__(self):
+        return self.path
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            os.remove(self.path)
+        except OSError:
+            pass
+        return False
+
+
 class VM(qtest.QEMUQtestMachine):
     '''A QEMU VM'''
 
--
2.13.5

Switch from atexit.register() to a more elegant idiom of declaring
resources in a with statement:

  with FilePath('monitor.sock') as monitor_path, \
       VM() as vm:
      ...

The files and VMs will be automatically cleaned up whether the test
passes or fails.
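
For illustration, when the resource list grows, the same reverse-order
cleanup can also be had without backslash continuations via
contextlib.ExitStack; a sketch only, assuming Python 3, with a made-up
Resource class standing in for FilePath and VM (the test itself keeps
the plain with statement):

    from contextlib import ExitStack

    class Resource(object):
        # Illustrative context manager standing in for FilePath/VM.
        def __init__(self, name):
            self.name = name

        def __enter__(self):
            print('acquire {0}'.format(self.name))
            return self.name

        def __exit__(self, exc_type, exc_val, exc_tb):
            print('release {0}'.format(self.name))
            return False

    with ExitStack() as stack:
        paths = [stack.enter_context(Resource(n))
                 for n in ('source.img', 'dest.img', 'nbd.sock')]
        # All three are live here and are released in reverse order
        # when the block exits, whether the body passes or fails.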

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170824072202.26818-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/194 | 117 ++++++++++++++++++++++++-------------------------
 1 file changed, 58 insertions(+), 59 deletions(-)

diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/194
+++ b/tests/qemu-iotests/194
@@ -XXX,XX +XXX,XX @@
 #
 # Non-shared storage migration test using NBD server and drive-mirror
 
-import os
-import atexit
 import iotests
 
 iotests.verify_platform(['linux'])
 
-img_size = '1G'
-source_img_path = os.path.join(iotests.test_dir, 'source.img')
-dest_img_path = os.path.join(iotests.test_dir, 'dest.img')
-iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size)
-iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size)
-
-iotests.log('Launching VMs...')
-migration_sock_path = os.path.join(iotests.test_dir, 'migration.sock')
-nbd_sock_path = os.path.join(iotests.test_dir, 'nbd.sock')
-source_vm = iotests.VM('source').add_drive(source_img_path)
-dest_vm = (iotests.VM('dest').add_drive(dest_img_path)
-           .add_incoming('unix:{0}'.format(migration_sock_path)))
-source_vm.launch()
-atexit.register(source_vm.shutdown)
-dest_vm.launch()
-atexit.register(dest_vm.shutdown)
-
-iotests.log('Launching NBD server on destination...')
-iotests.log(dest_vm.qmp('nbd-server-start', addr={'type': 'unix', 'data': {'path': nbd_sock_path}}))
-iotests.log(dest_vm.qmp('nbd-server-add', device='drive0', writable=True))
-
-iotests.log('Starting `drive-mirror` on source...')
-iotests.log(source_vm.qmp(
-              'drive-mirror',
-              device='drive0',
-              target='nbd+unix:///drive0?socket={0}'.format(nbd_sock_path),
-              sync='full',
-              format='raw', # always raw, the server handles the format
-              mode='existing',
-              job_id='mirror-job0'))
-
-iotests.log('Waiting for `drive-mirror` to complete...')
-iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
-            filters=[iotests.filter_qmp_event])
-
-iotests.log('Starting migration...')
-source_vm.qmp('migrate-set-capabilities',
-              capabilities=[{'capability': 'events', 'state': True}])
-dest_vm.qmp('migrate-set-capabilities',
-            capabilities=[{'capability': 'events', 'state': True}])
-iotests.log(source_vm.qmp('migrate', uri='unix:{0}'.format(migration_sock_path)))
-
-while True:
-    event1 = source_vm.event_wait('MIGRATION')
-    iotests.log(event1, filters=[iotests.filter_qmp_event])
-    if event1['data']['status'] in ('completed', 'failed'):
-        iotests.log('Gracefully ending the `drive-mirror` job on source...')
-        iotests.log(source_vm.qmp('block-job-cancel', device='mirror-job0'))
-        break
-
-while True:
-    event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED')
-    iotests.log(event2, filters=[iotests.filter_qmp_event])
-    if event2['event'] == 'BLOCK_JOB_COMPLETED':
-        iotests.log('Stopping the NBD server on destination...')
-        iotests.log(dest_vm.qmp('nbd-server-stop'))
-        break
+with iotests.FilePath('source.img') as source_img_path, \
+     iotests.FilePath('dest.img') as dest_img_path, \
+     iotests.FilePath('migration.sock') as migration_sock_path, \
+     iotests.FilePath('nbd.sock') as nbd_sock_path, \
+     iotests.VM('source') as source_vm, \
+     iotests.VM('dest') as dest_vm:
+
+    img_size = '1G'
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size)
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size)
+
+    iotests.log('Launching VMs...')
+    (source_vm.add_drive(source_img_path)
+     .launch())
+    (dest_vm.add_drive(dest_img_path)
+     .add_incoming('unix:{0}'.format(migration_sock_path))
+     .launch())
+
+    iotests.log('Launching NBD server on destination...')
+    iotests.log(dest_vm.qmp('nbd-server-start', addr={'type': 'unix', 'data': {'path': nbd_sock_path}}))
+    iotests.log(dest_vm.qmp('nbd-server-add', device='drive0', writable=True))
+
+    iotests.log('Starting `drive-mirror` on source...')
+    iotests.log(source_vm.qmp(
+                  'drive-mirror',
+                  device='drive0',
+                  target='nbd+unix:///drive0?socket={0}'.format(nbd_sock_path),
+                  sync='full',
+                  format='raw', # always raw, the server handles the format
+                  mode='existing',
+                  job_id='mirror-job0'))
+
+    iotests.log('Waiting for `drive-mirror` to complete...')
+    iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
+                filters=[iotests.filter_qmp_event])
+
+    iotests.log('Starting migration...')
+    source_vm.qmp('migrate-set-capabilities',
+                  capabilities=[{'capability': 'events', 'state': True}])
+    dest_vm.qmp('migrate-set-capabilities',
+                capabilities=[{'capability': 'events', 'state': True}])
+    iotests.log(source_vm.qmp('migrate', uri='unix:{0}'.format(migration_sock_path)))
+
+    while True:
+        event1 = source_vm.event_wait('MIGRATION')
+        iotests.log(event1, filters=[iotests.filter_qmp_event])
+        if event1['data']['status'] in ('completed', 'failed'):
+            iotests.log('Gracefully ending the `drive-mirror` job on source...')
+            iotests.log(source_vm.qmp('block-job-cancel', device='mirror-job0'))
+            break
+
+    while True:
+        event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED')
+        iotests.log(event2, filters=[iotests.filter_qmp_event])
+        if event2['event'] == 'BLOCK_JOB_COMPLETED':
+            iotests.log('Stopping the NBD server on destination...')
+            iotests.log(dest_vm.qmp('nbd-server-stop'))
+            break
--
2.13.5

From: "Daniel P. Berrange" <berrange@redhat.com>

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170831105456.9558-1-berrange@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/block_int.h | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
 
     int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+
+    /**
+     * @offset: position in bytes to read at
+     * @bytes: number of bytes to read
+     * @qiov: the buffers to fill with read data
+     * @flags: currently unused, always 0
+     *
+     * @offset and @bytes will be a multiple of 'request_alignment',
+     * but the length of individual @qiov elements does not have to
+     * be a multiple.
+     *
+     * @bytes will always equal the total size of @qiov, and will be
+     * no larger than 'max_transfer'.
+     *
+     * The buffer in @qiov may point directly to guest memory.
+     */
     int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
     int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
     int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
+    /**
+     * @offset: position in bytes to write at
+     * @bytes: number of bytes to write
+     * @qiov: the buffers containing data to write
+     * @flags: zero or more bits allowed by 'supported_write_flags'
+     *
+     * @offset and @bytes will be a multiple of 'request_alignment',
+     * but the length of individual @qiov elements does not have to
+     * be a multiple.
+     *
+     * @bytes will always equal the total size of @qiov, and will be
+     * no larger than 'max_transfer'.
+     *
+     * The buffer in @qiov may point directly to guest memory.
+     */
     int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
 
--
2.13.5
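
For illustration, the contract spelled out in these new comments can be
restated as executable checks; a sketch in Python for readability, with
made-up alignment and transfer values (512 bytes and 64 KiB are
illustrative defaults, not taken from the patch):

    def check_rw_contract(offset, nbytes, qiov_lens,
                          request_alignment=512, max_transfer=65536):
        # What a driver's bdrv_co_preadv/pwritev callback may assume,
        # per the doc comments above.
        assert offset % request_alignment == 0    # offset is aligned
        assert nbytes % request_alignment == 0    # length is aligned
        assert nbytes <= max_transfer             # bounded per request
        assert sum(qiov_lens) == nbytes           # qiov covers exactly @bytes
        # Individual qiov element lengths need not be aligned.

    check_rw_contract(4096, 4096, [1000, 3096])   # total aligned, elements not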