These functions end up calling bdrv_common_block_status_above(), a
generated_co_wrapper function.
In addition, they are always called in coroutine context, meaning all
their callers are coroutine_fn.
This means that the g_c_w function will always take the
qemu_in_coroutine() branch and eventually suspend (in other words, call
qemu_coroutine_yield()).
Therefore, we need to mark such functions coroutine_fn too.
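
For reference, a generated_co_wrapper (g_c_w) expands to roughly the
following shape (simplified sketch with argument lists abbreviated, not
the exact generated code; the real wrapper is emitted by
scripts/block-coroutine-wrapper.py):

    int bdrv_common_block_status_above(BlockDriverState *bs, ...)
    {
        if (qemu_in_coroutine()) {
            /* Already in coroutine context: call the _co_ variant
             * directly.  This path can yield, so every caller that
             * reaches it must itself be a coroutine_fn. */
            return bdrv_co_common_block_status_above(bs, ...);
        } else {
            /* Spawn a coroutine running the _co_ variant and poll
             * until it completes. */
            ...
        }
    }
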
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/block-copy.c         | 21 ++++++++++++---------
 include/block/block-copy.h |  5 +++--
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/block/block-copy.c b/block/block-copy.c
index bb947afdda..5e59d6262f 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -577,8 +577,9 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
     return ret;
 }
 
-static int block_copy_block_status(BlockCopyState *s, int64_t offset,
-                                   int64_t bytes, int64_t *pnum)
+static coroutine_fn int block_copy_block_status(BlockCopyState *s,
+                                                int64_t offset,
+                                                int64_t bytes, int64_t *pnum)
 {
     int64_t num;
     BlockDriverState *base;
@@ -590,8 +591,8 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
         base = NULL;
     }
 
-    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
-                                  NULL, NULL);
+    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
+                                     NULL, NULL);
     if (ret < 0 || num < s->cluster_size) {
         /*
          * On error or if failed to obtain large enough chunk just fallback to
@@ -613,8 +614,9 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
  */
-static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
-                                           int64_t *pnum)
+static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
+                                                        int64_t offset,
+                                                        int64_t *pnum)
 {
     BlockDriverState *bs = s->source->bs;
     int64_t count, total_count = 0;
@@ -624,7 +626,7 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
 
     while (true) {
-        ret = bdrv_is_allocated(bs, offset, bytes, &count);
+        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
         if (ret < 0) {
             return ret;
         }
@@ -669,8 +671,9 @@ void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
  * @return 0 when the cluster at @offset was unallocated,
  *         1 otherwise, and -ret on error.
  */
-int64_t block_copy_reset_unallocated(BlockCopyState *s,
-                                     int64_t offset, int64_t *count)
+int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
+                                                  int64_t offset,
+                                                  int64_t *count)
 {
     int ret;
     int64_t clusters, bytes;
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index ba0b425d78..8cea4f9b90 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -36,8 +36,9 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
 void block_copy_state_free(BlockCopyState *s);
 
 void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
-int64_t block_copy_reset_unallocated(BlockCopyState *s,
-                                     int64_t offset, int64_t *count);
+int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
+                                                  int64_t offset,
+                                                  int64_t *count);
 
 int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                             bool ignore_ratelimit, uint64_t timeout_ns,
--
2.31.1