We are going to implement a compressed write cache to improve the
performance of compressed backup when the target is opened in O_DIRECT
mode. We definitely want to flush the cache when the backup finishes,
and if the flush fails it should be reported as a block-job failure,
not simply ignored in bdrv_close(). So, teach all block jobs to flush
their targets at the end.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
include/block/blockjob_int.h | 18 ++++++++++++++++++
block/backup.c | 8 +++++---
block/commit.c | 2 ++
block/mirror.c | 2 ++
block/stream.c | 2 ++
blockjob.c | 16 ++++++++++++++++
6 files changed, 45 insertions(+), 3 deletions(-)
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index 6633d83da2..6ef3123120 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -119,4 +119,22 @@ int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
int is_read, int error);
+/**
+ * block_job_final_target_flush:
+ * @job: The job to signal an error for if flush failed.
+ * @target_bs: The bs to flush.
+ * @ret: Will be updated (to the return code of bdrv_flush()) only if it is
+ * zero now. This is a bit of an unusual interface, but all callers are
+ * comfortable with it.
+ *
+ * The function is intended to be called at the end of .run() for any data
+ * copying job.
+ *
+ * There are may be some internal caches in format layers of target,
+ * like compressed_cache in qcow2 format. So we should call flush to
+ * be sure that all data reached the destination protocol layer.
+ */
+void block_job_final_target_flush(BlockJob *job, BlockDriverState *target_bs,
+ int *ret);
+
#endif
diff --git a/block/backup.c b/block/backup.c
index 94e6dcd72e..d3ba8e0f75 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -255,7 +255,7 @@ static void backup_init_bcs_bitmap(BackupBlockJob *job)
static int coroutine_fn backup_run(Job *job, Error **errp)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
- int ret;
+ int ret = 0;
backup_init_bcs_bitmap(s);
@@ -297,10 +297,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
job_yield(job);
}
} else {
- return backup_loop(s);
+ ret = backup_loop(s);
}
- return 0;
+ block_job_final_target_flush(&s->common, s->target_bs, &ret);
+
+ return ret;
}
static void coroutine_fn backup_pause(Job *job)
diff --git a/block/commit.c b/block/commit.c
index dd9ba87349..1b61b60ccd 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -193,6 +193,8 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
ret = 0;
out:
+ block_job_final_target_flush(&s->common, blk_bs(s->base), &ret);
+
qemu_vfree(buf);
return ret;
diff --git a/block/mirror.c b/block/mirror.c
index 1803c6988b..bc559bd053 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1095,6 +1095,8 @@ immediate_exit:
g_free(s->in_flight_bitmap);
bdrv_dirty_iter_free(s->dbi);
+ block_job_final_target_flush(&s->common, blk_bs(s->target), &ret);
+
if (need_drain) {
s->in_drain = true;
bdrv_drained_begin(bs);
diff --git a/block/stream.c b/block/stream.c
index 1fa742b0db..cda41e4a64 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -182,6 +182,8 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
}
}
+ block_job_final_target_flush(&s->common, s->target_bs, &error);
+
/* Do not remove the backing file if an error was there but ignored. */
return error;
}
diff --git a/blockjob.c b/blockjob.c
index f2feff051d..e226bfbbfb 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -525,3 +525,19 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
}
return action;
}
+
+void block_job_final_target_flush(BlockJob *job, BlockDriverState *target_bs,
+ int *ret)
+{
+ int flush_ret = bdrv_flush(target_bs);
+
+ if (flush_ret < 0 && !block_job_is_internal(job)) {
+ qapi_event_send_block_job_error(job->job.id,
+ IO_OPERATION_TYPE_WRITE,
+ BLOCK_ERROR_ACTION_REPORT);
+ }
+
+ if (*ret == 0) {
+ *ret = flush_ret;
+ }
+}
--
2.29.2
On 05.03.21 18:35, Vladimir Sementsov-Ogievskiy wrote:
> [...]

Reviewed-by: Max Reitz <mreitz@redhat.com>

Just a nit on the function's description.

> + * There are may be some internal caches in format layers of target,

-are, *in the format layers of the target
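
To illustrate the calling convention described in the header comment, here is
a minimal sketch of a data-copying job's .run() using the new helper. It is
not taken from the patch: ExampleJob, its fields and example_copy_loop() are
hypothetical stand-ins; only block_job_final_target_flush() comes from this
series. The point it shows is the "@ret is only updated if it is zero"
contract: an error from the copy loop always takes precedence over a later
flush error.

/*
 * Minimal sketch, not from the patch: ExampleJob and example_copy_loop()
 * are hypothetical; only block_job_final_target_flush() is real here.
 */
#include "qemu/osdep.h"
#include "block/blockjob_int.h"

typedef struct ExampleJob {
    BlockJob common;
    BlockDriverState *target_bs;    /* destination of the copy */
} ExampleJob;

static int coroutine_fn example_copy_loop(ExampleJob *s)
{
    /* ... copy data to s->target_bs, return 0 or a negative errno ... */
    return 0;
}

static int coroutine_fn example_run(Job *job, Error **errp)
{
    ExampleJob *s = container_of(job, ExampleJob, common.job);
    int ret = example_copy_loop(s);

    /*
     * Flush the target so data held in format-layer caches (e.g. a
     * compressed write cache) reaches the protocol layer.  ret is only
     * overwritten while it is still zero, so a copy-loop error is
     * reported in preference to a flush error.
     */
    block_job_final_target_flush(&s->common, s->target_bs, &ret);

    return ret;
}

This is also why callers such as backup_run() in the patch initialise ret to 0
and simply pass its address at the end of the function.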