QED's bdrv_invalidate_cache implementation would like to reuse functions
that acquire/release the metadata locks. Call it from coroutine context
to simplify the logic.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
block.c | 41 +++++++++++++++++++++++++++++++++++++----
block/iscsi.c | 6 +++---
block/nfs.c | 6 +++---
block/qcow2.c | 7 +++++--
block/qed.c | 13 +++++--------
block/rbd.c | 6 +++---
include/block/block_int.h | 3 ++-
7 files changed, 58 insertions(+), 24 deletions(-)
diff --git a/block.c b/block.c
index 223ca3f85f..c8856cc594 100644
--- a/block.c
+++ b/block.c
@@ -4014,7 +4014,8 @@ void bdrv_init_with_whitelist(void)
bdrv_init();
}
-void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
+static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
BdrvChild *child, *parent;
uint64_t perm, shared_perm;
@@ -4030,7 +4031,7 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
}
QLIST_FOREACH(child, &bs->children, next) {
- bdrv_invalidate_cache(child->bs, &local_err);
+ bdrv_co_invalidate_cache(child->bs, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -4038,8 +4039,8 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
}
bs->open_flags &= ~BDRV_O_INACTIVE;
- if (bs->drv->bdrv_invalidate_cache) {
- bs->drv->bdrv_invalidate_cache(bs, &local_err);
+ if (bs->drv->bdrv_co_invalidate_cache) {
+ bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
if (local_err) {
bs->open_flags |= BDRV_O_INACTIVE;
error_propagate(errp, local_err);
@@ -4075,6 +4076,38 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
}
}
+typedef struct InvalidateCacheCo {
+ BlockDriverState *bs;
+ Error **errp;
+ bool done;
+} InvalidateCacheCo;
+
+static void coroutine_fn bdrv_invalidate_cache_co_entry(void *opaque)
+{
+ InvalidateCacheCo *ico = opaque;
+ bdrv_co_invalidate_cache(ico->bs, ico->errp);
+ ico->done = true;
+}
+
+void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
+{
+ Coroutine *co;
+ InvalidateCacheCo ico = {
+ .bs = bs,
+ .done = false,
+ .errp = errp
+ };
+
+ if (qemu_in_coroutine()) {
+ /* Fast-path if already in coroutine context */
+ bdrv_invalidate_cache_co_entry(&ico);
+ } else {
+ co = qemu_coroutine_create(bdrv_invalidate_cache_co_entry, &ico);
+ qemu_coroutine_enter(co);
+ BDRV_POLL_WHILE(bs, !ico.done);
+ }
+}
+
void bdrv_invalidate_cache_all(Error **errp)
{
BlockDriverState *bs;
diff --git a/block/iscsi.c b/block/iscsi.c
index 7aab379c48..01a4063d93 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -2168,8 +2168,8 @@ static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return 0;
}
-static void iscsi_invalidate_cache(BlockDriverState *bs,
- Error **errp)
+static void coroutine_fn iscsi_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
IscsiLun *iscsilun = bs->opaque;
iscsi_allocmap_invalidate(iscsilun);
@@ -2200,7 +2200,7 @@ static BlockDriver bdrv_iscsi = {
.create_opts = &iscsi_create_opts,
.bdrv_reopen_prepare = iscsi_reopen_prepare,
.bdrv_reopen_commit = iscsi_reopen_commit,
- .bdrv_invalidate_cache = iscsi_invalidate_cache,
+ .bdrv_co_invalidate_cache = iscsi_co_invalidate_cache,
.bdrv_getlength = iscsi_getlength,
.bdrv_get_info = iscsi_get_info,
diff --git a/block/nfs.c b/block/nfs.c
index 06e0c6f5f8..455240677f 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -865,8 +865,8 @@ static void nfs_refresh_filename(BlockDriverState *bs, QDict *options)
}
#ifdef LIBNFS_FEATURE_PAGECACHE
-static void nfs_invalidate_cache(BlockDriverState *bs,
- Error **errp)
+static void coroutine_fn nfs_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
NFSClient *client = bs->opaque;
nfs_pagecache_invalidate(client->context, client->fh);
@@ -899,7 +899,7 @@ static BlockDriver bdrv_nfs = {
.bdrv_refresh_filename = nfs_refresh_filename,
#ifdef LIBNFS_FEATURE_PAGECACHE
- .bdrv_invalidate_cache = nfs_invalidate_cache,
+ .bdrv_co_invalidate_cache = nfs_co_invalidate_cache,
#endif
};
diff --git a/block/qcow2.c b/block/qcow2.c
index b5de67d113..4ff7e89009 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1854,7 +1854,8 @@ static void qcow2_close(BlockDriverState *bs)
qcow2_free_snapshots(bs);
}
-static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
+static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int flags = s->flags;
@@ -1877,7 +1878,9 @@ static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
options = qdict_clone_shallow(bs->options);
flags &= ~BDRV_O_INACTIVE;
+ qemu_co_mutex_lock(&s->lock);
ret = qcow2_do_open(bs, options, flags, &local_err);
+ qemu_co_mutex_unlock(&s->lock);
QDECREF(options);
if (local_err) {
error_propagate(errp, local_err);
@@ -3540,7 +3543,7 @@ BlockDriver bdrv_qcow2 = {
.bdrv_change_backing_file = qcow2_change_backing_file,
.bdrv_refresh_limits = qcow2_refresh_limits,
- .bdrv_invalidate_cache = qcow2_invalidate_cache,
+ .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
.bdrv_inactivate = qcow2_inactivate,
.create_opts = &qcow2_create_opts,
diff --git a/block/qed.c b/block/qed.c
index 3c2867c946..56254faee8 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -1543,7 +1543,8 @@ static int bdrv_qed_change_backing_file(BlockDriverState *bs,
return ret;
}
-static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
+static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
BDRVQEDState *s = bs->opaque;
Error *local_err = NULL;
@@ -1552,13 +1553,9 @@ static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
bdrv_qed_close(bs);
bdrv_qed_init_state(bs);
- if (qemu_in_coroutine()) {
- qemu_co_mutex_lock(&s->table_lock);
- }
+ qemu_co_mutex_lock(&s->table_lock);
ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
- if (qemu_in_coroutine()) {
- qemu_co_mutex_unlock(&s->table_lock);
- }
+ qemu_co_mutex_unlock(&s->table_lock);
if (local_err) {
error_propagate(errp, local_err);
error_prepend(errp, "Could not reopen qed layer: ");
@@ -1633,7 +1630,7 @@ static BlockDriver bdrv_qed = {
.bdrv_get_info = bdrv_qed_get_info,
.bdrv_refresh_limits = bdrv_qed_refresh_limits,
.bdrv_change_backing_file = bdrv_qed_change_backing_file,
- .bdrv_invalidate_cache = bdrv_qed_invalidate_cache,
+ .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
.bdrv_check = bdrv_qed_check,
.bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
.bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
diff --git a/block/rbd.c b/block/rbd.c
index 68c04a0d9d..2ccb9e41ca 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -1077,8 +1077,8 @@ static BlockAIOCB *qemu_rbd_aio_pdiscard(BlockDriverState *bs,
#endif
#ifdef LIBRBD_SUPPORTS_INVALIDATE
-static void qemu_rbd_invalidate_cache(BlockDriverState *bs,
- Error **errp)
+static void coroutine_fn qemu_rbd_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
{
BDRVRBDState *s = bs->opaque;
int r = rbd_invalidate_cache(s->image);
@@ -1144,7 +1144,7 @@ static BlockDriver bdrv_rbd = {
.bdrv_snapshot_list = qemu_rbd_snap_list,
.bdrv_snapshot_goto = qemu_rbd_snap_rollback,
#ifdef LIBRBD_SUPPORTS_INVALIDATE
- .bdrv_invalidate_cache = qemu_rbd_invalidate_cache,
+ .bdrv_co_invalidate_cache = qemu_rbd_co_invalidate_cache,
#endif
};
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 50ba349db7..8558d7b78d 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -181,7 +181,8 @@ struct BlockDriver {
/*
* Invalidate any cached meta-data.
*/
- void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
+ void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
+ Error **errp);
int (*bdrv_inactivate)(BlockDriverState *bs);
/*
--
2.13.0
On 10.07.2017 at 18:58, Paolo Bonzini wrote:
> QED's bdrv_invalidate_cache implementation would like to reuse functions
> that acquire/release the metadata locks. Call it from coroutine context
> to simplify the logic.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> diff --git a/block/qcow2.c b/block/qcow2.c
> index b5de67d113..4ff7e89009 100644
> --- a/block/qcow2.c
> +++ b/block/qcow2.c
> @@ -1854,7 +1854,8 @@ static void qcow2_close(BlockDriverState *bs)
> qcow2_free_snapshots(bs);
> }
>
> -static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
> +static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
> + Error **errp)
> {
> BDRVQcow2State *s = bs->opaque;
> int flags = s->flags;
> @@ -1877,7 +1878,9 @@ static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
> options = qdict_clone_shallow(bs->options);
>
> flags &= ~BDRV_O_INACTIVE;
> + qemu_co_mutex_lock(&s->lock);
> ret = qcow2_do_open(bs, options, flags, &local_err);
> + qemu_co_mutex_unlock(&s->lock);
> QDECREF(options);
> if (local_err) {
> error_propagate(errp, local_err);
How useful is this lock really? If we expect any requests while this
function is running (we don't, it would break horribly), it should
probably cover qcow2_close(), too.
Or maybe the cleaner way would be bdrv_drained_begin/end() around
everything directly in bdrv_co_invalidate_cache().
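Roughly like this, say (just a sketch of the idea, not a tested patch; the
middle part stays exactly as in your patch):

    static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
    {
        /* Quiesce all requests on this node while metadata is reloaded */
        bdrv_drained_begin(bs);
        /* ... recurse into bs->children, clear BDRV_O_INACTIVE and call
         * bs->drv->bdrv_co_invalidate_cache(), as the patch does ... */
        bdrv_drained_end(bs);
    }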
Kevin
On 11/07/2017 10:24, Kevin Wolf wrote:
> On 10.07.2017 at 18:58, Paolo Bonzini wrote:
>> QED's bdrv_invalidate_cache implementation would like to reuse functions
>> that acquire/release the metadata locks. Call it from coroutine context
>> to simplify the logic.
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>
>> diff --git a/block/qcow2.c b/block/qcow2.c
>> index b5de67d113..4ff7e89009 100644
>> --- a/block/qcow2.c
>> +++ b/block/qcow2.c
>> @@ -1854,7 +1854,8 @@ static void qcow2_close(BlockDriverState *bs)
>> qcow2_free_snapshots(bs);
>> }
>>
>> -static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
>> +static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
>> + Error **errp)
>> {
>> BDRVQcow2State *s = bs->opaque;
>> int flags = s->flags;
>> @@ -1877,7 +1878,9 @@ static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
>> options = qdict_clone_shallow(bs->options);
>>
>> flags &= ~BDRV_O_INACTIVE;
>> + qemu_co_mutex_lock(&s->lock);
>> ret = qcow2_do_open(bs, options, flags, &local_err);
>> + qemu_co_mutex_unlock(&s->lock);
>> QDECREF(options);
>> if (local_err) {
>> error_propagate(errp, local_err);
>
> How useful is this lock really? If we expect any requests while this
> function is running (we don't, it would break horribly), it should
> probably cover qcow2_close(), too.
In the case of QED, this was needed because qed_read_table drops
s->table_lock. Here I just copied the idiom.
However, I think it's cleaner to define which functions are called with
which lock held. Sometimes it leads to taking a lock unnecessarily, but
it's clearer and it can also help once we annotate the source and let
the compiler do static analysis (see clang's -Wthread-safety).
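For example, something along these lines (a hypothetical sketch with
illustrative macro names; nothing like this is in the tree today):

    /* Teach clang that CoMutex is a lockable capability */
    typedef struct __attribute__((capability("mutex"))) CoMutex {
        unsigned locked;
        /* ... */
    } CoMutex;

    #define GUARDED_BY(m)  __attribute__((guarded_by(m)))
    #define REQUIRES(m)    __attribute__((requires_capability(m)))

    struct BDRVQcow2State {
        CoMutex lock;
        uint64_t *l1_table GUARDED_BY(lock);   /* only touched under lock */
        /* ... */
    };

    /* clang warns at any call site that does not hold s->lock */
    static int qcow2_do_open_locked(BDRVQcow2State *s, int flags,
                                    Error **errp) REQUIRES(s->lock);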
Paolo
> Or maybe the cleaner way would be bdrv_drained_begin/end() around
> everything directly in bdrv_co_invalidate_cache().
>
> Kevin
>
On 11.07.2017 at 10:33, Paolo Bonzini wrote:
> On 11/07/2017 10:24, Kevin Wolf wrote:
> > On 10.07.2017 at 18:58, Paolo Bonzini wrote:
> >> QED's bdrv_invalidate_cache implementation would like to reuse functions
> >> that acquire/release the metadata locks. Call it from coroutine context
> >> to simplify the logic.
> >>
> >> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> >
> >> diff --git a/block/qcow2.c b/block/qcow2.c
> >> index b5de67d113..4ff7e89009 100644
> >> --- a/block/qcow2.c
> >> +++ b/block/qcow2.c
> >> @@ -1854,7 +1854,8 @@ static void qcow2_close(BlockDriverState *bs)
> >> qcow2_free_snapshots(bs);
> >> }
> >>
> >> -static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
> >> +static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
> >> + Error **errp)
> >> {
> >> BDRVQcow2State *s = bs->opaque;
> >> int flags = s->flags;
> >> @@ -1877,7 +1878,9 @@ static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
> >> options = qdict_clone_shallow(bs->options);
> >>
> >> flags &= ~BDRV_O_INACTIVE;
> >> + qemu_co_mutex_lock(&s->lock);
> >> ret = qcow2_do_open(bs, options, flags, &local_err);
> >> + qemu_co_mutex_unlock(&s->lock);
> >> QDECREF(options);
> >> if (local_err) {
> >> error_propagate(errp, local_err);
> >
> > How useful is this lock really? If we expect any requests while this
> > function is running (we don't, it would break horribly), it should
> > probably cover qcow2_close(), too.
>
> In the case of QED, this was needed because qed_read_table drops
> s->table_lock. Here I just copied the idiom.
Ok, makes sense.
> However, I think it's cleaner to define which functions are called
> with which lock held. Sometimes it leads to taking a lock
> unnecessarily, but it's clearer and it can also help once we annotate
> the source and let the compiler do static analysis (see clang's
> -Wthread-safety).
Can you just add a comment to this effect to qcow2/qed_do_open() then?
> > Or maybe the cleaner way would be bdrv_drained_begin/end() around
> > everything directly in bdrv_co_invalidate_cache().
Should we do this anyway? Or assert that no requests are in flight?
Kevin
On 11/07/2017 10:39, Kevin Wolf wrote:
> Can you just add a comment to this effect to qcow2/qed_do_open() then?

Good point.

>>> Or maybe the cleaner way would be bdrv_drained_begin/end() around
>>> everything directly in bdrv_co_invalidate_cache().
>
> Should we do this anyway? Or assert that no requests are in flight?

I can certainly add assertions for clarity, but wouldn't requests have
failed the permission checks and caused assertion failures already?

Paolo
On 11.07.2017 at 10:41, Paolo Bonzini wrote:
> On 11/07/2017 10:39, Kevin Wolf wrote:
> > Can you just add a comment to this effect to qcow2/qed_do_open() then?
>
> Good point.
>
> >>> Or maybe the cleaner way would be bdrv_drained_begin/end() around
> >>> everything directly in bdrv_co_invalidate_cache().
> >
> > Should we do this anyway? Or assert that no requests are in flight?
>
> I can certainly add assertions for clarity, but wouldn't requests have
> failed the permission checks and caused assertion failures already?

I'm pretty sure that read requests are allowed on inactive images.

Kevin