When operating on struct vhost_net_ubuf_ref, the following execution
sequence is theoretically possible:
CPU0 is finalizing DMA operation            CPU1 is doing VHOST_NET_SET_BACKEND

                                            // &ubufs->refcount == 2
vhost_net_ubuf_put()                        vhost_net_ubuf_put_wait_and_free(oldubufs)
                                              vhost_net_ubuf_put_and_wait()
                                                vhost_net_ubuf_put()
                                                  int r = atomic_sub_return(1, &ubufs->refcount);
                                                  // r = 1
int r = atomic_sub_return(1, &ubufs->refcount);
// r = 0
                                                wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
                                                // no wait occurs here because condition is already true
                                                kfree(ubufs);
if (unlikely(!r))
        wake_up(&ubufs->wait); // use-after-free
This leads to a use-after-free on the ubufs access: CPU1 skips waiting
for the wake_up() because the refcount is already zero, frees ubufs, and
CPU0 then touches the freed memory.
To prevent that, use a completion instead of a wait_queue as the ubufs
notification mechanism. wait_for_completion() guarantees that a complete()
call has happened prior to its return, so the waiter cannot free ubufs
while the final put is still signalling.
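For illustration (this trace is not part of the patch), the same
interleaving with a completion is safe, because the waiter cannot
return - and thus cannot free ubufs - before the final put has
signalled:

CPU0 is finalizing DMA operation            CPU1 is doing VHOST_NET_SET_BACKEND
                                              vhost_net_ubuf_put()
                                              // r = 1
int r = atomic_sub_return(1, &ubufs->refcount);
// r = 0
                                              wait_for_completion(&ubufs->wait);
                                              // sleeps, or returns only after complete_all()
if (unlikely(!r))
        complete_all(&ubufs->wait);
                                              // woken up; CPU0 is done with ubufs
                                              kfree(ubufs);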
We also need to reinit the completion, because refcount == 0 does not
imply freeing in the vhost_net_flush() case - flush then sets the
refcount back to 1 and keeps using the same ubufs object.
AFAIK concurrent calls to vhost_net_ubuf_put_and_wait() with the same
ubufs object aren't possible, since those calls (through vhost_net_flush()
or vhost_net_set_backend()) are protected by the device mutex.
So calling reinit_completion() right after wait_for_completion() should be fine.
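For context, here is a condensed sketch of the reuse pattern mentioned
above (simplified from vhost_net_flush() in drivers/vhost/net.c; the
locking and tx_flush bookkeeping are elided, and the helper name is
mine, for illustration only):

/* The refcount drops to 0 here, yet ubufs is NOT freed - it is
 * recycled, so the completion must be re-armed before the next waiter.
 */
static void vhost_net_flush_tx_sketch(struct vhost_net *n)
{
	struct vhost_net_ubuf_ref *ubufs = n->vqs[VHOST_NET_VQ_TX].ubufs;

	if (!ubufs)
		return;
	/* Wait for all lower device DMAs to finish: refcount 1 -> 0;
	 * with this patch, wait_for_completion() + reinit_completion()
	 * happen inside put_and_wait().
	 */
	vhost_net_ubuf_put_and_wait(ubufs);
	/* The object lives on: take the initial reference again */
	atomic_set(&ubufs->refcount, 1);
}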
Cc: stable@vger.kernel.org
Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock")
Reported-by: Andrey Ryabinin <arbn@yandex-team.com>
Suggested-by: Andrey Smetanin <asmetanin@yandex-team.ru>
Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
---
drivers/vhost/net.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7cbfc7d718b3..454d179fffeb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref {
* >1: outstanding ubufs
*/
atomic_t refcount;
- wait_queue_head_t wait;
+ struct completion wait;
struct vhost_virtqueue *vq;
};
@@ -240,7 +240,7 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
if (!ubufs)
return ERR_PTR(-ENOMEM);
atomic_set(&ubufs->refcount, 1);
- init_waitqueue_head(&ubufs->wait);
+ init_completion(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
@@ -249,14 +249,15 @@ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
int r = atomic_sub_return(1, &ubufs->refcount);
if (unlikely(!r))
- wake_up(&ubufs->wait);
+ complete_all(&ubufs->wait);
return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put(ubufs);
- wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+ wait_for_completion(&ubufs->wait);
+ reinit_completion(&ubufs->wait);
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
--
2.34.1
On Wed, 16 Jul 2025 19:22:43 +0300 Nikolay Kuratov wrote:
> [...]
>  static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
>  {
>  	vhost_net_ubuf_put(ubufs);
> -	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
> +	wait_for_completion(&ubufs->wait);
> +	reinit_completion(&ubufs->wait);

In the case of 5 waiters for example, after the first waiter
reinitializes the completion, the 3rd waiter misses the wakeup, no?

>  }
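To spell out the concern (an illustrative interleaving, not quoted from
the thread - it relies on complete_all() latching the done state, which
the first waiter's reinit_completion() then clears):

waiter A: wait_for_completion(&ubufs->wait)  // passes: latched by complete_all()
waiter A: reinit_completion(&ubufs->wait)    // clears the latched state
waiter B: wait_for_completion(&ubufs->wait)  // state already cleared: blocks forever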
Yes, if multiple waiters call vhost_net_ubuf_put_and_wait() concurrently,
we are screwed. Note that this was not a problem before this patch. While
the single-waiter assumption was explicitly mentioned in the commit
message, I have changed my mind, because the set of
vhost_net_ubuf_put_and_wait() users may change when this patch is
backported to older LTSes. In 6.6+ kernels there are only two
put_and_wait() callers, and both ensure that only one thread calls
put_and_wait() at a time.

I think it is better to preserve the thread-safety of
vhost_net_ubuf_put_and_wait() and move the reinit_completion() call to
vhost_net_flush(). We don't need the reinit on the freeing path anyway.

I will send v2 with the fix. Thank you for noticing this.
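A sketch of the shape v2 could take (my illustration of the proposal
above, not the actual v2 patch):

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	/* complete_all() latches the completion, so any number of
	 * concurrent waiters may pass here; nobody re-arms it yet.
	 */
	wait_for_completion(&ubufs->wait);
}

static void vhost_net_flush(struct vhost_net *n)
{
	/* ... */
	vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
	/* Re-arm only on the reuse path, under the device mutex,
	 * where no concurrent waiter can observe the transient state.
	 */
	reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait);
	atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
	/* ... */
}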
Tested this patch with virtio-net regression tests, everything works fine.

Tested-by: Lei Yang <leiyang@redhat.com>

On Thu, Jul 17, 2025 at 12:24 AM Nikolay Kuratov <kniv@yandex-team.ru> wrote:
>
> When operating on struct vhost_net_ubuf_ref, the following execution
> sequence is theoretically possible:
> [...]