drivers/vhost/net.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-)
When operating on struct vhost_net_ubuf_ref, the following execution
sequence is theoretically possible:
CPU0 is finalizing DMA operation CPU1 is doing VHOST_NET_SET_BACKEND
// &ubufs->refcount == 2
vhost_net_ubuf_put() vhost_net_ubuf_put_wait_and_free(oldubufs)
vhost_net_ubuf_put_and_wait()
vhost_net_ubuf_put()
int r = atomic_sub_return(1, &ubufs->refcount);
// r = 1
int r = atomic_sub_return(1, &ubufs->refcount);
// r = 0
wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
// no wait occurs here because condition is already true
kfree(ubufs);
if (unlikely(!r))
wake_up(&ubufs->wait); // use-after-free
This leads to use-after-free on ubufs access. This happens because CPU1
skips waiting for wake_up() when refcount is already zero.
To prevent that use a completion instead of wait_queue as the ubufs
notification mechanism. wait_for_completion() guarantees that there will
be complete() call prior to its return.
We also need to reinit completion in vhost_net_flush(), because
refcnt == 0 does not mean freeing in that case.
Cc: stable@vger.kernel.org
Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock")
Reported-by: Andrey Ryabinin <arbn@yandex-team.com>
Suggested-by: Andrey Smetanin <asmetanin@yandex-team.ru>
Suggested-by: Hillf Danton <hdanton@sina.com>
Tested-by: Lei Yang <leiyang@redhat.com> (v1)
Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
---
v2:
* move reinit_completion() into vhost_net_flush(), thanks
to Hillf Danton
* add Tested-by: Lei Yang
* check that usages of put_and_wait() are consistent across
LTS kernels
drivers/vhost/net.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7cbfc7d718b3..69e1bfb9627e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref {
* >1: outstanding ubufs
*/
atomic_t refcount;
- wait_queue_head_t wait;
+ struct completion wait;
struct vhost_virtqueue *vq;
};
@@ -240,7 +240,7 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
if (!ubufs)
return ERR_PTR(-ENOMEM);
atomic_set(&ubufs->refcount, 1);
- init_waitqueue_head(&ubufs->wait);
+ init_completion(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
@@ -249,14 +249,14 @@ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
int r = atomic_sub_return(1, &ubufs->refcount);
if (unlikely(!r))
- wake_up(&ubufs->wait);
+ complete_all(&ubufs->wait);
return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put(ubufs);
- wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+ wait_for_completion(&ubufs->wait);
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -1381,6 +1381,7 @@ static void vhost_net_flush(struct vhost_net *n)
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
+ reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
--
2.34.1
On Fri, Jul 18, 2025 at 02:03:55PM +0300, Nikolay Kuratov wrote: > When operating on struct vhost_net_ubuf_ref, the following execution > sequence is theoretically possible: > CPU0 is finalizing DMA operation CPU1 is doing VHOST_NET_SET_BACKEND > // &ubufs->refcount == 2 > vhost_net_ubuf_put() vhost_net_ubuf_put_wait_and_free(oldubufs) > vhost_net_ubuf_put_and_wait() > vhost_net_ubuf_put() > int r = atomic_sub_return(1, &ubufs->refcount); > // r = 1 > int r = atomic_sub_return(1, &ubufs->refcount); > // r = 0 > wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > // no wait occurs here because condition is already true > kfree(ubufs); > if (unlikely(!r)) > wake_up(&ubufs->wait); // use-after-free > > This leads to use-after-free on ubufs access. This happens because CPU1 > skips waiting for wake_up() when refcount is already zero. > > To prevent that use a completion instead of wait_queue as the ubufs > notification mechanism. wait_for_completion() guarantees that there will > be complete() call prior to its return. > > We also need to reinit completion in vhost_net_flush(), because > refcnt == 0 does not mean freeing in that case. > > Cc: stable@vger.kernel.org > Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock") > Reported-by: Andrey Ryabinin <arbn@yandex-team.com> > Suggested-by: Andrey Smetanin <asmetanin@yandex-team.ru> > Suggested-by: Hillf Danton <hdanton@sina.com> > Tested-by: Lei Yang <leiyang@redhat.com> (v1) > Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru> Nikolay should I expect v3? 
> --- > v2: > * move reinit_completion() into vhost_net_flush(), thanks > to Hillf Danton > * add Tested-by: Lei Yang > * check that usages of put_and_wait() are consistent across > LTS kernels > > drivers/vhost/net.c | 9 +++++---- > 1 file changed, 5 insertions(+), 4 deletions(-) > > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c > index 7cbfc7d718b3..69e1bfb9627e 100644 > --- a/drivers/vhost/net.c > +++ b/drivers/vhost/net.c > @@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref { > * >1: outstanding ubufs > */ > atomic_t refcount; > - wait_queue_head_t wait; > + struct completion wait; > struct vhost_virtqueue *vq; > }; > > @@ -240,7 +240,7 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) > if (!ubufs) > return ERR_PTR(-ENOMEM); > atomic_set(&ubufs->refcount, 1); > - init_waitqueue_head(&ubufs->wait); > + init_completion(&ubufs->wait); > ubufs->vq = vq; > return ubufs; > } > @@ -249,14 +249,14 @@ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) > { > int r = atomic_sub_return(1, &ubufs->refcount); > if (unlikely(!r)) > - wake_up(&ubufs->wait); > + complete_all(&ubufs->wait); > return r; > } > > static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) > { > vhost_net_ubuf_put(ubufs); > - wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > + wait_for_completion(&ubufs->wait); > } > > static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) > @@ -1381,6 +1381,7 @@ static void vhost_net_flush(struct vhost_net *n) > mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); > n->tx_flush = false; > atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); > + reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait); > mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); > } > } > -- > 2.34.1
On Fri, 18 Jul 2025 14:03:55 +0300 Nikolay Kuratov wrote: > When operating on struct vhost_net_ubuf_ref, the following execution > sequence is theoretically possible: > CPU0 is finalizing DMA operation CPU1 is doing VHOST_NET_SET_BACKEND > // &ubufs->refcount == 2 > vhost_net_ubuf_put() vhost_net_ubuf_put_wait_and_free(oldubufs) > vhost_net_ubuf_put_and_wait() > vhost_net_ubuf_put() > int r = atomic_sub_return(1, &ubufs->refcount); > // r = 1 > int r = atomic_sub_return(1, &ubufs->refcount); > // r = 0 > wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > // no wait occurs here because condition is already true > kfree(ubufs); > if (unlikely(!r)) > wake_up(&ubufs->wait); // use-after-free > > This leads to use-after-free on ubufs access. This happens because CPU1 > skips waiting for wake_up() when refcount is already zero. > > To prevent that use a completion instead of wait_queue as the ubufs > notification mechanism. wait_for_completion() guarantees that there will > be complete() call prior to its return. > Alternatively rcu helps. --- x/drivers/vhost/net.c +++ y/drivers/vhost/net.c @@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref { atomic_t refcount; wait_queue_head_t wait; struct vhost_virtqueue *vq; + struct rcu_head rcu; }; #define VHOST_NET_BATCH 64 @@ -247,9 +248,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqu static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) { - int r = atomic_sub_return(1, &ubufs->refcount); + int r; + + rcu_read_lock(); + r = atomic_sub_return(1, &ubufs->refcount); if (unlikely(!r)) wake_up(&ubufs->wait); + rcu_read_unlock(); return r; } @@ -262,7 +267,7 @@ static void vhost_net_ubuf_put_and_wait( static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) { vhost_net_ubuf_put_and_wait(ubufs); - kfree(ubufs); + kfree_rcu(ubufs, rcu); } static void vhost_net_clear_ubuf_info(struct vhost_net *n)
On Sat, Jul 19, 2025 at 07:03:23AM +0800, Hillf Danton wrote: > On Fri, 18 Jul 2025 14:03:55 +0300 Nikolay Kuratov wrote: > > When operating on struct vhost_net_ubuf_ref, the following execution > > sequence is theoretically possible: > > CPU0 is finalizing DMA operation CPU1 is doing VHOST_NET_SET_BACKEND > > // &ubufs->refcount == 2 > > vhost_net_ubuf_put() vhost_net_ubuf_put_wait_and_free(oldubufs) > > vhost_net_ubuf_put_and_wait() > > vhost_net_ubuf_put() > > int r = atomic_sub_return(1, &ubufs->refcount); > > // r = 1 > > int r = atomic_sub_return(1, &ubufs->refcount); > > // r = 0 > > wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > > // no wait occurs here because condition is already true > > kfree(ubufs); > > if (unlikely(!r)) > > wake_up(&ubufs->wait); // use-after-free > > > > This leads to use-after-free on ubufs access. This happens because CPU1 > > skips waiting for wake_up() when refcount is already zero. > > > > To prevent that use a completion instead of wait_queue as the ubufs > > notification mechanism. wait_for_completion() guarantees that there will > > be complete() call prior to its return. > > > Alternatively rcu helps. 
> > --- x/drivers/vhost/net.c > +++ y/drivers/vhost/net.c > @@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref { > atomic_t refcount; > wait_queue_head_t wait; > struct vhost_virtqueue *vq; > + struct rcu_head rcu; > }; > > #define VHOST_NET_BATCH 64 > @@ -247,9 +248,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqu > > static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) > { > - int r = atomic_sub_return(1, &ubufs->refcount); > + int r; > + > + rcu_read_lock(); > + r = atomic_sub_return(1, &ubufs->refcount); > if (unlikely(!r)) > wake_up(&ubufs->wait); > + rcu_read_unlock(); > return r; > } > > @@ -262,7 +267,7 @@ static void vhost_net_ubuf_put_and_wait( > static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) > { > vhost_net_ubuf_put_and_wait(ubufs); > - kfree(ubufs); > + kfree_rcu(ubufs, rcu); > } > > static void vhost_net_clear_ubuf_info(struct vhost_net *n) I like that. -- MST
Tested this patch's V2 with the virtio-net regression test, everything works fine. Tested-by: Lei Yang <leiyang@redhat.com> On Mon, Jul 21, 2025 at 12:13 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Sat, Jul 19, 2025 at 07:03:23AM +0800, Hillf Danton wrote: > > On Fri, 18 Jul 2025 14:03:55 +0300 Nikolay Kuratov wrote: > > > When operating on struct vhost_net_ubuf_ref, the following execution > > > sequence is theoretically possible: > > > CPU0 is finalizing DMA operation CPU1 is doing VHOST_NET_SET_BACKEND > > > // &ubufs->refcount == 2 > > > vhost_net_ubuf_put() vhost_net_ubuf_put_wait_and_free(oldubufs) > > > vhost_net_ubuf_put_and_wait() > > > vhost_net_ubuf_put() > > > int r = atomic_sub_return(1, &ubufs->refcount); > > > // r = 1 > > > int r = atomic_sub_return(1, &ubufs->refcount); > > > // r = 0 > > > wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > > > // no wait occurs here because condition is already true > > > kfree(ubufs); > > > if (unlikely(!r)) > > > wake_up(&ubufs->wait); // use-after-free > > > > > > This leads to use-after-free on ubufs access. This happens because CPU1 > > > skips waiting for wake_up() when refcount is already zero. > > > > > > To prevent that use a completion instead of wait_queue as the ubufs > > > notification mechanism. wait_for_completion() guarantees that there will > > > be complete() call prior to its return. > > > > > Alternatively rcu helps. 
> > > > --- x/drivers/vhost/net.c > > +++ y/drivers/vhost/net.c > > @@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref { > > atomic_t refcount; > > wait_queue_head_t wait; > > struct vhost_virtqueue *vq; > > + struct rcu_head rcu; > > }; > > > > #define VHOST_NET_BATCH 64 > > @@ -247,9 +248,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqu > > > > static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) > > { > > - int r = atomic_sub_return(1, &ubufs->refcount); > > + int r; > > + > > + rcu_read_lock(); > > + r = atomic_sub_return(1, &ubufs->refcount); > > if (unlikely(!r)) > > wake_up(&ubufs->wait); > > + rcu_read_unlock(); > > return r; > > } > > > > @@ -262,7 +267,7 @@ static void vhost_net_ubuf_put_and_wait( > > static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) > > { > > vhost_net_ubuf_put_and_wait(ubufs); > > - kfree(ubufs); > > + kfree_rcu(ubufs, rcu); > > } > > > > static void vhost_net_clear_ubuf_info(struct vhost_net *n) > > I like that. > > -- > MST >
On Fri, 18 Jul 2025 14:03:55 +0300 Nikolay Kuratov wrote: > > drivers/vhost/net.c | 9 +++++---- > 1 file changed, 5 insertions(+), 4 deletions(-) > > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c > index 7cbfc7d718b3..69e1bfb9627e 100644 > --- a/drivers/vhost/net.c > +++ b/drivers/vhost/net.c > @@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref { > * >1: outstanding ubufs > */ > atomic_t refcount; > - wait_queue_head_t wait; > + struct completion wait; > struct vhost_virtqueue *vq; > }; > > @@ -240,7 +240,7 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) > if (!ubufs) > return ERR_PTR(-ENOMEM); > atomic_set(&ubufs->refcount, 1); > - init_waitqueue_head(&ubufs->wait); > + init_completion(&ubufs->wait); > ubufs->vq = vq; > return ubufs; > } > @@ -249,14 +249,14 @@ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) > { > int r = atomic_sub_return(1, &ubufs->refcount); > if (unlikely(!r)) > - wake_up(&ubufs->wait); > + complete_all(&ubufs->wait); > return r; > } > > static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) > { > vhost_net_ubuf_put(ubufs); > - wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); > + wait_for_completion(&ubufs->wait); > } > > static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) > @@ -1381,6 +1381,7 @@ static void vhost_net_flush(struct vhost_net *n) > mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); > n->tx_flush = false; > atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); > + reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait); > mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); > } > } > -- > 2.34.1 > In the sequence below, vhost_net_flush() vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); wait_for_completion(&ubufs->wait); reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait); reinit after wait, so the chance for missing wakeup still exists.
> reinit after wait, so the chance for missing wakeup still exists. Can you please provide more details on this? Yes, it is reinit after wait, but wait should not be concurrent. I checked multiple code paths towards vhost_net_flush(); they're all protected by the device mutex, except vhost_net_release(). In the case of vhost_net_release() - wouldn't it be a problem in itself if it were called in parallel with some ioctl on a device? Also, the rationale for this is that put_and_wait() is waiting for the zero-refcount condition. Zero refcount means that after put_and_wait() the calling thread is the only owner of the ubufs structure. If multiple threads got the ubufs structure with zero refcount - how can either thread be sure that another one is not freeing it?
On Fri, 18 Jul 2025 16:24:14 +0300 Nikolay Kuratov wrote: > > reinit after wait, so the chance for missing wakeup still exists. > > Can you please provide more details on this? Yes, it is reinit after wait, The missing wakeup exists if complete_all() is used in combination with reinit after wait, with nothing to do with vhost. Your patch was checked simply because of reinit, which hints the chance for mess in mind without exception. Of course feel free to prove that missing wakeup disappears in vhost even if reinit is deployed. > but wait should not be concurrent. I checked multiple code pathes towards > vhost_net_flush(), they're all protected by device mutex, except > vhost_net_release(). In case of vhost_net_release() - it would be a > problem itself if it was called in parallel with some ioctl on a device? > > Also rationale for this is that put_and_wait() is waiting for zero > refcount condition. Zero refcount means that after put_and_wait() calling > thread is the only owner of an ubufs structure. If multiple threads got > ubufs structure with zero refcount - how either thread can be sure that > another one is not free'ing it? > >
© 2016 - 2025 Red Hat, Inc.