From: Bobby Eshleman <bobbyeshleman@meta.com>
Add the ability to isolate vsock flows using namespaces.
The VM, via the vhost_vsock struct, inherits its namespace from the
process that opens the vhost-vsock device. The vhost_vsock lookup functions
are modified to take the namespace mode into account (e.g., if the CIDs
match but the modes don't align, return NULL).
vhost_vsock now acquires a reference to the namespace.
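
For illustration, this is roughly how a VMM could pin the device to a
specific namespace before handing the fd to the VM. This is a minimal
userspace sketch, not part of the patch; the netns path and the helper name
are hypothetical:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

/* Hypothetical helper: enter the named netns, then open vhost-vsock so
 * that vhost_vsock_dev_open() records that namespace in vsock->net.
 */
static int open_vhost_vsock_in_netns(const char *netns_path)
{
	int nsfd = open(netns_path, O_RDONLY); /* e.g. "/var/run/netns/vm0" */

	if (nsfd < 0)
		return -1;

	if (setns(nsfd, CLONE_NEWNET) < 0) {
		close(nsfd);
		return -1;
	}
	close(nsfd);

	/* The opener's current net namespace at this point is the one the
	 * new vhost_vsock is bound to.
	 */
	return open("/dev/vhost-vsock", O_RDWR);
}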
Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
---
Changes in v5:
- respect pid namespaces when assigning namespace to vhost_vsock
---
drivers/vhost/vsock.c | 74 +++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 66 insertions(+), 8 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 34adf0cf9124..1aabe9f85503 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -46,6 +46,11 @@ static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
+ struct net *net;
+ netns_tracker ns_tracker;
+
+ /* The ns mode at the time vhost_vsock was created */
+ enum vsock_net_mode orig_net_mode;
/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
struct hlist_node hash;
@@ -64,10 +69,40 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
+/* Return true if the namespace net can access the vhost_vsock vsock.
+ * Otherwise, return false.
+ *
+ * If the netns is the same, it doesn't matter whether its mode is local or
+ * global: vsock sockets within the same namespace can always communicate.
+ *
+ * If the netns is different, access is allowed only if the current mode of
+ * net is global and the mode of the vhost_vsock's namespace was global at
+ * the time the vhost_vsock was created. Checking the mode at creation time
+ * lets a flow keep working even if the namespace mode changes to "local" in
+ * the middle of a socket's lifetime. If we used the current namespace mode
+ * instead, any socket that was alive prior to the mode change would
+ * suddenly fail.
+ */
+static bool vhost_vsock_net_check_mode(struct net *net,
+ struct vhost_vsock *vsock,
+ bool check_global)
+{
+ if (net_eq(net, vsock->net))
+ return true;
+
+ return check_global &&
+ (vsock_net_mode(net) == VSOCK_NET_MODE_GLOBAL &&
+ vsock->orig_net_mode == VSOCK_NET_MODE_GLOBAL);
+}
+
/* Callers that dereference the return value must hold vhost_vsock_mutex or the
* RCU read lock.
+ *
+ * If check_global is true, treat the vhost_vsock's namespace and the net
+ * argument as matching when both are in global mode.
*/
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid, struct net *net,
+ bool check_global)
{
struct vhost_vsock *vsock;
@@ -78,9 +113,9 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
if (other_cid == 0)
continue;
- if (other_cid == guest_cid)
+ if (other_cid == guest_cid &&
+ vhost_vsock_net_check_mode(net, vsock, check_global))
return vsock;
-
}
return NULL;
@@ -272,13 +307,14 @@ static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct net *net = virtio_vsock_skb_net(skb);
struct vhost_vsock *vsock;
int len = skb->len;
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid), net, true);
if (!vsock) {
rcu_read_unlock();
kfree_skb(skb);
@@ -305,7 +341,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk_vsock(vsk)), true);
if (!vsock)
goto out;
@@ -462,11 +498,12 @@ static struct virtio_transport vhost_transport = {
static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk, u32 remote_cid)
{
+ struct net *net = sock_net(sk_vsock(vsk));
struct vhost_vsock *vsock;
bool seqpacket_allow = false;
rcu_read_lock();
- vsock = vhost_vsock_get(remote_cid);
+ vsock = vhost_vsock_get(remote_cid, net, true);
if (vsock)
seqpacket_allow = vsock->seqpacket_allow;
@@ -526,6 +563,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
continue;
}
+ virtio_vsock_skb_set_net(skb, vsock->net);
+ virtio_vsock_skb_set_orig_net_mode(skb, vsock->orig_net_mode);
total_len += sizeof(*hdr) + skb->len;
/* Deliver to monitoring devices all received packets */
@@ -652,10 +691,14 @@ static void vhost_vsock_free(struct vhost_vsock *vsock)
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
+
struct vhost_virtqueue **vqs;
struct vhost_vsock *vsock;
+ struct net *net;
int ret;
+ net = current->nsproxy->net_ns;
+
/* This struct is large and allocation could fail, fall back to vmalloc
* if there is no other way.
*/
@@ -669,6 +712,12 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
goto out;
}
+ vsock->net = get_net_track(net, &vsock->ns_tracker, GFP_KERNEL);
+
+ /* Cache the namespace mode so that the vhost_vsock keeps working as
+ * expected even if the netns mode changes later. */
+ vsock->orig_net_mode = vsock_net_mode(net);
+
vsock->guest_cid = 0; /* no CID assigned yet */
vsock->seqpacket_allow = false;
@@ -707,8 +756,16 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
+ /* DELETE ME:
+ *
+ * for each connected socket:
+ * vhost_vsock = vsock_sk(sk)
+ *
+ * find the peer
+ */
+
/* If the peer is still valid, no need to reset connection */
- if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk), false))
return;
/* If the close timeout is pending, let it expire. This avoids races
@@ -753,6 +810,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
vhost_dev_cleanup(&vsock->dev);
+ put_net_track(vsock->net, &vsock->ns_tracker);
kfree(vsock->dev.vqs);
vhost_vsock_free(vsock);
return 0;
@@ -779,7 +837,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
- other = vhost_vsock_get(guest_cid);
+ other = vhost_vsock_get(guest_cid, vsock->net, true);
if (other && other != vsock) {
mutex_unlock(&vhost_vsock_mutex);
return -EADDRINUSE;
--
2.47.3
On Tue, Sep 16, 2025 at 04:43:50PM -0700, Bobby Eshleman wrote:
>From: Bobby Eshleman <bobbyeshleman@meta.com>
>
>Add the ability to isolate vsock flows using namespaces.

[...]

>+		virtio_vsock_skb_set_net(skb, vsock->net);
>+		virtio_vsock_skb_set_orig_net_mode(skb, vsock->orig_net_mode);

In virtio_transport_common.c we do this in the alloc_skb function, can we do
the same also here? And maybe also in the virtio_transport.c (i.e. in
virtio_vsock_rx_fill() or adding a wrapper around
virtio_vsock_alloc_linear_skb()).

[...]

>+	/* DELETE ME:

mmm, to be deleted, right? :-)

[...]

>	/* If the peer is still valid, no need to reset connection */
>-	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
>+	if (vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk), false))

Can we add a comment here to explain why `check_global` is false?

Thanks,
Stefano
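
One way to read Stefano's wrapper suggestion is a helper that tags the skb
with the namespace information at allocation time rather than in the tx kick
loop. A minimal sketch, assuming virtio_vsock_alloc_linear_skb() takes the
payload size and a gfp mask and that the skb setters from this series are
available; the wrapper name and placement are hypothetical, this is not code
from the posted series:

/* Hypothetical wrapper: allocate a linear skb and tag it with the
 * vhost_vsock's namespace and original namespace mode in one place.
 */
static struct sk_buff *vhost_vsock_alloc_linear_skb(struct vhost_vsock *vsock,
						    size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = virtio_vsock_alloc_linear_skb(size, gfp);
	if (!skb)
		return NULL;

	virtio_vsock_skb_set_net(skb, vsock->net);
	virtio_vsock_skb_set_orig_net_mode(skb, vsock->orig_net_mode);

	return skb;
}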