In the process of making queue management API rtnl_lock-less, we
need a separate lock to protect xa that keeps a global list of bindings.
Also change the ordering of 'posting' binding to
net_devmem_dmabuf_bindings: xa_alloc is done after binding is fully
initialized (so xa_load looks up fully instantiated bindings) and
xa_erase is done as a first step during unbind.
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
---
net/core/devmem.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 7c6e0b5b6acb..c16cdac46bed 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -25,7 +25,7 @@
/* Device memory support */
-/* Protected by rtnl_lock() */
+static DEFINE_MUTEX(net_devmem_bindings_mutex);
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
static const struct memory_provider_ops dmabuf_devmem_ops;
@@ -119,6 +119,10 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
unsigned long xa_idx;
unsigned int rxq_idx;
+ mutex_lock(&net_devmem_bindings_mutex);
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+ mutex_unlock(&net_devmem_bindings_mutex);
+
if (binding->list.next)
list_del(&binding->list);
@@ -133,8 +137,6 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
}
- xa_erase(&net_devmem_dmabuf_bindings, binding->id);
-
net_devmem_dmabuf_binding_put(binding);
}
@@ -220,24 +222,15 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
}
binding->dev = dev;
-
- err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
- binding, xa_limit_32b, &id_alloc_next,
- GFP_KERNEL);
- if (err < 0)
- goto err_free_binding;
-
xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
-
refcount_set(&binding->ref, 1);
-
binding->dmabuf = dmabuf;
binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
if (IS_ERR(binding->attachment)) {
err = PTR_ERR(binding->attachment);
NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
- goto err_free_id;
+ goto err_free_binding;
}
binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
@@ -305,6 +298,14 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
virtual += len;
}
+ mutex_lock(&net_devmem_bindings_mutex);
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+ binding, xa_limit_32b, &id_alloc_next,
+ GFP_KERNEL);
+ mutex_unlock(&net_devmem_bindings_mutex);
+ if (err < 0)
+ goto err_free_chunks;
+
return binding;
err_free_chunks:
@@ -316,8 +317,6 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
DMA_FROM_DEVICE);
err_detach:
dma_buf_detach(dmabuf, binding->attachment);
-err_free_id:
- xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
kfree(binding);
err_put_dmabuf:
--
2.48.1
On Fri, 7 Mar 2025 07:57:23 -0800 Stanislav Fomichev wrote: > In the process of making queue management API rtnl_lock-less, we > need a separate lock to protect xa that keeps a global list of bindings. > > Also change the ordering of 'posting' binding to > net_devmem_dmabuf_bindings: xa_alloc is done after binding is fully > initialized (so xa_load lookups fully instantiated bindings) and > xa_erase is done as a first step during unbind. You're just wrapping the calls to xarray here, is there a plan to use this new lock for other things? xarray has a built in spin lock, we don't have to protect it.
On 03/07, Jakub Kicinski wrote: > On Fri, 7 Mar 2025 07:57:23 -0800 Stanislav Fomichev wrote: > > In the process of making queue management API rtnl_lock-less, we > > need a separate lock to protect xa that keeps a global list of bindings. > > > > Also change the ordering of 'posting' binding to > > net_devmem_dmabuf_bindings: xa_alloc is done after binding is fully > > initialized (so xa_load lookups fully instantiated bindings) and > > xa_erase is done as a first step during unbind. > > You're just wrapping the calls to xarray here, is there a plan to use > this new lock for other things? xarray has a built in spin lock, we > don't have to protect it. Oh, that is true, I completely missed that. In this case I can drop this patch, thanks!
© 2016 - 2026 Red Hat, Inc.