On Wed, Nov 19, 2025 at 07:37:10PM -0800, Bobby Eshleman wrote:
...
@@ -292,25 +327,67 @@ net_devmem_bind_dmabuf(struct net_device *dev,
...
+	/* Enforce system-wide autorelease mode consistency for RX bindings.
+	 * TX bindings don't use autorelease (always false) since tokens aren't
+	 * leaked in TX path. Only RX bindings must all have the same
+	 * autorelease mode, never mixed.
+	 *
+	 * We use the xarray's lock to atomically check xa_empty() and toggle
+	 * the static key, avoiding the race where two new bindings may try to
+	 * set the static key to different states.
+	 */
+	xa_lock(&net_devmem_dmabuf_bindings);
+	if (direction == DMA_FROM_DEVICE) {
+		if (!xa_empty(&net_devmem_dmabuf_bindings)) {
+			bool mode;
+
+			mode = static_key_enabled(&tcp_devmem_ar_key);
+			/* When bindings exist, enforce that the mode does not
+			 * change.
+			 */
+			if (mode != autorelease) {
+				NL_SET_ERR_MSG_FMT(extack,
+						   "System already configured with autorelease=%d",
+						   mode);
+				err = -EINVAL;
+				goto err_unlock_xa;
+			}
+		} else {
+			/* First binding sets the mode for all subsequent
+			 * bindings.
+			 */
+			if (autorelease)
+				static_branch_enable(&tcp_devmem_ar_key);
+			else
+				static_branch_disable(&tcp_devmem_ar_key);
Hi Bobby,
This code runs inside xa_lock(), which takes a spinlock.
But static_branch_enable() and static_branch_disable() may sleep, since they take a mutex (jump_label_lock) and call cpus_read_lock() — sleeping here is not allowed in atomic context.
Flagged by Claude Code with https://github.com/masoncl/review-prompts/
+		}
+	}
+
 	err = __xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
 				binding, xa_limit_32b, &id_alloc_next,
 				GFP_KERNEL);
 	if (err < 0)
-		goto err_free_chunks;
+		goto err_unlock_xa;
+	xa_unlock(&net_devmem_dmabuf_bindings);

 	list_add(&binding->list, &priv->bindings);

 	return binding;

+err_unlock_xa:
+	xa_unlock(&net_devmem_dmabuf_bindings);
...