On Mon, 5 Aug 2024 21:25:16 +0000 Mina Almasry wrote:
+/* Protected by rtnl_lock() */
+static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
nit: global variable declarations before any code
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
- struct netdev_rx_queue *rxq;
- unsigned long xa_idx;
- unsigned int rxq_idx;
- if (binding->list.next)
list_del(&binding->list);
- xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
if (rxq->mp_params.mp_priv == binding) {
rxq->mp_params.mp_priv = NULL;
rxq_idx = get_netdev_rx_queue_index(rxq);
netdev_rx_queue_restart(binding->dev, rxq_idx);
Throw in a WARN_ON() around this restart call; hopefully we'll get to addressing it later.
}
- }
- xa_erase(&net_devmem_dmabuf_bindings, binding->id);
- net_devmem_dmabuf_binding_put(binding);
+}
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
struct net_devmem_dmabuf_binding *binding)
+{
- struct netdev_rx_queue *rxq;
- u32 xa_idx;
- int err;
- if (rxq_idx >= dev->real_num_rx_queues)
return -ERANGE;
If we prevent binding to an inactive queue we should also prevent deactivation.
Please take a look at the (two?) callers of ethtool_get_max_rxnfc_channel() and ethtool_get_max_rxfh_channel(). Wrap those into a new function for reading max active channel, and take mp binds into account as well (send the refactor separately from the series to avoid making it longer).
- rxq = __netif_get_rx_queue(dev, rxq_idx);
- if (rxq->mp_params.mp_priv)
return -EEXIST;
- err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
GFP_KERNEL);
- if (err)
return err;
- rxq->mp_params.mp_priv = binding;
- err = netdev_rx_queue_restart(dev, rxq_idx);
- if (err)
goto err_xa_erase;
- return 0;
+err_xa_erase:
- rxq->mp_params.mp_priv = NULL;
- xa_erase(&binding->bound_rxqs, xa_idx);
- return err;
+}
+void dev_dmabuf_uninstall(struct net_device *dev)
+{
- unsigned int i, count = dev->num_rx_queues;
nit: why stash the value of num_rx_queues?
- struct net_devmem_dmabuf_binding *binding;
- struct netdev_rx_queue *rxq;
- unsigned long xa_idx;
- for (i = 0; i < count; i++) {
binding = dev->_rx[i].mp_params.mp_priv;
if (binding)
xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
if (rxq == &dev->_rx[i])
xa_erase(&binding->bound_rxqs, xa_idx);
nit: Please use "continue", this is too deeply indented
- nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
genlmsg_data(info->genlhdr),
genlmsg_len(info->genlhdr), rem) {
err = nla_parse_nested(
tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
netdev_queue_id_nl_policy, info->extack);
if (err < 0)
goto err_unbind;
rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
How do we know this attribute is present? NL_REQ_ATTR_CHECK()
err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding);
if (err)
goto err_unbind;
- }
- list_add(&binding->list, sock_binding_list);
- nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
- genlmsg_end(rsp, hdr);
- rtnl_unlock();
nit: for symmetry you should also unlock after list_add(), netlink msg alloc and prep are before rtnl_lock()
- return genlmsg_reply(rsp, info);
+err_unbind:
- net_devmem_unbind_dmabuf(binding);
+err_unlock:
- rtnl_unlock();
+err_genlmsg_free:
- nlmsg_free(rsp);
- return err;
}
+void netdev_nl_sock_priv_init(struct list_head *priv)
+{
- INIT_LIST_HEAD(priv);
+}
+void netdev_nl_sock_priv_destroy(struct list_head *priv)
+{
- struct net_devmem_dmabuf_binding *binding;
- struct net_devmem_dmabuf_binding *temp;
- list_for_each_entry_safe(binding, temp, priv, list) {
rtnl_lock();
net_devmem_unbind_dmabuf(binding);
rtnl_unlock();
- }
+}
nit: move these before the subsys_initcall, and before what it calls.