Allow memory providers to configure rx queues with a specific receive buffer length. Pass it in struct pp_memory_provider_params, which is copied into the queue, so it's preserved across queue restarts. It's an opt-in feature for drivers, which they can enable by setting NDO_QUEUE_RX_BUF_SIZE in the supported_params field of their struct netdev_queue_mgmt_ops.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> --- include/net/netdev_queues.h | 9 +++++++++ include/net/page_pool/types.h | 1 + net/core/netdev_rx_queue.c | 4 ++++ 3 files changed, 14 insertions(+)
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index cd00e0406cf4..2e6bcec1e1e3 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -111,6 +111,11 @@ void netdev_stat_queue_sum(struct net_device *netdev, int tx_start, int tx_end, struct netdev_queue_stats_tx *tx_sum);
+enum { + /* queue restart supports custom rx buffer sizes */ + NDO_QUEUE_RX_BUF_SIZE = 0x1, +}; + /** * struct netdev_queue_mgmt_ops - netdev ops for queue management * @@ -130,6 +135,8 @@ void netdev_stat_queue_sum(struct net_device *netdev, * @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used * for this queue. Return NULL on error. + * + * @supported_params: bitmask of supported features, see NDO_QUEUE_* + * * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only * be called for an interface which is open. @@ -149,6 +156,8 @@ struct netdev_queue_mgmt_ops { int idx); struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev, int idx); + + unsigned supported_params; };
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx); diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 1509a536cb85..be74e4aec7b5 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -161,6 +161,7 @@ struct memory_provider_ops; struct pp_memory_provider_params { void *mp_priv; const struct memory_provider_ops *mp_ops; + u32 rx_buf_len; };
struct page_pool { diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index a0083f176a9c..09d6f97e910e 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -29,6 +29,10 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start) return -EOPNOTSUPP;
+ if (!(qops->supported_params & NDO_QUEUE_RX_BUF_SIZE) && + rxq->mp_params.rx_buf_len) + return -EOPNOTSUPP; + netdev_assert_locked(dev);
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);