5.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Eric Dumazet <edumazet@google.com>
[ Upstream commit 1ddabdfaf70c202b88925edd74c66f4707dbd92e ]
Some callers want to know if the packet has been sent or dropped, to inform upper stacks.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 505ead7ab77f ("netpoll: hold rcu read lock in __netpoll_send_skb()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/netpoll.h |  2 +-
 net/core/netpoll.c      | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2db513437d2c0..f8f5270a891d0 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -63,7 +63,7 @@ int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5eefbb2e145a4..408fc07007900 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -323,7 +323,7 @@ static int netpoll_owner_active(struct net_device *dev)
 }
 
 /* call with IRQ disabled */
-static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	netdev_tx_t status = NETDEV_TX_BUSY;
 	struct net_device *dev;
@@ -338,7 +338,7 @@ static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
@@ -377,15 +377,18 @@ static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
 }
 
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	unsigned long flags;
+	netdev_tx_t ret;
 
 	local_irq_save(flags);
-	__netpoll_send_skb(np, skb);
+	ret = __netpoll_send_skb(np, skb);
 	local_irq_restore(flags);
+	return ret;
 }
 EXPORT_SYMBOL(netpoll_send_skb);