From: Peter Oskolkov <posk@google.com>
[ Upstream commit 385114dec8a49b5e5945e77ba7de6356106713f4 ]
Tested: see the next patch in the series.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Oskolkov <posk@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
---
 include/linux/skbuff.h | 2 +-
 net/core/skbuff.c      | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a490dd7..8bfefdd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2273,7 +2273,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
 
 void *netdev_alloc_frag(unsigned int fragsz);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8a57bba..49f73fb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2380,23 +2380,27 @@ EXPORT_SYMBOL(skb_queue_purge);
 /**
  *	skb_rbtree_purge - empty a skb rbtree
  *	@root: root of the rbtree to empty
+ *	Return value: the sum of truesizes of all purged skbs.
  *
  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
  *	the list and one reference dropped. This function does not take
  *	any lock. Synchronization should be handled by the caller (e.g., TCP
  *	out-of-order queue is protected by the socket lock).
  */
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
 {
 	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
 
 	while (p) {
 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
 
 		p = rb_next(p);
 		rb_erase(&skb->rbnode, root);
+		sum += skb->truesize;
 		kfree_skb(skb);
 	}
+	return sum;
 }
 
 /**
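
Note (illustrative only): the point of returning the truesize total is that a caller can purge an rbtree-backed queue and adjust socket memory accounting in a single pass, instead of walking the tree again. The sketch below is a hypothetical caller, not code from this patch or from the next one in the series; it assumes a TCP socket whose out_of_order_queue is an rb_root, and the helper name example_drop_ofo_queue is made up for illustration.

#include <net/tcp.h>

/* Hypothetical caller sketch: purge the TCP out-of-order rbtree and
 * uncharge the freed memory from the socket's receive-buffer
 * accounting, using the truesize sum that skb_rbtree_purge() now
 * returns.
 */
static void example_drop_ofo_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int freed;

	/* The caller is expected to hold the socket lock, per the
	 * skb_rbtree_purge() kernel-doc above.
	 */
	freed = skb_rbtree_purge(&tp->out_of_order_queue);

	/* Return the freed truesize to the socket without walking the
	 * tree a second time.
	 */
	atomic_sub(freed, &sk->sk_rmem_alloc);
	sk_mem_reclaim(sk);
}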