From: Yunsheng Lin <linyunsheng@huawei.com>
[ Upstream commit 368d3cb406cdd074d1df2ad9ec06d1bfcb664882 ]
page_pool_ring_[un]lock() use in_softirq() to decide which spin lock variant to use. When they are called in a context where in_softirq() is false, spin_lock_bh() is taken in page_pool_ring_lock(); because spin_lock_bh() disables softirq, in_softirq() returns true by the time page_pool_ring_unlock() runs, so plain spin_unlock() is called there. The result is an inconsistent spin lock pair, leaving softirq disabled after the unlock.
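With in_softirq() initially false, the sequence therefore looks like this (an illustrative sketch of the broken pairing, not the literal call site):

	/* in_softirq() == false here */
	page_pool_ring_lock(pool);	/* takes spin_lock_bh(), which
					 * disables softirq */
	...
	page_pool_ring_unlock(pool);	/* in_softirq() is now true, so this
					 * takes plain spin_unlock() and
					 * softirq stays disabled */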
This patch fixes it by returning the in_softirq state from page_pool_producer_lock() and using it to decide which spin lock variant to use in page_pool_producer_unlock().
As pool->ring has both producer and consumer locks, rename the helpers to page_pool_producer_[un]lock() to reflect the actual usage. Also move them to page_pool.c, as they are only used there, and remove the 'inline', as the compiler may have a better idea of whether to inline or not.
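In sketch form, the calling pattern after this patch (as used by page_pool_put_page_bulk() in the diff below) becomes:

	bool in_softirq;

	in_softirq = page_pool_producer_lock(pool);
	/* ... produce pages into pool->ring ... */
	page_pool_producer_unlock(pool, in_softirq);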
Fixes: 7886244736a4 ("net: page_pool: Add bulk support for ptr_ring")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/r/20230522031714.5089-1-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/net/page_pool.h | 18 ------------------
 net/core/page_pool.c    | 28 ++++++++++++++++++++++++++--
 2 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 34bf531ffc8d6..ad0bafc877d48 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -383,22 +383,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 		page_pool_update_nid(pool, new_nid);
 }
 
-static inline void page_pool_ring_lock(struct page_pool *pool)
-	__acquires(&pool->ring.producer_lock)
-{
-	if (in_softirq())
-		spin_lock(&pool->ring.producer_lock);
-	else
-		spin_lock_bh(&pool->ring.producer_lock);
-}
-
-static inline void page_pool_ring_unlock(struct page_pool *pool)
-	__releases(&pool->ring.producer_lock)
-{
-	if (in_softirq())
-		spin_unlock(&pool->ring.producer_lock);
-	else
-		spin_unlock_bh(&pool->ring.producer_lock);
-}
-
 #endif /* _NET_PAGE_POOL_H */
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 193c187998650..2396c99bedeaa 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -133,6 +133,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
 #define recycle_stat_add(pool, __stat, val)
 #endif
 
+static bool page_pool_producer_lock(struct page_pool *pool)
+	__acquires(&pool->ring.producer_lock)
+{
+	bool in_softirq = in_softirq();
+
+	if (in_softirq)
+		spin_lock(&pool->ring.producer_lock);
+	else
+		spin_lock_bh(&pool->ring.producer_lock);
+
+	return in_softirq;
+}
+
+static void page_pool_producer_unlock(struct page_pool *pool,
+				      bool in_softirq)
+	__releases(&pool->ring.producer_lock)
+{
+	if (in_softirq)
+		spin_unlock(&pool->ring.producer_lock);
+	else
+		spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -615,6 +638,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count)
 {
 	int i, bulk_len = 0;
+	bool in_softirq;
 
 	for (i = 0; i < count; i++) {
 		struct page *page = virt_to_head_page(data[i]);
@@ -633,7 +657,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 		return;
 
 	/* Bulk producer into ptr_ring page_pool cache */
-	page_pool_ring_lock(pool);
+	in_softirq = page_pool_producer_lock(pool);
 	for (i = 0; i < bulk_len; i++) {
 		if (__ptr_ring_produce(&pool->ring, data[i])) {
 			/* ring full */
@@ -642,7 +666,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 		}
 	}
 	recycle_stat_add(pool, ring, i);
-	page_pool_ring_unlock(pool);
+	page_pool_producer_unlock(pool, in_softirq);
 
 	/* Hopefully all pages was return into ptr_ring */
 	if (likely(i == bulk_len))