5.0-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jens Axboe <axboe@kernel.dk>

commit e861857545567adec8da3bdff728efdf7db12285 upstream.
We now wrap sbitmap waitqueues in an active counter, so we can avoid iterating wakeups unless we have waiters there. This works as long as everyone that's manipulating the waitqueues uses the proper helpers. For the tag wait case for shared tags, however, we add ourselves to the waitqueue without incrementing/decrementing the ->ws_active count. This means that wakeups can take a long time to happen.
Fix this by manually doing the inc/dec as needed for the wait queue handling.
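For reference, the check this relies on lives in sbq_wake_ptr() in lib/sbitmap.c; roughly (a paraphrased sketch of the 5d2ee7122c73 logic, not a verbatim copy of the 5.0 source):

	static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
	{
		int i, wake_index;

		/*
		 * Added by 5d2ee7122c73: if nobody incremented
		 * ->ws_active, there are no waiters to scan for.
		 */
		if (!atomic_read(&sbq->ws_active))
			return NULL;

		wake_index = atomic_read(&sbq->wake_index);
		for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
			struct sbq_wait_state *ws = &sbq->ws[wake_index];

			if (waitqueue_active(&ws->wait)) {
				if (wake_index != atomic_read(&sbq->wake_index))
					atomic_set(&sbq->wake_index, wake_index);
				return ws;
			}

			wake_index = sbq_index_inc(wake_index);
		}

		return NULL;
	}

A waiter added with __add_wait_queue() but without the matching atomic_inc(&sbq->ws_active) is invisible to that early return, which is why its wakeup can take a long time to happen.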
Reported-by: Michael Leun <kbug@newton.leun.net>
Tested-by: Michael Leun <kbug@newton.leun.net>
Cc: stable@vger.kernel.org
Reviewed-by: Omar Sandoval <osandov@fb.com>
Fixes: 5d2ee7122c73 ("sbitmap: optimize wakeup check")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 block/blk-mq.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1076,7 +1076,13 @@ static int blk_mq_dispatch_wake(wait_que
 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
 	spin_lock(&hctx->dispatch_wait_lock);
-	list_del_init(&wait->entry);
+	if (!list_empty(&wait->entry)) {
+		struct sbitmap_queue *sbq;
+
+		list_del_init(&wait->entry);
+		sbq = &hctx->tags->bitmap_tags;
+		atomic_dec(&sbq->ws_active);
+	}
 	spin_unlock(&hctx->dispatch_wait_lock);
 
 	blk_mq_run_hw_queue(hctx, true);
@@ -1092,6 +1098,7 @@ static int blk_mq_dispatch_wake(wait_que
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 				 struct request *rq)
 {
+	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;
@@ -1115,7 +1122,7 @@ static bool blk_mq_mark_tag_wait(struct
 	if (!list_empty_careful(&wait->entry))
 		return false;
 
-	wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+	wq = &bt_wait_ptr(sbq, hctx)->wait;
 
 	spin_lock_irq(&wq->lock);
 	spin_lock(&hctx->dispatch_wait_lock);
@@ -1125,6 +1132,7 @@ static bool blk_mq_mark_tag_wait(struct
 		return false;
 	}
 
+	atomic_inc(&sbq->ws_active);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(wq, wait);
 
@@ -1145,6 +1153,7 @@ static bool blk_mq_mark_tag_wait(struct
 	 * someone else gets the wakeup.
 	 */
 	list_del_init(&wait->entry);
+	atomic_dec(&sbq->ws_active);
 	spin_unlock(&hctx->dispatch_wait_lock);
 	spin_unlock_irq(&wq->lock);
 