From: Ionut Nechita <ionut.nechita@windriver.com>
Commit 58bf93580fec ("blk-mq: move cpuhp callback registering out of q->sysfs_lock") introduced the global mutex blk_mq_cpuhp_lock to avoid lockdep warnings between q->sysfs_lock and the CPU hotplug lock.
On RT kernels (CONFIG_PREEMPT_RT), mutexes are rt_mutex based sleeping locks and I/O completions are processed in threaded IRQ handlers. When block layer operations need to acquire blk_mq_cpuhp_lock, those IRQ threads may block and reschedule, causing additional contention on top of the queue_lock issue from commit 679b1874eba7 ("block: fix ordering between checking QUEUE_FLAG_QUIESCED request adding").
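For background, the relevant locking primitives behave differently under CONFIG_PREEMPT_RT. A minimal illustrative sketch (not part of the patch; the lock and function names here are made up):

  static DEFINE_MUTEX(m);        /* sleeping lock; rt_mutex based on RT   */
  static DEFINE_SPINLOCK(s);     /* spins on !RT, sleeps (rt_mutex) on RT */
  static DEFINE_RAW_SPINLOCK(r); /* a true spinning lock, even on RT      */

  static void lock_behaviour_example(void)
  {
          mutex_lock(&m);         /* contention here blocks the caller,
                                   * e.g. a threaded IRQ handler on RT */
          mutex_unlock(&m);

          raw_spin_lock(&r);      /* never sleeps; the critical section
                                   * must stay short and non-sleeping */
          raw_spin_unlock(&r);
  }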
Test case (MegaRAID 12GSAS with 8 MSI-X vectors on an RT kernel):
 - v6.6.68-rt with the queue_lock fix: 640 MB/s
 - v6.6.69-rt: still exhibits contention due to the blk_mq_cpuhp_lock mutex
The functions protected by blk_mq_cpuhp_lock only perform fast, non-sleeping operations:
 - hlist_unhashed() checks
 - cpuhp_state_add_instance_nocalls(): hlist manipulation only
 - cpuhp_state_remove_instance_nocalls(): hlist manipulation only
 - INIT_HLIST_NODE() initialization
The _nocalls variants do not invoke the state callbacks and only manipulate data structures, making them safe to call under a raw_spinlock, as sketched below.
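For reference, the critical sections reduce to hlist bookkeeping along these lines (a simplified sketch of the existing __blk_mq_remove_cpuhp() helper; only the cpuhp_dead leg is shown):

  static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
  {
          lockdep_assert_held(&blk_mq_cpuhp_lock);

          /* Unhash from the CPUHP_BLK_MQ_DEAD instance list, if hashed. */
          if (!hlist_unhashed(&hctx->cpuhp_dead)) {
                  cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
                                                      &hctx->cpuhp_dead);
                  INIT_HLIST_NODE(&hctx->cpuhp_dead);
          }
  }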
Convert blk_mq_cpuhp_lock from a mutex to a raw_spinlock so that it cannot become a sleeping lock on RT kernels. This eliminates the contention bottleneck while preserving the original intent of the lockdep fix.
Fixes: 58bf93580fec ("blk-mq: move cpuhp callback registering out of q->sysfs_lock")
Cc: stable@vger.kernel.org
Signed-off-by: Ionut Nechita <ionut.nechita@windriver.com>
---
 block/blk-mq.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5fb8da4958d0..3982e24b1081 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,7 +43,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
-static DEFINE_MUTEX(blk_mq_cpuhp_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpuhp_lock);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -3641,9 +3641,9 @@ static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 
 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-	mutex_lock(&blk_mq_cpuhp_lock);
+	raw_spin_lock(&blk_mq_cpuhp_lock);
 	__blk_mq_remove_cpuhp(hctx);
-	mutex_unlock(&blk_mq_cpuhp_lock);
+	raw_spin_unlock(&blk_mq_cpuhp_lock);
 }
 
 static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
@@ -3683,9 +3683,9 @@ static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
 	list_splice_init(&q->unused_hctx_list, &hctx_list);
 	spin_unlock(&q->unused_hctx_lock);
 
-	mutex_lock(&blk_mq_cpuhp_lock);
+	raw_spin_lock(&blk_mq_cpuhp_lock);
 	__blk_mq_remove_cpuhp_list(&hctx_list);
-	mutex_unlock(&blk_mq_cpuhp_lock);
+	raw_spin_unlock(&blk_mq_cpuhp_lock);
 
 	spin_lock(&q->unused_hctx_lock);
 	list_splice(&hctx_list, &q->unused_hctx_list);
@@ -3702,10 +3702,10 @@ static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
 
-	mutex_lock(&blk_mq_cpuhp_lock);
+	raw_spin_lock(&blk_mq_cpuhp_lock);
 	queue_for_each_hw_ctx(q, hctx, i)
 		__blk_mq_add_cpuhp(hctx);
-	mutex_unlock(&blk_mq_cpuhp_lock);
+	raw_spin_unlock(&blk_mq_cpuhp_lock);
 }
 
 /*