The patch below does not apply to the 5.9-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From e1777d099728a76a8f8090f89649aac961e7e530 Mon Sep 17 00:00:00 2001
From: Damien Le Moal <damien.lemoal@wdc.com>
Date: Fri, 6 Nov 2020 20:01:41 +0900
Subject: [PATCH] null_blk: Fix scheduling in atomic with zoned mode
Commit aa1c09cb65e2 ("null_blk: Fix locking in zoned mode") changed zone locking to using the potentially sleeping wait_on_bit_io() function. This is acceptable when memory backing is enabled as the device queue is in that case marked as blocking, but this triggers a scheduling while in atomic context with memory backing disabled.
Fix this by relying solely on the device zone spinlock for zone information protection without temporarily releasing this lock around null_process_cmd() execution in null_zone_write(). This is OK to do since when memory backing is disabled, command processing does not block and the memory backing lock nullb->lock is unused. This solution avoids the overhead of having to mark a zoned null_blk device queue as blocking when memory backing is unused.
This patch also adds comments to the zone locking code to explain the unusual locking scheme.
Fixes: aa1c09cb65e2 ("null_blk: Fix locking in zoned mode")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index cfd00ad40355..c24d9b5ad81a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -47,7 +47,7 @@ struct nullb_device {
 	unsigned int nr_zones_closed;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
-	spinlock_t zone_dev_lock;
+	spinlock_t zone_lock;
 	unsigned long *zone_locks;
 
 	unsigned long size; /* device size in MB */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 8775acbb4f8f..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -46,11 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 	if (!dev->zones)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->zone_dev_lock);
-	dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
-	if (!dev->zone_locks) {
-		kvfree(dev->zones);
-		return -ENOMEM;
+	/*
+	 * With memory backing, the zone_lock spinlock needs to be temporarily
+	 * released to avoid scheduling in atomic context. To guarantee zone
+	 * information protection, use a bitmap to lock zones with
+	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+	 */
+	spin_lock_init(&dev->zone_lock);
+	if (dev->memory_backed) {
+		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+		if (!dev->zone_locks) {
+			kvfree(dev->zones);
+			return -ENOMEM;
+		}
 	}
 
 	if (dev->zone_nr_conv >= dev->nr_zones) {
@@ -137,12 +146,17 @@ void null_free_zoned_dev(struct nullb_device *dev)
 
 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	if (dev->memory_backed)
+		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	spin_lock_irq(&dev->zone_lock);
 }
 
 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	clear_and_wake_up_bit(zno, dev->zone_locks);
+	spin_unlock_irq(&dev->zone_lock);
+
+	if (dev->memory_backed)
+		clear_and_wake_up_bit(zno, dev->zone_locks);
 }
 
 int null_report_zones(struct gendisk *disk, sector_t sector,
@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
 	null_lock_zone(dev, zno);
-	spin_lock(&dev->zone_dev_lock);
 
 	switch (zone->cond) {
 	case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-		spin_unlock(&dev->zone_dev_lock);
+		/*
+		 * Memory backing allocation may sleep: release the zone_lock spinlock
+		 * to avoid scheduling in atomic context. Zone operation atomicity is
+		 * still guaranteed through the zone_locks bitmap.
+		 */
+		if (dev->memory_backed)
+			spin_unlock_irq(&dev->zone_lock);
 		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
-		spin_lock(&dev->zone_dev_lock);
+		if (dev->memory_backed)
+			spin_lock_irq(&dev->zone_lock);
+
 		if (ret != BLK_STS_OK)
 			goto unlock;
 
@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	ret = BLK_STS_OK;
 
 unlock:
-	spin_unlock(&dev->zone_dev_lock);
 	null_unlock_zone(dev, zno);
 
 	return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 			null_lock_zone(dev, i);
 			zone = &dev->zones[i];
 			if (zone->cond != BLK_ZONE_COND_EMPTY) {
-				spin_lock(&dev->zone_dev_lock);
 				null_reset_zone(dev, zone);
-				spin_unlock(&dev->zone_dev_lock);
 				trace_nullb_zone_op(cmd, i, zone->cond);
 			}
 			null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 
 	zone = &dev->zones[zone_no];
 
 	null_lock_zone(dev, zone_no);
-	spin_lock(&dev->zone_dev_lock);
 
 	switch (op) {
 	case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 		break;
 	}
 
-	spin_unlock(&dev->zone_dev_lock);
-
 	if (ret == BLK_STS_OK)
 		trace_nullb_zone_op(cmd, zone_no, zone->cond);
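For readers following along, the locking scheme the patch implements can be modeled in plain userspace C. This is a minimal sketch, not the kernel code: all model_* names are hypothetical, and pthread primitives stand in for the kernel's spinlock and wait_on_bit_lock_io(). Every zone access holds the device-wide spinlock; only memory-backed devices additionally take a sleepable per-zone lock, which is what later allows the spinlock to be dropped around command processing.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct nullb_device; not the kernel code. */
struct model_dev {
	bool memory_backed;
	pthread_spinlock_t zone_lock;	/* models dev->zone_lock */
	pthread_mutex_t *zone_locks;	/* models the dev->zone_locks bitmap */
	unsigned int nr_zones;
};

/* Models the init hunk: sleepable locks exist only with memory backing. */
static int model_init_zoned_dev(struct model_dev *dev)
{
	pthread_spin_init(&dev->zone_lock, PTHREAD_PROCESS_PRIVATE);
	if (dev->memory_backed) {
		dev->zone_locks = calloc(dev->nr_zones, sizeof(*dev->zone_locks));
		if (!dev->zone_locks)
			return -1;
		for (unsigned int i = 0; i < dev->nr_zones; i++)
			pthread_mutex_init(&dev->zone_locks[i], NULL);
	}
	return 0;
}

/* Models null_lock_zone(): sleepable per-zone lock first, then the spinlock. */
static void model_lock_zone(struct model_dev *dev, unsigned int zno)
{
	if (dev->memory_backed)
		pthread_mutex_lock(&dev->zone_locks[zno]);	/* may sleep */
	pthread_spin_lock(&dev->zone_lock);
}

/* Models null_unlock_zone(): release in the reverse order. */
static void model_unlock_zone(struct model_dev *dev, unsigned int zno)
{
	pthread_spin_unlock(&dev->zone_lock);
	if (dev->memory_backed)
		pthread_mutex_unlock(&dev->zone_locks[zno]);
}

The ordering matters: the sleepable lock is always taken before the spinlock, so a waiter never sleeps while holding the spinlock, which is exactly the bug the patch fixes.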
commit e1777d099728a76a8f8090f89649aac961e7e530 upstream.
Commit aa1c09cb65e2 ("null_blk: Fix locking in zoned mode") changed zone locking to using the potentially sleeping wait_on_bit_io() function. This is acceptable when memory backing is enabled as the device queue is in that case marked as blocking, but this triggers a scheduling while in atomic context with memory backing disabled.
Fix this by relying solely on the device zone spinlock for zone information protection without temporarily releasing this lock around null_process_cmd() execution in null_zone_write(). This is OK to do since when memory backing is disabled, command processing does not block and the memory backing lock nullb->lock is unused. This solution avoids the overhead of having to mark a zoned null_blk device queue as blocking when memory backing is unused.
This patch also adds comments to the zone locking code to explain the unusual locking scheme.
Fixes: aa1c09cb65e2 ("null_blk: Fix locking in zoned mode")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/block/null_blk.h       |  1 +
 drivers/block/null_blk_zoned.c | 31 +++++++++++++++++++++++++------
 2 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 206309ecc7e4..7562cd6cd681 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -44,6 +44,7 @@ struct nullb_device {
 	unsigned int nr_zones;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
+	spinlock_t zone_lock;
 	unsigned long *zone_locks;
 
 	unsigned long size; /* device size in MB */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 495713d6c989..d9102327357c 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -46,10 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 	if (!dev->zones)
 		return -ENOMEM;
 
-	dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
-	if (!dev->zone_locks) {
-		kvfree(dev->zones);
-		return -ENOMEM;
+	/*
+	 * With memory backing, the zone_lock spinlock needs to be temporarily
+	 * released to avoid scheduling in atomic context. To guarantee zone
+	 * information protection, use a bitmap to lock zones with
+	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+	 */
+	spin_lock_init(&dev->zone_lock);
+	if (dev->memory_backed) {
+		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+		if (!dev->zone_locks) {
+			kvfree(dev->zones);
+			return -ENOMEM;
+		}
 	}
 
 	if (dev->zone_nr_conv >= dev->nr_zones) {
@@ -118,12 +128,16 @@ void null_free_zoned_dev(struct nullb_device *dev)
 
 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	if (dev->memory_backed)
+		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	spin_lock_irq(&dev->zone_lock);
 }
 
 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	clear_and_wake_up_bit(zno, dev->zone_locks);
+	spin_unlock_irq(&dev->zone_lock);
+	if (dev->memory_backed)
+		clear_and_wake_up_bit(zno, dev->zone_locks);
 }
 
 int null_report_zones(struct gendisk *disk, sector_t sector,
@@ -233,7 +247,12 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
 		zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
+	if (dev->memory_backed)
+		spin_unlock_irq(&dev->zone_lock);
 	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+	if (dev->memory_backed)
+		spin_lock_irq(&dev->zone_lock);
+
 	if (ret != BLK_STS_OK)
 		break;
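The write-path hunk above follows the same pattern. As a userspace model (again with hypothetical model_* names, reusing the types and helpers from the sketch earlier in the thread), the spinlock is dropped around the potentially sleeping command processing only when memory backing is enabled, because in that case the sleepable per-zone lock still serializes access to the zone:

/* Hypothetical stand-in for null_process_cmd(); may sleep when backed. */
static int model_process_cmd(struct model_dev *dev, unsigned int zno)
{
	(void)dev;
	(void)zno;
	return 0;
}

/* Models the patched write path's locking around command processing. */
static int model_zone_write(struct model_dev *dev, unsigned int zno)
{
	int ret;

	model_lock_zone(dev, zno);

	/* ... zone condition checks and write pointer update go here ... */

	if (dev->memory_backed)
		pthread_spin_unlock(&dev->zone_lock);	/* processing may sleep */
	ret = model_process_cmd(dev, zno);
	if (dev->memory_backed)
		pthread_spin_lock(&dev->zone_lock);

	model_unlock_zone(dev, zno);
	return ret;
}

Without memory backing, processing never sleeps, so the spinlock is simply held across it and the bitmap locks are never allocated or taken.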
On Tue, Nov 10, 2020 at 04:36:05PM +0900, Damien Le Moal wrote:
> commit e1777d099728a76a8f8090f89649aac961e7e530 upstream.
>
> Commit aa1c09cb65e2 ("null_blk: Fix locking in zoned mode") changed zone locking to using the potentially sleeping wait_on_bit_io() function. This is acceptable when memory backing is enabled as the device queue is in that case marked as blocking, but this triggers a scheduling while in atomic context with memory backing disabled.
>
> Fix this by relying solely on the device zone spinlock for zone information protection without temporarily releasing this lock around null_process_cmd() execution in null_zone_write(). This is OK to do since when memory backing is disabled, command processing does not block and the memory backing lock nullb->lock is unused. This solution avoids the overhead of having to mark a zoned null_blk device queue as blocking when memory backing is unused.
>
> This patch also adds comments to the zone locking code to explain the unusual locking scheme.
>
> Fixes: aa1c09cb65e2 ("null_blk: Fix locking in zoned mode")
> Reported-by: kernel test robot <lkp@intel.com>
> Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Cc: stable@vger.kernel.org
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>
>  drivers/block/null_blk.h       |  1 +
>  drivers/block/null_blk_zoned.c | 31 +++++++++++++++++++++++++------
>  2 files changed, 26 insertions(+), 6 deletions(-)
Now queued up, thanks.
greg k-h
On Mon, 2020-11-09 at 11:25 +0100, gregkh@linuxfoundation.org wrote:
> The patch below does not apply to the 5.9-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
>
> thanks,
>
> greg k-h
Greg,
I just sent a backported patch. Tested it on top of current 5.9.6 with blktests (block and zbd groups) and zonefs tests.
Thanks.