From: Mikulas Patocka <mpatocka@redhat.com>
commit 5df96f2b9f58a5d2dc1f30fe7de75e197f2c25f2 upstream.
Commit adc0daad366b62ca1bce3e2958a40b0b71a8b8b3 ("dm: report suspended device during destroy") broke integrity recalculation.
The problem is that dm_suspended() returns true not only during suspend, but also during resume. So this race condition could occur:

1. dm_integrity_resume calls queue_work(ic->recalc_wq, &ic->recalc_work)
2. integrity_recalc (&ic->recalc_work) preempts the current thread
3. integrity_recalc calls if (unlikely(dm_suspended(ic->ti))) goto unlock_ret;
4. integrity_recalc exits and no recalculating is done.
To fix this race condition, add a function dm_post_suspending that is only true during the postsuspend phase and use it instead of dm_suspended().
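For illustration only (not part of the change itself), a target's deferred work is expected to use the new helper the same way the dm-integrity hunks below do. The worker, context structure and field names in this sketch are hypothetical; only dm_post_suspending() is the API added here:

	/* hypothetical background worker of some dm target */
	static void example_recalc_work(struct work_struct *w)
	{
		struct example_ctx *ctx = container_of(w, struct example_ctx, work);

		/*
		 * dm_suspended() is also true while the device is resuming,
		 * so work queued from a resume hook would wrongly bail out
		 * here.  dm_post_suspending() is true only while postsuspend
		 * is running (and once destroy has begun), which is the
		 * window this check is actually meant to cover.
		 */
		if (unlikely(dm_post_suspending(ctx->ti)))
			return;

		/* ... do the deferred work ... */
	}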
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Fixes: adc0daad366b ("dm: report suspended device during destroy")
Cc: stable@vger.kernel.org # v4.18+
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/md/dm-integrity.c     |    4 ++--
 drivers/md/dm.c               |   17 +++++++++++++++++
 include/linux/device-mapper.h |    1 +
 3 files changed, 20 insertions(+), 2 deletions(-)
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2424,7 +2424,7 @@ static void integrity_writer(struct work
 	unsigned prev_free_sectors;
 
 	/* the following test is not needed, but it tests the replay code */
-	if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
 		return;
 
 	spin_lock_irq(&ic->endio_wait.lock);
@@ -2485,7 +2485,7 @@ static void integrity_recalc(struct work
 
 next_chunk:
 
-	if (unlikely(dm_suspended(ic->ti)))
+	if (unlikely(dm_post_suspending(ic->ti)))
 		goto unlock_ret;
 
 	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -142,6 +142,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_DEFERRED_REMOVE 6
 #define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
 
 #define DM_NUMA_NODE NUMA_NO_NODE
 static int dm_numa_node = DM_NUMA_NODE;
@@ -2385,6 +2386,7 @@ static void __dm_destroy(struct mapped_d
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		set_bit(DMF_SUSPENDED, &md->flags);
+		set_bit(DMF_POST_SUSPENDING, &md->flags);
 		dm_table_postsuspend_targets(map);
 	}
 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@@ -2743,7 +2745,9 @@ retry:
 	if (r)
 		goto out_unlock;
 
+	set_bit(DMF_POST_SUSPENDING, &md->flags);
 	dm_table_postsuspend_targets(map);
+	clear_bit(DMF_POST_SUSPENDING, &md->flags);
 
 out_unlock:
 	mutex_unlock(&md->suspend_lock);
@@ -2840,7 +2844,9 @@ static void __dm_internal_suspend(struct
 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
 			    DMF_SUSPENDED_INTERNALLY);
 
+	set_bit(DMF_POST_SUSPENDING, &md->flags);
 	dm_table_postsuspend_targets(map);
+	clear_bit(DMF_POST_SUSPENDING, &md->flags);
 }
 
 static void __dm_internal_resume(struct mapped_device *md)
@@ -3001,6 +3007,11 @@ int dm_suspended_md(struct mapped_device
 	return test_bit(DMF_SUSPENDED, &md->flags);
 }
 
+static int dm_post_suspending_md(struct mapped_device *md)
+{
+	return test_bit(DMF_POST_SUSPENDING, &md->flags);
+}
+
 int dm_suspended_internally_md(struct mapped_device *md)
 {
 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@@ -3017,6 +3028,12 @@ int dm_suspended(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
+int dm_post_suspending(struct dm_target *ti)
+{
+	return dm_post_suspending_md(dm_table_get_md(ti->table));
+}
+EXPORT_SYMBOL_GPL(dm_post_suspending);
+
 int dm_noflush_suspending(struct dm_target *ti)
 {
 	return __noflush_suspending(dm_table_get_md(ti->table));
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -426,6 +426,7 @@ const char *dm_device_name(struct mapped
 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
 union map_info *dm_get_rq_mapinfo(struct request *rq);