The patch below does not apply to the 5.4-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From a4c8dd9c2d0987cf542a2a0c42684c9c6d78a04e Mon Sep 17 00:00:00 2001
From: Jeffle Xu <jefflexu@linux.alibaba.com>
Date: Tue, 2 Feb 2021 11:35:28 +0800
Subject: [PATCH] dm table: fix iterate_devices based device capability checks
According to the definition of dm_iterate_devices_fn:
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
For some target type (e.g. dm-stripe), one call of iterate_devices() may iterate multiple underlying devices internally, in which case a non-zero return code returned by iterate_devices_callout_fn will stop the iteration in advance. No iterate_devices_callout_fn should return non-zero unless device iteration should stop.
Rename dm_table_requires_stable_pages() to dm_table_any_dev_attr() and elevate it for reuse to stop iterating (and return non-zero) on the first device that causes iterate_devices_callout_fn to return non-zero. Use dm_table_any_dev_attr() to properly iterate through devices.
Rename device_is_nonrot() to device_is_rotational() and invert logic accordingly to fix improper disposition.
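To make the failure mode concrete, here is a toy userspace model (illustrative only, not kernel code, with a made-up two-device layout) of the stop-on-first-non-zero behaviour that iterate_devices() has for a stripe target; it shows why a positive callout cannot implement an "all devices" check, while the inverted callout can:

#include <stdbool.h>
#include <stdio.h>

struct toy_dev { bool rotational; };

typedef int (*callout_fn)(struct toy_dev *dev);

/* Mimics dm_iterate_devices_fn: stop at the first non-zero callout return. */
static int iterate_devices(struct toy_dev *devs, int n, callout_fn fn)
{
	for (int i = 0; i < n; i++) {
		int r = fn(&devs[i]);

		if (r)
			return r;
	}
	return 0;
}

static int is_nonrot(struct toy_dev *d)     { return !d->rotational; }
static int is_rotational(struct toy_dev *d) { return d->rotational; }

int main(void)
{
	/* One SSD followed by one HDD: the table is NOT all non-rotational. */
	struct toy_dev devs[] = { { .rotational = false }, { .rotational = true } };

	/*
	 * Old logic: a positive callout used for an "all devices" check.
	 * Iteration stops as soon as the SSD matches, the HDD is never
	 * inspected, and the check wrongly reports 1 ("all non-rotational").
	 */
	printf("old all-nonrot check: %d\n", iterate_devices(devs, 2, is_nonrot));

	/*
	 * Fixed logic: ask whether ANY device is rotational and negate.
	 * The HDD trips the callout, so the check correctly reports 0.
	 */
	printf("new all-nonrot check: %d\n", !iterate_devices(devs, 2, is_rotational));
	return 0;
}

Compiled standalone, the old-style check prints 1 for the mixed SSD+HDD table, while the inverted check prints 0.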
Fixes: c3c4555edd10 ("dm table: clear add_random unless all devices have it set")
Fixes: 4693c9668fdc ("dm table: propagate non rotational flag")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4acf2342f7ad..3e211e64f348 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1295,6 +1295,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned by whether the input iterate_devices_callout_fn, or
+ * iterate_devices() itself internally.
+ *
+ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device supporting some kind of attribute,
+ * should use the iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices supporting some kind of attribute,
+ * should use the iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+ * uses an @anti_func that handle semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+				  iterate_devices_callout_fn func)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, func, NULL))
+			return true;
+	}
+
+	return false;
+}
+
 static int count_device(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
@@ -1595,12 +1635,12 @@ static int dm_table_supports_dax_write_cache(struct dm_table *t)
 	return false;
 }
 
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
-			    sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_nonrot(q);
+	return q && !blk_queue_nonrot(q);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1611,23 +1651,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static bool dm_table_all_devices_attribute(struct dm_table *t,
-					   iterate_devices_callout_fn func)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, func, NULL))
-			return false;
-	}
-
-	return true;
-}
-
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
 					 sector_t start, sector_t len, void *data)
 {
@@ -1779,27 +1802,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
 	return q && blk_queue_stable_writes(q);
 }
 
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well.  Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1849,10 +1851,10 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
-	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-	else
+	if (dm_table_any_dev_attr(t, device_is_rotational))
 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+	else
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1864,8 +1866,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Some devices don't use blk_integrity but still want stable pages
 	 * because they do their own checksumming.
+	 * If any underlying device requires stable pages, a table must require
+	 * them as well.  Only targets that support iterate_devices are considered:
+	 * don't want error, zero, etc to require stable pages.
 	 */
-	if (dm_table_requires_stable_pages(t))
+	if (dm_table_any_dev_attr(t, device_requires_stable_pages))
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
@@ -1876,7 +1881,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
-	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+	if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	/*
- patch 1/3/4 is from upstream
- patch 2 is to fix the code specific to 4.4 (has been removed in upstream)
Jeffle Xu (4):
  dm table: fix iterate_devices based device capability checks
  dm table: fix partial completion iterate_devices based device capability checks
  dm table: fix DAX iterate_devices based device capability checks
  dm table: fix zoned iterate_devices based device capability checks
 drivers/md/dm-table.c | 174 ++++++++++++++++++------------------------
 drivers/md/dm.c       |   2 +-
 drivers/md/dm.h       |   2 +-
 3 files changed, 75 insertions(+), 103 deletions(-)
commit a4c8dd9c2d0987cf542a2a0c42684c9c6d78a04e upstream.
According to the definition of dm_iterate_devices_fn:
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
For some target type (e.g. dm-stripe), one call of iterate_devices() may iterate multiple underlying devices internally, in which case a non-zero return code returned by iterate_devices_callout_fn will stop the iteration in advance. No iterate_devices_callout_fn should return non-zero unless device iteration should stop.
Rename dm_table_requires_stable_pages() to dm_table_any_dev_attr() and elevate it for reuse to stop iterating (and return non-zero) on the first device that causes iterate_devices_callout_fn to return non-zero. Use dm_table_any_dev_attr() to properly iterate through devices.
Rename device_is_nonrot() to device_is_rotational() and invert logic accordingly to fix improper disposition.
Fixes: c3c4555edd10 ("dm table: clear add_random unless all devices have it set")
Fixes: 4693c9668fdc ("dm table: propagate non rotational flag")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
 drivers/md/dm-table.c | 97 +++++++++++++++++++++++--------------------
 1 file changed, 51 insertions(+), 46 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 98be040cf958..bf704d238662 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1376,6 +1376,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned by whether the input iterate_devices_callout_fn, or
+ * iterate_devices() itself internally.
+ *
+ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device supporting some kind of attribute,
+ * should use the iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices supporting some kind of attribute,
+ * should use the iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+ * uses an @anti_func that handle semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+				  iterate_devices_callout_fn func)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, func, NULL))
+			return true;
+	}
+
+	return false;
+}
+
 static int count_device(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
@@ -1692,12 +1732,12 @@ static int dm_table_supports_dax_write_cache(struct dm_table *t)
 	return false;
 }
 
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
-			    sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_nonrot(q);
+	return q && !blk_queue_nonrot(q);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1708,23 +1748,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static bool dm_table_all_devices_attribute(struct dm_table *t,
-					   iterate_devices_callout_fn func)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, func, NULL))
-			return false;
-	}
-
-	return true;
-}
-
 static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
 					sector_t start, sector_t len, void *data)
 {
@@ -1863,27 +1886,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
 	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
 }
 
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well.  Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1928,10 +1930,10 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
-	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-	else
+	if (dm_table_any_dev_attr(t, device_is_rotational))
 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+	else
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1943,8 +1945,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Some devices don't use blk_integrity but still want stable pages
 	 * because they do their own checksumming.
+	 * If any underlying device requires stable pages, a table must require
+	 * them as well.  Only targets that support iterate_devices are considered:
+	 * don't want error, zero, etc to require stable pages.
 	 */
-	if (dm_table_requires_stable_pages(t))
+	if (dm_table_any_dev_attr(t, device_requires_stable_pages))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 	else
 		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
@@ -1955,7 +1960,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
-	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+	if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	/*
On Fri, Mar 05, 2021 at 02:57:19PM +0800, Jeffle Xu wrote:
> commit a4c8dd9c2d0987cf542a2a0c42684c9c6d78a04e upstream.
>
> [...]
This patch breaks the build, how did you test it???
Similar to commit a4c8dd9c2d09 ("dm table: fix iterate_devices based device capability checks"), fix partial completion capability check and invert logic of the corresponding iterate_devices_callout_fn so that all devices' partial completion capabilities are properly checked.
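The same reasoning can be checked outside the kernel. A hedged sketch (userspace C with made-up device names, not the kernel helpers themselves) of why "all devices are NVMe" has to be expressed as "no device is non-NVMe" under stop-on-first-non-zero iteration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int is_nvme(const char *name)  { return strncmp(name, "nvme", 4) == 0; }
static int not_nvme(const char *name) { return strncmp(name, "nvme", 4) != 0; }

/* Stop-on-first-non-zero iteration, as iterate_devices() does for dm-stripe. */
static int any(const char **names, int n, int (*fn)(const char *))
{
	for (int i = 0; i < n; i++)
		if (fn(names[i]))
			return 1;
	return 0;
}

int main(void)
{
	/* A mixed table: one NVMe device, one SCSI disk. */
	const char *names[] = { "nvme0n1", "sda" };

	/* Old check: stops at nvme0n1 and wrongly concludes "all NVMe" (prints 1). */
	printf("old does-not-support-partial-completion: %d\n", any(names, 2, is_nvme));

	/* New check: "no device is non-NVMe" correctly reports 0 here. */
	printf("new does-not-support-partial-completion: %d\n", !any(names, 2, not_nvme));
	return 0;
}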
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Fixes: 22c11858e800 ("dm: introduce DM_TYPE_NVME_BIO_BASED")
---
 drivers/md/dm-table.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bf704d238662..c470e174e686 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1748,18 +1748,18 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
 					sector_t start, sector_t len, void *data)
 {
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
 {
-	return dm_table_all_devices_attribute(t, device_no_partial_completion);
+	return !dm_table_any_dev_attr(t, device_is_partial_completion);
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
On Fri, Mar 05, 2021 at 02:57:20PM +0800, Jeffle Xu wrote:
> Similar to commit a4c8dd9c2d09 ("dm table: fix iterate_devices based device capability checks"), fix partial completion capability check and invert logic of the corresponding iterate_devices_callout_fn so that all devices' partial completion capabilities are properly checked.
>
> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
> Fixes: 22c11858e800 ("dm: introduce DM_TYPE_NVME_BIO_BASED")
>
>  drivers/md/dm-table.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
Why isn't this a commit in Linus's tree? That needs to be really really explicitly documented here.
thanks,
greg k-h
On 3/5/21 4:44 PM, Greg KH wrote:
> On Fri, Mar 05, 2021 at 02:57:20PM +0800, Jeffle Xu wrote:
>> Similar to commit a4c8dd9c2d09 ("dm table: fix iterate_devices based device capability checks"), fix partial completion capability check and invert logic of the corresponding iterate_devices_callout_fn so that all devices' partial completion capabilities are properly checked.
>>
>> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
>> Fixes: 22c11858e800 ("dm: introduce DM_TYPE_NVME_BIO_BASED")
>>
>>  drivers/md/dm-table.c | 6 +++---
>>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> Why isn't this a commit in Linus's tree? That needs to be really really explicitly documented here.
Sorry, as I stated in my reply to the patch set for 4.19 (that reply doesn't appear in the archive yet, so I just copy the content here in quotation format):

> Similarly, the code this patch fixes, i.e., commit 22c11858e800 ("dm: introduce DM_TYPE_NVME_BIO_BASED"), was removed by commit 9c37de297f65 ("dm: remove special-casing of bio-based immutable singleton target on NVMe") in v5.10. Thus this code no longer exists in the latest master branch.
It needs Mike's review.
I could update the commit log and document all the information once Mike has reviewed.
commit 57ba3e506c30a84b1ba1dd77ddd9f2be9d472e98 upstream.
Fix dm_table_supports_dax() and invert logic of both iterate_devices_callout_fn so that all devices' DAX capabilities are properly checked.
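The inverted callouts must be exact logical complements of the originals: for example device_dax_synchronous()'s "dev->dax_dev && dax_synchronous(dev->dax_dev)" becomes "!dev->dax_dev || !dax_synchronous(dev->dax_dev)", not just a leading negation of one term. A tiny standalone check of that De Morgan step (illustrative userspace C, hypothetical names, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* Old positive callout: "capable" means a dax_dev exists AND it is synchronous. */
static int dax_sync_capable(bool has_dax_dev, bool synchronous)
{
	return has_dax_dev && synchronous;
}

/* New negative callout: the De Morgan complement of the predicate above. */
static int not_dax_sync_capable(bool has_dax_dev, bool synchronous)
{
	return !has_dax_dev || !synchronous;
}

int main(void)
{
	/* Exhaustively confirm the two predicates are exact complements. */
	for (int d = 0; d <= 1; d++)
		for (int s = 0; s <= 1; s++)
			assert(dax_sync_capable(d, s) == !not_dax_sync_capable(d, s));
	return 0;
}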
Fixes: 545ed20e6df6 ("dm: add infrastructure for DAX support")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
 drivers/md/dm-table.c | 37 ++++++++++---------------------------
 drivers/md/dm.c       |  2 +-
 drivers/md/dm.h       |  2 +-
 3 files changed, 12 insertions(+), 29 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c470e174e686..07530f2aa027 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -888,24 +888,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
 /* validate the dax capability of the target device span */
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
 	int blocksize = *(int *) data, id;
 	bool rc;
 
 	id = dax_read_lock();
-	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
 	dax_read_unlock(id);
 
 	return rc;
 }
 
 /* Check devices support synchronous DAX */
-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
+					      sector_t start, sector_t len, void *data)
 {
-	return dev->dax_dev && dax_synchronous(dev->dax_dev);
+	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 }
 
 bool dm_table_supports_dax(struct dm_table *t,
@@ -922,7 +922,7 @@ bool dm_table_supports_dax(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
 			return false;
 	}
 
@@ -996,7 +996,7 @@ static int dm_table_determine_type(struct dm_table *t)
 verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
-		if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
+		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 			t->type = DM_TYPE_DAX_BIO_BASED;
 		} else {
@@ -1715,23 +1715,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
 	return false;
 }
 
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti,
-				device_dax_write_cache_enabled, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
@@ -1918,15 +1901,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
+	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-		if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
+		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
 			set_dax_synchronous(t->md->dax_dev);
 	} else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
 
-	if (dm_table_supports_dax_write_cache(t))
+	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c6ce42daff27..a615926c532a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1119,7 +1119,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
 	if (!map)
 		goto out;
 
-	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
 
 out:
 	dm_put_live_table(md, srcu_idx);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index d7c4f6606b5f..9fbf87e04019 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -74,7 +74,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
 			   int *blocksize);
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 			   sector_t start, sector_t len, void *data);
 
 void dm_lock_md_type(struct mapped_device *md);
On Fri, Mar 05, 2021 at 02:57:21PM +0800, Jeffle Xu wrote:
> commit 57ba3e506c30a84b1ba1dd77ddd9f2be9d472e98 upstream.
There is no such git id "upstream" :(
Please fix up all of these series with the needed information on the non-upstream patch, and make sure you have the correct git commit ids on your patches.
thanks,
greg k-h
On 3/7/21 11:06 PM, Greg KH wrote:
> On Fri, Mar 05, 2021 at 02:57:21PM +0800, Jeffle Xu wrote:
>> commit 57ba3e506c30a84b1ba1dd77ddd9f2be9d472e98 upstream.
>
> There is no such git id "upstream" :(
>
> Please fix up all of these series with the needed information on the non-upstream patch, and make sure you have the correct git commit ids on your patches.
Sorry, it should be 5b0fab508992c2e120971da658ce80027acbc405. I will correct all of these for all of the 4.9~5.4 trees in the next version.
commit 24f6b6036c9eec21191646930ad42808e6180510 upstream.
Fix dm_table_supports_zoned_model() and invert logic of both iterate_devices_callout_fn so that all devices' zoned capabilities are properly checked.
Add one more parameter to dm_table_any_dev_attr(), which is actually used as the @data parameter of iterate_devices_callout_fn, so that dm_table_matches_zone_sectors() can be replaced by dm_table_any_dev_attr().
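A minimal userspace sketch (illustrative only, with toy types and values rather than kernel code) of how the new @data parameter threads a payload such as zone_sectors through the generic iterator to a negative callout:

#include <stdbool.h>
#include <stdio.h>

struct toy_dev { unsigned int zone_sectors; };

typedef int (*callout_fn)(struct toy_dev *dev, void *data);

/* Generic "any device has the attribute" iterator with a caller-supplied payload. */
static bool any_dev_attr(struct toy_dev *devs, int n, callout_fn fn, void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(&devs[i], data))
			return true;
	return false;
}

/* Negative callout: non-zero means this device does NOT match the expected size. */
static int not_matches_zone_sectors(struct toy_dev *dev, void *data)
{
	unsigned int *zone_sectors = data;

	return dev->zone_sectors != *zone_sectors;
}

int main(void)
{
	struct toy_dev devs[] = { { 524288 }, { 262144 } };
	unsigned int zone_sectors = 524288;

	/* Mirrors the patched validate_hardware_zoned_model(): any mismatching
	 * device makes the whole table inconsistent. */
	if (any_dev_attr(devs, 2, not_matches_zone_sectors, &zone_sectors))
		printf("zone sectors is not consistent across all devices\n");
	return 0;
}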
Fixes: dd88d313bef02 ("dm table: add zoned block devices validation")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
[jeffle: also convert partial completion check]
---
 drivers/md/dm-table.c | 50 +++++++++++++++----------------------------
 1 file changed, 17 insertions(+), 33 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 07530f2aa027..06b382304d92 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1397,10 +1397,10 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
  * should use the iteration structure like dm_table_supports_nowait() or
  * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
  * uses an @anti_func that handle semantics of counter examples, e.g. not
- * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
  */
 static bool dm_table_any_dev_attr(struct dm_table *t,
-				  iterate_devices_callout_fn func)
+				  iterate_devices_callout_fn func, void *data)
 {
 	struct dm_target *ti;
 	unsigned int i;
@@ -1409,7 +1409,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
 		ti = dm_table_get_target(t, i);
 
 		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, func, NULL))
+		    ti->type->iterate_devices(ti, func, data))
 			return true;
 	}
 
@@ -1452,13 +1452,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 	return true;
 }
 
-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
-				 sector_t start, sector_t len, void *data)
+static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	enum blk_zoned_model *zoned_model = data;
 
-	return q && blk_queue_zoned_model(q) == *zoned_model;
+	return !q || blk_queue_zoned_model(q) != *zoned_model;
 }
 
 static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1475,37 +1475,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
 			return false;
 	}
 
 	return true;
 }
 
-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
-				       sector_t start, sector_t len, void *data)
+static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+					   sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;
 
-	return q && blk_queue_zone_sectors(q) == *zone_sectors;
-}
-
-static bool dm_table_matches_zone_sectors(struct dm_table *t,
-					  unsigned int zone_sectors)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
-			return false;
-	}
-
-	return true;
+	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
 static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1525,7 +1508,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 	if (!zone_sectors || !is_power_of_2(zone_sectors))
 		return -EINVAL;
 
-	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
 		DMERR("%s: zone sectors is not consistent across all devices",
 		      dm_device_name(table->md));
 		return -EINVAL;
@@ -1742,7 +1725,7 @@ static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
 {
-	return !dm_table_any_dev_attr(t, device_is_partial_completion);
+	return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1909,11 +1892,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
 
-	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled))
+	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
-	if (dm_table_any_dev_attr(t, device_is_rotational))
+	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 	else
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -1932,7 +1915,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * them as well.  Only targets that support iterate_devices are considered:
 	 * don't want error, zero, etc to require stable pages.
 	 */
-	if (dm_table_any_dev_attr(t, device_requires_stable_pages))
+	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 	else
 		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
@@ -1943,7 +1926,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
-	if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
+	if (blk_queue_add_random(q) &&
+	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	/*
On Fri, Mar 05, 2021 at 02:57:18PM +0800, Jeffle Xu wrote:
> - patch 1/3/4 is from upstream
> - patch 2 is to fix the code specific to 4.4 (has been removed in upstream)
What commit removed it? Why not just take that instead?
thanks,
greg k-h