commit a4c8dd9c2d0987cf542a2a0c42684c9c6d78a04e upstream.
According to the definition of dm_iterate_devices_fn:
* This function must iterate through each section of device used by the
* target until it encounters a non-zero return code, which it then returns.
* Returns zero if no callout returned non-zero.
For some target types (e.g. dm-stripe), one call of iterate_devices() may
iterate over multiple underlying devices internally, in which case a
non-zero return code from iterate_devices_callout_fn will stop the
iteration early. Therefore, no iterate_devices_callout_fn should return
non-zero unless device iteration should stop.
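As an illustration of the hazard (a minimal sketch mirroring the old
predicate removed by the diff below; the name old_device_is_nonrot is
ours): with a positive callout, a multi-device target such as dm-stripe
stops at the first device that satisfies the check, so an "all devices"
test can pass without ever examining the remaining devices.

	/* Hypothetical sketch, not code from this patch: the old-style
	 * positive predicate. If stripe device 0 is an SSD, this returns 1
	 * there, stripe's iterate_devices() stops, and the whole target is
	 * treated as non-rotational even if device 1 is a spinning disk
	 * that was never examined.
	 */
	static int old_device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
					sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		return q && blk_queue_nonrot(q); /* non-zero stops the iteration */
	}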
Rename dm_table_requires_stable_pages() to dm_table_any_dev_attr() and
elevate it for general reuse: it stops iterating (and returns non-zero) on
the first device for which iterate_devices_callout_fn returns non-zero.
Use dm_table_any_dev_attr() to properly iterate through devices.
Rename device_is_nonrot() to device_is_rotational() and invert its logic
accordingly, so that a non-zero return now marks a counter-example (a
rotational device) and correctly stops the iteration.
[jeffle: backport notes]
No stable writes in this kernel. Also convert the no_sg_merge capability
check, which was introduced by commit 200612ec33e5 ("dm table: propagate
QUEUE_FLAG_NO_SG_MERGE") and removed by commit 2705c93742e9 ("block:
kill QUEUE_FLAG_NO_SG_MERGE") in v5.1.
Fixes: c3c4555edd10 ("dm table: clear add_random unless all devices have it set")
Fixes: 4693c9668fdc ("dm table: propagate non rotational flag")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
drivers/md/dm-table.c | 83 +++++++++++++++++++++++++++----------------
1 file changed, 53 insertions(+), 30 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a5a6c7f073af..7ee520d4d216 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1210,6 +1210,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned either by the input iterate_devices_callout_fn or by
+ * iterate_devices() itself internally.
+ *
+ * For some target types (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device to support some kind of attribute
+ * should use an iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle the semantics of positive examples,
+ * e.g. being capable of something.
+ *
+ * Cases requiring _all_ underlying devices to support some kind of attribute
+ * should use an iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce a dm_table_all_devs_attr() that
+ * uses an @anti_func handling the semantics of counter-examples, e.g. not
+ * being capable of something: return !dm_table_any_dev_attr(t, anti_func);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+ iterate_devices_callout_fn func)
+{
+ struct dm_target *ti;
+ unsigned int i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, func, NULL))
+ return true;
+ }
+
+ return false;
+}
+
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1380,12 +1420,12 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t)
return true;
}
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_nonrot(q);
+ return q && !blk_queue_nonrot(q);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1396,29 +1436,12 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
return q && !blk_queue_add_random(q);
}
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
-}
-
-static bool dm_table_all_devices_attribute(struct dm_table *t,
- iterate_devices_callout_fn func)
-{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < dm_table_get_num_targets(t)) {
- ti = dm_table_get_target(t, i++);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, func, NULL))
- return false;
- }
-
- return true;
+ return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1511,18 +1534,18 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
q->limits.discard_zeroes_data = 0;
/* Ensure that all underlying devices are non-rotational. */
- if (dm_table_all_devices_attribute(t, device_is_nonrot))
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
- else
+ if (dm_table_any_dev_attr(t, device_is_rotational))
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
if (!dm_table_supports_write_same(t))
q->limits.max_write_same_sectors = 0;
- if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
- queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
- else
+ if (dm_table_any_dev_attr(t, queue_no_sg_merge))
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
dm_table_verify_integrity(t);
@@ -1532,7 +1555,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
* have it set.
*/
- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+ if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
/*
--
2.27.0
Fix a couple of issues with setting the gadget's speed for the DWC_usb32 IP.
This issue should not be noticeable with other IPs.
Thinh Nguyen (2):
usb: dwc3: gadget: Set gadget_max_speed when set ssp_rate
usb: dwc3: gadget: Use max speed if unspecified
drivers/usb/dwc3/gadget.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
base-commit: 7f6f181b3e2c4d08f5d693eebe7901a28fab8666
--
2.28.0
The patch titled
Subject: mm/mmap.c: fix the adjusted length error
has been removed from the -mm tree. Its filename was
mm-mmap-fix-the-adjusted-length-error.patch
This patch was dropped because an updated version will be merged
------------------------------------------------------
From: jianhong chen <chenjianhong2@huawei.com>
Subject: mm/mmap.c: fix the adjusted length error
In Linux 4.4, a 32-bit process may fail to allocate a 64M hugepage region
via shmat() even though there is a 64M memory gap in the process.
It is the adjusted search length that causes the problem, introduced by
commit db4fbfb9523c ("mm: vm_unmapped_area() lookup function"). To account
for the worst-case alignment overhead, unmapped_area() and
unmapped_area_topdown() enlarge the search length before searching for an
available vma gap. This enlarged length, the sum of the desired length and
the longest possible alignment offset, can cause a misjudgement if the
system has very little virtual memory left. For example, if the largest
remaining memory gap is exactly 64M, allocating 64M of hugepage memory via
shmat() fails, because the search demands a longer length: the desired
length (64M) plus the longest alignment offset.
To fix this error, calculate the alignment offset of gap_start or gap_end
to obtain an aligned gap_start or gap_end before searching for an
available gap. This way, the search length no longer needs to be adjusted.
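As a sketch of the arithmetic (a standalone user-space demo; the 64M gap
is taken from the example maps below, while the 2M alignment and zero
align_offset are assumptions for illustration, not part of this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long gap_start  = 0xf7a00000UL; /* 64M gap, see maps */
		unsigned long gap_end    = 0xfba00000UL;
		unsigned long want       = 64UL << 20;   /* desired length */
		unsigned long align_mask = (2UL << 20) - 1; /* assumed 2M */
		unsigned long align_offset = 0;

		/* Old search: length inflated by worst-case alignment slack. */
		unsigned long old_len = want + align_mask; /* ~66M > 64M gap */

		/* New search: align gap_start first, then test the real length. */
		unsigned long aligned = gap_start +
				((align_offset - gap_start) & align_mask);

		printf("old: %s\n",
		       old_len <= gap_end - gap_start ? "fits" : "fails");
		printf("new: %s\n",
		       want <= gap_end - aligned ? "fits" : "fails");
		return 0;
	}

With these numbers the old calculation rejects the gap ("fails") while
the new one accepts it ("fits"), since gap_start is already aligned and
only the real 64M length is required.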
Procedure to reproduce the problem:
1. allocate many virtual memory segments via shmat and malloc
2. release one of the biggest memory segments via shmdt
3. re-attach the biggest memory segment via shmat
e.g.
process maps:
00008000-00009000 r-xp 00000000 00:12 3385 /tmp/memory_mmap
00011000-00012000 rw-p 00001000 00:12 3385 /tmp/memory_mmap
27536000-f756a000 rw-p 00000000 00:00 0
f756a000-f7691000 r-xp 00000000 01:00 560 /lib/libc-2.11.1.so
f7691000-f7699000 ---p 00127000 01:00 560 /lib/libc-2.11.1.so
f7699000-f769b000 r--p 00127000 01:00 560 /lib/libc-2.11.1.so
f769b000-f769c000 rw-p 00129000 01:00 560 /lib/libc-2.11.1.so
f769c000-f769f000 rw-p 00000000 00:00 0
f769f000-f76c0000 r-xp 00000000 01:00 583 /lib/libgcc_s.so.1
f76c0000-f76c7000 ---p 00021000 01:00 583 /lib/libgcc_s.so.1
f76c7000-f76c8000 rw-p 00020000 01:00 583 /lib/libgcc_s.so.1
f76c8000-f76e5000 r-xp 00000000 01:00 543 /lib/ld-2.11.1.so
f76e9000-f76ea000 rw-p 00000000 00:00 0
f76ea000-f76ec000 rw-p 00000000 00:00 0
f76ec000-f76ed000 r--p 0001c000 01:00 543 /lib/ld-2.11.1.so
f76ed000-f76ee000 rw-p 0001d000 01:00 543 /lib/ld-2.11.1.so
f7800000-f7a00000 rw-s 00000000 00:0e 0 /SYSV000000ea (deleted)
fba00000-fca00000 rw-s 00000000 00:0e 65538 /SYSV000000ec (deleted)
fca00000-fce00000 rw-s 00000000 00:0e 98307 /SYSV000000ed (deleted)
fce00000-fd800000 rw-s 00000000 00:0e 131076 /SYSV000000ee (deleted)
ff913000-ff934000 rw-p 00000000 00:00 0 [stack]
ffff0000-ffff1000 r-xp 00000000 00:00 0 [vectors]
From 0xf7a00000 to 0xfba00000 there is a 64M memory gap, but we cannot
get it from the kernel.
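A minimal reproducer for step 3 might look like the following sketch
(hypothetical; it assumes steps 1 and 2 above have already fragmented the
address space so that only the 64M gap remains, and that a hugepage pool
large enough for SHM_HUGETLB is configured):

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		/* Step 3: attach a 64M hugepage segment into the last 64M gap. */
		int id = shmget(IPC_PRIVATE, 64UL << 20,
				IPC_CREAT | SHM_HUGETLB | 0600);
		if (id < 0) {
			perror("shmget");
			return 1;
		}

		void *p = shmat(id, NULL, 0);
		if (p == (void *)-1)
			perror("shmat"); /* ENOMEM on affected kernels */
		else
			shmdt(p);
		shmctl(id, IPC_RMID, NULL);
		return 0;
	}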
Link: http://lkml.kernel.org/r/1558073209-79549-1-git-send-email-chenjianhong2@hu…
Fixes: db4fbfb9523c ("mm: vm_unmapped_area() lookup function")
Signed-off-by: jianhong chen <chenjianhong2@huawei.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Jann Horn <jannh@google.com>
Cc: Steve Capper <steve.capper@arm.com>
Cc: Yangtao Li <tiny.windzz@gmail.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/mmap.c | 43 +++++++++++++++++++++++++++++--------------
1 file changed, 29 insertions(+), 14 deletions(-)
--- a/mm/mmap.c~mm-mmap-fix-the-adjusted-length-error
+++ a/mm/mmap.c
@@ -1914,6 +1914,22 @@ unacct_error:
return error;
}
+static inline unsigned long gap_start_offset(struct vm_unmapped_area_info *info,
+ unsigned long addr)
+{
+ /* get gap_start offset to adjust gap address to the
+ * desired alignment
+ */
+ return (info->align_offset - addr) & info->align_mask;
+}
+
+static inline unsigned long gap_end_offset(struct vm_unmapped_area_info *info,
+ unsigned long addr)
+{
+ /* get gap_end offset to adjust gap address to the desired alignment */
+ return (addr - info->align_offset) & info->align_mask;
+}
+
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
/*
@@ -1928,10 +1944,7 @@ static unsigned long unmapped_area(struc
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
- /* Adjust search length to account for worst case alignment overhead */
- length = info->length + info->align_mask;
- if (length < info->length)
- return -ENOMEM;
+ length = info->length;
/* Adjust search limits by the desired length */
if (info->high_limit < length)
@@ -1963,6 +1976,7 @@ static unsigned long unmapped_area(struc
}
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+ gap_start += gap_start_offset(info, gap_start);
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
@@ -1991,6 +2005,7 @@ check_current:
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
gap_start = vm_end_gap(vma->vm_prev);
+ gap_start += gap_start_offset(info, gap_start);
gap_end = vm_start_gap(vma);
goto check_current;
}
@@ -2000,17 +2015,17 @@ check_current:
check_highest:
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
+ gap_start += gap_start_offset(info, gap_start);
gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
if (gap_start > high_limit)
return -ENOMEM;
found:
/* We found a suitable gap. Clip it with the original low_limit. */
- if (gap_start < info->low_limit)
+ if (gap_start < info->low_limit) {
gap_start = info->low_limit;
-
- /* Adjust gap address to the desired alignment */
- gap_start += (info->align_offset - gap_start) & info->align_mask;
+ gap_start += gap_start_offset(info, gap_start);
+ }
VM_BUG_ON(gap_start + info->length > info->high_limit);
VM_BUG_ON(gap_start + info->length > gap_end);
@@ -2023,16 +2038,14 @@ static unsigned long unmapped_area_topdo
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
- /* Adjust search length to account for worst case alignment overhead */
- length = info->length + info->align_mask;
- if (length < info->length)
- return -ENOMEM;
+ length = info->length;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
*/
gap_end = info->high_limit;
+ gap_end -= gap_end_offset(info, gap_end);
if (gap_end < length)
return -ENOMEM;
high_limit = gap_end - length;
@@ -2069,6 +2082,7 @@ static unsigned long unmapped_area_topdo
check_current:
/* Check if current node has a suitable gap */
gap_end = vm_start_gap(vma);
+ gap_end -= gap_end_offset(info, gap_end);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit &&
@@ -2103,13 +2117,14 @@ check_current:
found:
/* We found a suitable gap. Clip it with the original high_limit. */
- if (gap_end > info->high_limit)
+ if (gap_end > info->high_limit) {
gap_end = info->high_limit;
+ gap_end -= gap_end_offset(info, gap_end);
+ }
found_highest:
/* Compute highest gap address at the desired alignment */
gap_end -= info->length;
- gap_end -= (gap_end - info->align_offset) & info->align_mask;
VM_BUG_ON(gap_end < info->low_limit);
VM_BUG_ON(gap_end < gap_start);
_
Patches currently in -mm which might be from chenjianhong2@huawei.com are
Dear Greg,
Would you consider the KASan for ARM patches for the LTS (5.10) kernel?
Those are 7a1be318f579..421015713b30, if I understand correctly. They are
not normal stable material, but I think they would help tremendously in
discovering kernel bugs on 32-bit ARM.
Best Regards
Michał Mirosław